@@ -1509,6 +1509,20 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
 	spin_unlock_irqrestore(&device_domain_lock, flags);
 }
 
+static void domain_flush_piotlb(struct intel_iommu *iommu,
+				struct dmar_domain *domain,
+				u64 addr, unsigned long npages, bool ih)
+{
+	u16 did = domain->iommu_did[iommu->seq_id];
+
+	if (domain->default_pasid)
+		qi_flush_piotlb(iommu, did, domain->default_pasid,
+				addr, npages, ih);
+
+	if (!list_empty(&domain->devices))
+		qi_flush_piotlb(iommu, did, PASID_RID2PASID, addr, npages, ih);
+}
+
 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
 				  struct dmar_domain *domain,
 				  unsigned long pfn, unsigned int pages,
@@ -1522,18 +1536,23 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
 
 	if (ih)
 		ih = 1 << 6;
-	/*
-	 * Fallback to domain selective flush if no PSI support or the size is
-	 * too big.
-	 * PSI requires page size to be 2 ^ x, and the base address is naturally
-	 * aligned to the size
-	 */
-	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
-		iommu->flush.flush_iotlb(iommu, did, 0, 0,
-					 DMA_TLB_DSI_FLUSH);
-	else
-		iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
-					 DMA_TLB_PSI_FLUSH);
+
+	if (domain_use_first_level(domain)) {
+		domain_flush_piotlb(iommu, domain, addr, pages, ih);
+	} else {
+		/*
+		 * Fallback to domain selective flush if no PSI support or
+		 * the size is too big. PSI requires page size to be 2 ^ x,
+		 * and the base address is naturally aligned to the size.
+		 */
+		if (!cap_pgsel_inv(iommu->cap) ||
+		    mask > cap_max_amask_val(iommu->cap))
+			iommu->flush.flush_iotlb(iommu, did, 0, 0,
+						 DMA_TLB_DSI_FLUSH);
+		else
+			iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
+						 DMA_TLB_PSI_FLUSH);
+	}
 
 	/*
 	 * In caching mode, changes of pages from non-present to present require
@@ -1548,8 +1567,11 @@ static inline void __mapping_notify_one(struct intel_iommu *iommu,
 					struct dmar_domain *domain,
 					unsigned long pfn, unsigned int pages)
 {
-	/* It's a non-present to present mapping. Only flush if caching mode */
-	if (cap_caching_mode(iommu->cap))
+	/*
+	 * It's a non-present to present mapping. Only flush if caching mode
+	 * and second level.
+	 */
+	if (cap_caching_mode(iommu->cap) && !domain_use_first_level(domain))
 		iommu_flush_iotlb_psi(iommu, domain, pfn, pages, 0, 1);
 	else
 		iommu_flush_write_buffer(iommu);
@@ -1566,7 +1588,11 @@ static void iommu_flush_iova(struct iova_domain *iovad)
 		struct intel_iommu *iommu = g_iommus[idx];
 		u16 did = domain->iommu_did[iommu->seq_id];
 
-		iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
+		if (domain_use_first_level(domain))
+			domain_flush_piotlb(iommu, domain, 0, -1, 0);
+		else
+			iommu->flush.flush_iotlb(iommu, did, 0, 0,
+						 DMA_TLB_DSI_FLUSH);
 
 		if (!cap_caching_mode(iommu->cap))
 			iommu_flush_dev_iotlb(get_iommu_domain(iommu, did),
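
Note: the sketch below is not part of the patch; it is a minimal, standalone illustration of the flush-selection logic this change introduces. Domains that use first-level (scalable-mode) translation are invalidated with PASID-based IOTLB flushes (the domain's default PASID, if set, plus RID2PASID for attached devices), while second-level domains keep the legacy DSI/PSI IOTLB flush. The types and helpers used here (mock_domain, flush_piotlb, flush_iotlb_legacy, flush_domain, has_devices) are simplified stand-ins for illustration, not the kernel definitions.

/*
 * Standalone sketch, not kernel code: models the flush-selection logic
 * added by this patch.  All structs and flush functions below are
 * simplified assumptions; the real ones live in drivers/iommu/.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PASID_RID2PASID	0	/* PASID used for requests without PASID */

struct mock_domain {
	uint16_t did;		/* domain id on this IOMMU */
	uint32_t default_pasid;	/* nonzero when a default PASID is in use */
	bool     has_devices;	/* stands in for !list_empty(&domain->devices) */
	bool     first_level;	/* stands in for domain_use_first_level() */
};

/* Stub for the PASID-based IOTLB invalidation (qi_flush_piotlb in the kernel). */
static void flush_piotlb(uint16_t did, uint32_t pasid, uint64_t addr,
			 unsigned long npages, bool ih)
{
	printf("PASID-based flush: did=%u pasid=%u addr=0x%llx npages=%lu ih=%d\n",
	       did, pasid, (unsigned long long)addr, npages, ih);
}

/* Stub for the legacy IOTLB flush (iommu->flush.flush_iotlb in the kernel). */
static void flush_iotlb_legacy(uint16_t did, const char *granularity)
{
	printf("legacy IOTLB flush: did=%u granularity=%s\n", did, granularity);
}

/*
 * Mirrors domain_flush_piotlb(): flush the default PASID (if any) and the
 * RID2PASID entries used by the devices attached to the domain.
 */
static void domain_flush_piotlb(struct mock_domain *domain, uint64_t addr,
				unsigned long npages, bool ih)
{
	if (domain->default_pasid)
		flush_piotlb(domain->did, domain->default_pasid, addr, npages, ih);

	if (domain->has_devices)
		flush_piotlb(domain->did, PASID_RID2PASID, addr, npages, ih);
}

/* Mirrors the branch added to iommu_flush_iotlb_psi()/iommu_flush_iova(). */
static void flush_domain(struct mock_domain *domain, uint64_t addr,
			 unsigned long npages)
{
	if (domain->first_level)
		domain_flush_piotlb(domain, addr, npages, false);
	else
		flush_iotlb_legacy(domain->did, "DSI");
}

int main(void)
{
	struct mock_domain fl = { .did = 1, .default_pasid = 5,
				  .has_devices = true, .first_level = true };
	struct mock_domain sl = { .did = 2, .has_devices = true,
				  .first_level = false };

	flush_domain(&fl, 0x1000, 4);	/* takes the PASID-based path */
	flush_domain(&sl, 0x1000, 4);	/* falls back to the legacy IOTLB flush */
	return 0;
}

Built with any standard C compiler, running it prints the two PASID-based flushes for the first-level domain and the single legacy flush for the second-level one, which is the behavioral split the patch introduces.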