author    | Vasant Hegde <vasant.hegde@amd.com> | 2024-08-28 13:10:27 +0200
committer | Joerg Roedel <jroedel@suse.de>      | 2024-09-04 11:35:56 +0200
commit    | 964877dc26232835d4465d9565399fe8ca4525e8 (patch)
tree      | 3677a2a5cef33335c07cae29ef5e364c54420387
parent    | iommu/amd: Make amd_iommu_dev_flush_pasid_all() static (diff)
download  | linux-964877dc26232835d4465d9565399fe8ca4525e8.tar.xz
          | linux-964877dc26232835d4465d9565399fe8ca4525e8.zip
iommu/amd: Make amd_iommu_domain_flush_complete() static
The AMD driver uses amd_iommu_domain_flush_complete() to make sure the
IOMMU has processed invalidation commands before proceeding. Ideally this
should be called from the functions that update the DTE or invalidate
caches, so there is no need to call it explicitly elsewhere. This patch
makes the following changes (see the sketch below):
- Rename amd_iommu_domain_flush_complete() -> domain_flush_complete()
  and make it a static function.
- Rearrange domain_flush_complete() to avoid a forward declaration.
- Update amd_iommu_update_and_flush_device_table() to call
  domain_flush_complete().
Signed-off-by: Vasant Hegde <vasant.hegde@amd.com>
Reviewed-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Link: https://lore.kernel.org/r/20240828111029.5429-7-vasant.hegde@amd.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
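For illustration, here is a minimal standalone sketch of the flow this patch consolidates. The types and helpers are simplified stand-ins, not the real kernel structures or API; it only models the idea that, once the device table is updated and the flush commands are queued, the helper itself waits for a completion on every IOMMU that serves the domain.

```c
/*
 * Simplified, self-contained model of the consolidated flush flow.
 * All structures and helpers below are illustrative stand-ins for the
 * kernel code touched by this patch, not the actual AMD IOMMU API.
 */
#include <stdio.h>

#define MAX_IOMMUS 4

struct protection_domain {
	/* Non-zero when at least one device of this domain sits behind IOMMU i */
	int dev_iommu[MAX_IOMMUS];
};

static int num_iommus = MAX_IOMMUS;

static void iommu_completion_wait(int iommu_idx)
{
	/* Stand-in for queuing a COMPLETION_WAIT command and polling it */
	printf("completion wait on IOMMU %d\n", iommu_idx);
}

static void update_device_table(struct protection_domain *domain) { (void)domain; }
static void domain_flush_devices(struct protection_domain *domain) { (void)domain; }

/*
 * Mirrors the now-static domain_flush_complete(): wait only on the
 * IOMMUs that actually have devices of this domain behind them.
 */
static void domain_flush_complete(struct protection_domain *domain)
{
	for (int i = 0; i < num_iommus; ++i) {
		if (domain && !domain->dev_iommu[i])
			continue;
		iommu_completion_wait(i);
	}
}

/*
 * Models amd_iommu_update_and_flush_device_table() after this patch:
 * the completion wait is part of the helper, so callers no longer
 * invoke it explicitly.
 */
static void update_and_flush_device_table(struct protection_domain *domain)
{
	update_device_table(domain);
	domain_flush_devices(domain);
	domain_flush_complete(domain);
}

int main(void)
{
	struct protection_domain dom = { .dev_iommu = { 1, 0, 2, 0 } };

	update_and_flush_device_table(&dom);
	return 0;
}
```

Folding the wait into the device-table helper keeps the DTE update and its completion wait in one place, which is what lets the io_pgtable.c caller drop its explicit call in the diff below.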
-rw-r--r-- | drivers/iommu/amd/amd_iommu.h  |  1
-rw-r--r-- | drivers/iommu/amd/io_pgtable.c |  1
-rw-r--r-- | drivers/iommu/amd/iommu.c      | 37
3 files changed, 19 insertions, 20 deletions
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index d0a24ec3ada2..94402b88789d 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -88,7 +88,6 @@ void amd_iommu_flush_all_caches(struct amd_iommu *iommu);
 void amd_iommu_update_and_flush_device_table(struct protection_domain *domain);
 void amd_iommu_domain_update(struct protection_domain *domain);
 void amd_iommu_dev_update_dte(struct iommu_dev_data *dev_data, bool set);
-void amd_iommu_domain_flush_complete(struct protection_domain *domain);
 void amd_iommu_domain_flush_pages(struct protection_domain *domain,
				  u64 address, size_t size);
 void amd_iommu_dev_flush_pasid_pages(struct iommu_dev_data *dev_data,
diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c
index 1074ee25064d..bfbcec68efb9 100644
--- a/drivers/iommu/amd/io_pgtable.c
+++ b/drivers/iommu/amd/io_pgtable.c
@@ -175,7 +175,6 @@ static bool increase_address_space(struct protection_domain *domain,
 	domain->iop.root = pte;
 	domain->iop.mode += 1;
 	amd_iommu_update_and_flush_device_table(domain);
-	amd_iommu_domain_flush_complete(domain);
 
 	/*
 	 * Device Table needs to be updated and flushed before the new root can
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index ddd63c2b6594..9af084fa6dd4 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -1249,6 +1249,22 @@ out_unlock:
 	return ret;
 }
 
+static void domain_flush_complete(struct protection_domain *domain)
+{
+	int i;
+
+	for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
+		if (domain && !domain->dev_iommu[i])
+			continue;
+
+		/*
+		 * Devices of this domain are behind this IOMMU
+		 * We need to wait for completion of all commands.
+		 */
+		iommu_completion_wait(amd_iommus[i]);
+	}
+}
+
 static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid)
 {
 	struct iommu_cmd cmd;
@@ -1485,7 +1501,7 @@ void amd_iommu_domain_flush_pages(struct protection_domain *domain,
 		__domain_flush_pages(domain, address, size);
 
 		/* Wait until IOMMU TLB and all device IOTLB flushes are complete */
-		amd_iommu_domain_flush_complete(domain);
+		domain_flush_complete(domain);
 
 		return;
 	}
@@ -1525,7 +1541,7 @@ void amd_iommu_domain_flush_pages(struct protection_domain *domain,
 	}
 
 	/* Wait until IOMMU TLB and all device IOTLB flushes are complete */
-	amd_iommu_domain_flush_complete(domain);
+	domain_flush_complete(domain);
 }
 
 /* Flush the whole IO/TLB for a given protection domain - including PDE */
@@ -1558,22 +1574,6 @@ static void dev_flush_pasid_all(struct iommu_dev_data *dev_data,
 			CMD_INV_IOMMU_ALL_PAGES_ADDRESS, pasid);
 }
 
-void amd_iommu_domain_flush_complete(struct protection_domain *domain)
-{
-	int i;
-
-	for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
-		if (domain && !domain->dev_iommu[i])
-			continue;
-
-		/*
-		 * Devices of this domain are behind this IOMMU
-		 * We need to wait for completion of all commands.
-		 */
-		iommu_completion_wait(amd_iommus[i]);
-	}
-}
-
 /* Flush the not present cache if it exists */
 static void domain_flush_np_cache(struct protection_domain *domain,
 				  dma_addr_t iova, size_t size)
@@ -1615,6 +1615,7 @@ void amd_iommu_update_and_flush_device_table(struct protection_domain *domain)
 {
 	update_device_table(domain);
 	domain_flush_devices(domain);
+	domain_flush_complete(domain);
 }
 
 void amd_iommu_domain_update(struct protection_domain *domain)