author     Joerg Roedel <jroedel@suse.de>  2024-10-30 11:02:48 +0100
committer  Joerg Roedel <jroedel@suse.de>  2024-10-30 11:02:48 +0100
commit     556af583d256f0407b6e68e2586ce9b995b8d763 (patch)
tree       b6be4b198b246389405b674b7f64346310ce1957 /drivers/iommu
parent     iommu/amd: Fix corruption when mapping large pages from 0 (diff)
parent     iommu: Restore iommu_flush_iotlb_all() (diff)
download   linux-556af583d256f0407b6e68e2586ce9b995b8d763.tar.xz
           linux-556af583d256f0407b6e68e2586ce9b995b8d763.zip
Merge branch 'core' into amd/amd-vi
Diffstat (limited to 'drivers/iommu')
-rw-r--r--  drivers/iommu/amd/amd_iommu.h                  8
-rw-r--r--  drivers/iommu/amd/init.c                      18
-rw-r--r--  drivers/iommu/amd/iommu.c                    141
-rw-r--r--  drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c    6
-rw-r--r--  drivers/iommu/iommu-sysfs.c                    2
-rw-r--r--  drivers/iommu/iommu.c                        233
6 files changed, 229 insertions, 179 deletions
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index 6386fa4556d9..38509e1019e9 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -46,6 +46,7 @@ extern int amd_iommu_gpt_level;
extern unsigned long amd_iommu_pgsize_bitmap;
/* Protection domain ops */
+void amd_iommu_init_identity_domain(void);
struct protection_domain *protection_domain_alloc(unsigned int type, int nid);
void protection_domain_free(struct protection_domain *domain);
struct iommu_domain *amd_iommu_domain_alloc_sva(struct device *dev,
@@ -118,9 +119,14 @@ static inline bool check_feature2(u64 mask)
return (amd_iommu_efr2 & mask);
}
+static inline bool amd_iommu_v2_pgtbl_supported(void)
+{
+ return (check_feature(FEATURE_GIOSUP) && check_feature(FEATURE_GT));
+}
+
static inline bool amd_iommu_gt_ppr_supported(void)
{
- return (check_feature(FEATURE_GT) &&
+ return (amd_iommu_v2_pgtbl_supported() &&
check_feature(FEATURE_PPR) &&
check_feature(FEATURE_EPHSUP));
}
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 3fa70169aace..ca7ae13968e3 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -2070,14 +2070,6 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
init_iommu_perf_ctr(iommu);
- if (amd_iommu_pgtable == AMD_IOMMU_V2) {
- if (!check_feature(FEATURE_GIOSUP) ||
- !check_feature(FEATURE_GT)) {
- pr_warn("Cannot enable v2 page table for DMA-API. Fallback to v1.\n");
- amd_iommu_pgtable = AMD_IOMMU_V1;
- }
- }
-
if (is_rd890_iommu(iommu->dev)) {
int i, j;
@@ -2172,6 +2164,9 @@ static int __init amd_iommu_init_pci(void)
struct amd_iommu_pci_seg *pci_seg;
int ret;
+ /* Init global identity domain before registering IOMMU */
+ amd_iommu_init_identity_domain();
+
for_each_iommu(iommu) {
ret = iommu_init_pci(iommu);
if (ret) {
@@ -3087,6 +3082,13 @@ static int __init early_amd_iommu_init(void)
FIELD_GET(FEATURE_GATS, amd_iommu_efr) == GUEST_PGTABLE_5_LEVEL)
amd_iommu_gpt_level = PAGE_MODE_5_LEVEL;
+ if (amd_iommu_pgtable == AMD_IOMMU_V2) {
+ if (!amd_iommu_v2_pgtbl_supported()) {
+ pr_warn("Cannot enable v2 page table for DMA-API. Fallback to v1.\n");
+ amd_iommu_pgtable = AMD_IOMMU_V1;
+ }
+ }
+
/* Disable any previously enabled IOMMUs */
if (!is_kdump_kernel() || amd_iommu_disabled)
disable_iommus();
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 074effba7fbe..35af5b470421 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -74,6 +74,9 @@ struct kmem_cache *amd_iommu_irq_cache;
static void detach_device(struct device *dev);
+static int amd_iommu_attach_device(struct iommu_domain *dom,
+ struct device *dev);
+
static void set_dte_entry(struct amd_iommu *iommu,
struct iommu_dev_data *dev_data);
@@ -2263,43 +2266,41 @@ void protection_domain_free(struct protection_domain *domain)
kfree(domain);
}
+static void protection_domain_init(struct protection_domain *domain, int nid)
+{
+ spin_lock_init(&domain->lock);
+ INIT_LIST_HEAD(&domain->dev_list);
+ INIT_LIST_HEAD(&domain->dev_data_list);
+ domain->iop.pgtbl.cfg.amd.nid = nid;
+}
+
struct protection_domain *protection_domain_alloc(unsigned int type, int nid)
{
- struct io_pgtable_ops *pgtbl_ops;
struct protection_domain *domain;
- int pgtable;
domain = kzalloc(sizeof(*domain), GFP_KERNEL);
if (!domain)
return NULL;
domain->id = domain_id_alloc();
- if (!domain->id)
- goto err_free;
+ if (!domain->id) {
+ kfree(domain);
+ return NULL;
+ }
- spin_lock_init(&domain->lock);
- INIT_LIST_HEAD(&domain->dev_list);
- INIT_LIST_HEAD(&domain->dev_data_list);
- domain->iop.pgtbl.cfg.amd.nid = nid;
+ protection_domain_init(domain, nid);
+
+ return domain;
+}
+
+static int pdom_setup_pgtable(struct protection_domain *domain,
+ unsigned int type, int pgtable)
+{
+ struct io_pgtable_ops *pgtbl_ops;
- switch (type) {
/* No need to allocate io pgtable ops in passthrough mode */
- case IOMMU_DOMAIN_IDENTITY:
- case IOMMU_DOMAIN_SVA:
- return domain;
- case IOMMU_DOMAIN_DMA:
- pgtable = amd_iommu_pgtable;
- break;
- /*
- * Force IOMMU v1 page table when allocating
- * domain for pass-through devices.
- */
- case IOMMU_DOMAIN_UNMANAGED:
- pgtable = AMD_IOMMU_V1;
- break;
- default:
- goto err_id;
- }
+ if (!(type & __IOMMU_DOMAIN_PAGING))
+ return 0;
switch (pgtable) {
case AMD_IOMMU_V1:
@@ -2309,25 +2310,20 @@ struct protection_domain *protection_domain_alloc(unsigned int type, int nid)
domain->pd_mode = PD_MODE_V2;
break;
default:
- goto err_id;
+ return -EINVAL;
}
pgtbl_ops =
alloc_io_pgtable_ops(pgtable, &domain->iop.pgtbl.cfg, domain);
if (!pgtbl_ops)
- goto err_id;
+ return -ENOMEM;
- return domain;
-err_id:
- domain_id_free(domain->id);
-err_free:
- kfree(domain);
- return NULL;
+ return 0;
}
-static inline u64 dma_max_address(void)
+static inline u64 dma_max_address(int pgtable)
{
- if (amd_iommu_pgtable == AMD_IOMMU_V1)
+ if (pgtable == AMD_IOMMU_V1)
return ~0ULL;
/* V2 with 4/5 level page table */
@@ -2340,11 +2336,13 @@ static bool amd_iommu_hd_support(struct amd_iommu *iommu)
}
static struct iommu_domain *do_iommu_domain_alloc(unsigned int type,
- struct device *dev, u32 flags)
+ struct device *dev,
+ u32 flags, int pgtable)
{
bool dirty_tracking = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
struct protection_domain *domain;
struct amd_iommu *iommu = NULL;
+ int ret;
if (dev)
iommu = get_amd_iommu_from_dev(dev);
@@ -2356,16 +2354,20 @@ static struct iommu_domain *do_iommu_domain_alloc(unsigned int type,
if (amd_iommu_snp_en && (type == IOMMU_DOMAIN_IDENTITY))
return ERR_PTR(-EINVAL);
- if (dirty_tracking && !amd_iommu_hd_support(iommu))
- return ERR_PTR(-EOPNOTSUPP);
-
domain = protection_domain_alloc(type,
dev ? dev_to_node(dev) : NUMA_NO_NODE);
if (!domain)
return ERR_PTR(-ENOMEM);
+ ret = pdom_setup_pgtable(domain, type, pgtable);
+ if (ret) {
+ domain_id_free(domain->id);
+ kfree(domain);
+ return ERR_PTR(ret);
+ }
+
domain->domain.geometry.aperture_start = 0;
- domain->domain.geometry.aperture_end = dma_max_address();
+ domain->domain.geometry.aperture_end = dma_max_address(pgtable);
domain->domain.geometry.force_aperture = true;
domain->domain.pgsize_bitmap = domain->iop.pgtbl.cfg.pgsize_bitmap;
@@ -2383,8 +2385,16 @@ static struct iommu_domain *do_iommu_domain_alloc(unsigned int type,
static struct iommu_domain *amd_iommu_domain_alloc(unsigned int type)
{
struct iommu_domain *domain;
+ int pgtable = amd_iommu_pgtable;
- domain = do_iommu_domain_alloc(type, NULL, 0);
+ /*
+ * Force IOMMU v1 page table when allocating
+ * domain for pass-through devices.
+ */
+ if (type == IOMMU_DOMAIN_UNMANAGED)
+ pgtable = AMD_IOMMU_V1;
+
+ domain = do_iommu_domain_alloc(type, NULL, 0, pgtable);
if (IS_ERR(domain))
return NULL;
@@ -2398,11 +2408,36 @@ amd_iommu_domain_alloc_user(struct device *dev, u32 flags,
{
unsigned int type = IOMMU_DOMAIN_UNMANAGED;
+ struct amd_iommu *iommu = NULL;
+ const u32 supported_flags = IOMMU_HWPT_ALLOC_DIRTY_TRACKING |
+ IOMMU_HWPT_ALLOC_PASID;
+
+ if (dev)
+ iommu = get_amd_iommu_from_dev(dev);
- if ((flags & ~IOMMU_HWPT_ALLOC_DIRTY_TRACKING) || parent || user_data)
+ if ((flags & ~supported_flags) || parent || user_data)
return ERR_PTR(-EOPNOTSUPP);
- return do_iommu_domain_alloc(type, dev, flags);
+ /* Allocate domain with v2 page table if IOMMU supports PASID. */
+ if (flags & IOMMU_HWPT_ALLOC_PASID) {
+ if (!amd_iommu_pasid_supported())
+ return ERR_PTR(-EOPNOTSUPP);
+
+ return do_iommu_domain_alloc(type, dev, flags, AMD_IOMMU_V2);
+ }
+
+ /* Allocate domain with v1 page table for dirty tracking */
+ if (flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING) {
+ if (iommu && amd_iommu_hd_support(iommu)) {
+ return do_iommu_domain_alloc(type, dev,
+ flags, AMD_IOMMU_V1);
+ }
+
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ /* If nothing specific is required use the kernel commandline default */
+ return do_iommu_domain_alloc(type, dev, 0, amd_iommu_pgtable);
}
void amd_iommu_domain_free(struct iommu_domain *dom)
@@ -2444,6 +2479,25 @@ static struct iommu_domain blocked_domain = {
}
};
+static struct protection_domain identity_domain;
+
+static const struct iommu_domain_ops identity_domain_ops = {
+ .attach_dev = amd_iommu_attach_device,
+};
+
+void amd_iommu_init_identity_domain(void)
+{
+ struct iommu_domain *domain = &identity_domain.domain;
+
+ domain->type = IOMMU_DOMAIN_IDENTITY;
+ domain->ops = &identity_domain_ops;
+ domain->owner = &amd_iommu_ops;
+
+ identity_domain.id = domain_id_alloc();
+
+ protection_domain_init(&identity_domain, NUMA_NO_NODE);
+}
+
static int amd_iommu_attach_device(struct iommu_domain *dom,
struct device *dev)
{
@@ -2842,6 +2896,7 @@ static int amd_iommu_dev_disable_feature(struct device *dev,
const struct iommu_ops amd_iommu_ops = {
.capable = amd_iommu_capable,
.blocked_domain = &blocked_domain,
+ .identity_domain = &identity_domain.domain,
.domain_alloc = amd_iommu_domain_alloc,
.domain_alloc_user = amd_iommu_domain_alloc_user,
.domain_alloc_sva = amd_iommu_domain_alloc_sva,
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 737c5b882355..8a193141f003 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -3084,7 +3084,8 @@ arm_smmu_domain_alloc_user(struct device *dev, u32 flags,
const struct iommu_user_data *user_data)
{
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
- const u32 PAGING_FLAGS = IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
+ const u32 PAGING_FLAGS = IOMMU_HWPT_ALLOC_DIRTY_TRACKING |
+ IOMMU_HWPT_ALLOC_PASID;
struct arm_smmu_domain *smmu_domain;
int ret;
@@ -3093,6 +3094,9 @@ arm_smmu_domain_alloc_user(struct device *dev, u32 flags,
if (parent || user_data)
return ERR_PTR(-EOPNOTSUPP);
+ if (flags & IOMMU_HWPT_ALLOC_PASID)
+ return arm_smmu_domain_alloc_paging(dev);
+
smmu_domain = arm_smmu_domain_alloc();
if (IS_ERR(smmu_domain))
return ERR_CAST(smmu_domain);
diff --git a/drivers/iommu/iommu-sysfs.c b/drivers/iommu/iommu-sysfs.c
index cbe378c34ba3..170022c09536 100644
--- a/drivers/iommu/iommu-sysfs.c
+++ b/drivers/iommu/iommu-sysfs.c
@@ -34,7 +34,7 @@ static void release_device(struct device *dev)
kfree(dev);
}
-static struct class iommu_class = {
+static const struct class iommu_class = {
.name = "iommu",
.dev_release = release_device,
.dev_groups = dev_groups,
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 83c8e617a2c5..0d08a3256085 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -32,6 +32,7 @@
#include <trace/events/iommu.h>
#include <linux/sched/mm.h>
#include <linux/msi.h>
+#include <uapi/linux/iommufd.h>
#include "dma-iommu.h"
#include "iommu-priv.h"
@@ -93,12 +94,13 @@ static const char * const iommu_group_resv_type_string[] = {
static int iommu_bus_notifier(struct notifier_block *nb,
unsigned long action, void *data);
static void iommu_release_device(struct device *dev);
-static struct iommu_domain *
-__iommu_group_domain_alloc(struct iommu_group *group, unsigned int type);
static int __iommu_attach_device(struct iommu_domain *domain,
struct device *dev);
static int __iommu_attach_group(struct iommu_domain *domain,
struct iommu_group *group);
+static struct iommu_domain *__iommu_paging_domain_alloc_flags(struct device *dev,
+ unsigned int type,
+ unsigned int flags);
enum {
IOMMU_SET_DOMAIN_MUST_SUCCEED = 1 << 0,
@@ -133,6 +135,8 @@ static struct group_device *iommu_group_alloc_device(struct iommu_group *group,
struct device *dev);
static void __iommu_group_free_device(struct iommu_group *group,
struct group_device *grp_dev);
+static void iommu_domain_init(struct iommu_domain *domain, unsigned int type,
+ const struct iommu_ops *ops);
#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \
struct iommu_group_attribute iommu_group_attr_##_name = \
@@ -1141,10 +1145,6 @@ map_end:
}
}
-
- if (!list_empty(&mappings) && iommu_is_dma_domain(domain))
- iommu_flush_iotlb_all(domain);
-
out:
iommu_put_resv_regions(dev, &mappings);
@@ -1586,12 +1586,59 @@ struct iommu_group *fsl_mc_device_group(struct device *dev)
}
EXPORT_SYMBOL_GPL(fsl_mc_device_group);
+static struct iommu_domain *__iommu_alloc_identity_domain(struct device *dev)
+{
+ const struct iommu_ops *ops = dev_iommu_ops(dev);
+ struct iommu_domain *domain;
+
+ if (ops->identity_domain)
+ return ops->identity_domain;
+
+ /* Older drivers create the identity domain via ops->domain_alloc() */
+ if (!ops->domain_alloc)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ domain = ops->domain_alloc(IOMMU_DOMAIN_IDENTITY);
+ if (IS_ERR(domain))
+ return domain;
+ if (!domain)
+ return ERR_PTR(-ENOMEM);
+
+ iommu_domain_init(domain, IOMMU_DOMAIN_IDENTITY, ops);
+ return domain;
+}
+
static struct iommu_domain *
__iommu_group_alloc_default_domain(struct iommu_group *group, int req_type)
{
+ struct device *dev = iommu_group_first_dev(group);
+ struct iommu_domain *dom;
+
if (group->default_domain && group->default_domain->type == req_type)
return group->default_domain;
- return __iommu_group_domain_alloc(group, req_type);
+
+ /*
+ * When allocating the DMA API domain assume that the driver is going to
+ * use PASID and make sure the RID's domain is PASID compatible.
+ */
+ if (req_type & __IOMMU_DOMAIN_PAGING) {
+ dom = __iommu_paging_domain_alloc_flags(dev, req_type,
+ dev->iommu->max_pasids ? IOMMU_HWPT_ALLOC_PASID : 0);
+
+ /*
+ * If driver does not support PASID feature then
+ * try to allocate non-PASID domain
+ */
+ if (PTR_ERR(dom) == -EOPNOTSUPP)
+ dom = __iommu_paging_domain_alloc_flags(dev, req_type, 0);
+
+ return dom;
+ }
+
+ if (req_type == IOMMU_DOMAIN_IDENTITY)
+ return __iommu_alloc_identity_domain(dev);
+
+ return ERR_PTR(-EINVAL);
}
/*
@@ -1841,31 +1888,6 @@ int bus_iommu_probe(const struct bus_type *bus)
}
/**
- * iommu_present() - make platform-specific assumptions about an IOMMU
- * @bus: bus to check
- *
- * Do not use this function. You want device_iommu_mapped() instead.
- *
- * Return: true if some IOMMU is present and aware of devices on the given bus;
- * in general it may not be the only IOMMU, and it may not have anything to do
- * with whatever device you are ultimately interested in.
- */
-bool iommu_present(const struct bus_type *bus)
-{
- bool ret = false;
-
- for (int i = 0; i < ARRAY_SIZE(iommu_buses); i++) {
- if (iommu_buses[i] == bus) {
- spin_lock(&iommu_device_lock);
- ret = !list_empty(&iommu_device_list);
- spin_unlock(&iommu_device_lock);
- }
- }
- return ret;
-}
-EXPORT_SYMBOL_GPL(iommu_present);
-
-/**
* device_iommu_capable() - check for a general IOMMU capability
* @dev: device to which the capability would be relevant, if available
* @cap: IOMMU capability
@@ -1934,117 +1956,67 @@ void iommu_set_fault_handler(struct iommu_domain *domain,
}
EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
-static struct iommu_domain *__iommu_domain_alloc(const struct iommu_ops *ops,
- struct device *dev,
- unsigned int type)
+static void iommu_domain_init(struct iommu_domain *domain, unsigned int type,
+ const struct iommu_ops *ops)
{
- struct iommu_domain *domain;
- unsigned int alloc_type = type & IOMMU_DOMAIN_ALLOC_FLAGS;
-
- if (alloc_type == IOMMU_DOMAIN_IDENTITY && ops->identity_domain)
- return ops->identity_domain;
- else if (alloc_type == IOMMU_DOMAIN_BLOCKED && ops->blocked_domain)
- return ops->blocked_domain;
- else if (type & __IOMMU_DOMAIN_PAGING && ops->domain_alloc_paging)
- domain = ops->domain_alloc_paging(dev);
- else if (ops->domain_alloc)
- domain = ops->domain_alloc(alloc_type);
- else
- return ERR_PTR(-EOPNOTSUPP);
-
- /*
- * Many domain_alloc ops now return ERR_PTR, make things easier for the
- * driver by accepting ERR_PTR from all domain_alloc ops instead of
- * having two rules.
- */
- if (IS_ERR(domain))
- return domain;
- if (!domain)
- return ERR_PTR(-ENOMEM);
-
domain->type = type;
domain->owner = ops;
+ if (!domain->ops)
+ domain->ops = ops->default_domain_ops;
+
/*
* If not already set, assume all sizes by default; the driver
* may override this later
*/
if (!domain->pgsize_bitmap)
domain->pgsize_bitmap = ops->pgsize_bitmap;
-
- if (!domain->ops)
- domain->ops = ops->default_domain_ops;
-
- if (iommu_is_dma_domain(domain)) {
- int rc;
-
- rc = iommu_get_dma_cookie(domain);
- if (rc) {
- iommu_domain_free(domain);
- return ERR_PTR(rc);
- }
- }
- return domain;
}
static struct iommu_domain *
-__iommu_group_domain_alloc(struct iommu_group *group, unsigned int type)
-{
- struct device *dev = iommu_group_first_dev(group);
-
- return __iommu_domain_alloc(dev_iommu_ops(dev), dev, type);
-}
-
-static int __iommu_domain_alloc_dev(struct device *dev, void *data)
+__iommu_paging_domain_alloc_flags(struct device *dev, unsigned int type,
+ unsigned int flags)
{
- const struct iommu_ops **ops = data;
+ const struct iommu_ops *ops;
+ struct iommu_domain *domain;
if (!dev_has_iommu(dev))
- return 0;
-
- if (WARN_ONCE(*ops && *ops != dev_iommu_ops(dev),
- "Multiple IOMMU drivers present for bus %s, which the public IOMMU API can't fully support yet. You will still need to disable one or more for this to work, sorry!\n",
- dev_bus_name(dev)))
- return -EBUSY;
-
- *ops = dev_iommu_ops(dev);
- return 0;
-}
+ return ERR_PTR(-ENODEV);
-/*
- * The iommu ops in bus has been retired. Do not use this interface in
- * new drivers.
- */
-struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus)
-{
- const struct iommu_ops *ops = NULL;
- int err = bus_for_each_dev(bus, NULL, &ops, __iommu_domain_alloc_dev);
- struct iommu_domain *domain;
+ ops = dev_iommu_ops(dev);
- if (err || !ops)
- return NULL;
+ if (ops->domain_alloc_paging && !flags)
+ domain = ops->domain_alloc_paging(dev);
+ else if (ops->domain_alloc_user)
+ domain = ops->domain_alloc_user(dev, flags, NULL, NULL);
+ else if (ops->domain_alloc && !flags)
+ domain = ops->domain_alloc(IOMMU_DOMAIN_UNMANAGED);
+ else
+ return ERR_PTR(-EOPNOTSUPP);
- domain = __iommu_domain_alloc(ops, NULL, IOMMU_DOMAIN_UNMANAGED);
if (IS_ERR(domain))
- return NULL;
+ return domain;
+ if (!domain)
+ return ERR_PTR(-ENOMEM);
+
+ iommu_domain_init(domain, type, ops);
return domain;
}
-EXPORT_SYMBOL_GPL(iommu_domain_alloc);
/**
- * iommu_paging_domain_alloc() - Allocate a paging domain
+ * iommu_paging_domain_alloc_flags() - Allocate a paging domain
* @dev: device for which the domain is allocated
+ * @flags: Bitmap of iommufd_hwpt_alloc_flags
*
* Allocate a paging domain which will be managed by a kernel driver. Return
- * allocated domain if successful, or a ERR pointer for failure.
+ * allocated domain if successful, or an ERR pointer for failure.
*/
-struct iommu_domain *iommu_paging_domain_alloc(struct device *dev)
+struct iommu_domain *iommu_paging_domain_alloc_flags(struct device *dev,
+ unsigned int flags)
{
- if (!dev_has_iommu(dev))
- return ERR_PTR(-ENODEV);
-
- return __iommu_domain_alloc(dev_iommu_ops(dev), dev, IOMMU_DOMAIN_UNMANAGED);
+ return __iommu_paging_domain_alloc_flags(dev,
+ IOMMU_DOMAIN_UNMANAGED, flags);
}
-EXPORT_SYMBOL_GPL(iommu_paging_domain_alloc);
+EXPORT_SYMBOL_GPL(iommu_paging_domain_alloc_flags);
void iommu_domain_free(struct iommu_domain *domain)
{
@@ -2216,8 +2188,8 @@ EXPORT_SYMBOL_GPL(iommu_attach_group);
/**
* iommu_group_replace_domain - replace the domain that a group is attached to
- * @new_domain: new IOMMU domain to replace with
* @group: IOMMU group that will be attached to the new domain
+ * @new_domain: new IOMMU domain to replace with
*
* This API allows the group to switch domains without being forced to go to
* the blocking domain in-between.
@@ -2965,6 +2937,14 @@ static int iommu_setup_default_domain(struct iommu_group *group,
if (group->default_domain == dom)
return 0;
+ if (iommu_is_dma_domain(dom)) {
+ ret = iommu_get_dma_cookie(dom);
+ if (ret) {
+ iommu_domain_free(dom);
+ return ret;
+ }
+ }
+
/*
* IOMMU_RESV_DIRECT and IOMMU_RESV_DIRECT_RELAXABLE regions must be
* mapped before their device is attached, in order to guarantee
@@ -3152,22 +3132,25 @@ void iommu_device_unuse_default_domain(struct device *dev)
static int __iommu_group_alloc_blocking_domain(struct iommu_group *group)
{
+ struct device *dev = iommu_group_first_dev(group);
+ const struct iommu_ops *ops = dev_iommu_ops(dev);
struct iommu_domain *domain;
if (group->blocking_domain)
return 0;
- domain = __iommu_group_domain_alloc(group, IOMMU_DOMAIN_BLOCKED);
- if (IS_ERR(domain)) {
- /*
- * For drivers that do not yet understand IOMMU_DOMAIN_BLOCKED
- * create an empty domain instead.
- */
- domain = __iommu_group_domain_alloc(group,
- IOMMU_DOMAIN_UNMANAGED);
- if (IS_ERR(domain))
- return PTR_ERR(domain);
+ if (ops->blocked_domain) {
+ group->blocking_domain = ops->blocked_domain;
+ return 0;
}
+
+ /*
+ * For drivers that do not yet understand IOMMU_DOMAIN_BLOCKED create an
+ * empty PAGING domain instead.
+ */
+ domain = iommu_paging_domain_alloc(dev);
+ if (IS_ERR(domain))
+ return PTR_ERR(domain);
group->blocking_domain = domain;
return 0;
}