| author | Maarten Lankhorst <maarten.lankhorst@linux.intel.com> | 2021-07-27 12:48:17 +0200 |
|---|---|---|
| committer | Maarten Lankhorst <maarten.lankhorst@linux.intel.com> | 2021-07-27 12:48:17 +0200 |
| commit | ca31fef11dc83e672415d5925a134749761329bd (patch) | |
| tree | 8eb6a489e2d6dd117300f40ed8fc945a06bb6eee /drivers/infiniband/core/cache.c | |
| parent | drm/plane: Move drm_plane_enable_fb_damage_clips into core (diff) | |
| parent | efi: sysfb_efi: fix build when EFI is not set (diff) | |
Backmerge remote-tracking branch 'drm/drm-next' into drm-misc-next
Required bump from v5.13-rc3 to v5.14-rc3, and to pick up sysfb compilation fixes.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Diffstat (limited to 'drivers/infiniband/core/cache.c')
-rw-r--r-- | drivers/infiniband/core/cache.c | 40 |
1 file changed, 21 insertions, 19 deletions
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 3b0991fedd81..c9e9fc81447e 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -240,7 +240,7 @@ static void free_gid_entry_locked(struct ib_gid_table_entry *entry)
         u32 port_num = entry->attr.port_num;
         struct ib_gid_table *table = rdma_gid_table(device, port_num);
 
-        dev_dbg(&device->dev, "%s port=%u index=%d gid %pI6\n", __func__,
+        dev_dbg(&device->dev, "%s port=%u index=%u gid %pI6\n", __func__,
                 port_num, entry->attr.index, entry->attr.gid.raw);
 
         write_lock_irq(&table->rwlock);
@@ -323,7 +323,7 @@ static void store_gid_entry(struct ib_gid_table *table,
 {
         entry->state = GID_TABLE_ENTRY_VALID;
 
-        dev_dbg(&entry->attr.device->dev, "%s port=%d index=%d gid %pI6\n",
+        dev_dbg(&entry->attr.device->dev, "%s port=%u index=%u gid %pI6\n",
                 __func__, entry->attr.port_num, entry->attr.index,
                 entry->attr.gid.raw);
 
@@ -354,7 +354,7 @@ static int add_roce_gid(struct ib_gid_table_entry *entry)
         int ret;
 
         if (!attr->ndev) {
-                dev_err(&attr->device->dev, "%s NULL netdev port=%d index=%d\n",
+                dev_err(&attr->device->dev, "%s NULL netdev port=%u index=%u\n",
                         __func__, attr->port_num, attr->index);
                 return -EINVAL;
         }
@@ -362,7 +362,7 @@ static int add_roce_gid(struct ib_gid_table_entry *entry)
         ret = attr->device->ops.add_gid(attr, &entry->context);
         if (ret) {
                 dev_err(&attr->device->dev,
-                        "%s GID add failed port=%d index=%d\n",
+                        "%s GID add failed port=%u index=%u\n",
                         __func__, attr->port_num, attr->index);
                 return ret;
         }
@@ -805,7 +805,7 @@ static void release_gid_table(struct ib_device *device,
                         continue;
                 if (kref_read(&table->data_vec[i]->kref) > 1) {
                         dev_err(&device->dev,
-                                "GID entry ref leak for index %d ref=%d\n", i,
+                                "GID entry ref leak for index %d ref=%u\n", i,
                                 kref_read(&table->data_vec[i]->kref));
                         leak = true;
                 }
@@ -1069,19 +1069,14 @@ int ib_get_cached_pkey(struct ib_device *device,
 }
 EXPORT_SYMBOL(ib_get_cached_pkey);
 
-int ib_get_cached_subnet_prefix(struct ib_device *device, u32 port_num,
+void ib_get_cached_subnet_prefix(struct ib_device *device, u32 port_num,
                                 u64 *sn_pfx)
 {
         unsigned long flags;
 
-        if (!rdma_is_port_valid(device, port_num))
-                return -EINVAL;
-
         read_lock_irqsave(&device->cache_lock, flags);
         *sn_pfx = device->port_data[port_num].cache.subnet_prefix;
         read_unlock_irqrestore(&device->cache_lock, flags);
-
-        return 0;
 }
 EXPORT_SYMBOL(ib_get_cached_subnet_prefix);
 
@@ -1465,10 +1460,12 @@ err:
 }
 
 static int
-ib_cache_update(struct ib_device *device, u32 port, bool enforce_security)
+ib_cache_update(struct ib_device *device, u32 port, bool update_gids,
+                bool update_pkeys, bool enforce_security)
 {
         struct ib_port_attr *tprops = NULL;
-        struct ib_pkey_cache *pkey_cache = NULL, *old_pkey_cache;
+        struct ib_pkey_cache *pkey_cache = NULL;
+        struct ib_pkey_cache *old_pkey_cache = NULL;
         int i;
         int ret;
 
@@ -1485,14 +1482,16 @@ ib_cache_update(struct ib_device *device, u32 port, bool enforce_security)
                 goto err;
         }
 
-        if (!rdma_protocol_roce(device, port)) {
+        if (!rdma_protocol_roce(device, port) && update_gids) {
                 ret = config_non_roce_gid_cache(device, port,
                                                 tprops->gid_tbl_len);
                 if (ret)
                         goto err;
         }
 
-        if (tprops->pkey_tbl_len) {
+        update_pkeys &= !!tprops->pkey_tbl_len;
+
+        if (update_pkeys) {
                 pkey_cache = kmalloc(struct_size(pkey_cache, table,
                                                  tprops->pkey_tbl_len),
                                      GFP_KERNEL);
@@ -1517,9 +1516,10 @@ ib_cache_update(struct ib_device *device, u32 port, bool enforce_security)
 
         write_lock_irq(&device->cache_lock);
 
-        old_pkey_cache = device->port_data[port].cache.pkey;
-
-        device->port_data[port].cache.pkey = pkey_cache;
+        if (update_pkeys) {
+                old_pkey_cache = device->port_data[port].cache.pkey;
+                device->port_data[port].cache.pkey = pkey_cache;
+        }
         device->port_data[port].cache.lmc = tprops->lmc;
         device->port_data[port].cache.port_state = tprops->state;
 
@@ -1551,6 +1551,8 @@ static void ib_cache_event_task(struct work_struct *_work)
          * the cache.
          */
         ret = ib_cache_update(work->event.device, work->event.element.port_num,
+                              work->event.event == IB_EVENT_GID_CHANGE,
+                              work->event.event == IB_EVENT_PKEY_CHANGE,
                               work->enforce_security);
 
         /* GID event is notified already for individual GID entries by
@@ -1624,7 +1626,7 @@ int ib_cache_setup_one(struct ib_device *device)
                 return err;
 
         rdma_for_each_port (device, p) {
-                err = ib_cache_update(device, p, true);
+                err = ib_cache_update(device, p, true, true, true);
                 if (err)
                         return err;
         }
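
One caller-visible effect of the ib_get_cached_subnet_prefix() change above is that the helper now returns void and no longer rejects an invalid port, so any port validation has to happen at the call site. The fragment below is only an illustrative sketch against the new signature; the function name example_query_subnet_prefix and its placement are hypothetical and not part of this patch.

/* Hypothetical caller, for illustration only: after this change
 * ib_get_cached_subnet_prefix() cannot fail, so the caller checks
 * the port number itself before reading the cached prefix.
 */
static int example_query_subnet_prefix(struct ib_device *device, u32 port_num,
                                        u64 *sn_pfx)
{
        if (!rdma_is_port_valid(device, port_num))
                return -EINVAL; /* check that used to live in the helper */

        ib_get_cached_subnet_prefix(device, port_num, sn_pfx);
        return 0;
}

The patch presumably drops the in-helper check because existing in-tree callers already operate on a validated port; the sketch keeps the check explicit to show where that responsibility now sits.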