Diffstat (limited to 'drivers/infiniband/ulp')
-rw-r--r--	drivers/infiniband/ulp/ipoib/ipoib_ib.c            |   4
-rw-r--r--	drivers/infiniband/ulp/ipoib/ipoib_main.c          |   4
-rw-r--r--	drivers/infiniband/ulp/iser/iscsi_iser.c           |   1
-rw-r--r--	drivers/infiniband/ulp/iser/iser_memory.c          |   9
-rw-r--r--	drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c  |   8
-rw-r--r--	drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c    |   8
-rw-r--r--	drivers/infiniband/ulp/srp/ib_srp.c                | 160
-rw-r--r--	drivers/infiniband/ulp/srp/ib_srp.h                |  20
-rw-r--r--	drivers/infiniband/ulp/srpt/ib_srpt.c              | 291
-rw-r--r--	drivers/infiniband/ulp/srpt/ib_srpt.h              |  44
10 files changed, 397 insertions, 152 deletions
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 9006a13af1de..6d35570092d6 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -66,7 +66,7 @@ struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
 	ah->last_send = 0;
 	kref_init(&ah->ref);
 
-	vah = rdma_create_ah(pd, attr);
+	vah = rdma_create_ah(pd, attr, RDMA_CREATE_AH_SLEEPABLE);
 	if (IS_ERR(vah)) {
 		kfree(ah);
 		ah = (struct ipoib_ah *)vah;
@@ -678,7 +678,7 @@ static void __ipoib_reap_ah(struct net_device *dev)
 	list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list)
 		if ((int) priv->tx_tail - (int) ah->last_send >= 0) {
 			list_del(&ah->list);
-			rdma_destroy_ah(ah->ah);
+			rdma_destroy_ah(ah->ah, 0);
 			kfree(ah);
 		}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 6214d8c0d546..d932f99201d1 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -2453,8 +2453,8 @@ static struct net_device *ipoib_add_port(const char *format,
 		return ERR_PTR(result);
 	}
 
-	if (hca->rdma_netdev_get_params) {
-		int rc = hca->rdma_netdev_get_params(hca, port,
+	if (hca->ops.rdma_netdev_get_params) {
+		int rc = hca->ops.rdma_netdev_get_params(hca, port,
 						     RDMA_NETDEV_IPOIB,
 						     &params);
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 3fecd87c9f2b..8c707accd148 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -997,7 +997,6 @@ static struct scsi_host_template iscsi_iser_sht = {
 	.eh_device_reset_handler= iscsi_eh_device_reset,
 	.eh_target_reset_handler = iscsi_eh_recover_target,
 	.target_alloc		= iscsi_target_alloc,
-	.use_clustering		= ENABLE_CLUSTERING,
 	.slave_alloc            = iscsi_iser_slave_alloc,
 	.proc_name              = "iscsi_iser",
 	.this_id                = -1,
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 009be8889d71..e9b7efc302d0 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -77,8 +77,8 @@ int iser_assign_reg_ops(struct iser_device *device)
 	struct ib_device *ib_dev = device->ib_device;
 
 	/* Assign function handles - based on FMR support */
-	if (ib_dev->alloc_fmr && ib_dev->dealloc_fmr &&
-	    ib_dev->map_phys_fmr && ib_dev->unmap_fmr) {
+	if (ib_dev->ops.alloc_fmr && ib_dev->ops.dealloc_fmr &&
+	    ib_dev->ops.map_phys_fmr && ib_dev->ops.unmap_fmr) {
 		iser_info("FMR supported, using FMR for registration\n");
 		device->reg_ops = &fmr_ops;
 	} else if (ib_dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
@@ -277,16 +277,13 @@ void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task,
 			enum iser_data_dir cmd_dir)
 {
 	struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];
-	int ret;
 
 	if (!reg->mem_h)
 		return;
 
 	iser_dbg("PHYSICAL Mem.Unregister mem_h %p\n", reg->mem_h);
-	ret = ib_fmr_pool_unmap((struct ib_pool_fmr *)reg->mem_h);
-	if (ret)
-		iser_err("ib_fmr_pool_unmap failed %d\n", ret);
+	ib_fmr_pool_unmap((struct ib_pool_fmr *)reg->mem_h);
 
 	reg->mem_h = NULL;
 }
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c
index 61558788b3fa..ae70cd18903e 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c
@@ -330,10 +330,10 @@ struct opa_vnic_adapter *opa_vnic_add_netdev(struct ib_device *ibdev,
 	struct rdma_netdev *rn;
 	int rc;
 
-	netdev = ibdev->alloc_rdma_netdev(ibdev, port_num,
-					  RDMA_NETDEV_OPA_VNIC,
-					  "veth%d", NET_NAME_UNKNOWN,
-					  ether_setup);
+	netdev = ibdev->ops.alloc_rdma_netdev(ibdev, port_num,
+					      RDMA_NETDEV_OPA_VNIC,
+					      "veth%d", NET_NAME_UNKNOWN,
+					      ether_setup);
 	if (!netdev)
 		return ERR_PTR(-ENOMEM);
 	else if (IS_ERR(netdev))
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
index d119d9afa845..560e4f2d466e 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
@@ -606,7 +606,7 @@ static void vema_set(struct opa_vnic_vema_port *port,
 static void vema_send(struct ib_mad_agent *mad_agent,
 		      struct ib_mad_send_wc *mad_wc)
 {
-	rdma_destroy_ah(mad_wc->send_buf->ah);
+	rdma_destroy_ah(mad_wc->send_buf->ah, RDMA_DESTROY_AH_SLEEPABLE);
 	ib_free_send_mad(mad_wc->send_buf);
 }
 
@@ -680,7 +680,7 @@ static void vema_recv(struct ib_mad_agent *mad_agent,
 	ib_free_send_mad(rsp);
 
 err_rsp:
-	rdma_destroy_ah(ah);
+	rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE);
free_recv_mad:
 	ib_free_recv_mad(mad_wc);
 }
@@ -777,7 +777,7 @@ void opa_vnic_vema_send_trap(struct opa_vnic_adapter *adapter,
 	}
 
 	rdma_ah_set_dlid(&ah_attr, trap_lid);
-	ah = rdma_create_ah(port->mad_agent->qp->pd, &ah_attr);
+	ah = rdma_create_ah(port->mad_agent->qp->pd, &ah_attr, 0);
 	if (IS_ERR(ah)) {
 		c_err("%s:Couldn't create new AH = %p\n", __func__, ah);
 		c_err("%s:dlid = %d, sl = %d, port = %d\n", __func__,
@@ -848,7 +848,7 @@ void opa_vnic_vema_send_trap(struct opa_vnic_adapter *adapter,
 	}
 
err_sndbuf:
-	rdma_destroy_ah(ah);
+	rdma_destroy_ah(ah, 0);
err_exit:
 	v_err("Aborting trap\n");
 }
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index eed0eb3bb04c..31d91538bbf4 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -132,6 +132,15 @@ MODULE_PARM_DESC(dev_loss_tmo,
 		 " if fast_io_fail_tmo has not been set. \"off\" means that"
 		 " this functionality is disabled.");
 
+static bool srp_use_imm_data = true;
+module_param_named(use_imm_data, srp_use_imm_data, bool, 0644);
+MODULE_PARM_DESC(use_imm_data,
+		 "Whether or not to request permission to use immediate data during SRP login.");
+
+static unsigned int srp_max_imm_data = 8 * 1024;
+module_param_named(max_imm_data, srp_max_imm_data, uint, 0644);
+MODULE_PARM_DESC(max_imm_data, "Maximum immediate data size.");
+
 static unsigned ch_count;
 module_param(ch_count, uint, 0444);
 MODULE_PARM_DESC(ch_count,
@@ -573,7 +582,7 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
 	init_attr->cap.max_send_wr = m * target->queue_size;
 	init_attr->cap.max_recv_wr = target->queue_size + 1;
 	init_attr->cap.max_recv_sge = 1;
-	init_attr->cap.max_send_sge = 1;
+	init_attr->cap.max_send_sge = SRP_MAX_SGE;
 	init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
 	init_attr->qp_type = IB_QPT_RC;
 	init_attr->send_cq = send_cq;
@@ -823,7 +832,8 @@ static u8 srp_get_subnet_timeout(struct srp_host *host)
 	return subnet_timeout;
 }
 
-static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
+static int srp_send_req(struct srp_rdma_ch *ch, uint32_t max_iu_len,
+			bool multich)
 {
 	struct srp_target_port *target = ch->target;
 	struct {
@@ -852,11 +862,15 @@ static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
 
 	req->ib_req.opcode = SRP_LOGIN_REQ;
 	req->ib_req.tag = 0;
-	req->ib_req.req_it_iu_len = cpu_to_be32(target->max_iu_len);
+	req->ib_req.req_it_iu_len = cpu_to_be32(max_iu_len);
 	req->ib_req.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					      SRP_BUF_FORMAT_INDIRECT);
 	req->ib_req.req_flags = (multich ? SRP_MULTICHAN_MULTI :
				 SRP_MULTICHAN_SINGLE);
+	if (srp_use_imm_data) {
+		req->ib_req.req_flags |= SRP_IMMED_REQUESTED;
+		req->ib_req.imm_data_offset = cpu_to_be16(SRP_IMM_DATA_OFFSET);
+	}
 
 	if (target->using_rdma_cm) {
 		req->rdma_param.flow_control = req->ib_param.flow_control;
@@ -873,6 +887,7 @@ static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
 		req->rdma_req.req_it_iu_len = req->ib_req.req_it_iu_len;
 		req->rdma_req.req_buf_fmt = req->ib_req.req_buf_fmt;
 		req->rdma_req.req_flags = req->ib_req.req_flags;
+		req->rdma_req.imm_data_offset = req->ib_req.imm_data_offset;
 
 		ipi = req->rdma_req.initiator_port_id;
 		tpi = req->rdma_req.target_port_id;
@@ -1145,7 +1160,8 @@ static int srp_connected_ch(struct srp_target_port *target)
 	return c;
 }
 
-static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
+static int srp_connect_ch(struct srp_rdma_ch *ch, uint32_t max_iu_len,
+			  bool multich)
 {
 	struct srp_target_port *target = ch->target;
 	int ret;
@@ -1158,7 +1174,7 @@ static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
 	while (1) {
 		init_completion(&ch->done);
-		ret = srp_send_req(ch, multich);
+		ret = srp_send_req(ch, max_iu_len, multich);
 		if (ret)
 			goto out;
 		ret = wait_for_completion_interruptible(&ch->done);
@@ -1344,6 +1360,20 @@ static void srp_terminate_io(struct srp_rport *rport)
 	}
 }
 
+/* Calculate maximum initiator to target information unit length. */
+static uint32_t srp_max_it_iu_len(int cmd_sg_cnt, bool use_imm_data)
+{
+	uint32_t max_iu_len = sizeof(struct srp_cmd) + SRP_MAX_ADD_CDB_LEN +
+		sizeof(struct srp_indirect_buf) +
+		cmd_sg_cnt * sizeof(struct srp_direct_buf);
+
+	if (use_imm_data)
+		max_iu_len = max(max_iu_len, SRP_IMM_DATA_OFFSET +
+				 srp_max_imm_data);
+
+	return max_iu_len;
+}
+
 /*
  * It is up to the caller to ensure that srp_rport_reconnect() calls are
  * serialized and that no concurrent srp_queuecommand(), srp_abort(),
@@ -1357,6 +1387,8 @@ static int srp_rport_reconnect(struct srp_rport *rport)
 {
 	struct srp_target_port *target = rport->lld_data;
 	struct srp_rdma_ch *ch;
+	uint32_t max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
+						srp_use_imm_data);
 	int i, j, ret = 0;
 	bool multich = false;
 
@@ -1402,7 +1434,7 @@ static int srp_rport_reconnect(struct srp_rport *rport)
 		ch = &target->ch[i];
 		if (ret)
 			break;
-		ret = srp_connect_ch(ch, multich);
+		ret = srp_connect_ch(ch, max_iu_len, multich);
 		multich = true;
 	}
 
@@ -1764,25 +1796,29 @@ static void srp_check_mapping(struct srp_map_state *state,
 * @req: SRP request
 *
 * Returns the length in bytes of the SRP_CMD IU or a negative value if
- * mapping failed.
+ * mapping failed. The size of any immediate data is not included in the
+ * return value.
 */
 static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
 			struct srp_request *req)
 {
 	struct srp_target_port *target = ch->target;
-	struct scatterlist *scat;
+	struct scatterlist *scat, *sg;
 	struct srp_cmd *cmd = req->cmd->buf;
-	int len, nents, count, ret;
+	int i, len, nents, count, ret;
 	struct srp_device *dev;
 	struct ib_device *ibdev;
 	struct srp_map_state state;
 	struct srp_indirect_buf *indirect_hdr;
+	u64 data_len;
 	u32 idb_len, table_len;
 	__be32 idb_rkey;
 	u8 fmt;
 
+	req->cmd->num_sge = 1;
+
 	if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
-		return sizeof (struct srp_cmd);
+		return sizeof(struct srp_cmd) + cmd->add_cdb_len;
 
 	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
 	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
@@ -1794,6 +1830,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
 
 	nents = scsi_sg_count(scmnd);
 	scat  = scsi_sglist(scmnd);
+	data_len = scsi_bufflen(scmnd);
 
 	dev = target->srp_host->srp_dev;
 	ibdev = dev->dev;
@@ -1802,8 +1839,31 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
 	if (unlikely(count == 0))
 		return -EIO;
 
+	if (ch->use_imm_data &&
+	    count <= SRP_MAX_IMM_SGE &&
+	    SRP_IMM_DATA_OFFSET + data_len <= ch->max_it_iu_len &&
+	    scmnd->sc_data_direction == DMA_TO_DEVICE) {
+		struct srp_imm_buf *buf;
+		struct ib_sge *sge = &req->cmd->sge[1];
+
+		fmt = SRP_DATA_DESC_IMM;
+		len = SRP_IMM_DATA_OFFSET;
+		req->nmdesc = 0;
+		buf = (void *)cmd->add_data + cmd->add_cdb_len;
+		buf->len = cpu_to_be32(data_len);
+		WARN_ON_ONCE((void *)(buf + 1) > (void *)cmd + len);
+		for_each_sg(scat, sg, count, i) {
+			sge[i].addr   = ib_sg_dma_address(ibdev, sg);
+			sge[i].length = ib_sg_dma_len(ibdev, sg);
+			sge[i].lkey   = target->lkey;
+		}
+		req->cmd->num_sge += count;
+		goto map_complete;
+	}
+
 	fmt = SRP_DATA_DESC_DIRECT;
-	len = sizeof (struct srp_cmd) +	sizeof (struct srp_direct_buf);
+	len = sizeof(struct srp_cmd) + cmd->add_cdb_len +
+		sizeof(struct srp_direct_buf);
 
 	if (count == 1 && target->global_rkey) {
 		/*
@@ -1812,8 +1872,9 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
 		 * single entry. So a direct descriptor along with
 		 * the DMA MR suffices.
 		 */
-		struct srp_direct_buf *buf = (void *) cmd->add_data;
+		struct srp_direct_buf *buf;
 
+		buf = (void *)cmd->add_data + cmd->add_cdb_len;
 		buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
 		buf->key = cpu_to_be32(target->global_rkey);
 		buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
@@ -1826,7 +1887,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
 	 * We have more than one scatter/gather entry, so build our indirect
 	 * descriptor table, trying to merge as many entries as we can.
 	 */
-	indirect_hdr = (void *) cmd->add_data;
+	indirect_hdr = (void *)cmd->add_data + cmd->add_cdb_len;
 
 	ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
 				   target->indirect_size, DMA_TO_DEVICE);
@@ -1861,8 +1922,9 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
 		 * Memory registration collapsed the sg-list into one entry,
 		 * so use a direct descriptor.
 		 */
-		struct srp_direct_buf *buf = (void *) cmd->add_data;
+		struct srp_direct_buf *buf;
 
+		buf = (void *)cmd->add_data + cmd->add_cdb_len;
 		*buf = req->indirect_desc[0];
 		goto map_complete;
 	}
@@ -1880,7 +1942,8 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
 	idb_len = sizeof(struct srp_indirect_buf) + table_len;
 
 	fmt = SRP_DATA_DESC_INDIRECT;
-	len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
+	len = sizeof(struct srp_cmd) + cmd->add_cdb_len +
+		sizeof(struct srp_indirect_buf);
 	len += count * sizeof (struct srp_direct_buf);
 
 	memcpy(indirect_hdr->desc_list, req->indirect_desc,
@@ -2001,22 +2064,30 @@ static void srp_send_done(struct ib_cq *cq, struct ib_wc *wc)
 	list_add(&iu->list, &ch->free_tx);
 }
 
+/**
+ * srp_post_send() - send an SRP information unit
+ * @ch: RDMA channel over which to send the information unit.
+ * @iu: Information unit to send.
+ * @len: Length of the information unit excluding immediate data.
+ */
 static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
 {
 	struct srp_target_port *target = ch->target;
-	struct ib_sge list;
 	struct ib_send_wr wr;
 
-	list.addr   = iu->dma;
-	list.length = len;
-	list.lkey   = target->lkey;
+	if (WARN_ON_ONCE(iu->num_sge > SRP_MAX_SGE))
+		return -EINVAL;
+
+	iu->sge[0].addr   = iu->dma;
+	iu->sge[0].length = len;
+	iu->sge[0].lkey   = target->lkey;
 
 	iu->cqe.done = srp_send_done;
 
 	wr.next       = NULL;
 	wr.wr_cqe     = &iu->cqe;
-	wr.sg_list    = &list;
-	wr.num_sge    = 1;
+	wr.sg_list    = &iu->sge[0];
+	wr.num_sge    = iu->num_sge;
 	wr.opcode     = IB_WR_SEND;
 	wr.send_flags = IB_SEND_SIGNALED;
 
@@ -2129,6 +2200,7 @@ static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
 		return 1;
 	}
 
+	iu->num_sge = 1;
 	ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
 	memcpy(iu->buf, rsp, len);
 	ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
@@ -2312,7 +2384,7 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
 	req = &ch->req_ring[idx];
 	dev = target->srp_host->srp_dev->dev;
-	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
+	ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_it_iu_len,
 				   DMA_TO_DEVICE);
 
 	scmnd->host_scribble = (void *) req;
@@ -2324,6 +2396,12 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
 	int_to_scsilun(scmnd->device->lun, &cmd->lun);
 	cmd->tag = tag;
 	memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
+	if (unlikely(scmnd->cmd_len > sizeof(cmd->cdb))) {
+		cmd->add_cdb_len = round_up(scmnd->cmd_len - sizeof(cmd->cdb),
+					    4);
+		if (WARN_ON_ONCE(cmd->add_cdb_len > SRP_MAX_ADD_CDB_LEN))
+			goto err_iu;
+	}
 
 	req->scmnd    = scmnd;
 	req->cmd      = iu;
@@ -2343,11 +2421,12 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
 		goto err_iu;
 	}
 
-	ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
+	ib_dma_sync_single_for_device(dev, iu->dma, ch->max_it_iu_len,
 				      DMA_TO_DEVICE);
 
 	if (srp_post_send(ch, iu, len)) {
 		shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
+		scmnd->result = DID_ERROR << 16;
 		goto err_unmap;
 	}
 
@@ -2410,7 +2489,7 @@ static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
 
 	for (i = 0; i < target->queue_size; ++i) {
 		ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
-					      target->max_iu_len,
+					      ch->max_it_iu_len,
 					      GFP_KERNEL, DMA_TO_DEVICE);
 		if (!ch->tx_ring[i])
 			goto err;
@@ -2476,6 +2555,15 @@ static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
 	if (lrsp->opcode == SRP_LOGIN_RSP) {
 		ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
 		ch->req_lim       = be32_to_cpu(lrsp->req_lim_delta);
+		ch->use_imm_data  = lrsp->rsp_flags & SRP_LOGIN_RSP_IMMED_SUPP;
+		ch->max_it_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
+						      ch->use_imm_data);
+		WARN_ON_ONCE(ch->max_it_iu_len >
+			     be32_to_cpu(lrsp->max_it_iu_len));
+
+		if (ch->use_imm_data)
+			shost_printk(KERN_DEBUG, target->scsi_host,
+				     PFX "using immediate data\n");
 
 		/*
 		 * Reserve credits for task management so we don't
@@ -2864,6 +2952,8 @@ static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
 		return -1;
 	}
 
+	iu->num_sge = 1;
+
 	ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
 				   DMA_TO_DEVICE);
 	tsk_mgmt = iu->buf;
@@ -3215,7 +3305,6 @@ static struct scsi_host_template srp_template = {
 	.can_queue			= SRP_DEFAULT_CMD_SQ_SIZE,
 	.this_id			= -1,
 	.cmd_per_lun			= SRP_DEFAULT_CMD_SQ_SIZE,
-	.use_clustering			= ENABLE_CLUSTERING,
 	.shost_attrs			= srp_host_attrs,
 	.track_queue_depth		= 1,
 };
@@ -3403,6 +3492,9 @@ static const match_table_t srp_opt_tokens = {
 
 /**
 * srp_parse_in - parse an IP address and port number combination
+ * @net: [in] Network namespace.
+ * @sa: [out] Address family, IP address and port number.
+ * @addr_port_str: [in] IP address and port number.
 *
 * Parse the following address formats:
 * - IPv4: <ip_address>:<port>, e.g. 1.2.3.4:5.
@@ -3720,6 +3812,7 @@ static ssize_t srp_create_target(struct device *dev,
 	int ret, node_idx, node, cpu, i;
 	unsigned int max_sectors_per_mr, mr_per_cmd = 0;
 	bool multich = false;
+	uint32_t max_iu_len;
 
 	target_host = scsi_host_alloc(&srp_template,
 				      sizeof (struct srp_target_port));
@@ -3825,9 +3918,7 @@ static ssize_t srp_create_target(struct device *dev,
 	target->mr_per_cmd = mr_per_cmd;
 	target->indirect_size = target->sg_tablesize *
 				sizeof (struct srp_direct_buf);
-	target->max_iu_len = sizeof (struct srp_cmd) +
-			     sizeof (struct srp_indirect_buf) +
-			     target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
+	max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt, srp_use_imm_data);
 
 	INIT_WORK(&target->tl_err_work, srp_tl_err_work);
 	INIT_WORK(&target->remove_work, srp_remove_work);
@@ -3882,7 +3973,7 @@ static ssize_t srp_create_target(struct device *dev,
 		if (ret)
 			goto err_disconnect;
 
-		ret = srp_connect_ch(ch, multich);
+		ret = srp_connect_ch(ch, max_iu_len, multich);
 		if (ret) {
 			char dst[64];
 
@@ -4063,8 +4154,10 @@ static void srp_add_one(struct ib_device *device)
 	srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
 					  max_pages_per_mr);
 
-	srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
-			    device->map_phys_fmr && device->unmap_fmr);
+	srp_dev->has_fmr = (device->ops.alloc_fmr &&
+			    device->ops.dealloc_fmr &&
+			    device->ops.map_phys_fmr &&
+			    device->ops.unmap_fmr);
 	srp_dev->has_fr = (attr->device_cap_flags &
 			   IB_DEVICE_MEM_MGT_EXTENSIONS);
 
 	if (!never_register && !srp_dev->has_fmr && !srp_dev->has_fr) {
@@ -4172,6 +4265,11 @@ static int __init srp_init_module(void)
 {
 	int ret;
 
+	BUILD_BUG_ON(sizeof(struct srp_imm_buf) != 4);
+	BUILD_BUG_ON(sizeof(struct srp_login_req) != 64);
+	BUILD_BUG_ON(sizeof(struct srp_login_req_rdma) != 56);
+	BUILD_BUG_ON(sizeof(struct srp_cmd) != 48);
+
 	if (srp_sg_tablesize) {
 		pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
 		if (!cmd_sg_entries)
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index a2706086b9c7..b2861cd2087a 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -67,6 +67,17 @@ enum {
 	SRP_TAG_TSK_MGMT	= 1U << 31,
 
 	SRP_MAX_PAGES_PER_MR	= 512,
+
+	SRP_MAX_ADD_CDB_LEN	= 16,
+
+	SRP_MAX_IMM_SGE		= 2,
+	SRP_MAX_SGE		= SRP_MAX_IMM_SGE + 1,
+	/*
+	 * Choose the immediate data offset such that a 32 byte CDB still fits.
+	 */
+	SRP_IMM_DATA_OFFSET	= sizeof(struct srp_cmd) +
+				  SRP_MAX_ADD_CDB_LEN +
+				  sizeof(struct srp_imm_buf),
 };
 
 enum srp_target_state {
@@ -130,6 +141,8 @@ struct srp_request {
 /**
 * struct srp_rdma_ch
 * @comp_vector: Completion vector used by this RDMA channel.
+ * @max_it_iu_len: Maximum initiator-to-target information unit length.
+ * @max_ti_iu_len: Maximum target-to-initiator information unit length.
 */
 struct srp_rdma_ch {
 	/* These are RW in the hot path, and commonly used together */
@@ -146,6 +159,9 @@ struct srp_rdma_ch {
 		struct ib_fmr_pool     *fmr_pool;
 		struct srp_fr_pool     *fr_pool;
 	};
+	uint32_t		max_it_iu_len;
+	uint32_t		max_ti_iu_len;
+	bool			use_imm_data;
 
 	/* Everything above this point is used in the hot path of
 	 * command processing. Try to keep them packed into cachelines.
 	 */
@@ -169,7 +185,6 @@ struct srp_rdma_ch {
 	struct srp_iu	      **tx_ring;
 	struct srp_iu	      **rx_ring;
 	struct srp_request     *req_ring;
-	int			max_ti_iu_len;
 	int			comp_vector;
 
 	u64			tsk_mgmt_tag;
@@ -194,7 +209,6 @@ struct srp_target_port {
 	u32			ch_count;
 	u32			lkey;
 	enum srp_target_state	state;
-	unsigned int		max_iu_len;
 	unsigned int		cmd_sg_cnt;
 	unsigned int		indirect_size;
 	bool			allow_ext_sg;
@@ -259,6 +273,8 @@ struct srp_iu {
 	void		       *buf;
 	size_t			size;
 	enum dma_data_direction	direction;
+	u32			num_sge;
+	struct ib_sge		sge[SRP_MAX_SGE];
 	struct ib_cqe		cqe;
 };
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 2357aa727dcf..e9c336cff8f5 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -51,8 +51,6 @@
 
 /* Name of this kernel module. */
 #define DRV_NAME		"ib_srpt"
-#define DRV_VERSION		"2.0.0"
-#define DRV_RELDATE		"2011-02-14"
 
 #define SRPT_ID_STRING	"Linux SRP target"
 
@@ -60,8 +58,7 @@
 #define pr_fmt(fmt) DRV_NAME " " fmt
 
 MODULE_AUTHOR("Vu Pham and Bart Van Assche");
-MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol target "
-		   "v" DRV_VERSION " (" DRV_RELDATE ")");
+MODULE_DESCRIPTION("SCSI RDMA Protocol target driver");
 MODULE_LICENSE("Dual BSD/GPL");
 
 /*
@@ -89,8 +86,7 @@ static int srpt_get_u64_x(char *buffer, const struct kernel_param *kp)
 module_param_call(srpt_service_guid, NULL, srpt_get_u64_x, &srpt_service_guid,
		  0444);
 MODULE_PARM_DESC(srpt_service_guid,
-		 "Using this value for ioc_guid, id_ext, and cm_listen_id"
-		 " instead of using the node_guid of the first HCA.");
+		 "Using this value for ioc_guid, id_ext, and cm_listen_id instead of using the node_guid of the first HCA.");
 
 static struct ib_client srpt_client;
 /* Protects both rdma_cm_port and rdma_cm_id. */
@@ -462,7 +458,7 @@ static void srpt_mgmt_method_get(struct srpt_port *sp, struct ib_mad *rq_mad,
 static void srpt_mad_send_handler(struct ib_mad_agent *mad_agent,
				  struct ib_mad_send_wc *mad_wc)
 {
-	rdma_destroy_ah(mad_wc->send_buf->ah);
+	rdma_destroy_ah(mad_wc->send_buf->ah, RDMA_DESTROY_AH_SLEEPABLE);
 	ib_free_send_mad(mad_wc->send_buf);
 }
 
@@ -529,7 +525,7 @@ static void srpt_mad_recv_handler(struct ib_mad_agent *mad_agent,
 	ib_free_send_mad(rsp);
 
 err_rsp:
-	rdma_destroy_ah(ah);
+	rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE);
err:
 	ib_free_recv_mad(mad_wc);
 }
@@ -652,31 +648,33 @@ static void srpt_unregister_mad_agent(struct srpt_device *sdev)
 * srpt_alloc_ioctx - allocate a SRPT I/O context structure
 * @sdev: SRPT HCA pointer.
 * @ioctx_size: I/O context size.
- * @dma_size: Size of I/O context DMA buffer.
+ * @buf_cache: I/O buffer cache.
 * @dir: DMA data direction.
 */
 static struct srpt_ioctx *srpt_alloc_ioctx(struct srpt_device *sdev,
-					   int ioctx_size, int dma_size,
+					   int ioctx_size,
+					   struct kmem_cache *buf_cache,
					   enum dma_data_direction dir)
 {
 	struct srpt_ioctx *ioctx;
 
-	ioctx = kmalloc(ioctx_size, GFP_KERNEL);
+	ioctx = kzalloc(ioctx_size, GFP_KERNEL);
 	if (!ioctx)
 		goto err;
 
-	ioctx->buf = kmalloc(dma_size, GFP_KERNEL);
+	ioctx->buf = kmem_cache_alloc(buf_cache, GFP_KERNEL);
 	if (!ioctx->buf)
 		goto err_free_ioctx;
 
-	ioctx->dma = ib_dma_map_single(sdev->device, ioctx->buf, dma_size, dir);
+	ioctx->dma = ib_dma_map_single(sdev->device, ioctx->buf,
+				       kmem_cache_size(buf_cache), dir);
 	if (ib_dma_mapping_error(sdev->device, ioctx->dma))
 		goto err_free_buf;
 
 	return ioctx;
 
err_free_buf:
-	kfree(ioctx->buf);
+	kmem_cache_free(buf_cache, ioctx->buf);
err_free_ioctx:
 	kfree(ioctx);
err:
@@ -687,17 +685,19 @@ err:
 * srpt_free_ioctx - free a SRPT I/O context structure
 * @sdev: SRPT HCA pointer.
 * @ioctx: I/O context pointer.
- * @dma_size: Size of I/O context DMA buffer.
+ * @buf_cache: I/O buffer cache.
 * @dir: DMA data direction.
 */
 static void srpt_free_ioctx(struct srpt_device *sdev, struct srpt_ioctx *ioctx,
-			    int dma_size, enum dma_data_direction dir)
+			    struct kmem_cache *buf_cache,
+			    enum dma_data_direction dir)
 {
 	if (!ioctx)
 		return;
 
-	ib_dma_unmap_single(sdev->device, ioctx->dma, dma_size, dir);
-	kfree(ioctx->buf);
+	ib_dma_unmap_single(sdev->device, ioctx->dma,
+			    kmem_cache_size(buf_cache), dir);
+	kmem_cache_free(buf_cache, ioctx->buf);
 	kfree(ioctx);
 }
 
@@ -706,33 +706,38 @@ static void srpt_free_ioctx(struct srpt_device *sdev, struct srpt_ioctx *ioctx,
 * @sdev: Device to allocate the I/O context ring for.
 * @ring_size: Number of elements in the I/O context ring.
 * @ioctx_size: I/O context size.
- * @dma_size: DMA buffer size.
+ * @buf_cache: I/O buffer cache.
+ * @alignment_offset: Offset in each ring buffer at which the SRP information
+ *		      unit starts.
 * @dir: DMA data direction.
 */
 static struct srpt_ioctx **srpt_alloc_ioctx_ring(struct srpt_device *sdev,
				int ring_size, int ioctx_size,
-				int dma_size, enum dma_data_direction dir)
+				struct kmem_cache *buf_cache,
+				int alignment_offset,
+				enum dma_data_direction dir)
 {
 	struct srpt_ioctx **ring;
 	int i;
 
-	WARN_ON(ioctx_size != sizeof(struct srpt_recv_ioctx)
-		&& ioctx_size != sizeof(struct srpt_send_ioctx));
+	WARN_ON(ioctx_size != sizeof(struct srpt_recv_ioctx) &&
+		ioctx_size != sizeof(struct srpt_send_ioctx));
 
 	ring = kvmalloc_array(ring_size, sizeof(ring[0]), GFP_KERNEL);
 	if (!ring)
 		goto out;
 	for (i = 0; i < ring_size; ++i) {
-		ring[i] = srpt_alloc_ioctx(sdev, ioctx_size, dma_size, dir);
+		ring[i] = srpt_alloc_ioctx(sdev, ioctx_size, buf_cache, dir);
 		if (!ring[i])
 			goto err;
 		ring[i]->index = i;
+		ring[i]->offset = alignment_offset;
 	}
 	goto out;
 
err:
 	while (--i >= 0)
-		srpt_free_ioctx(sdev, ring[i], dma_size, dir);
+		srpt_free_ioctx(sdev, ring[i], buf_cache, dir);
 	kvfree(ring);
 	ring = NULL;
out:
 	return ring;
 }
 
@@ -744,12 +749,13 @@ out:
 * @ioctx_ring: I/O context ring to be freed.
 * @sdev: SRPT HCA pointer.
 * @ring_size: Number of ring elements.
- * @dma_size: Size of I/O context DMA buffer.
+ * @buf_cache: I/O buffer cache.
 * @dir: DMA data direction.
 */
 static void srpt_free_ioctx_ring(struct srpt_ioctx **ioctx_ring,
				 struct srpt_device *sdev, int ring_size,
-				 int dma_size, enum dma_data_direction dir)
+				 struct kmem_cache *buf_cache,
+				 enum dma_data_direction dir)
 {
 	int i;
 
 	if (!ioctx_ring)
 		return;
 
 	for (i = 0; i < ring_size; ++i)
-		srpt_free_ioctx(sdev, ioctx_ring[i], dma_size, dir);
+		srpt_free_ioctx(sdev, ioctx_ring[i], buf_cache, dir);
 	kvfree(ioctx_ring);
 }
 
@@ -819,7 +825,7 @@ static int srpt_post_recv(struct srpt_device *sdev, struct srpt_rdma_ch *ch,
 	struct ib_recv_wr wr;
 
 	BUG_ON(!sdev);
-	list.addr = ioctx->ioctx.dma;
+	list.addr = ioctx->ioctx.dma + ioctx->ioctx.offset;
 	list.length = srp_max_req_size;
 	list.lkey = sdev->lkey;
 
@@ -985,23 +991,28 @@ static inline void *srpt_get_desc_buf(struct srp_cmd *srp_cmd)
 
 /**
 * srpt_get_desc_tbl - parse the data descriptors of a SRP_CMD request
- * @ioctx: Pointer to the I/O context associated with the request.
+ * @recv_ioctx: I/O context associated with the received command @srp_cmd.
+ * @ioctx: I/O context that will be used for responding to the initiator.
 * @srp_cmd: Pointer to the SRP_CMD request data.
 * @dir: Pointer to the variable to which the transfer direction will be
 *   written.
- * @sg: [out] scatterlist allocated for the parsed SRP_CMD.
+ * @sg: [out] scatterlist for the parsed SRP_CMD.
 * @sg_cnt: [out] length of @sg.
 * @data_len: Pointer to the variable to which the total data length of all
 *   descriptors in the SRP_CMD request will be written.
+ * @imm_data_offset: [in] Offset in SRP_CMD requests at which immediate data
+ *   starts.
 *
 * This function initializes ioctx->nrbuf and ioctx->r_bufs.
 *
 * Returns -EINVAL when the SRP_CMD request contains inconsistent descriptors;
 * -ENOMEM when memory allocation fails and zero upon success.
 */
-static int srpt_get_desc_tbl(struct srpt_send_ioctx *ioctx,
+static int srpt_get_desc_tbl(struct srpt_recv_ioctx *recv_ioctx,
+			     struct srpt_send_ioctx *ioctx,
			     struct srp_cmd *srp_cmd, enum dma_data_direction *dir,
-			     struct scatterlist **sg, unsigned *sg_cnt, u64 *data_len)
+			     struct scatterlist **sg, unsigned int *sg_cnt, u64 *data_len,
+			     u16 imm_data_offset)
 {
 	BUG_ON(!dir);
 	BUG_ON(!data_len);
@@ -1025,7 +1036,7 @@ static int srpt_get_desc_tbl(struct srpt_send_ioctx *ioctx,
 
 	if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_DIRECT) ||
 	    ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_DIRECT)) {
-		struct srp_direct_buf *db = srpt_get_desc_buf(srp_cmd);
+		struct srp_direct_buf *db = srpt_get_desc_buf(srp_cmd);
 
 		*data_len = be32_to_cpu(db->len);
 		return srpt_alloc_rw_ctxs(ioctx, db, 1, sg, sg_cnt);
@@ -1037,8 +1048,7 @@ static int srpt_get_desc_tbl(struct srpt_send_ioctx *ioctx,
 
 		if (nbufs >
 		    (srp_cmd->data_out_desc_cnt + srp_cmd->data_in_desc_cnt)) {
-			pr_err("received unsupported SRP_CMD request"
-			       " type (%u out + %u in != %u / %zu)\n",
+			pr_err("received unsupported SRP_CMD request type (%u out + %u in != %u / %zu)\n",
			       srp_cmd->data_out_desc_cnt,
			       srp_cmd->data_in_desc_cnt,
			       be32_to_cpu(idb->table_desc.len),
@@ -1049,6 +1059,40 @@ static int srpt_get_desc_tbl(struct srpt_send_ioctx *ioctx,
 
 		*data_len = be32_to_cpu(idb->len);
 		return srpt_alloc_rw_ctxs(ioctx, idb->desc_list, nbufs, sg, sg_cnt);
+	} else if ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_IMM) {
+		struct srp_imm_buf *imm_buf = srpt_get_desc_buf(srp_cmd);
+		void *data = (void *)srp_cmd + imm_data_offset;
+		uint32_t len = be32_to_cpu(imm_buf->len);
+		uint32_t req_size = imm_data_offset + len;
+
+		if (req_size > srp_max_req_size) {
+			pr_err("Immediate data (length %d + %d) exceeds request size %d\n",
+			       imm_data_offset, len, srp_max_req_size);
+			return -EINVAL;
+		}
+		if (recv_ioctx->byte_len < req_size) {
+			pr_err("Received too few data - %d < %d\n",
+			       recv_ioctx->byte_len, req_size);
+			return -EIO;
+		}
+		/*
+		 * The immediate data buffer descriptor must occur before the
+		 * immediate data itself.
+		 */
+		if ((void *)(imm_buf + 1) > (void *)data) {
+			pr_err("Received invalid write request\n");
+			return -EINVAL;
+		}
+		*data_len = len;
+		ioctx->recv_ioctx = recv_ioctx;
+		if ((uintptr_t)data & 511) {
+			pr_warn_once("Internal error - the receive buffers are not aligned properly.\n");
+			return -EINVAL;
+		}
+		sg_init_one(&ioctx->imm_sg, data, len);
+		*sg = &ioctx->imm_sg;
+		*sg_cnt = 1;
+		return 0;
 	} else {
 		*data_len = 0;
 		return 0;
@@ -1191,6 +1235,7 @@ static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
 
 	BUG_ON(ioctx->ch != ch);
 	ioctx->state = SRPT_STATE_NEW;
+	WARN_ON_ONCE(ioctx->recv_ioctx);
 	ioctx->n_rdma = 0;
 	ioctx->n_rw_ctx = 0;
 	ioctx->queue_status_only = false;
@@ -1352,8 +1397,8 @@ static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch,
 		BUILD_BUG_ON(MIN_MAX_RSP_SIZE <= sizeof(*srp_rsp));
 		max_sense_len = ch->max_ti_iu_len - sizeof(*srp_rsp);
 		if (sense_data_len > max_sense_len) {
-			pr_warn("truncated sense data from %d to %d"
-				" bytes\n", sense_data_len, max_sense_len);
+			pr_warn("truncated sense data from %d to %d bytes\n",
				sense_data_len, max_sense_len);
 			sense_data_len = max_sense_len;
 		}
 
@@ -1433,7 +1478,7 @@ static void srpt_handle_cmd(struct srpt_rdma_ch *ch,
 
 	BUG_ON(!send_ioctx);
 
-	srp_cmd = recv_ioctx->ioctx.buf;
+	srp_cmd = recv_ioctx->ioctx.buf + recv_ioctx->ioctx.offset;
 	cmd = &send_ioctx->cmd;
 	cmd->tag = srp_cmd->tag;
 
@@ -1453,8 +1498,8 @@ static void srpt_handle_cmd(struct srpt_rdma_ch *ch,
 		break;
 	}
 
-	rc = srpt_get_desc_tbl(send_ioctx, srp_cmd, &dir, &sg, &sg_cnt,
-			       &data_len);
+	rc = srpt_get_desc_tbl(recv_ioctx, send_ioctx, srp_cmd, &dir,
+			       &sg, &sg_cnt, &data_len, ch->imm_data_offset);
 	if (rc) {
 		if (rc != -EAGAIN) {
 			pr_err("0x%llx: parsing SRP descriptor table failed.\n",
@@ -1521,7 +1566,7 @@ static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
 
 	BUG_ON(!send_ioctx);
 
-	srp_tsk = recv_ioctx->ioctx.buf;
+	srp_tsk = recv_ioctx->ioctx.buf + recv_ioctx->ioctx.offset;
 	cmd = &send_ioctx->cmd;
 
 	pr_debug("recv tsk_mgmt fn %d for task_tag %lld and cmd tag %lld ch %p sess %p\n",
@@ -1564,10 +1609,11 @@ srpt_handle_new_iu(struct srpt_rdma_ch *ch, struct srpt_recv_ioctx *recv_ioctx)
 		goto push;
 
 	ib_dma_sync_single_for_cpu(ch->sport->sdev->device,
-				   recv_ioctx->ioctx.dma, srp_max_req_size,
+				   recv_ioctx->ioctx.dma,
+				   recv_ioctx->ioctx.offset + srp_max_req_size,
				   DMA_FROM_DEVICE);
 
-	srp_cmd = recv_ioctx->ioctx.buf;
+	srp_cmd = recv_ioctx->ioctx.buf + recv_ioctx->ioctx.offset;
 	opcode = srp_cmd->opcode;
 	if (opcode == SRP_CMD || opcode == SRP_TSK_MGMT) {
 		send_ioctx = srpt_get_send_ioctx(ch);
@@ -1604,7 +1650,8 @@ srpt_handle_new_iu(struct srpt_rdma_ch *ch, struct srpt_recv_ioctx *recv_ioctx)
 		break;
 	}
 
-	srpt_post_recv(ch->sport->sdev, ch, recv_ioctx);
+	if (!send_ioctx || !send_ioctx->recv_ioctx)
+		srpt_post_recv(ch->sport->sdev, ch, recv_ioctx);
 	res = true;
 
 out:
@@ -1630,6 +1677,7 @@ static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc)
 		req_lim = atomic_dec_return(&ch->req_lim);
 		if (unlikely(req_lim < 0))
 			pr_err("req_lim = %d < 0\n", req_lim);
+		ioctx->byte_len = wc->byte_len;
 		srpt_handle_new_iu(ch, ioctx);
 	} else {
 		pr_info_ratelimited("receiving failed for ioctx %p with status %d\n",
@@ -1693,14 +1741,14 @@ static void srpt_send_done(struct ib_cq *cq, struct ib_wc *wc)
 	atomic_add(1 + ioctx->n_rdma, &ch->sq_wr_avail);
 
 	if (wc->status != IB_WC_SUCCESS)
-		pr_info("sending response for ioctx 0x%p failed"
-			" with status %d\n", ioctx, wc->status);
+		pr_info("sending response for ioctx 0x%p failed with status %d\n",
			ioctx, wc->status);
 
 	if (state != SRPT_STATE_DONE) {
 		transport_generic_free_cmd(&ioctx->cmd, 0);
 	} else {
-		pr_err("IB completion has been received too late for"
-		       " wr_id = %u.\n", ioctx->ioctx.index);
+		pr_err("IB completion has been received too late for wr_id = %u.\n",
		       ioctx->ioctx.index);
 	}
 
 	srpt_process_wait_list(ch);
@@ -1754,6 +1802,8 @@ retry:
 	qp_init->cap.max_rdma_ctxs = sq_size / 2;
 	qp_init->cap.max_send_sge = min(attrs->max_send_sge,
					SRPT_MAX_SG_PER_WQE);
+	qp_init->cap.max_recv_sge = min(attrs->max_recv_sge,
+					SRPT_MAX_SG_PER_WQE);
 	qp_init->port_num = ch->sport->port;
 	if (sdev->use_srq) {
 		qp_init->srq = sdev->srq;
@@ -2010,6 +2060,14 @@ static void srpt_free_ch(struct kref *kref)
 	kfree_rcu(ch, rcu);
 }
 
+/*
+ * Shut down the SCSI target session, tell the connection manager to
+ * disconnect the associated RDMA channel, transition the QP to the error
+ * state and remove the channel from the channel list. This function is
+ * typically called from inside srpt_zerolength_write_done(). Concurrent
+ * srpt_zerolength_write() calls from inside srpt_close_ch() are possible
+ * as long as the channel is on sport->nexus_list.
+ */
 static void srpt_release_channel_work(struct work_struct *w)
 {
 	struct srpt_rdma_ch *ch;
@@ -2037,20 +2095,24 @@ static void srpt_release_channel_work(struct work_struct *w)
 	else
 		ib_destroy_cm_id(ch->ib_cm.cm_id);
 
+	sport = ch->sport;
+	mutex_lock(&sport->mutex);
+	list_del_rcu(&ch->list);
+	mutex_unlock(&sport->mutex);
+
 	srpt_destroy_ch_ib(ch);
 
 	srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
			     ch->sport->sdev, ch->rq_size,
-			     ch->max_rsp_size, DMA_TO_DEVICE);
+			     ch->rsp_buf_cache, DMA_TO_DEVICE);
+
+	kmem_cache_destroy(ch->rsp_buf_cache);
 
 	srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_recv_ring,
			     sdev, ch->rq_size,
-			     srp_max_req_size, DMA_FROM_DEVICE);
+			     ch->req_buf_cache, DMA_FROM_DEVICE);
 
-	sport = ch->sport;
-	mutex_lock(&sport->mutex);
-	list_del_rcu(&ch->list);
-	mutex_unlock(&sport->mutex);
+	kmem_cache_destroy(ch->req_buf_cache);
 
 	wake_up(&sport->ch_releaseQ);
 
@@ -2174,14 +2236,19 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
 	INIT_LIST_HEAD(&ch->cmd_wait_list);
 	ch->max_rsp_size = ch->sport->port_attrib.srp_max_rsp_size;
 
+	ch->rsp_buf_cache = kmem_cache_create("srpt-rsp-buf", ch->max_rsp_size,
+					      512, 0, NULL);
+	if (!ch->rsp_buf_cache)
+		goto free_ch;
+
 	ch->ioctx_ring = (struct srpt_send_ioctx **)
		srpt_alloc_ioctx_ring(ch->sport->sdev, ch->rq_size,
				      sizeof(*ch->ioctx_ring[0]),
-				      ch->max_rsp_size, DMA_TO_DEVICE);
+				      ch->rsp_buf_cache, 0, DMA_TO_DEVICE);
 	if (!ch->ioctx_ring) {
 		pr_err("rejected SRP_LOGIN_REQ because creating a new QP SQ ring failed.\n");
 		rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
-		goto free_ch;
+		goto free_rsp_cache;
 	}
 
 	INIT_LIST_HEAD(&ch->free_list);
@@ -2190,16 +2257,39 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
 		list_add_tail(&ch->ioctx_ring[i]->free_list, &ch->free_list);
 	}
 	if (!sdev->use_srq) {
+		u16 imm_data_offset = req->req_flags & SRP_IMMED_REQUESTED ?
+			be16_to_cpu(req->imm_data_offset) : 0;
+		u16 alignment_offset;
+		u32 req_sz;
+
+		if (req->req_flags & SRP_IMMED_REQUESTED)
+			pr_debug("imm_data_offset = %d\n",
+				 be16_to_cpu(req->imm_data_offset));
+		if (imm_data_offset >= sizeof(struct srp_cmd)) {
+			ch->imm_data_offset = imm_data_offset;
+			rsp->rsp_flags |= SRP_LOGIN_RSP_IMMED_SUPP;
+		} else {
+			ch->imm_data_offset = 0;
+		}
+		alignment_offset = round_up(imm_data_offset, 512) -
+			imm_data_offset;
+		req_sz = alignment_offset + imm_data_offset + srp_max_req_size;
+		ch->req_buf_cache = kmem_cache_create("srpt-req-buf", req_sz,
+						      512, 0, NULL);
+		if (!ch->req_buf_cache)
+			goto free_rsp_ring;
+
 		ch->ioctx_recv_ring = (struct srpt_recv_ioctx **)
			srpt_alloc_ioctx_ring(ch->sport->sdev, ch->rq_size,
					      sizeof(*ch->ioctx_recv_ring[0]),
-					      srp_max_req_size,
+					      ch->req_buf_cache,
+					      alignment_offset,
					      DMA_FROM_DEVICE);
 		if (!ch->ioctx_recv_ring) {
 			pr_err("rejected SRP_LOGIN_REQ because creating a new QP RQ ring failed.\n");
 			rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
-			goto free_ring;
+			goto free_recv_cache;
 		}
 		for (i = 0; i < ch->rq_size; i++)
			INIT_LIST_HEAD(&ch->ioctx_recv_ring[i]->wait_list);
@@ -2249,17 +2339,15 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
 	if ((req->req_flags & SRP_MTCH_ACTION) == SRP_MULTICHAN_SINGLE) {
 		struct srpt_rdma_ch *ch2;
 
-		rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_NO_CHAN;
-
 		list_for_each_entry(ch2, &nexus->ch_list, list) {
 			if (srpt_disconnect_ch(ch2) < 0)
				continue;
 			pr_info("Relogin - closed existing channel %s\n",
				ch2->sess_name);
-			rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_TERMINATED;
+			rsp->rsp_flags |= SRP_LOGIN_RSP_MULTICHAN_TERMINATED;
 		}
 	} else {
-		rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_MAINTAINED;
+		rsp->rsp_flags |= SRP_LOGIN_RSP_MULTICHAN_MAINTAINED;
 	}
 
 	list_add_tail_rcu(&ch->list, &nexus->ch_list);
@@ -2289,7 +2377,7 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
 	/* create srp_login_response */
 	rsp->opcode = SRP_LOGIN_RSP;
 	rsp->tag = req->tag;
-	rsp->max_it_iu_len = req->req_it_iu_len;
+	rsp->max_it_iu_len = cpu_to_be32(srp_max_req_size);
 	rsp->max_ti_iu_len = req->req_it_iu_len;
 	ch->max_ti_iu_len = it_iu_len;
 	rsp->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
@@ -2353,12 +2441,18 @@ destroy_ib:
free_recv_ring:
 	srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_recv_ring,
			     ch->sport->sdev, ch->rq_size,
-			     srp_max_req_size, DMA_FROM_DEVICE);
+			     ch->req_buf_cache, DMA_FROM_DEVICE);
+
+free_recv_cache:
+	kmem_cache_destroy(ch->req_buf_cache);
 
-free_ring:
+free_rsp_ring:
 	srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
			     ch->sport->sdev, ch->rq_size,
-			     ch->max_rsp_size, DMA_TO_DEVICE);
+			     ch->rsp_buf_cache, DMA_TO_DEVICE);
+
+free_rsp_cache:
+	kmem_cache_destroy(ch->rsp_buf_cache);
 
free_ch:
 	if (rdma_cm_id)
@@ -2439,6 +2533,7 @@ static int srpt_rdma_cm_req_recv(struct rdma_cm_id *cm_id,
 	req.req_flags = req_rdma->req_flags;
 	memcpy(req.initiator_port_id, req_rdma->initiator_port_id, 16);
 	memcpy(req.target_port_id, req_rdma->target_port_id, 16);
+	req.imm_data_offset = req_rdma->imm_data_offset;
 
 	snprintf(src_addr, sizeof(src_addr), "%pIS",
		 &cm_id->route.addr.src_addr);
@@ -2629,6 +2724,12 @@ static int srpt_write_pending(struct se_cmd *se_cmd)
 	enum srpt_command_state new_state;
 	int ret, i;
 
+	if (ioctx->recv_ioctx) {
+		srpt_set_cmd_state(ioctx, SRPT_STATE_DATA_IN);
+		target_execute_cmd(&ioctx->cmd);
+		return 0;
+	}
+
 	new_state = srpt_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA);
 	WARN_ON(new_state == SRPT_STATE_DONE);
 
@@ -2908,7 +3009,9 @@ static void srpt_free_srq(struct srpt_device *sdev)
 	ib_destroy_srq(sdev->srq);
 	srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev,
-			     sdev->srq_size, srp_max_req_size, DMA_FROM_DEVICE);
+			     sdev->srq_size, sdev->req_buf_cache,
+			     DMA_FROM_DEVICE);
+	kmem_cache_destroy(sdev->req_buf_cache);
 	sdev->srq = NULL;
 }
 
@@ -2935,14 +3038,17 @@ static int srpt_alloc_srq(struct srpt_device *sdev)
 	pr_debug("create SRQ #wr= %d max_allow=%d dev= %s\n", sdev->srq_size,
		 sdev->device->attrs.max_srq_wr, dev_name(&device->dev));
 
+	sdev->req_buf_cache = kmem_cache_create("srpt-srq-req-buf",
+						srp_max_req_size, 0, 0, NULL);
+	if (!sdev->req_buf_cache)
+		goto free_srq;
+
 	sdev->ioctx_ring = (struct srpt_recv_ioctx **)
		srpt_alloc_ioctx_ring(sdev, sdev->srq_size,
				      sizeof(*sdev->ioctx_ring[0]),
-				      srp_max_req_size, DMA_FROM_DEVICE);
-	if (!sdev->ioctx_ring) {
-		ib_destroy_srq(srq);
-		return -ENOMEM;
-	}
+				      sdev->req_buf_cache, 0, DMA_FROM_DEVICE);
+	if (!sdev->ioctx_ring)
+		goto free_cache;
 
 	sdev->use_srq = true;
 	sdev->srq = srq;
@@ -2953,6 +3059,13 @@ static int srpt_alloc_srq(struct srpt_device *sdev)
 	}
 
 	return 0;
+
+free_cache:
+	kmem_cache_destroy(sdev->req_buf_cache);
+
+free_srq:
+	ib_destroy_srq(srq);
+	return -ENOMEM;
 }
 
 static int srpt_use_srq(struct srpt_device *sdev, bool use_srq)
@@ -3015,9 +3128,8 @@ static void srpt_add_one(struct ib_device *device)
 	}
 
 	/* print out target login information */
-	pr_debug("Target login info: id_ext=%016llx,ioc_guid=%016llx,"
-		 "pkey=ffff,service_id=%016llx\n", srpt_service_guid,
-		 srpt_service_guid, srpt_service_guid);
+	pr_debug("Target login info: id_ext=%016llx,ioc_guid=%016llx,pkey=ffff,service_id=%016llx\n",
		 srpt_service_guid, srpt_service_guid, srpt_service_guid);
 
 	/*
	 * We do not have a consistent service_id (ie. also id_ext of target_id)
@@ -3147,11 +3259,6 @@ static int srpt_check_false(struct se_portal_group *se_tpg)
 	return 0;
 }
 
-static char *srpt_get_fabric_name(void)
-{
-	return "srpt";
-}
-
 static struct srpt_port *srpt_tpg_to_sport(struct se_portal_group *tpg)
 {
 	return tpg->se_tpg_wwn->priv;
@@ -3182,11 +3289,18 @@ static void srpt_release_cmd(struct se_cmd *se_cmd)
 	struct srpt_send_ioctx *ioctx = container_of(se_cmd,
				struct srpt_send_ioctx, cmd);
 	struct srpt_rdma_ch *ch = ioctx->ch;
+	struct srpt_recv_ioctx *recv_ioctx = ioctx->recv_ioctx;
 	unsigned long flags;
 
 	WARN_ON_ONCE(ioctx->state != SRPT_STATE_DONE &&
		     !(ioctx->cmd.transport_state & CMD_T_ABORTED));
 
+	if (recv_ioctx) {
+		WARN_ON_ONCE(!list_empty(&recv_ioctx->wait_list));
+		ioctx->recv_ioctx = NULL;
+		srpt_post_recv(ch->sport->sdev, ch, recv_ioctx);
+	}
+
 	if (ioctx->n_rw_ctx) {
 		srpt_free_rw_ctxs(ch, ioctx);
 		ioctx->n_rw_ctx = 0;
@@ -3572,7 +3686,7 @@ static ssize_t srpt_tpg_enable_show(struct config_item *item, char *page)
 	struct se_portal_group *se_tpg = to_tpg(item);
 	struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
 
-	return snprintf(page, PAGE_SIZE, "%d\n", (sport->enabled) ? 1 : 0);
+	return snprintf(page, PAGE_SIZE, "%d\n", sport->enabled);
 }
 
 static ssize_t srpt_tpg_enable_store(struct config_item *item,
@@ -3581,7 +3695,7 @@ static ssize_t srpt_tpg_enable_store(struct config_item *item,
 	struct se_portal_group *se_tpg = to_tpg(item);
 	struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
 	unsigned long tmp;
-        int ret;
+	int ret;
 
 	ret = kstrtoul(page, 0, &tmp);
 	if (ret < 0) {
@@ -3617,7 +3731,7 @@ static struct se_portal_group *srpt_make_tpg(struct se_wwn *wwn,
					     const char *name)
 {
 	struct srpt_port *sport = wwn->priv;
-	static struct se_portal_group *tpg;
+	struct se_portal_group *tpg;
 	int res;
 
 	WARN_ON_ONCE(wwn != &sport->port_guid_wwn &&
@@ -3666,7 +3780,7 @@ static void srpt_drop_tport(struct se_wwn *wwn)
 
 static ssize_t srpt_wwn_version_show(struct config_item *item, char *buf)
 {
-	return scnprintf(buf, PAGE_SIZE, "%s\n", DRV_VERSION);
+	return scnprintf(buf, PAGE_SIZE, "\n");
 }
 
 CONFIGFS_ATTR_RO(srpt_wwn_, version);
@@ -3678,8 +3792,7 @@ static struct configfs_attribute *srpt_wwn_attrs[] = {
 
 static const struct target_core_fabric_ops srpt_template = {
 	.module				= THIS_MODULE,
-	.name				= "srpt",
-	.get_fabric_name		= srpt_get_fabric_name,
+	.fabric_name			= "srpt",
 	.tpg_get_wwn			= srpt_get_fabric_wwn,
 	.tpg_get_tag			= srpt_get_tag,
 	.tpg_check_demo_mode		= srpt_check_false,
@@ -3730,16 +3843,14 @@ static int __init srpt_init_module(void)
 
 	ret = -EINVAL;
 	if (srp_max_req_size < MIN_MAX_REQ_SIZE) {
-		pr_err("invalid value %d for kernel module parameter"
-		       " srp_max_req_size -- must be at least %d.\n",
+		pr_err("invalid value %d for kernel module parameter srp_max_req_size -- must be at least %d.\n",
		       srp_max_req_size, MIN_MAX_REQ_SIZE);
 		goto out;
 	}
 
 	if (srpt_srq_size < MIN_SRPT_SRQ_SIZE ||
	    srpt_srq_size > MAX_SRPT_SRQ_SIZE) {
-		pr_err("invalid value %d for kernel module parameter"
-		       " srpt_srq_size -- must be in the range [%d..%d].\n",
+		pr_err("invalid value %d for kernel module parameter srpt_srq_size -- must be in the range [%d..%d].\n",
		       srpt_srq_size, MIN_SRPT_SRQ_SIZE, MAX_SRPT_SRQ_SIZE);
 		goto out;
 	}
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
index 444dfd7281b5..39b3e50baf3d 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.h
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.h
@@ -104,10 +104,6 @@ enum {
 	SRP_CMD_ORDERED_Q		= 0x2,
 	SRP_CMD_ACA			= 0x4,
 
-	SRP_LOGIN_RSP_MULTICHAN_NO_CHAN = 0x0,
-	SRP_LOGIN_RSP_MULTICHAN_TERMINATED = 0x1,
-	SRP_LOGIN_RSP_MULTICHAN_MAINTAINED = 0x2,
-
 	SRPT_DEF_SG_TABLESIZE		= 128,
 	/*
	 * An experimentally determined value that avoids that QP creation
@@ -124,11 +120,18 @@ enum {
 	MAX_SRPT_RDMA_SIZE		= 1U << 24,
 	MAX_SRPT_RSP_SIZE		= 1024,
 
+	SRP_MAX_ADD_CDB_LEN		= 16,
+	SRP_MAX_IMM_DATA_OFFSET		= 80,
+	SRP_MAX_IMM_DATA		= 8 * 1024,
+
 	MIN_MAX_REQ_SIZE		= 996,
-	DEFAULT_MAX_REQ_SIZE
-		= sizeof(struct srp_cmd)/*48*/
-		+ sizeof(struct srp_indirect_buf)/*20*/
-		+ 128 * sizeof(struct srp_direct_buf)/*16*/,
+	DEFAULT_MAX_REQ_SIZE_1 = sizeof(struct srp_cmd)/*48*/ +
+				 SRP_MAX_ADD_CDB_LEN +
+				 sizeof(struct srp_indirect_buf)/*20*/ +
+				 128 * sizeof(struct srp_direct_buf)/*16*/,
+	DEFAULT_MAX_REQ_SIZE_2 = SRP_MAX_IMM_DATA_OFFSET +
+				 sizeof(struct srp_imm_buf) + SRP_MAX_IMM_DATA,
+	DEFAULT_MAX_REQ_SIZE = DEFAULT_MAX_REQ_SIZE_1 > DEFAULT_MAX_REQ_SIZE_2 ?
+			       DEFAULT_MAX_REQ_SIZE_1 : DEFAULT_MAX_REQ_SIZE_2,
 
 	MIN_MAX_RSP_SIZE		= sizeof(struct srp_rsp)/*36*/ + 4,
 	DEFAULT_MAX_RSP_SIZE		= 256, /* leaves 220 bytes for sense data */
@@ -165,12 +168,14 @@ enum srpt_command_state {
 * @cqe: Completion queue element.
 * @buf: Pointer to the buffer.
 * @dma: DMA address of the buffer.
+ * @offset: Offset of the first byte in @buf and @dma that is actually used.
 * @index: Index of the I/O context in its ioctx_ring array.
 */
 struct srpt_ioctx {
 	struct ib_cqe		cqe;
 	void			*buf;
 	dma_addr_t		dma;
+	uint32_t		offset;
 	uint32_t		index;
 };
 
@@ -178,12 +183,14 @@ struct srpt_ioctx {
 * struct srpt_recv_ioctx - SRPT receive I/O context
 * @ioctx: See above.
 * @wait_list: Node for insertion in srpt_rdma_ch.cmd_wait_list.
+ * @byte_len: Number of bytes in @ioctx.buf.
 */
 struct srpt_recv_ioctx {
 	struct srpt_ioctx	ioctx;
 	struct list_head	wait_list;
+	int			byte_len;
 };
- 
+
 struct srpt_rw_ctx {
 	struct rdma_rw_ctx	rw;
 	struct scatterlist	*sg;
@@ -194,8 +201,11 @@ struct srpt_rw_ctx {
 * struct srpt_send_ioctx - SRPT send I/O context
 * @ioctx: See above.
 * @ch: Channel pointer.
+ * @recv_ioctx: Receive I/O context associated with this send I/O context.
+ *   Only used for processing immediate data.
 * @s_rw_ctx: @rw_ctxs points here if only a single rw_ctx is needed.
 * @rw_ctxs: RDMA read/write contexts.
+ * @imm_sg: Scatterlist for immediate data.
 * @rdma_cqe: RDMA completion queue element.
 * @free_list: Node in srpt_rdma_ch.free_list.
 * @state: I/O context state.
@@ -209,10 +219,13 @@ struct srpt_rw_ctx {
 struct srpt_send_ioctx {
 	struct srpt_ioctx	ioctx;
 	struct srpt_rdma_ch	*ch;
+	struct srpt_recv_ioctx	*recv_ioctx;
+
 	struct srpt_rw_ctx	s_rw_ctx;
 	struct srpt_rw_ctx	*rw_ctxs;
 
+	struct scatterlist	imm_sg;
+
 	struct ib_cqe		rdma_cqe;
 	struct list_head	free_list;
 	enum srpt_command_state	state;
@@ -245,7 +258,10 @@ enum rdma_ch_state {
 * struct srpt_rdma_ch - RDMA channel
 * @nexus: I_T nexus this channel is associated with.
 * @qp: IB queue pair used for communicating over this channel.
- * @cm_id: IB CM ID associated with the channel.
+ * @ib_cm: See below.
+ * @ib_cm.cm_id: IB CM ID associated with the channel.
+ * @rdma_cm: See below.
+ * @rdma_cm.cm_id: RDMA CM ID associated with the channel.
 * @cq: IB completion queue for this channel.
 * @zw_cqe: Zero-length write CQE.
 * @rcu: RCU head.
@@ -259,12 +275,15 @@ enum rdma_ch_state {
 * @req_lim: request limit: maximum number of requests that may be sent
 *   by the initiator without having received a response.
 * @req_lim_delta: Number of credits not yet sent back to the initiator.
+ * @imm_data_offset: Offset from start of SRP_CMD for immediate data.
 * @spinlock: Protects free_list and state.
 * @free_list: Head of list with free send I/O contexts.
 * @state: channel state. See also enum rdma_ch_state.
 * @using_rdma_cm: Whether the RDMA/CM or IB/CM is used for this channel.
 * @processing_wait_list: Whether or not cmd_wait_list is being processed.
+ * @rsp_buf_cache: kmem_cache for @ioctx_ring.
 * @ioctx_ring: Send ring.
+ * @req_buf_cache: kmem_cache for @ioctx_recv_ring.
 * @ioctx_recv_ring: Receive I/O context ring.
 * @list: Node in srpt_nexus.ch_list.
 * @cmd_wait_list: List of SCSI commands that arrived before the RTU event. This
@@ -297,10 +316,13 @@ struct srpt_rdma_ch {
 	int			max_ti_iu_len;
 	atomic_t		req_lim;
 	atomic_t		req_lim_delta;
+	u16			imm_data_offset;
 	spinlock_t		spinlock;
 	struct list_head	free_list;
 	enum rdma_ch_state	state;
+	struct kmem_cache	*rsp_buf_cache;
 	struct srpt_send_ioctx	**ioctx_ring;
+	struct kmem_cache	*req_buf_cache;
 	struct srpt_recv_ioctx	**ioctx_recv_ring;
 	struct list_head	list;
 	struct list_head	cmd_wait_list;
@@ -395,6 +417,7 @@ struct srpt_port {
 * @srq_size: SRQ size.
 * @sdev_mutex: Serializes use_srq changes.
 * @use_srq: Whether or not to use SRQ.
+ * @req_buf_cache: kmem_cache for @ioctx_ring buffers.
 * @ioctx_ring: Per-HCA SRQ.
 * @event_handler: Per-HCA asynchronous IB event handler.
 * @list: Node in srpt_dev_list.
@@ -409,6 +432,7 @@ struct srpt_device {
 	int			srq_size;
 	struct mutex		sdev_mutex;
 	bool			use_srq;
+	struct kmem_cache	*req_buf_cache;
 	struct srpt_recv_ioctx	**ioctx_ring;
 	struct ib_event_handler	event_handler;
 	struct list_head	list;
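
Note on the initiator-side IU sizing: the arithmetic in srp_max_it_iu_len() is easier to check with the struct sizes that the new BUILD_BUG_ON() calls assert (srp_cmd = 48, srp_imm_buf = 4 bytes; the 20-byte srp_indirect_buf and 16-byte srp_direct_buf sizes come from the comments in the ib_srpt.h hunk). The following hedged, userspace-only C sketch hard-codes those sizes; the helper name, main() and the example cmd_sg_cnt of 12 are illustrative assumptions, not kernel code.

/*
 * Minimal sketch of srp_max_it_iu_len() with struct sizes hard-coded to the
 * values asserted by the BUILD_BUG_ON()s in srp_init_module() above.
 */
#include <stdint.h>
#include <stdio.h>

enum {
	SRP_CMD_SIZE          = 48,	/* sizeof(struct srp_cmd) */
	SRP_INDIRECT_BUF_SIZE = 20,	/* sizeof(struct srp_indirect_buf) */
	SRP_DIRECT_BUF_SIZE   = 16,	/* sizeof(struct srp_direct_buf) */
	SRP_IMM_BUF_SIZE      = 4,	/* sizeof(struct srp_imm_buf) */
	SRP_MAX_ADD_CDB_LEN   = 16,
	/* Chosen so that a 32-byte CDB still fits before the immediate data. */
	SRP_IMM_DATA_OFFSET   = SRP_CMD_SIZE + SRP_MAX_ADD_CDB_LEN +
				SRP_IMM_BUF_SIZE,	/* = 68 */
	SRP_MAX_IMM_DATA      = 8 * 1024,	/* max_imm_data default */
};

/* Mirrors srp_max_it_iu_len() from the ib_srp.c hunk. */
static uint32_t max_it_iu_len(int cmd_sg_cnt, int use_imm_data)
{
	uint32_t len = SRP_CMD_SIZE + SRP_MAX_ADD_CDB_LEN +
		SRP_INDIRECT_BUF_SIZE + cmd_sg_cnt * SRP_DIRECT_BUF_SIZE;

	if (use_imm_data && len < SRP_IMM_DATA_OFFSET + SRP_MAX_IMM_DATA)
		len = SRP_IMM_DATA_OFFSET + SRP_MAX_IMM_DATA;
	return len;
}

int main(void)
{
	/* Example only: with cmd_sg_cnt = 12, immediate data dominates. */
	printf("%u\n", (unsigned)max_it_iu_len(12, 0)); /* 48+16+20+12*16 = 276 */
	printf("%u\n", (unsigned)max_it_iu_len(12, 1)); /* 68+8192 = 8260 */
	return 0;
}

With immediate data enabled the 8 KiB default dominates the IU size, which is why srp_alloc_iu_bufs() now sizes the TX ring with the per-channel ch->max_it_iu_len instead of the removed target->max_iu_len.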
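On the target side, the reason srpt_cm_req_recv() computes alignment_offset = round_up(imm_data_offset, 512) - imm_data_offset and hands out 512-byte-aligned kmem_cache buffers is that the IU is received at ioctx.offset inside the buffer, so the immediate data (imm_data_offset bytes into the IU) lands on a 512-byte boundary, the invariant srpt_get_desc_tbl() checks with "(uintptr_t)data & 511". A hedged userspace sketch of that layout follows; the constants and the ROUND_UP macro (a generic stand-in for the kernel's round_up()) are example assumptions.

/* Sketch of the receive-buffer layout negotiated in srpt_cm_req_recv(). */
#include <assert.h>
#include <stdint.h>

#define ROUND_UP(x, a) ((((x) + (a) - 1) / (a)) * (a))

int main(void)
{
	uint16_t imm_data_offset = 68;	/* SRP_IMM_DATA_OFFSET sent by the initiator */
	uint16_t alignment_offset = ROUND_UP(imm_data_offset, 512) -
		imm_data_offset;	/* = 444 */

	/* Suppose the buffer cache returns a 512-byte-aligned buffer: */
	uintptr_t buf = 0x10000;		/* example address, buf % 512 == 0 */
	uintptr_t iu = buf + alignment_offset;	/* where the IU is received */
	uintptr_t data = iu + imm_data_offset;	/* start of immediate data */

	assert(data % 512 == 0);	/* the invariant the driver relies on */
	return 0;
}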