Diffstat (limited to 'drivers/infiniband/hw/hfi1')
21 files changed, 550 insertions, 352 deletions
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c index 9b1fb84a3d45..e0b1238d31df 100644 --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c @@ -1685,6 +1685,14 @@ static u64 access_sw_pio_drain(const struct cntr_entry *entry, return dd->verbs_dev.n_piodrain; } +static u64 access_sw_ctx0_seq_drop(const struct cntr_entry *entry, + void *context, int vl, int mode, u64 data) +{ + struct hfi1_devdata *dd = context; + + return dd->ctx0_seq_drop; +} + static u64 access_sw_vtx_wait(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { @@ -4106,6 +4114,7 @@ def_access_ibp_counter(rc_crwaits); static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = { [C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH), [C_RX_LEN_ERR] = RXE32_DEV_CNTR_ELEM(RxLenErr, RCV_LENGTH_ERR_CNT, CNTR_SYNTH), +[C_RX_SHORT_ERR] = RXE32_DEV_CNTR_ELEM(RxShrErr, RCV_SHORT_ERR_CNT, CNTR_SYNTH), [C_RX_ICRC_ERR] = RXE32_DEV_CNTR_ELEM(RxICrcErr, RCV_ICRC_ERR_CNT, CNTR_SYNTH), [C_RX_EBP] = RXE32_DEV_CNTR_ELEM(RxEbpCnt, RCV_EBP_CNT, CNTR_SYNTH), [C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT, @@ -4249,6 +4258,8 @@ static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = { access_sw_cpu_intr), [C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL, access_sw_cpu_rcv_limit), +[C_SW_CTX0_SEQ_DROP] = CNTR_ELEM("SeqDrop0", 0, 0, CNTR_NORMAL, + access_sw_ctx0_seq_drop), [C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL, access_sw_vtx_wait), [C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL, @@ -6862,7 +6873,7 @@ static void rxe_kernel_unfreeze(struct hfi1_devdata *dd) } rcvmask = HFI1_RCVCTRL_CTXT_ENB; /* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */ - rcvmask |= rcd->rcvhdrtail_kvaddr ? + rcvmask |= hfi1_rcvhdrtail_kvaddr(rcd) ? HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS; hfi1_rcvctrl(dd, rcvmask, rcd); hfi1_rcd_put(rcd); @@ -8394,20 +8405,62 @@ void force_recv_intr(struct hfi1_ctxtdata *rcd) static inline int check_packet_present(struct hfi1_ctxtdata *rcd) { u32 tail; - int present; - if (!rcd->rcvhdrtail_kvaddr) - present = (rcd->seq_cnt == - rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd)))); - else /* is RDMA rtail */ - present = (rcd->head != get_rcvhdrtail(rcd)); - - if (present) + if (hfi1_packet_present(rcd)) return 1; /* fall back to a CSR read, correct indpendent of DMA_RTAIL */ tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL); - return rcd->head != tail; + return hfi1_rcd_head(rcd) != tail; +} + +/** + * Common code for receive contexts interrupt handlers. + * Update traces, increment kernel IRQ counter and + * setup ASPM when needed. + */ +static void receive_interrupt_common(struct hfi1_ctxtdata *rcd) +{ + struct hfi1_devdata *dd = rcd->dd; + + trace_hfi1_receive_interrupt(dd, rcd); + this_cpu_inc(*dd->int_counter); + aspm_ctx_disable(rcd); +} + +/** + * __hfi1_rcd_eoi_intr() - Make HW issue receive interrupt + * when there are packets present in the queue. When calling + * with interrupts enabled please use hfi1_rcd_eoi_intr. 
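The clear-then-recheck order implemented by the helpers below is what closes the race where a packet lands after the processing loop drains the queue but before the interrupt source is cleared. A minimal sketch of the generic pattern, with hypothetical irq_ack()/rx_pending()/irq_kick() standing in for clear_recv_intr()/check_packet_present()/force_recv_intr():

static void eoi_recheck(struct rxq *q)
{
        irq_ack(q);             /* 1. clear the latched interrupt source */
        if (rx_pending(q))      /* 2. did a packet slip in before the clear? */
                irq_kick(q);    /* 3. force a fresh interrupt rather than poll */
}

Clearing first guarantees that a packet arriving after the clear raises its own interrupt; the explicit kick covers only the window before it.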
+ * + * @rcd: valid receive context + */ +static void __hfi1_rcd_eoi_intr(struct hfi1_ctxtdata *rcd) +{ + clear_recv_intr(rcd); + if (check_packet_present(rcd)) + force_recv_intr(rcd); +} + +/** + * hfi1_rcd_eoi_intr() - End of Interrupt processing action + * + * @rcd: Ptr to hfi1_ctxtdata of receive context + * + * Hold IRQs so we can safely clear the interrupt and + * recheck for a packet that may have arrived after the previous + * check and the interrupt clear. If a packet arrived, force another + * interrupt. This routine can be called at the end of receive packet + * processing in interrupt service routines, interrupt service thread + * and softirqs + */ +static void hfi1_rcd_eoi_intr(struct hfi1_ctxtdata *rcd) +{ + unsigned long flags; + + local_irq_save(flags); + __hfi1_rcd_eoi_intr(rcd); + local_irq_restore(flags); } /* @@ -8421,13 +8474,9 @@ static inline int check_packet_present(struct hfi1_ctxtdata *rcd) irqreturn_t receive_context_interrupt(int irq, void *data) { struct hfi1_ctxtdata *rcd = data; - struct hfi1_devdata *dd = rcd->dd; int disposition; - int present; - trace_hfi1_receive_interrupt(dd, rcd); - this_cpu_inc(*dd->int_counter); - aspm_ctx_disable(rcd); + receive_interrupt_common(rcd); /* receive interrupt remains blocked while processing packets */ disposition = rcd->do_interrupt(rcd, 0); @@ -8440,17 +8489,7 @@ irqreturn_t receive_context_interrupt(int irq, void *data) if (disposition == RCV_PKT_LIMIT) return IRQ_WAKE_THREAD; - /* - * The packet processor detected no more packets. Clear the receive - * interrupt and recheck for a packet packet that may have arrived - * after the previous check and interrupt clear. If a packet arrived, - * force another interrupt. - */ - clear_recv_intr(rcd); - present = check_packet_present(rcd); - if (present) - force_recv_intr(rcd); - + __hfi1_rcd_eoi_intr(rcd); return IRQ_HANDLED; } @@ -8461,24 +8500,11 @@ irqreturn_t receive_context_interrupt(int irq, void *data) irqreturn_t receive_context_thread(int irq, void *data) { struct hfi1_ctxtdata *rcd = data; - int present; /* receive interrupt is still blocked from the IRQ handler */ (void)rcd->do_interrupt(rcd, 1); - /* - * The packet processor will only return if it detected no more - * packets. Hold IRQs here so we can safely clear the interrupt and - * recheck for a packet that may have arrived after the previous - * check and the interrupt clear. If a packet arrived, force another - * interrupt. - */ - local_irq_disable(); - clear_recv_intr(rcd); - present = check_packet_present(rcd); - if (present) - force_recv_intr(rcd); - local_irq_enable(); + hfi1_rcd_eoi_intr(rcd); return IRQ_HANDLED; } @@ -10049,7 +10075,7 @@ u32 lrh_max_header_bytes(struct hfi1_devdata *dd) * the first kernel context would have been allocated by now so * we are guaranteed a valid value. 
*/ - return (dd->rcd[0]->rcvhdrqentsize - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2; + return (get_hdrqentsize(dd->rcd[0]) - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2; } /* @@ -10094,7 +10120,7 @@ static void set_send_length(struct hfi1_pportdata *ppd) thres = min(sc_percent_to_threshold(dd->vld[i].sc, 50), sc_mtu_to_threshold(dd->vld[i].sc, dd->vld[i].mtu, - dd->rcd[0]->rcvhdrqentsize)); + get_hdrqentsize(dd->rcd[0]))); for (j = 0; j < INIT_SC_PER_VL; j++) sc_set_cr_threshold( pio_select_send_context_vl(dd, j, i), @@ -11821,7 +11847,7 @@ u32 hdrqempty(struct hfi1_ctxtdata *rcd) head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD) & RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT; - if (rcd->rcvhdrtail_kvaddr) + if (hfi1_rcvhdrtail_kvaddr(rcd)) tail = get_rcvhdrtail(rcd); else tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL); @@ -11865,6 +11891,84 @@ static u32 encoded_size(u32 size) return 0x1; /* if invalid, go with the minimum size */ } +/** + * encode_rcv_header_entry_size - return chip specific encoding for size + * @size: size in dwords + * + * Convert a receive header entry size that to the encoding used in the CSR. + * + * Return a zero if the given size is invalid, otherwise the encoding. + */ +u8 encode_rcv_header_entry_size(u8 size) +{ + /* there are only 3 valid receive header entry sizes */ + if (size == 2) + return 1; + if (size == 16) + return 2; + if (size == 32) + return 4; + return 0; /* invalid */ +} + +/** + * hfi1_validate_rcvhdrcnt - validate hdrcnt + * @dd: the device data + * @thecnt: the header count + */ +int hfi1_validate_rcvhdrcnt(struct hfi1_devdata *dd, uint thecnt) +{ + if (thecnt <= HFI1_MIN_HDRQ_EGRBUF_CNT) { + dd_dev_err(dd, "Receive header queue count too small\n"); + return -EINVAL; + } + + if (thecnt > HFI1_MAX_HDRQ_EGRBUF_CNT) { + dd_dev_err(dd, + "Receive header queue count cannot be greater than %u\n", + HFI1_MAX_HDRQ_EGRBUF_CNT); + return -EINVAL; + } + + if (thecnt % HDRQ_INCREMENT) { + dd_dev_err(dd, "Receive header queue count %d must be divisible by %lu\n", + thecnt, HDRQ_INCREMENT); + return -EINVAL; + } + + return 0; +} + +/** + * set_hdrq_regs - set header queue registers for context + * @dd: the device data + * @ctxt: the context + * @entsize: the dword entry size + * @hdrcnt: the number of header entries + */ +void set_hdrq_regs(struct hfi1_devdata *dd, u8 ctxt, u8 entsize, u16 hdrcnt) +{ + u64 reg; + + reg = (((u64)hdrcnt >> HDRQ_SIZE_SHIFT) & RCV_HDR_CNT_CNT_MASK) << + RCV_HDR_CNT_CNT_SHIFT; + write_kctxt_csr(dd, ctxt, RCV_HDR_CNT, reg); + reg = ((u64)encode_rcv_header_entry_size(entsize) & + RCV_HDR_ENT_SIZE_ENT_SIZE_MASK) << + RCV_HDR_ENT_SIZE_ENT_SIZE_SHIFT; + write_kctxt_csr(dd, ctxt, RCV_HDR_ENT_SIZE, reg); + reg = ((u64)DEFAULT_RCVHDRSIZE & RCV_HDR_SIZE_HDR_SIZE_MASK) << + RCV_HDR_SIZE_HDR_SIZE_SHIFT; + write_kctxt_csr(dd, ctxt, RCV_HDR_SIZE, reg); + + /* + * Program dummy tail address for every receive context + * before enabling any receive context + */ + write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR, + dd->rcvhdrtail_dummy_dma); +} + void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, struct hfi1_ctxtdata *rcd) { @@ -11886,13 +11990,13 @@ void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, /* reset the tail and hdr addresses, and sequence count */ write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR, rcd->rcvhdrq_dma); - if (rcd->rcvhdrtail_kvaddr) + if (hfi1_rcvhdrtail_kvaddr(rcd)) write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR, rcd->rcvhdrqtailaddr_dma); - rcd->seq_cnt = 1; + hfi1_set_seq_cnt(rcd, 1); /* reset the cached receive header 
queue head value */ - rcd->head = 0; + hfi1_set_rcd_head(rcd, 0); /* * Zero the receive header queue so we don't get false @@ -11972,7 +12076,7 @@ void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, IS_RCVAVAIL_START + rcd->ctxt, false); rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK; } - if ((op & HFI1_RCVCTRL_TAILUPD_ENB) && rcd->rcvhdrtail_kvaddr) + if ((op & HFI1_RCVCTRL_TAILUPD_ENB) && hfi1_rcvhdrtail_kvaddr(rcd)) rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK; if (op & HFI1_RCVCTRL_TAILUPD_DIS) { /* See comment on RcvCtxtCtrl.TailUpd above */ diff --git a/drivers/infiniband/hw/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h index 4ca5ac8d7e9e..725509261016 100644 --- a/drivers/infiniband/hw/hfi1/chip.h +++ b/drivers/infiniband/hw/hfi1/chip.h @@ -358,6 +358,8 @@ #define MAX_EAGER_BUFFER (256 * 1024) #define MAX_EAGER_BUFFER_TOTAL (64 * (1 << 20)) /* max per ctxt 64MB */ #define MAX_EXPECTED_BUFFER (2048 * 1024) +#define HFI1_MIN_HDRQ_EGRBUF_CNT 32 +#define HFI1_MAX_HDRQ_EGRBUF_CNT 16352 /* * Receive expected base and count and eager base and count increment - @@ -699,6 +701,10 @@ static inline u32 chip_rcv_array_count(struct hfi1_devdata *dd) return read_csr(dd, RCV_ARRAY_CNT); } +u8 encode_rcv_header_entry_size(u8 size); +int hfi1_validate_rcvhdrcnt(struct hfi1_devdata *dd, uint thecnt); +void set_hdrq_regs(struct hfi1_devdata *dd, u8 ctxt, u8 entsize, u16 hdrcnt); + u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl, u32 dw_len); @@ -859,6 +865,7 @@ static inline int idx_from_vl(int vl) enum { C_RCV_OVF = 0, C_RX_LEN_ERR, + C_RX_SHORT_ERR, C_RX_ICRC_ERR, C_RX_EBP, C_RX_TID_FULL, @@ -926,6 +933,7 @@ enum { C_DC_PG_STS_TX_MBE_CNT, C_SW_CPU_INTR, C_SW_CPU_RCV_LIM, + C_SW_CTX0_SEQ_DROP, C_SW_VTX_WAIT, C_SW_PIO_WAIT, C_SW_PIO_DRAIN, diff --git a/drivers/infiniband/hw/hfi1/chip_registers.h b/drivers/infiniband/hw/hfi1/chip_registers.h index ab3589d17aee..fb3ec9bff7a2 100644 --- a/drivers/infiniband/hw/hfi1/chip_registers.h +++ b/drivers/infiniband/hw/hfi1/chip_registers.h @@ -381,6 +381,7 @@ #define DC_LCB_STS_LINK_TRANSFER_ACTIVE (DC_LCB_CSRS + 0x000000000468) #define DC_LCB_STS_ROUND_TRIP_LTP_CNT (DC_LCB_CSRS + 0x0000000004B0) #define RCV_LENGTH_ERR_CNT 0 +#define RCV_SHORT_ERR_CNT 2 #define RCV_ICRC_ERR_CNT 6 #define RCV_EBP_CNT 9 #define RCV_BUF_OVFL_CNT 10 diff --git a/drivers/infiniband/hw/hfi1/common.h b/drivers/infiniband/hw/hfi1/common.h index d47da7b0438f..40a1ff0c8a8e 100644 --- a/drivers/infiniband/hw/hfi1/common.h +++ b/drivers/infiniband/hw/hfi1/common.h @@ -323,6 +323,9 @@ struct diag_pkt { /* RHF receive type error - bypass packet errors */ #define RHF_RTE_BYPASS_NO_ERR 0x0 +/* MAX RcvSEQ */ +#define RHF_MAX_SEQ 13 + /* IB - LRH header constants */ #define HFI1_LRH_GRH 0x0003 /* 1. word of IB LRH - next header: GRH */ #define HFI1_LRH_BTH 0x0002 /* 1. 
word of IB LRH - next header: BTH */ diff --git a/drivers/infiniband/hw/hfi1/debugfs.c b/drivers/infiniband/hw/hfi1/debugfs.c index d268bf9c42ee..4633a0ce1a8c 100644 --- a/drivers/infiniband/hw/hfi1/debugfs.c +++ b/drivers/infiniband/hw/hfi1/debugfs.c @@ -379,7 +379,7 @@ static void *_rcds_seq_next(struct seq_file *s, void *v, loff_t *pos) struct hfi1_devdata *dd = dd_from_dev(ibd); ++*pos; - if (!dd->rcd || *pos >= dd->n_krcv_queues) + if (!dd->rcd || *pos >= dd->num_rcv_contexts) return NULL; return pos; } diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c index 01aa1f132f55..049d15befe58 100644 --- a/drivers/infiniband/hw/hfi1/driver.c +++ b/drivers/infiniband/hw/hfi1/driver.c @@ -411,14 +411,14 @@ drop: static inline void init_packet(struct hfi1_ctxtdata *rcd, struct hfi1_packet *packet) { - packet->rsize = rcd->rcvhdrqentsize; /* words */ - packet->maxcnt = rcd->rcvhdrq_cnt * packet->rsize; /* words */ + packet->rsize = get_hdrqentsize(rcd); /* words */ + packet->maxcnt = get_hdrq_cnt(rcd) * packet->rsize; /* words */ packet->rcd = rcd; packet->updegr = 0; packet->etail = -1; packet->rhf_addr = get_rhf_addr(rcd); packet->rhf = rhf_to_cpu(packet->rhf_addr); - packet->rhqoff = rcd->head; + packet->rhqoff = hfi1_rcd_head(rcd); packet->numpkt = 0; } @@ -551,22 +551,22 @@ static inline void init_ps_mdata(struct ps_mdata *mdata, mdata->maxcnt = packet->maxcnt; mdata->ps_head = packet->rhqoff; - if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) { + if (get_dma_rtail_setting(rcd)) { mdata->ps_tail = get_rcvhdrtail(rcd); if (rcd->ctxt == HFI1_CTRL_CTXT) - mdata->ps_seq = rcd->seq_cnt; + mdata->ps_seq = hfi1_seq_cnt(rcd); else mdata->ps_seq = 0; /* not used with DMA_RTAIL */ } else { mdata->ps_tail = 0; /* used only with DMA_RTAIL*/ - mdata->ps_seq = rcd->seq_cnt; + mdata->ps_seq = hfi1_seq_cnt(rcd); } } static inline int ps_done(struct ps_mdata *mdata, u64 rhf, struct hfi1_ctxtdata *rcd) { - if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) + if (get_dma_rtail_setting(rcd)) return mdata->ps_head == mdata->ps_tail; return mdata->ps_seq != rhf_rcv_seq(rhf); } @@ -592,11 +592,9 @@ static inline void update_ps_mdata(struct ps_mdata *mdata, mdata->ps_head = 0; /* Control context must do seq counting */ - if (!HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) || - (rcd->ctxt == HFI1_CTRL_CTXT)) { - if (++mdata->ps_seq > 13) - mdata->ps_seq = 1; - } + if (!get_dma_rtail_setting(rcd) || + rcd->ctxt == HFI1_CTRL_CTXT) + mdata->ps_seq = hfi1_seq_incr_wrap(mdata->ps_seq); } /* @@ -734,6 +732,7 @@ static noinline int skip_rcv_packet(struct hfi1_packet *packet, int thread) { int ret; + packet->rcd->dd->ctx0_seq_drop++; /* Set up for the next packet */ packet->rhqoff += packet->rsize; if (packet->rhqoff >= packet->maxcnt) @@ -769,7 +768,7 @@ static inline int process_rcv_packet(struct hfi1_packet *packet, int thread) * The +2 is the size of the RHF. 
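A worked instance of the prefetch arithmetic that follows, under assumed values: 32-dword header queue entries, a 2-dword offset from rhf_hdrq_offset(), and a 4096-byte packet:

        /* dwords of this entry still holding header data past the
         * 2-dword RHF */
        u32 hdr_dw = 32 - (2 + 2);                       /* 28 dwords */

        /* only the remainder of the packet lives in the eager buffer */
        prefetch_range(packet->ebuf, 4096 - hdr_dw * 4); /* 3984 bytes */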
*/ prefetch_range(packet->ebuf, - packet->tlen - ((packet->rcd->rcvhdrqentsize - + packet->tlen - ((get_hdrqentsize(packet->rcd) - (rhf_hdrq_offset(packet->rhf) + 2)) * 4)); } @@ -823,7 +822,7 @@ static inline void finish_packet(struct hfi1_packet *packet) * The only thing we need to do is a final update and call for an * interrupt */ - update_usrhead(packet->rcd, packet->rcd->head, packet->updegr, + update_usrhead(packet->rcd, hfi1_rcd_head(packet->rcd), packet->updegr, packet->etail, rcv_intr_dynamic, packet->numpkt); } @@ -832,13 +831,11 @@ static inline void finish_packet(struct hfi1_packet *packet) */ int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *rcd, int thread) { - u32 seq; int last = RCV_PKT_OK; struct hfi1_packet packet; init_packet(rcd, &packet); - seq = rhf_rcv_seq(packet.rhf); - if (seq != rcd->seq_cnt) { + if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf))) { last = RCV_PKT_DONE; goto bail; } @@ -847,15 +844,12 @@ int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *rcd, int thread) while (last == RCV_PKT_OK) { last = process_rcv_packet(&packet, thread); - seq = rhf_rcv_seq(packet.rhf); - if (++rcd->seq_cnt > 13) - rcd->seq_cnt = 1; - if (seq != rcd->seq_cnt) + if (hfi1_seq_incr(rcd, rhf_rcv_seq(packet.rhf))) last = RCV_PKT_DONE; process_rcv_update(last, &packet); } process_rcv_qp_work(&packet); - rcd->head = packet.rhqoff; + hfi1_set_rcd_head(rcd, packet.rhqoff); bail: finish_packet(&packet); return last; @@ -884,15 +878,14 @@ int handle_receive_interrupt_dma_rtail(struct hfi1_ctxtdata *rcd, int thread) process_rcv_update(last, &packet); } process_rcv_qp_work(&packet); - rcd->head = packet.rhqoff; + hfi1_set_rcd_head(rcd, packet.rhqoff); bail: finish_packet(&packet); return last; } -static inline void set_nodma_rtail(struct hfi1_devdata *dd, u16 ctxt) +static void set_all_fastpath(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd) { - struct hfi1_ctxtdata *rcd; u16 i; /* @@ -900,50 +893,17 @@ static inline void set_nodma_rtail(struct hfi1_devdata *dd, u16 ctxt) * interrupt handler only for that context. Otherwise, switch * interrupt handler for all statically allocated kernel contexts. */ - if (ctxt >= dd->first_dyn_alloc_ctxt) { - rcd = hfi1_rcd_get_by_index_safe(dd, ctxt); - if (rcd) { - rcd->do_interrupt = - &handle_receive_interrupt_nodma_rtail; - hfi1_rcd_put(rcd); - } - return; - } - - for (i = HFI1_CTRL_CTXT + 1; i < dd->first_dyn_alloc_ctxt; i++) { - rcd = hfi1_rcd_get_by_index(dd, i); - if (rcd) - rcd->do_interrupt = - &handle_receive_interrupt_nodma_rtail; + if (rcd->ctxt >= dd->first_dyn_alloc_ctxt && !rcd->is_vnic) { + hfi1_rcd_get(rcd); + hfi1_set_fast(rcd); hfi1_rcd_put(rcd); - } -} - -static inline void set_dma_rtail(struct hfi1_devdata *dd, u16 ctxt) -{ - struct hfi1_ctxtdata *rcd; - u16 i; - - /* - * For dynamically allocated kernel contexts (like vnic) switch - * interrupt handler only for that context. Otherwise, switch - * interrupt handler for all statically allocated kernel contexts. 
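set_all_fastpath() can stay this simple because each context now caches both of its handlers up front, so switching is a single pointer store through hfi1_set_fast(). The selection happens once at context creation; the first three lines below mirror the init.c hunk later in this patch, while the final do_interrupt assignment is an assumption about surrounding init code that this diff does not show:

        rcd->fast_handler = get_dma_rtail_setting(rcd) ?
                                handle_receive_interrupt_dma_rtail :
                                handle_receive_interrupt_nodma_rtail;
        rcd->slow_handler = handle_receive_interrupt;
        rcd->do_interrupt = rcd->slow_handler;  /* assumed: start slow */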
- */ - if (ctxt >= dd->first_dyn_alloc_ctxt) { - rcd = hfi1_rcd_get_by_index_safe(dd, ctxt); - if (rcd) { - rcd->do_interrupt = - &handle_receive_interrupt_dma_rtail; - hfi1_rcd_put(rcd); - } return; } - for (i = HFI1_CTRL_CTXT + 1; i < dd->first_dyn_alloc_ctxt; i++) { + for (i = HFI1_CTRL_CTXT + 1; i < dd->num_rcv_contexts; i++) { rcd = hfi1_rcd_get_by_index(dd, i); - if (rcd) - rcd->do_interrupt = - &handle_receive_interrupt_dma_rtail; + if (rcd && (i < dd->first_dyn_alloc_ctxt || rcd->is_vnic)) + hfi1_set_fast(rcd); hfi1_rcd_put(rcd); } } @@ -959,17 +919,14 @@ void set_all_slowpath(struct hfi1_devdata *dd) if (!rcd) continue; if (i < dd->first_dyn_alloc_ctxt || rcd->is_vnic) - rcd->do_interrupt = &handle_receive_interrupt; + rcd->do_interrupt = rcd->slow_handler; hfi1_rcd_put(rcd); } } -static inline int set_armed_to_active(struct hfi1_ctxtdata *rcd, - struct hfi1_packet *packet, - struct hfi1_devdata *dd) +static bool __set_armed_to_active(struct hfi1_packet *packet) { - struct work_struct *lsaw = &rcd->ppd->linkstate_active_work; u8 etype = rhf_rcv_type(packet->rhf); u8 sc = SC15_PACKET; @@ -984,19 +941,34 @@ static inline int set_armed_to_active(struct hfi1_ctxtdata *rcd, sc = hfi1_16B_get_sc(hdr); } if (sc != SC15_PACKET) { - int hwstate = driver_lstate(rcd->ppd); + int hwstate = driver_lstate(packet->rcd->ppd); + struct work_struct *lsaw = + &packet->rcd->ppd->linkstate_active_work; if (hwstate != IB_PORT_ACTIVE) { - dd_dev_info(dd, + dd_dev_info(packet->rcd->dd, "Unexpected link state %s\n", opa_lstate_name(hwstate)); - return 0; + return false; } - queue_work(rcd->ppd->link_wq, lsaw); - return 1; + queue_work(packet->rcd->ppd->link_wq, lsaw); + return true; } - return 0; + return false; +} + +/** + * armed to active - the fast path for armed to active + * @packet: the packet structure + * + * Return true if packet processing needs to bail. + */ +static bool set_armed_to_active(struct hfi1_packet *packet) +{ + if (likely(packet->rcd->ppd->host_link_state != HLS_UP_ARMED)) + return false; + return __set_armed_to_active(packet); } /* @@ -1019,10 +991,8 @@ int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread) init_packet(rcd, &packet); - if (!HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) { - u32 seq = rhf_rcv_seq(packet.rhf); - - if (seq != rcd->seq_cnt) { + if (!get_dma_rtail_setting(rcd)) { + if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf))) { last = RCV_PKT_DONE; goto bail; } @@ -1039,22 +1009,15 @@ int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread) * Control context can potentially receive an invalid * rhf. Drop such packets. 
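Without DMA_RTAIL the only freshness signal is the RHF sequence number, which the hardware writes as 1..13 (RHF_MAX_SEQ) and wraps back to 1; since the queue memory is zeroed, an unwritten entry carries seq 0 and can never look fresh. A condensed sketch of the check built from the helpers this patch introduces (process_one() and next_rhf() are hypothetical stand-ins for the real loop body):

        if (last_rcv_seq(rcd, rhf_rcv_seq(rhf)))
                return;                 /* stale entry: queue is drained */
        process_one(rcd);
        if (hfi1_seq_incr(rcd, rhf_rcv_seq(next_rhf(rcd))))
                return;                 /* next entry is stale: done */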
*/ - if (rcd->ctxt == HFI1_CTRL_CTXT) { - u32 seq = rhf_rcv_seq(packet.rhf); - - if (seq != rcd->seq_cnt) + if (rcd->ctxt == HFI1_CTRL_CTXT) + if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf))) skip_pkt = 1; - } } prescan_rxq(rcd, &packet); while (last == RCV_PKT_OK) { - if (unlikely(dd->do_drop && - atomic_xchg(&dd->drop_packet, DROP_PACKET_OFF) == - DROP_PACKET_ON)) { - dd->do_drop = 0; - + if (hfi1_need_drop(dd)) { /* On to the next packet */ packet.rhqoff += packet.rsize; packet.rhf_addr = (__le32 *)rcd->rcvhdrq + @@ -1066,26 +1029,14 @@ int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread) last = skip_rcv_packet(&packet, thread); skip_pkt = 0; } else { - /* Auto activate link on non-SC15 packet receive */ - if (unlikely(rcd->ppd->host_link_state == - HLS_UP_ARMED) && - set_armed_to_active(rcd, &packet, dd)) + if (set_armed_to_active(&packet)) goto bail; last = process_rcv_packet(&packet, thread); } - if (!HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) { - u32 seq = rhf_rcv_seq(packet.rhf); - - if (++rcd->seq_cnt > 13) - rcd->seq_cnt = 1; - if (seq != rcd->seq_cnt) + if (!get_dma_rtail_setting(rcd)) { + if (hfi1_seq_incr(rcd, rhf_rcv_seq(packet.rhf))) last = RCV_PKT_DONE; - if (needset) { - dd_dev_info(dd, "Switching to NO_DMA_RTAIL\n"); - set_nodma_rtail(dd, rcd->ctxt); - needset = 0; - } } else { if (packet.rhqoff == hdrqtail) last = RCV_PKT_DONE; @@ -1094,27 +1045,24 @@ int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread) * rhf. Drop such packets. */ if (rcd->ctxt == HFI1_CTRL_CTXT) { - u32 seq = rhf_rcv_seq(packet.rhf); + bool lseq; - if (++rcd->seq_cnt > 13) - rcd->seq_cnt = 1; - if (!last && (seq != rcd->seq_cnt)) + lseq = hfi1_seq_incr(rcd, + rhf_rcv_seq(packet.rhf)); + if (!last && lseq) skip_pkt = 1; } - - if (needset) { - dd_dev_info(dd, - "Switching to DMA_RTAIL\n"); - set_dma_rtail(dd, rcd->ctxt); - needset = 0; - } } + if (needset) { + needset = false; + set_all_fastpath(dd, rcd); + } process_rcv_update(last, &packet); } process_rcv_qp_work(&packet); - rcd->head = packet.rhqoff; + hfi1_set_rcd_head(rcd, packet.rhqoff); bail: /* @@ -1606,23 +1554,22 @@ void handle_eflags(struct hfi1_packet *packet) * The following functions are called by the interrupt handler. They are type * specific handlers for each packet type. 
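Each of these handlers used to end with return RHF_RCV_CONTINUE and no caller ever acted on a different value, so the patch narrows rhf_rcv_function_ptr to return void. Dispatch stays a table indexed by the RHF receive type, along these lines (a sketch; the driver's actual table is defined elsewhere, and the RHF_RCV_TYPE_* indices other than RHF_RCV_TYPE_ERROR are assumed):

static const rhf_rcv_function_ptr normal_rhf_rcv[] = {
        [RHF_RCV_TYPE_EXPECTED] = kdeth_process_expected,
        [RHF_RCV_TYPE_EAGER]    = kdeth_process_eager,
        [RHF_RCV_TYPE_IB]       = process_receive_ib,
        [RHF_RCV_TYPE_ERROR]    = process_receive_error,
        [RHF_RCV_TYPE_BYPASS]   = process_receive_bypass,
};

        /* in the per-packet loop */
        normal_rhf_rcv[rhf_rcv_type(packet->rhf)](packet);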
*/ -static int process_receive_ib(struct hfi1_packet *packet) +static void process_receive_ib(struct hfi1_packet *packet) { if (hfi1_setup_9B_packet(packet)) - return RHF_RCV_CONTINUE; + return; if (unlikely(hfi1_dbg_should_fault_rx(packet))) - return RHF_RCV_CONTINUE; + return; trace_hfi1_rcvhdr(packet); if (unlikely(rhf_err_flags(packet->rhf))) { handle_eflags(packet); - return RHF_RCV_CONTINUE; + return; } hfi1_ib_rcv(packet); - return RHF_RCV_CONTINUE; } static inline bool hfi1_is_vnic_packet(struct hfi1_packet *packet) @@ -1638,23 +1585,23 @@ static inline bool hfi1_is_vnic_packet(struct hfi1_packet *packet) return false; } -static int process_receive_bypass(struct hfi1_packet *packet) +static void process_receive_bypass(struct hfi1_packet *packet) { struct hfi1_devdata *dd = packet->rcd->dd; if (hfi1_is_vnic_packet(packet)) { hfi1_vnic_bypass_rcv(packet); - return RHF_RCV_CONTINUE; + return; } if (hfi1_setup_bypass_packet(packet)) - return RHF_RCV_CONTINUE; + return; trace_hfi1_rcvhdr(packet); if (unlikely(rhf_err_flags(packet->rhf))) { handle_eflags(packet); - return RHF_RCV_CONTINUE; + return; } if (hfi1_16B_get_l2(packet->hdr) == 0x2) { @@ -1677,17 +1624,16 @@ static int process_receive_bypass(struct hfi1_packet *packet) (OPA_EI_STATUS_SMASK | BAD_L2_ERR); } } - return RHF_RCV_CONTINUE; } -static int process_receive_error(struct hfi1_packet *packet) +static void process_receive_error(struct hfi1_packet *packet) { /* KHdrHCRCErr -- KDETH packet with a bad HCRC */ if (unlikely( hfi1_dbg_fault_suppress_err(&packet->rcd->dd->verbs_dev) && (rhf_rcv_type_err(packet->rhf) == RHF_RCV_TYPE_ERROR || packet->rhf & RHF_DC_ERR))) - return RHF_RCV_CONTINUE; + return; hfi1_setup_ib_header(packet); handle_eflags(packet); @@ -1695,32 +1641,29 @@ static int process_receive_error(struct hfi1_packet *packet) if (unlikely(rhf_err_flags(packet->rhf))) dd_dev_err(packet->rcd->dd, "Unhandled error packet received. Dropping.\n"); - - return RHF_RCV_CONTINUE; } -static int kdeth_process_expected(struct hfi1_packet *packet) +static void kdeth_process_expected(struct hfi1_packet *packet) { hfi1_setup_9B_packet(packet); if (unlikely(hfi1_dbg_should_fault_rx(packet))) - return RHF_RCV_CONTINUE; + return; if (unlikely(rhf_err_flags(packet->rhf))) { struct hfi1_ctxtdata *rcd = packet->rcd; if (hfi1_handle_kdeth_eflags(rcd, rcd->ppd, packet)) - return RHF_RCV_CONTINUE; + return; } hfi1_kdeth_expected_rcv(packet); - return RHF_RCV_CONTINUE; } -static int kdeth_process_eager(struct hfi1_packet *packet) +static void kdeth_process_eager(struct hfi1_packet *packet) { hfi1_setup_9B_packet(packet); if (unlikely(hfi1_dbg_should_fault_rx(packet))) - return RHF_RCV_CONTINUE; + return; trace_hfi1_rcvhdr(packet); if (unlikely(rhf_err_flags(packet->rhf))) { @@ -1728,37 +1671,41 @@ static int kdeth_process_eager(struct hfi1_packet *packet) show_eflags_errs(packet); if (hfi1_handle_kdeth_eflags(rcd, rcd->ppd, packet)) - return RHF_RCV_CONTINUE; + return; } hfi1_kdeth_eager_rcv(packet); - return RHF_RCV_CONTINUE; } -static int process_receive_invalid(struct hfi1_packet *packet) +static void process_receive_invalid(struct hfi1_packet *packet) { dd_dev_err(packet->rcd->dd, "Invalid packet type %d. 
Dropping\n", rhf_rcv_type(packet->rhf)); - return RHF_RCV_CONTINUE; } +#define HFI1_RCVHDR_DUMP_MAX 5 + void seqfile_dump_rcd(struct seq_file *s, struct hfi1_ctxtdata *rcd) { struct hfi1_packet packet; struct ps_mdata mdata; + int i; - seq_printf(s, "Rcd %u: RcvHdr cnt %u entsize %u %s head %llu tail %llu\n", - rcd->ctxt, rcd->rcvhdrq_cnt, rcd->rcvhdrqentsize, - HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ? + seq_printf(s, "Rcd %u: RcvHdr cnt %u entsize %u %s ctrl 0x%08llx status 0x%08llx, head %llu tail %llu sw head %u\n", + rcd->ctxt, get_hdrq_cnt(rcd), get_hdrqentsize(rcd), + get_dma_rtail_setting(rcd) ? "dma_rtail" : "nodma_rtail", + read_kctxt_csr(rcd->dd, rcd->ctxt, RCV_CTXT_CTRL), + read_kctxt_csr(rcd->dd, rcd->ctxt, RCV_CTXT_STATUS), read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD) & RCV_HDR_HEAD_HEAD_MASK, - read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL)); + read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL), + rcd->head); init_packet(rcd, &packet); init_ps_mdata(&mdata, &packet); - while (1) { + for (i = 0; i < HFI1_RCVHDR_DUMP_MAX; i++) { __le32 *rhf_addr = (__le32 *)rcd->rcvhdrq + mdata.ps_head + rcd->rhf_offset; struct ib_header *hdr; diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c index 7c5e3fb22413..bef6946861b2 100644 --- a/drivers/infiniband/hw/hfi1/file_ops.c +++ b/drivers/infiniband/hw/hfi1/file_ops.c @@ -505,12 +505,12 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma) ret = -EINVAL; goto done; } - if ((flags & VM_WRITE) || !uctxt->rcvhdrtail_kvaddr) { + if ((flags & VM_WRITE) || !hfi1_rcvhdrtail_kvaddr(uctxt)) { ret = -EPERM; goto done; } memlen = PAGE_SIZE; - memvirt = (void *)uctxt->rcvhdrtail_kvaddr; + memvirt = (void *)hfi1_rcvhdrtail_kvaddr(uctxt); flags &= ~VM_MAYWRITE; break; case SUBCTXT_UREGS: @@ -1090,7 +1090,7 @@ static void user_init(struct hfi1_ctxtdata *uctxt) * don't have to wait to be sure the DMA update has happened * (chip resets head/tail to 0 on transition to enable). */ - if (uctxt->rcvhdrtail_kvaddr) + if (hfi1_rcvhdrtail_kvaddr(uctxt)) clear_rcvhdrtail(uctxt); /* Setup J_KEY before enabling the context */ @@ -1154,8 +1154,8 @@ static int get_ctxt_info(struct hfi1_filedata *fd, unsigned long arg, u32 len) cinfo.send_ctxt = uctxt->sc->hw_context; cinfo.egrtids = uctxt->egrbufs.alloced; - cinfo.rcvhdrq_cnt = uctxt->rcvhdrq_cnt; - cinfo.rcvhdrq_entsize = uctxt->rcvhdrqentsize << 2; + cinfo.rcvhdrq_cnt = get_hdrq_cnt(uctxt); + cinfo.rcvhdrq_entsize = get_hdrqentsize(uctxt) << 2; cinfo.sdma_ring_size = fd->cq->nentries; cinfo.rcvegr_size = uctxt->egrbufs.rcvtid_size; @@ -1543,7 +1543,7 @@ static int manage_rcvq(struct hfi1_ctxtdata *uctxt, u16 subctxt, * always resets it's tail register back to 0 on a * transition from disabled to enabled. */ - if (uctxt->rcvhdrtail_kvaddr) + if (hfi1_rcvhdrtail_kvaddr(uctxt)) clear_rcvhdrtail(uctxt); rcvctrl_op = HFI1_RCVCTRL_CTXT_ENB; } else { diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h index fc10d65fc3e1..6365e8ffed9d 100644 --- a/drivers/infiniband/hw/hfi1/hfi.h +++ b/drivers/infiniband/hw/hfi1/hfi.h @@ -197,7 +197,9 @@ struct exp_tid_set { u32 count; }; -typedef int (*rhf_rcv_function_ptr)(struct hfi1_packet *packet); +struct hfi1_ctxtdata; +typedef int (*intr_handler)(struct hfi1_ctxtdata *rcd, int data); +typedef void (*rhf_rcv_function_ptr)(struct hfi1_packet *packet); struct tid_queue { struct list_head queue_head; @@ -226,7 +228,11 @@ struct hfi1_ctxtdata { * be valid. 
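Because either handler is valid at any instant, the pointer can be swapped with no locking; the code relies on an aligned function-pointer store being atomic. Written out explicitly (the READ_ONCE/WRITE_ONCE annotations are illustrative and not part of this patch):

        /* writer, e.g. set_all_fastpath() */
        WRITE_ONCE(rcd->do_interrupt, rcd->fast_handler);

        /* reader, the IRQ path: whichever snapshot it sees is callable */
        intr_handler h = READ_ONCE(rcd->do_interrupt);
        h(rcd, 0);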
Worst case is we process an extra interrupt and up to 64 * packets with the wrong interrupt handler. */ - int (*do_interrupt)(struct hfi1_ctxtdata *rcd, int threaded); + intr_handler do_interrupt; + /** fast handler after autoactive */ + intr_handler fast_handler; + /** slow handler */ + intr_handler slow_handler; /* verbs rx_stats per rcd */ struct hfi1_opcode_stats_perctx *opstats; /* clear interrupt mask */ @@ -1153,6 +1159,8 @@ struct hfi1_devdata { char *boardname; /* human readable board info */ + u64 ctx0_seq_drop; + /* reset value */ u64 z_int_counter; u64 z_rcv_limit; @@ -1310,7 +1318,7 @@ struct hfi1_devdata { struct err_info_constraint err_info_xmit_constraint; atomic_t drop_packet; - u8 do_drop; + bool do_drop; u8 err_info_uncorrectable; u8 err_info_fmconfig; @@ -1507,12 +1515,148 @@ void hfi1_make_ud_req_16B(struct rvt_qp *qp, #define RCV_PKT_LIMIT 0x1 /* stop, hit limit, start thread */ #define RCV_PKT_DONE 0x2 /* stop, no more packets detected */ +/** + * hfi1_rcd_head - add accessor for rcd head + * @rcd: the context + */ +static inline u32 hfi1_rcd_head(struct hfi1_ctxtdata *rcd) +{ + return rcd->head; +} + +/** + * hfi1_set_rcd_head - add accessor for rcd head + * @rcd: the context + * @head: the new head + */ +static inline void hfi1_set_rcd_head(struct hfi1_ctxtdata *rcd, u32 head) +{ + rcd->head = head; +} + /* calculate the current RHF address */ static inline __le32 *get_rhf_addr(struct hfi1_ctxtdata *rcd) { return (__le32 *)rcd->rcvhdrq + rcd->head + rcd->rhf_offset; } +/* return DMA_RTAIL configuration */ +static inline bool get_dma_rtail_setting(struct hfi1_ctxtdata *rcd) +{ + return !!HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL); +} + +/** + * hfi1_seq_incr_wrap - wrapping increment for sequence + * @seq: the current sequence number + * + * Returns: the incremented seq + */ +static inline u8 hfi1_seq_incr_wrap(u8 seq) +{ + if (++seq > RHF_MAX_SEQ) + seq = 1; + return seq; +} + +/** + * hfi1_seq_cnt - return seq_cnt member + * @rcd: the receive context + * + * Return seq_cnt member + */ +static inline u8 hfi1_seq_cnt(struct hfi1_ctxtdata *rcd) +{ + return rcd->seq_cnt; +} + +/** + * hfi1_set_seq_cnt - return seq_cnt member + * @rcd: the receive context + * + * Return seq_cnt member + */ +static inline void hfi1_set_seq_cnt(struct hfi1_ctxtdata *rcd, u8 cnt) +{ + rcd->seq_cnt = cnt; +} + +/** + * last_rcv_seq - is last + * @rcd: the receive context + * @seq: sequence + * + * return true if last packet + */ +static inline bool last_rcv_seq(struct hfi1_ctxtdata *rcd, u32 seq) +{ + return seq != rcd->seq_cnt; +} + +/** + * rcd_seq_incr - increment context sequence number + * @rcd: the receive context + * @seq: the current sequence number + * + * Returns: true if the this was the last packet + */ +static inline bool hfi1_seq_incr(struct hfi1_ctxtdata *rcd, u32 seq) +{ + rcd->seq_cnt = hfi1_seq_incr_wrap(rcd->seq_cnt); + return last_rcv_seq(rcd, seq); +} + +/** + * get_hdrqentsize - return hdrq entry size + * @rcd: the receive context + */ +static inline u8 get_hdrqentsize(struct hfi1_ctxtdata *rcd) +{ + return rcd->rcvhdrqentsize; +} + +/** + * get_hdrq_cnt - return hdrq count + * @rcd: the receive context + */ +static inline u16 get_hdrq_cnt(struct hfi1_ctxtdata *rcd) +{ + return rcd->rcvhdrq_cnt; +} + +/** + * hfi1_is_slowpath - check if this context is slow path + * @rcd: the receive context + */ +static inline bool hfi1_is_slowpath(struct hfi1_ctxtdata *rcd) +{ + return rcd->do_interrupt == rcd->slow_handler; +} + +/** + * hfi1_is_fastpath - check if this context is 
fast path + * @rcd: the receive context + */ +static inline bool hfi1_is_fastpath(struct hfi1_ctxtdata *rcd) +{ + if (rcd->ctxt == HFI1_CTRL_CTXT) + return false; + + return rcd->do_interrupt == rcd->fast_handler; +} + +/** + * hfi1_set_fast - change to the fast handler + * @rcd: the receive context + */ +static inline void hfi1_set_fast(struct hfi1_ctxtdata *rcd) +{ + if (unlikely(!rcd)) + return; + if (unlikely(!hfi1_is_fastpath(rcd))) + rcd->do_interrupt = rcd->fast_handler; +} + int hfi1_reset_device(int); void receive_interrupt_work(struct work_struct *work); @@ -2015,9 +2159,21 @@ int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr, void hfi1_release_user_pages(struct mm_struct *mm, struct page **p, size_t npages, bool dirty); +/** + * hfi1_rcvhdrtail_kvaddr - return tail kvaddr + * @rcd: the receive context + */ +static inline __le64 *hfi1_rcvhdrtail_kvaddr(const struct hfi1_ctxtdata *rcd) +{ + return (__le64 *)rcd->rcvhdrtail_kvaddr; +} + static inline void clear_rcvhdrtail(const struct hfi1_ctxtdata *rcd) { - *((u64 *)rcd->rcvhdrtail_kvaddr) = 0ULL; + u64 *kv = (u64 *)hfi1_rcvhdrtail_kvaddr(rcd); + + if (kv) + *kv = 0ULL; } static inline u32 get_rcvhdrtail(const struct hfi1_ctxtdata *rcd) @@ -2026,7 +2182,17 @@ static inline u32 get_rcvhdrtail(const struct hfi1_ctxtdata *rcd) * volatile because it's a DMA target from the chip, routine is * inlined, and don't want register caching or reordering. */ - return (u32)le64_to_cpu(*rcd->rcvhdrtail_kvaddr); + return (u32)le64_to_cpu(*hfi1_rcvhdrtail_kvaddr(rcd)); +} + +static inline bool hfi1_packet_present(struct hfi1_ctxtdata *rcd) +{ + if (likely(!rcd->rcvhdrtail_kvaddr)) { + u32 seq = rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))); + + return !last_rcv_seq(rcd, seq); + } + return hfi1_rcd_head(rcd) != get_rcvhdrtail(rcd); } /* @@ -2298,6 +2464,25 @@ static inline bool is_integrated(struct hfi1_devdata *dd) return dd->pcidev->device == PCI_DEVICE_ID_INTEL1; } +/** + * hfi1_need_drop - detect need for drop + * @dd: the device + * + * In some cases, the first packet needs to be dropped. + * + * Return true if the current packet needs to be dropped and false otherwise.
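The one-shot behaviour comes from atomic_xchg(): even if several receive contexts race into hfi1_need_drop(), exactly one observes DROP_PACKET_ON and performs the drop. Its call site in the slow-path loop of driver.c, condensed from the hunk above:

        if (hfi1_need_drop(dd)) {
                /* on to the next packet: skip this entry entirely */
                last = skip_rcv_packet(&packet, thread);
        }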
+ */ +static inline bool hfi1_need_drop(struct hfi1_devdata *dd) +{ + if (unlikely(dd->do_drop && + atomic_xchg(&dd->drop_packet, DROP_PACKET_OFF) == + DROP_PACKET_ON)) { + dd->do_drop = false; + return true; + } + return false; +} + int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp); #define DD_DEV_ENTRY(dd) __string(dev, dev_name(&(dd)->pcidev->dev)) diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c index 26b792bb1027..e3acda7a0800 100644 --- a/drivers/infiniband/hw/hfi1/init.c +++ b/drivers/infiniband/hw/hfi1/init.c @@ -78,8 +78,6 @@ */ #define HFI1_MIN_USER_CTXT_BUFCNT 7 -#define HFI1_MIN_HDRQ_EGRBUF_CNT 2 -#define HFI1_MAX_HDRQ_EGRBUF_CNT 16352 #define HFI1_MIN_EAGER_BUFFER_SIZE (4 * 1024) /* 4KB */ #define HFI1_MAX_EAGER_BUFFER_SIZE (256 * 1024) /* 256KB */ @@ -122,8 +120,6 @@ unsigned int user_credit_return_threshold = 33; /* default is 33% */ module_param(user_credit_return_threshold, uint, S_IRUGO); MODULE_PARM_DESC(user_credit_return_threshold, "Credit return threshold for user send contexts, return when unreturned credits passes this many blocks (in percent of allocated blocks, 0 is off)"); -static inline u64 encode_rcv_header_entry_size(u16 size); - DEFINE_XARRAY_FLAGS(hfi1_dev_table, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ); static int hfi1_create_kctxt(struct hfi1_devdata *dd, @@ -154,7 +150,12 @@ static int hfi1_create_kctxt(struct hfi1_devdata *dd, /* Control context must use DMA_RTAIL */ if (rcd->ctxt == HFI1_CTRL_CTXT) rcd->flags |= HFI1_CAP_DMA_RTAIL; - rcd->seq_cnt = 1; + rcd->fast_handler = get_dma_rtail_setting(rcd) ? + handle_receive_interrupt_dma_rtail : + handle_receive_interrupt_nodma_rtail; + rcd->slow_handler = handle_receive_interrupt; + + hfi1_set_seq_cnt(rcd, 1); rcd->sc = sc_alloc(dd, SC_ACK, rcd->rcvhdrqentsize, dd->node); if (!rcd->sc) { @@ -511,23 +512,6 @@ void hfi1_free_ctxt(struct hfi1_ctxtdata *rcd) } /* - * Convert a receive header entry size that to the encoding used in the CSR. - * - * Return a zero if the given size is invalid. - */ -static inline u64 encode_rcv_header_entry_size(u16 size) -{ - /* there are only 3 valid receive header entry sizes */ - if (size == 2) - return 1; - if (size == 16) - return 2; - else if (size == 32) - return 4; - return 0; /* invalid */ -} - -/* * Select the largest ccti value over all SLs to determine the intra- * packet gap for the link. 
* @@ -892,10 +876,10 @@ int hfi1_init(struct hfi1_devdata *dd, int reinit) if (is_ax(dd)) { atomic_set(&dd->drop_packet, DROP_PACKET_ON); - dd->do_drop = 1; + dd->do_drop = true; } else { atomic_set(&dd->drop_packet, DROP_PACKET_OFF); - dd->do_drop = 0; + dd->do_drop = false; } /* make sure the link is not "up" */ @@ -1149,9 +1133,9 @@ void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd) dma_free_coherent(&dd->pcidev->dev, rcvhdrq_size(rcd), rcd->rcvhdrq, rcd->rcvhdrq_dma); rcd->rcvhdrq = NULL; - if (rcd->rcvhdrtail_kvaddr) { + if (hfi1_rcvhdrtail_kvaddr(rcd)) { dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE, - (void *)rcd->rcvhdrtail_kvaddr, + (void *)hfi1_rcvhdrtail_kvaddr(rcd), rcd->rcvhdrqtailaddr_dma); rcd->rcvhdrtail_kvaddr = NULL; } @@ -1611,29 +1595,6 @@ static void postinit_cleanup(struct hfi1_devdata *dd) hfi1_free_devdata(dd); } -static int init_validate_rcvhdrcnt(struct hfi1_devdata *dd, uint thecnt) -{ - if (thecnt <= HFI1_MIN_HDRQ_EGRBUF_CNT) { - dd_dev_err(dd, "Receive header queue count too small\n"); - return -EINVAL; - } - - if (thecnt > HFI1_MAX_HDRQ_EGRBUF_CNT) { - dd_dev_err(dd, - "Receive header queue count cannot be greater than %u\n", - HFI1_MAX_HDRQ_EGRBUF_CNT); - return -EINVAL; - } - - if (thecnt % HDRQ_INCREMENT) { - dd_dev_err(dd, "Receive header queue count %d must be divisible by %lu\n", - thecnt, HDRQ_INCREMENT); - return -EINVAL; - } - - return 0; -} - static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { int ret = 0, j, pidx, initfail; @@ -1661,7 +1622,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) } /* Validate some global module parameters */ - ret = init_validate_rcvhdrcnt(dd, rcvhdrcnt); + ret = hfi1_validate_rcvhdrcnt(dd, rcvhdrcnt); if (ret) goto bail; @@ -1842,7 +1803,6 @@ static void shutdown_one(struct pci_dev *pdev) int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd) { unsigned amt; - u64 reg; if (!rcd->rcvhdrq) { gfp_t gfp_flags; @@ -1874,30 +1834,9 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd) goto bail_free; } } - /* - * These values are per-context: - * RcvHdrCnt - * RcvHdrEntSize - * RcvHdrSize - */ - reg = ((u64)(rcd->rcvhdrq_cnt >> HDRQ_SIZE_SHIFT) - & RCV_HDR_CNT_CNT_MASK) - << RCV_HDR_CNT_CNT_SHIFT; - write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_CNT, reg); - reg = (encode_rcv_header_entry_size(rcd->rcvhdrqentsize) - & RCV_HDR_ENT_SIZE_ENT_SIZE_MASK) - << RCV_HDR_ENT_SIZE_ENT_SIZE_SHIFT; - write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_ENT_SIZE, reg); - reg = ((u64)DEFAULT_RCVHDRSIZE & RCV_HDR_SIZE_HDR_SIZE_MASK) - << RCV_HDR_SIZE_HDR_SIZE_SHIFT; - write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_SIZE, reg); - /* - * Program dummy tail address for every receive context - * before enabling any receive context - */ - write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_TAIL_ADDR, - dd->rcvhdrtail_dummy_dma); + set_hdrq_regs(rcd->dd, rcd->ctxt, rcd->rcvhdrqentsize, + rcd->rcvhdrq_cnt); return 0; diff --git a/drivers/infiniband/hw/hfi1/iowait.c b/drivers/infiniband/hw/hfi1/iowait.c index adb4a1ba921b..5836fe7b2817 100644 --- a/drivers/infiniband/hw/hfi1/iowait.c +++ b/drivers/infiniband/hw/hfi1/iowait.c @@ -81,7 +81,9 @@ void iowait_init(struct iowait *wait, u32 tx_limit, void iowait_cancel_work(struct iowait *w) { cancel_work_sync(&iowait_get_ib_work(w)->iowork); - cancel_work_sync(&iowait_get_tid_work(w)->iowork); + /* Make sure that the iowork for TID RDMA is used */ + if (iowait_get_tid_work(w)->iowork.func) + 
cancel_work_sync(&iowait_get_tid_work(w)->iowork); } /** diff --git a/drivers/infiniband/hw/hfi1/msix.c b/drivers/infiniband/hw/hfi1/msix.c index d920b165d696..db82db497b2c 100644 --- a/drivers/infiniband/hw/hfi1/msix.c +++ b/drivers/infiniband/hw/hfi1/msix.c @@ -115,13 +115,11 @@ int msix_initialize(struct hfi1_devdata *dd) */ static int msix_request_irq(struct hfi1_devdata *dd, void *arg, irq_handler_t handler, irq_handler_t thread, - u32 idx, enum irq_type type) + enum irq_type type, const char *name) { unsigned long nr; int irq; int ret; - const char *err_info; - char name[MAX_NAME_SIZE]; struct hfi1_msix_entry *me; /* Allocate an MSIx vector */ @@ -135,43 +133,15 @@ static int msix_request_irq(struct hfi1_devdata *dd, void *arg, if (nr == dd->msix_info.max_requested) return -ENOSPC; - /* Specific verification and determine the name */ - switch (type) { - case IRQ_GENERAL: - /* general interrupt must be MSIx vector 0 */ - if (nr) { - spin_lock(&dd->msix_info.msix_lock); - __clear_bit(nr, dd->msix_info.in_use_msix); - spin_unlock(&dd->msix_info.msix_lock); - dd_dev_err(dd, "Invalid index %lu for GENERAL IRQ\n", - nr); - return -EINVAL; - } - snprintf(name, sizeof(name), DRIVER_NAME "_%d", dd->unit); - err_info = "general"; - break; - case IRQ_SDMA: - snprintf(name, sizeof(name), DRIVER_NAME "_%d sdma%d", - dd->unit, idx); - err_info = "sdma"; - break; - case IRQ_RCVCTXT: - snprintf(name, sizeof(name), DRIVER_NAME "_%d kctxt%d", - dd->unit, idx); - err_info = "receive context"; - break; - case IRQ_OTHER: - default: + if (type < IRQ_SDMA || type >= IRQ_OTHER) return -EINVAL; - } - name[sizeof(name) - 1] = 0; irq = pci_irq_vector(dd->pcidev, nr); ret = pci_request_irq(dd->pcidev, nr, handler, thread, arg, name); if (ret) { dd_dev_err(dd, - "%s: request for IRQ %d failed, MSIx %d, err %d\n", - err_info, irq, idx, ret); + "%s: request for IRQ %d failed, MSIx %lu, err %d\n", + name, irq, nr, ret); spin_lock(&dd->msix_info.msix_lock); __clear_bit(nr, dd->msix_info.in_use_msix); spin_unlock(&dd->msix_info.msix_lock); @@ -195,17 +165,13 @@ static int msix_request_irq(struct hfi1_devdata *dd, void *arg, return nr; } -/** - * msix_request_rcd_irq() - Helper function for RCVAVAIL IRQs - * @rcd: valid rcd context - * - */ -int msix_request_rcd_irq(struct hfi1_ctxtdata *rcd) +static int msix_request_rcd_irq_common(struct hfi1_ctxtdata *rcd, + irq_handler_t handler, + irq_handler_t thread, + const char *name) { - int nr; - - nr = msix_request_irq(rcd->dd, rcd, receive_context_interrupt, - receive_context_thread, rcd->ctxt, IRQ_RCVCTXT); + int nr = msix_request_irq(rcd->dd, rcd, handler, thread, + IRQ_RCVCTXT, name); if (nr < 0) return nr; @@ -222,6 +188,22 @@ int msix_request_rcd_irq(struct hfi1_ctxtdata *rcd) } /** + * msix_request_rcd_irq() - Helper function for RCVAVAIL IRQs + * @rcd: valid rcd context + * + */ +int msix_request_rcd_irq(struct hfi1_ctxtdata *rcd) +{ + char name[MAX_NAME_SIZE]; + + snprintf(name, sizeof(name), DRIVER_NAME "_%d kctxt%d", + rcd->dd->unit, rcd->ctxt); + + return msix_request_rcd_irq_common(rcd, receive_context_interrupt, + receive_context_thread, name); +} + +/** * msix_request_smda_ira() - Helper for getting SDMA IRQ resources * @sde: valid sdma engine * @@ -229,9 +211,12 @@ int msix_request_rcd_irq(struct hfi1_ctxtdata *rcd) int msix_request_sdma_irq(struct sdma_engine *sde) { int nr; + char name[MAX_NAME_SIZE]; + snprintf(name, sizeof(name), DRIVER_NAME "_%d sdma%d", + sde->dd->unit, sde->this_idx); nr = msix_request_irq(sde->dd, sde, sdma_interrupt, NULL, - 
sde->this_idx, IRQ_SDMA); + IRQ_SDMA, name); if (nr < 0) return nr; sde->msix_intr = nr; @@ -241,6 +226,32 @@ int msix_request_sdma_irq(struct sdma_engine *sde) } /** + * msix_request_general_irq(void) - Helper for getting general IRQ + * resources + * @dd: valid device data + */ +int msix_request_general_irq(struct hfi1_devdata *dd) +{ + int nr; + char name[MAX_NAME_SIZE]; + + snprintf(name, sizeof(name), DRIVER_NAME "_%d", dd->unit); + nr = msix_request_irq(dd, dd, general_interrupt, NULL, IRQ_GENERAL, + name); + if (nr < 0) + return nr; + + /* general interrupt must be MSIx vector 0 */ + if (nr) { + msix_free_irq(dd, (u8)nr); + dd_dev_err(dd, "Invalid index %d for GENERAL IRQ\n", nr); + return -EINVAL; + } + + return 0; +} + +/** * enable_sdma_src() - Helper to enable SDMA IRQ srcs * @dd: valid devdata structure * @i: index of SDMA engine @@ -265,10 +276,9 @@ static void enable_sdma_srcs(struct hfi1_devdata *dd, int i) int msix_request_irqs(struct hfi1_devdata *dd) { int i; - int ret; + int ret = msix_request_general_irq(dd); - ret = msix_request_irq(dd, dd, general_interrupt, NULL, 0, IRQ_GENERAL); - if (ret < 0) + if (ret) return ret; for (i = 0; i < dd->num_sdma; i++) { diff --git a/drivers/infiniband/hw/hfi1/msix.h b/drivers/infiniband/hw/hfi1/msix.h index a514881632a4..1a02ab7971c8 100644 --- a/drivers/infiniband/hw/hfi1/msix.h +++ b/drivers/infiniband/hw/hfi1/msix.h @@ -54,6 +54,7 @@ int msix_initialize(struct hfi1_devdata *dd); int msix_request_irqs(struct hfi1_devdata *dd); void msix_clean_up_interrupts(struct hfi1_devdata *dd); +int msix_request_general_irq(struct hfi1_devdata *dd); int msix_request_rcd_irq(struct hfi1_ctxtdata *rcd); int msix_request_sdma_irq(struct sdma_engine *sde); void msix_free_irq(struct hfi1_devdata *dd, u8 msix_intr); diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c index 61362bd6d3ce..1a6268d61977 100644 --- a/drivers/infiniband/hw/hfi1/pcie.c +++ b/drivers/infiniband/hw/hfi1/pcie.c @@ -161,7 +161,7 @@ int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev) return -EINVAL; } - dd->kregbase1 = ioremap_nocache(addr, RCV_ARRAY); + dd->kregbase1 = ioremap(addr, RCV_ARRAY); if (!dd->kregbase1) { dd_dev_err(dd, "UC mapping of kregbase1 failed\n"); return -ENOMEM; @@ -179,7 +179,7 @@ int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev) dd_dev_info(dd, "RcvArray count: %u\n", rcv_array_count); dd->base2_start = RCV_ARRAY + rcv_array_count * 8; - dd->kregbase2 = ioremap_nocache( + dd->kregbase2 = ioremap( addr + dd->base2_start, TXE_PIO_SEND - dd->base2_start); if (!dd->kregbase2) { diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c index 1a3c647675a7..f1734e5e9ac4 100644 --- a/drivers/infiniband/hw/hfi1/rc.c +++ b/drivers/infiniband/hw/hfi1/rc.c @@ -2599,7 +2599,7 @@ static noinline int rc_rcv_error(struct ib_other_headers *ohdr, void *data, * to be sent before sending this one. 
*/ e = NULL; - old_req = 1; + old_req = true; ibp->rvp.n_rc_dupreq++; spin_lock_irqsave(&qp->s_lock, flags); diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c index e53f542b60af..8a2e0d9351e9 100644 --- a/drivers/infiniband/hw/hfi1/tid_rdma.c +++ b/drivers/infiniband/hw/hfi1/tid_rdma.c @@ -4633,6 +4633,15 @@ void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet) */ fpsn = full_flow_psn(flow, flow->flow_state.spsn); req->r_ack_psn = psn; + /* + * If resync_psn points to the last flow PSN for a + * segment and the new segment (likely from a new + * request) starts with a new generation number, we + * need to adjust resync_psn accordingly. + */ + if (flow->flow_state.generation != + (resync_psn >> HFI1_KDETH_BTH_SEQ_SHIFT)) + resync_psn = mask_psn(fpsn - 1); flow->resync_npkts += delta_psn(mask_psn(resync_psn + 1), fpsn); /* diff --git a/drivers/infiniband/hw/hfi1/trace_ctxts.h b/drivers/infiniband/hw/hfi1/trace_ctxts.h index e00c8a7d559c..b5fc5c6cd52f 100644 --- a/drivers/infiniband/hw/hfi1/trace_ctxts.h +++ b/drivers/infiniband/hw/hfi1/trace_ctxts.h @@ -80,7 +80,7 @@ TRACE_EVENT(hfi1_uctxtdata, __entry->credits = uctxt->sc->credits; __entry->hw_free = le64_to_cpu(*uctxt->sc->hw_free); __entry->piobase = uctxt->sc->base_addr; - __entry->rcvhdrq_cnt = uctxt->rcvhdrq_cnt; + __entry->rcvhdrq_cnt = get_hdrq_cnt(uctxt); __entry->rcvhdrq_dma = uctxt->rcvhdrq_dma; __entry->eager_cnt = uctxt->egrbufs.alloced; __entry->rcvegr_dma = uctxt->egrbufs.rcvtids[0].dma; diff --git a/drivers/infiniband/hw/hfi1/trace_rx.h b/drivers/infiniband/hw/hfi1/trace_rx.h index 3cec960e9674..168079ed122c 100644 --- a/drivers/infiniband/hw/hfi1/trace_rx.h +++ b/drivers/infiniband/hw/hfi1/trace_rx.h @@ -106,19 +106,8 @@ TRACE_EVENT(hfi1_receive_interrupt, ), TP_fast_assign(DD_DEV_ASSIGN(dd); __entry->ctxt = rcd->ctxt; - if (rcd->do_interrupt == - &handle_receive_interrupt) { - __entry->slow_path = 1; - __entry->dma_rtail = 0xFF; - } else if (rcd->do_interrupt == - &handle_receive_interrupt_dma_rtail){ - __entry->dma_rtail = 1; - __entry->slow_path = 0; - } else if (rcd->do_interrupt == - &handle_receive_interrupt_nodma_rtail) { - __entry->dma_rtail = 0; - __entry->slow_path = 0; - } + __entry->slow_path = hfi1_is_slowpath(rcd); + __entry->dma_rtail = get_dma_rtail_setting(rcd); ), TP_printk("[%s] ctxt %d SlowPath: %d DmaRtail: %d", __get_str(dev), diff --git a/drivers/infiniband/hw/hfi1/trace_tid.h b/drivers/infiniband/hw/hfi1/trace_tid.h index 343fb9894a82..985ffa9cc958 100644 --- a/drivers/infiniband/hw/hfi1/trace_tid.h +++ b/drivers/infiniband/hw/hfi1/trace_tid.h @@ -138,10 +138,10 @@ TRACE_EVENT(/* put_tid */ TP_ARGS(dd, index, type, pa, order), TP_STRUCT__entry(/* entry */ DD_DEV_ENTRY(dd) - __field(unsigned long, pa); - __field(u32, index); - __field(u32, type); - __field(u16, order); + __field(unsigned long, pa) + __field(u32, index) + __field(u32, type) + __field(u16, order) ), TP_fast_assign(/* assign */ DD_DEV_ASSIGN(dd); diff --git a/drivers/infiniband/hw/hfi1/trace_tx.h b/drivers/infiniband/hw/hfi1/trace_tx.h index 09eb0c9ada00..769e5e4710c6 100644 --- a/drivers/infiniband/hw/hfi1/trace_tx.h +++ b/drivers/infiniband/hw/hfi1/trace_tx.h @@ -588,7 +588,7 @@ TRACE_EVENT(hfi1_sdma_user_reqinfo, TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 *i), TP_ARGS(dd, ctxt, subctxt, i), TP_STRUCT__entry( - DD_DEV_ENTRY(dd); + DD_DEV_ENTRY(dd) __field(u16, ctxt) __field(u8, subctxt) __field(u8, ver_opcode) diff --git a/drivers/infiniband/hw/hfi1/user_pages.c 
b/drivers/infiniband/hw/hfi1/user_pages.c index 469acb961fbd..3b505006c0a6 100644 --- a/drivers/infiniband/hw/hfi1/user_pages.c +++ b/drivers/infiniband/hw/hfi1/user_pages.c @@ -106,7 +106,7 @@ int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr, size_t np int ret; unsigned int gup_flags = FOLL_LONGTERM | (writable ? FOLL_WRITE : 0); - ret = get_user_pages_fast(vaddr, npages, gup_flags, pages); + ret = pin_user_pages_fast(vaddr, npages, gup_flags, pages); if (ret < 0) return ret; @@ -118,7 +118,7 @@ int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr, size_t np void hfi1_release_user_pages(struct mm_struct *mm, struct page **p, size_t npages, bool dirty) { - put_user_pages_dirty_lock(p, npages, dirty); + unpin_user_pages_dirty_lock(p, npages, dirty); if (mm) { /* during close after signal, mm can be NULL */ atomic64_sub(npages, &mm->pinned_vm); diff --git a/drivers/infiniband/hw/hfi1/vnic_main.c b/drivers/infiniband/hw/hfi1/vnic_main.c index b49e60e8397d..6b14581b9965 100644 --- a/drivers/infiniband/hw/hfi1/vnic_main.c +++ b/drivers/infiniband/hw/hfi1/vnic_main.c @@ -78,7 +78,7 @@ static int setup_vnic_ctxt(struct hfi1_devdata *dd, struct hfi1_ctxtdata *uctxt) if (ret) goto done; - if (uctxt->rcvhdrtail_kvaddr) + if (hfi1_rcvhdrtail_kvaddr(uctxt)) clear_rcvhdrtail(uctxt); rcvctrl_ops = HFI1_RCVCTRL_CTXT_ENB;
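The user_pages.c hunk moves from the get_user_pages_fast()/put_user_pages_dirty_lock() pair to pin_user_pages_fast()/unpin_user_pages_dirty_lock(), the API intended for long-term DMA targets; pages come back FOLL_PIN'ed and must be released with the matching unpin call. The convention stays symmetric; a condensed sketch (buffer size and flags are assumptions):

        struct page *pages[16];
        int n;

        n = pin_user_pages_fast(vaddr, 16, FOLL_LONGTERM | FOLL_WRITE, pages);
        if (n > 0) {
                /* ... device DMAs into the pinned pages ... */
                unpin_user_pages_dirty_lock(pages, n, true);
        }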