Diffstat (limited to 'drivers/net/ethernet/broadcom/bnxt/bnxt.c')
-rw-r--r-- | drivers/net/ethernet/broadcom/bnxt/bnxt.c | 567
1 file changed, 441 insertions(+), 126 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 5d21c14853ac..3aa80da973d7 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -118,6 +118,7 @@ enum board_idx { NETXTREME_E_VF, NETXTREME_C_VF, NETXTREME_S_VF, + NETXTREME_E_P5_VF, }; /* indexed by enum above */ @@ -160,6 +161,7 @@ static const struct { [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" }, [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" }, [NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" }, + [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" }, }; static const struct pci_device_id bnxt_pci_tbl[] = { @@ -210,6 +212,7 @@ static const struct pci_device_id bnxt_pci_tbl[] = { { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF }, { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF }, { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF }, + { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF }, { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF }, #endif { 0 } @@ -237,7 +240,7 @@ static struct workqueue_struct *bnxt_pf_wq; static bool bnxt_vf_pciid(enum board_idx idx) { return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF || - idx == NETXTREME_S_VF); + idx == NETXTREME_S_VF || idx == NETXTREME_E_P5_VF); } #define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID) @@ -1809,7 +1812,7 @@ static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp) case CMPL_BASE_TYPE_HWRM_DONE: seq_id = le16_to_cpu(h_cmpl->sequence_id); if (seq_id == bp->hwrm_intr_seq_id) - bp->hwrm_intr_seq_id = HWRM_SEQ_ID_INVALID; + bp->hwrm_intr_seq_id = (u16)~bp->hwrm_intr_seq_id; else netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id); break; @@ -2372,7 +2375,11 @@ static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem) rmem->pg_arr[i] = NULL; } if (rmem->pg_tbl) { - dma_free_coherent(&pdev->dev, rmem->nr_pages * 8, + size_t pg_tbl_size = rmem->nr_pages * 8; + + if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG) + pg_tbl_size = rmem->page_size; + dma_free_coherent(&pdev->dev, pg_tbl_size, rmem->pg_tbl, rmem->pg_tbl_map); rmem->pg_tbl = NULL; } @@ -2390,9 +2397,12 @@ static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem) if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG)) valid_bit = PTU_PTE_VALID; - if (rmem->nr_pages > 1) { - rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, - rmem->nr_pages * 8, + if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) { + size_t pg_tbl_size = rmem->nr_pages * 8; + + if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG) + pg_tbl_size = rmem->page_size; + rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size, &rmem->pg_tbl_map, GFP_KERNEL); if (!rmem->pg_tbl) @@ -2409,7 +2419,7 @@ static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem) if (!rmem->pg_arr[i]) return -ENOMEM; - if (rmem->nr_pages > 1) { + if (rmem->nr_pages > 1 || rmem->depth > 0) { if (i == rmem->nr_pages - 2 && (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) extra_bits |= PTU_PTE_NEXT_TO_LAST; @@ -3276,6 +3286,27 @@ static void bnxt_free_hwrm_resources(struct bnxt *bp) bp->hwrm_cmd_resp_dma_addr); bp->hwrm_cmd_resp_addr = NULL; } + + if (bp->hwrm_cmd_kong_resp_addr) { + dma_free_coherent(&pdev->dev, PAGE_SIZE, + bp->hwrm_cmd_kong_resp_addr, + bp->hwrm_cmd_kong_resp_dma_addr); + bp->hwrm_cmd_kong_resp_addr = NULL; + } +} + 
+static int bnxt_alloc_kong_hwrm_resources(struct bnxt *bp) +{ + struct pci_dev *pdev = bp->pdev; + + bp->hwrm_cmd_kong_resp_addr = + dma_alloc_coherent(&pdev->dev, PAGE_SIZE, + &bp->hwrm_cmd_kong_resp_dma_addr, + GFP_KERNEL); + if (!bp->hwrm_cmd_kong_resp_addr) + return -ENOMEM; + + return 0; } static int bnxt_alloc_hwrm_resources(struct bnxt *bp) @@ -3317,9 +3348,8 @@ static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp) return 0; } -static void bnxt_free_stats(struct bnxt *bp) +static void bnxt_free_port_stats(struct bnxt *bp) { - u32 size, i; struct pci_dev *pdev = bp->pdev; bp->flags &= ~BNXT_FLAG_PORT_STATS; @@ -3345,6 +3375,12 @@ static void bnxt_free_stats(struct bnxt *bp) bp->hw_rx_port_stats_ext_map); bp->hw_rx_port_stats_ext = NULL; } +} + +static void bnxt_free_ring_stats(struct bnxt *bp) +{ + struct pci_dev *pdev = bp->pdev; + int size, i; if (!bp->bnapi) return; @@ -3384,6 +3420,9 @@ static int bnxt_alloc_stats(struct bnxt *bp) } if (BNXT_PF(bp) && bp->chip_num != CHIP_NUM_58700) { + if (bp->hw_rx_port_stats) + goto alloc_ext_stats; + bp->hw_port_stats_size = sizeof(struct rx_port_stats) + sizeof(struct tx_port_stats) + 1024; @@ -3400,11 +3439,15 @@ static int bnxt_alloc_stats(struct bnxt *bp) sizeof(struct rx_port_stats) + 512; bp->flags |= BNXT_FLAG_PORT_STATS; +alloc_ext_stats: /* Display extended statistics only if FW supports it */ if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900) return 0; + if (bp->hw_rx_port_stats_ext) + goto alloc_tx_ext_stats; + bp->hw_rx_port_stats_ext = dma_zalloc_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext), @@ -3413,6 +3456,10 @@ static int bnxt_alloc_stats(struct bnxt *bp) if (!bp->hw_rx_port_stats_ext) return 0; +alloc_tx_ext_stats: + if (bp->hw_tx_port_stats_ext) + return 0; + if (bp->hwrm_spec_code >= 0x10902) { bp->hw_tx_port_stats_ext = dma_zalloc_coherent(&pdev->dev, @@ -3520,7 +3567,7 @@ static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init) bnxt_free_cp_rings(bp); bnxt_free_ntp_fltrs(bp, irq_re_init); if (irq_re_init) { - bnxt_free_stats(bp); + bnxt_free_ring_stats(bp); bnxt_free_ring_grps(bp); bnxt_free_vnics(bp); kfree(bp->tx_ring_map); @@ -3721,7 +3768,10 @@ void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type, req->req_type = cpu_to_le16(req_type); req->cmpl_ring = cpu_to_le16(cmpl_ring); req->target_id = cpu_to_le16(target_id); - req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr); + if (bnxt_kong_hwrm_message(bp, req)) + req->resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr); + else + req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr); } static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, @@ -3736,11 +3786,10 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr; u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN; struct hwrm_short_input short_input = {0}; - - req->seq_id = cpu_to_le16(bp->hwrm_cmd_seq++); - memset(resp, 0, PAGE_SIZE); - cp_ring_id = le16_to_cpu(req->cmpl_ring); - intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 
0 : 1; + u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER; + u8 *resp_addr = (u8 *)bp->hwrm_cmd_resp_addr; + u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM; + u16 dst = BNXT_HWRM_CHNL_CHIMP; if (msg_len > BNXT_HWRM_MAX_REQ_LEN) { if (msg_len > bp->hwrm_max_ext_req_len || @@ -3748,6 +3797,23 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, return -EINVAL; } + if (bnxt_hwrm_kong_chnl(bp, req)) { + dst = BNXT_HWRM_CHNL_KONG; + bar_offset = BNXT_GRCPF_REG_KONG_COMM; + doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER; + resp = bp->hwrm_cmd_kong_resp_addr; + resp_addr = (u8 *)bp->hwrm_cmd_kong_resp_addr; + } + + memset(resp, 0, PAGE_SIZE); + cp_ring_id = le16_to_cpu(req->cmpl_ring); + intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1; + + req->seq_id = cpu_to_le16(bnxt_get_hwrm_seq_id(bp, dst)); + /* currently supports only one outstanding message */ + if (intr_process) + bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id); + if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) || msg_len > BNXT_HWRM_MAX_REQ_LEN) { void *short_cmd_req = bp->hwrm_short_cmd_req_addr; @@ -3781,17 +3847,13 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, } /* Write request msg to hwrm channel */ - __iowrite32_copy(bp->bar0, data, msg_len / 4); + __iowrite32_copy(bp->bar0 + bar_offset, data, msg_len / 4); for (i = msg_len; i < max_req_len; i += 4) - writel(0, bp->bar0 + i); - - /* currently supports only one outstanding message */ - if (intr_process) - bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id); + writel(0, bp->bar0 + bar_offset + i); /* Ring channel doorbell */ - writel(1, bp->bar0 + 0x100); + writel(1, bp->bar0 + doorbell_offset); if (!timeout) timeout = DFLT_HWRM_CMD_TIMEOUT; @@ -3806,10 +3868,13 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, tmo_count = HWRM_SHORT_TIMEOUT_COUNTER; timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER; tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT); - resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET; + resp_len = (__le32 *)(resp_addr + HWRM_RESP_LEN_OFFSET); + if (intr_process) { + u16 seq_id = bp->hwrm_intr_seq_id; + /* Wait until hwrm response cmpl interrupt is processed */ - while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID && + while (bp->hwrm_intr_seq_id != (u16)~seq_id && i++ < tmo_count) { /* on first few passes, just barely sleep */ if (i < HWRM_SHORT_TIMEOUT_COUNTER) @@ -3820,14 +3885,14 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, HWRM_MAX_TIMEOUT); } - if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) { + if (bp->hwrm_intr_seq_id != (u16)~seq_id) { netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n", le16_to_cpu(req->req_type)); return -1; } len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >> HWRM_RESP_LEN_SFT; - valid = bp->hwrm_cmd_resp_addr + len - 1; + valid = resp_addr + len - 1; } else { int j; @@ -3855,7 +3920,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, } /* Last byte of resp contains valid bit */ - valid = bp->hwrm_cmd_resp_addr + len - 1; + valid = resp_addr + len - 1; for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) { /* make sure we read from updated DMA memory */ dma_rmb(); @@ -3990,6 +4055,10 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp) cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD); } + if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) + req.flags |= cpu_to_le32( + FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE); + mutex_lock(&bp->hwrm_cmd_lock); rc = 
_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (rc) @@ -4118,12 +4187,11 @@ static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp, static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, struct bnxt_ntuple_filter *fltr) { - int rc = 0; + struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1]; struct hwrm_cfa_ntuple_filter_alloc_input req = {0}; - struct hwrm_cfa_ntuple_filter_alloc_output *resp = - bp->hwrm_cmd_resp_addr; + struct hwrm_cfa_ntuple_filter_alloc_output *resp; struct flow_keys *keys = &fltr->fkeys; - struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1]; + int rc = 0; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1); req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx]; @@ -4169,8 +4237,10 @@ static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, req.dst_id = cpu_to_le16(vnic->fw_vnic_id); mutex_lock(&bp->hwrm_cmd_lock); rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - if (!rc) + if (!rc) { + resp = bnxt_get_hwrm_resp_addr(bp, &req); fltr->filter_id = resp->ntuple_filter_id; + } mutex_unlock(&bp->hwrm_cmd_lock); return rc; } @@ -5161,7 +5231,6 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp) hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics); cp = le16_to_cpu(resp->alloc_cmpl_rings); stats = le16_to_cpu(resp->alloc_stat_ctx); - cp = min_t(u16, cp, stats); hw_resc->resv_irqs = cp; if (bp->flags & BNXT_FLAG_CHIP_P5) { int rx = hw_resc->resv_rx_rings; @@ -5180,6 +5249,7 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp) hw_resc->resv_hw_ring_grps = rx; } hw_resc->resv_cp_rings = cp; + hw_resc->resv_stat_ctxs = stats; } mutex_unlock(&bp->hwrm_cmd_lock); return 0; @@ -5209,7 +5279,7 @@ static bool bnxt_rfs_supported(struct bnxt *bp); static void __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req, int tx_rings, int rx_rings, int ring_grps, - int cp_rings, int vnics) + int cp_rings, int stats, int vnics) { u32 enables = 0; @@ -5251,7 +5321,7 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req, req->num_rsscos_ctxs = cpu_to_le16(ring_grps + 1); } - req->num_stat_ctxs = req->num_cmpl_rings; + req->num_stat_ctxs = cpu_to_le16(stats); req->num_vnics = cpu_to_le16(vnics); } req->enables = cpu_to_le32(enables); @@ -5261,7 +5331,7 @@ static void __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct hwrm_func_vf_cfg_input *req, int tx_rings, int rx_rings, int ring_grps, int cp_rings, - int vnics) + int stats, int vnics) { u32 enables = 0; @@ -5294,7 +5364,7 @@ __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, req->num_hw_ring_grps = cpu_to_le16(ring_grps); req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX); } - req->num_stat_ctxs = req->num_cmpl_rings; + req->num_stat_ctxs = cpu_to_le16(stats); req->num_vnics = cpu_to_le16(vnics); req->enables = cpu_to_le32(enables); @@ -5302,13 +5372,13 @@ __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, static int bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, - int ring_grps, int cp_rings, int vnics) + int ring_grps, int cp_rings, int stats, int vnics) { struct hwrm_func_cfg_input req = {0}; int rc; __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps, - cp_rings, vnics); + cp_rings, stats, vnics); if (!req.enables) return 0; @@ -5325,7 +5395,7 @@ bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, static int bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, - int ring_grps, int cp_rings, int vnics) + int 
ring_grps, int cp_rings, int stats, int vnics) { struct hwrm_func_vf_cfg_input req = {0}; int rc; @@ -5336,7 +5406,7 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, } __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps, - cp_rings, vnics); + cp_rings, stats, vnics); rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (rc) return -ENOMEM; @@ -5346,15 +5416,17 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, } static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp, - int cp, int vnic) + int cp, int stat, int vnic) { if (BNXT_PF(bp)) - return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, vnic); + return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat, + vnic); else - return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, vnic); + return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat, + vnic); } -static int bnxt_nq_rings_in_use(struct bnxt *bp) +int bnxt_nq_rings_in_use(struct bnxt *bp) { int cp = bp->cp_nr_rings; int ulp_msix, ulp_base; @@ -5380,12 +5452,17 @@ static int bnxt_cp_rings_in_use(struct bnxt *bp) return cp; } +static int bnxt_get_func_stat_ctxs(struct bnxt *bp) +{ + return bp->cp_nr_rings + bnxt_get_ulp_stat_ctxs(bp); +} + static bool bnxt_need_reserve_rings(struct bnxt *bp) { struct bnxt_hw_resc *hw_resc = &bp->hw_resc; int cp = bnxt_cp_rings_in_use(bp); int nq = bnxt_nq_rings_in_use(bp); - int rx = bp->rx_nr_rings; + int rx = bp->rx_nr_rings, stat; int vnic = 1, grp = rx; if (bp->hwrm_spec_code < 0x10601) @@ -5398,9 +5475,11 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp) vnic = rx + 1; if (bp->flags & BNXT_FLAG_AGG_RINGS) rx <<= 1; + stat = bnxt_get_func_stat_ctxs(bp); if (BNXT_NEW_RM(bp) && (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp || hw_resc->resv_irqs < nq || hw_resc->resv_vnics != vnic || + hw_resc->resv_stat_ctxs != stat || (hw_resc->resv_hw_ring_grps != grp && !(bp->flags & BNXT_FLAG_CHIP_P5)))) return true; @@ -5414,8 +5493,8 @@ static int __bnxt_reserve_rings(struct bnxt *bp) int tx = bp->tx_nr_rings; int rx = bp->rx_nr_rings; int grp, rx_rings, rc; + int vnic = 1, stat; bool sh = false; - int vnic = 1; if (!bnxt_need_reserve_rings(bp)) return 0; @@ -5427,8 +5506,9 @@ static int __bnxt_reserve_rings(struct bnxt *bp) if (bp->flags & BNXT_FLAG_AGG_RINGS) rx <<= 1; grp = bp->rx_nr_rings; + stat = bnxt_get_func_stat_ctxs(bp); - rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, vnic); + rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic); if (rc) return rc; @@ -5438,6 +5518,7 @@ static int __bnxt_reserve_rings(struct bnxt *bp) cp = hw_resc->resv_irqs; grp = hw_resc->resv_hw_ring_grps; vnic = hw_resc->resv_vnics; + stat = hw_resc->resv_stat_ctxs; } rx_rings = rx; @@ -5456,6 +5537,10 @@ static int __bnxt_reserve_rings(struct bnxt *bp) } } rx_rings = min_t(int, rx_rings, grp); + cp = min_t(int, cp, bp->cp_nr_rings); + if (stat > bnxt_get_ulp_stat_ctxs(bp)) + stat -= bnxt_get_ulp_stat_ctxs(bp); + cp = min_t(int, cp, stat); rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh); if (bp->flags & BNXT_FLAG_AGG_RINGS) rx = rx_rings << 1; @@ -5464,14 +5549,15 @@ static int __bnxt_reserve_rings(struct bnxt *bp) bp->rx_nr_rings = rx_rings; bp->cp_nr_rings = cp; - if (!tx || !rx || !cp || !grp || !vnic) + if (!tx || !rx || !cp || !grp || !vnic || !stat) return -ENOMEM; return rc; } static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, - int ring_grps, int cp_rings, int vnics) + int ring_grps, int cp_rings, int 
stats, + int vnics) { struct hwrm_func_vf_cfg_input req = {0}; u32 flags; @@ -5481,7 +5567,7 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, return 0; __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps, - cp_rings, vnics); + cp_rings, stats, vnics); flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST | FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST | FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST | @@ -5499,14 +5585,15 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, } static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, - int ring_grps, int cp_rings, int vnics) + int ring_grps, int cp_rings, int stats, + int vnics) { struct hwrm_func_cfg_input req = {0}; u32 flags; int rc; __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps, - cp_rings, vnics); + cp_rings, stats, vnics); flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST; if (BNXT_NEW_RM(bp)) { flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST | @@ -5527,17 +5614,19 @@ static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, } static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings, - int ring_grps, int cp_rings, int vnics) + int ring_grps, int cp_rings, int stats, + int vnics) { if (bp->hwrm_spec_code < 0x10801) return 0; if (BNXT_PF(bp)) return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings, - ring_grps, cp_rings, vnics); + ring_grps, cp_rings, stats, + vnics); return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps, - cp_rings, vnics); + cp_rings, stats, vnics); } static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp) @@ -5962,8 +6051,11 @@ static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr, pg_size = 2 << 4; *pg_attr = pg_size; - if (rmem->nr_pages > 1) { - *pg_attr |= 1; + if (rmem->depth >= 1) { + if (rmem->depth == 2) + *pg_attr |= 2; + else + *pg_attr |= 1; *pg_dir = cpu_to_le64(rmem->pg_tbl_map); } else { *pg_dir = cpu_to_le64(rmem->dma_arr[0]); @@ -6040,6 +6132,22 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables) &req.stat_pg_size_stat_lvl, &req.stat_page_dir); } + if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) { + ctx_pg = &ctx->mrav_mem; + req.mrav_num_entries = cpu_to_le32(ctx_pg->entries); + req.mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size); + bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, + &req.mrav_pg_size_mrav_lvl, + &req.mrav_page_dir); + } + if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) { + ctx_pg = &ctx->tim_mem; + req.tim_num_entries = cpu_to_le32(ctx_pg->entries); + req.tim_entry_size = cpu_to_le16(ctx->tim_entry_size); + bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, + &req.tim_pg_size_tim_lvl, + &req.tim_page_dir); + } for (i = 0, num_entries = &req.tqm_sp_num_entries, pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl, pg_dir = &req.tqm_sp_page_dir, @@ -6060,25 +6168,104 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables) } static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, - struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size) + struct bnxt_ctx_pg_info *ctx_pg) { struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; - if (!mem_size) - return 0; - - rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE); - if (rmem->nr_pages > MAX_CTX_PAGES) { - rmem->nr_pages = 0; - return -EINVAL; - } rmem->page_size = BNXT_PAGE_SIZE; rmem->pg_arr = ctx_pg->ctx_pg_arr; rmem->dma_arr = ctx_pg->ctx_dma_arr; rmem->flags = BNXT_RMEM_VALID_PTE_FLAG; + if (rmem->depth >= 1) + rmem->flags |= 
BNXT_RMEM_USE_FULL_PAGE_FLAG; return bnxt_alloc_ring(bp, rmem); } +static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp, + struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size, + u8 depth) +{ + struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; + int rc; + + if (!mem_size) + return 0; + + ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE); + if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) { + ctx_pg->nr_pages = 0; + return -EINVAL; + } + if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) { + int nr_tbls, i; + + rmem->depth = 2; + ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg), + GFP_KERNEL); + if (!ctx_pg->ctx_pg_tbl) + return -ENOMEM; + nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES); + rmem->nr_pages = nr_tbls; + rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg); + if (rc) + return rc; + for (i = 0; i < nr_tbls; i++) { + struct bnxt_ctx_pg_info *pg_tbl; + + pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL); + if (!pg_tbl) + return -ENOMEM; + ctx_pg->ctx_pg_tbl[i] = pg_tbl; + rmem = &pg_tbl->ring_mem; + rmem->pg_tbl = ctx_pg->ctx_pg_arr[i]; + rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i]; + rmem->depth = 1; + rmem->nr_pages = MAX_CTX_PAGES; + if (i == (nr_tbls - 1)) + rmem->nr_pages = ctx_pg->nr_pages % + MAX_CTX_PAGES; + rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl); + if (rc) + break; + } + } else { + rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE); + if (rmem->nr_pages > 1 || depth) + rmem->depth = 1; + rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg); + } + return rc; +} + +static void bnxt_free_ctx_pg_tbls(struct bnxt *bp, + struct bnxt_ctx_pg_info *ctx_pg) +{ + struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; + + if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES || + ctx_pg->ctx_pg_tbl) { + int i, nr_tbls = rmem->nr_pages; + + for (i = 0; i < nr_tbls; i++) { + struct bnxt_ctx_pg_info *pg_tbl; + struct bnxt_ring_mem_info *rmem2; + + pg_tbl = ctx_pg->ctx_pg_tbl[i]; + if (!pg_tbl) + continue; + rmem2 = &pg_tbl->ring_mem; + bnxt_free_ring(bp, rmem2); + ctx_pg->ctx_pg_arr[i] = NULL; + kfree(pg_tbl); + ctx_pg->ctx_pg_tbl[i] = NULL; + } + kfree(ctx_pg->ctx_pg_tbl); + ctx_pg->ctx_pg_tbl = NULL; + } + bnxt_free_ring(bp, rmem); + ctx_pg->nr_pages = 0; +} + static void bnxt_free_ctx_mem(struct bnxt *bp) { struct bnxt_ctx_mem_info *ctx = bp->ctx; @@ -6089,16 +6276,18 @@ static void bnxt_free_ctx_mem(struct bnxt *bp) if (ctx->tqm_mem[0]) { for (i = 0; i < bp->max_q + 1; i++) - bnxt_free_ring(bp, &ctx->tqm_mem[i]->ring_mem); + bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]); kfree(ctx->tqm_mem[0]); ctx->tqm_mem[0] = NULL; } - bnxt_free_ring(bp, &ctx->stat_mem.ring_mem); - bnxt_free_ring(bp, &ctx->vnic_mem.ring_mem); - bnxt_free_ring(bp, &ctx->cq_mem.ring_mem); - bnxt_free_ring(bp, &ctx->srq_mem.ring_mem); - bnxt_free_ring(bp, &ctx->qp_mem.ring_mem); + bnxt_free_ctx_pg_tbls(bp, &ctx->tim_mem); + bnxt_free_ctx_pg_tbls(bp, &ctx->mrav_mem); + bnxt_free_ctx_pg_tbls(bp, &ctx->stat_mem); + bnxt_free_ctx_pg_tbls(bp, &ctx->vnic_mem); + bnxt_free_ctx_pg_tbls(bp, &ctx->cq_mem); + bnxt_free_ctx_pg_tbls(bp, &ctx->srq_mem); + bnxt_free_ctx_pg_tbls(bp, &ctx->qp_mem); ctx->flags &= ~BNXT_CTX_FLAG_INITED; } @@ -6107,6 +6296,9 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp) struct bnxt_ctx_pg_info *ctx_pg; struct bnxt_ctx_mem_info *ctx; u32 mem_size, ena, entries; + u32 extra_srqs = 0; + u32 extra_qps = 0; + u8 pg_lvl = 1; int i, rc; rc = bnxt_hwrm_func_backing_store_qcaps(bp); @@ -6119,24 +6311,31 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp) if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED)) return 0; + if 
(bp->flags & BNXT_FLAG_ROCE_CAP) { + pg_lvl = 2; + extra_qps = 65536; + extra_srqs = 8192; + } + ctx_pg = &ctx->qp_mem; - ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries; + ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries + + extra_qps; mem_size = ctx->qp_entry_size * ctx_pg->entries; - rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size); + rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl); if (rc) return rc; ctx_pg = &ctx->srq_mem; - ctx_pg->entries = ctx->srq_max_l2_entries; + ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs; mem_size = ctx->srq_entry_size * ctx_pg->entries; - rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size); + rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl); if (rc) return rc; ctx_pg = &ctx->cq_mem; - ctx_pg->entries = ctx->cq_max_l2_entries; + ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2; mem_size = ctx->cq_entry_size * ctx_pg->entries; - rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size); + rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl); if (rc) return rc; @@ -6144,26 +6343,47 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp) ctx_pg->entries = ctx->vnic_max_vnic_entries + ctx->vnic_max_ring_table_entries; mem_size = ctx->vnic_entry_size * ctx_pg->entries; - rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size); + rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1); if (rc) return rc; ctx_pg = &ctx->stat_mem; ctx_pg->entries = ctx->stat_max_entries; mem_size = ctx->stat_entry_size * ctx_pg->entries; - rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size); + rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1); + if (rc) + return rc; + + ena = 0; + if (!(bp->flags & BNXT_FLAG_ROCE_CAP)) + goto skip_rdma; + + ctx_pg = &ctx->mrav_mem; + ctx_pg->entries = extra_qps * 4; + mem_size = ctx->mrav_entry_size * ctx_pg->entries; + rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2); if (rc) return rc; + ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV; - entries = ctx->qp_max_l2_entries; + ctx_pg = &ctx->tim_mem; + ctx_pg->entries = ctx->qp_mem.entries; + mem_size = ctx->tim_entry_size * ctx_pg->entries; + rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1); + if (rc) + return rc; + ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM; + +skip_rdma: + entries = ctx->qp_max_l2_entries + extra_qps; entries = roundup(entries, ctx->tqm_entries_multiple); entries = clamp_t(u32, entries, ctx->tqm_min_entries_per_ring, ctx->tqm_max_entries_per_ring); - for (i = 0, ena = 0; i < bp->max_q + 1; i++) { + for (i = 0; i < bp->max_q + 1; i++) { ctx_pg = ctx->tqm_mem[i]; ctx_pg->entries = entries; mem_size = ctx->tqm_entry_size * entries; - rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size); + rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1); if (rc) return rc; ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i; @@ -6190,7 +6410,8 @@ int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all) req.fid = cpu_to_le16(0xffff); mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = _hwrm_send_message_silent(bp, &req, sizeof(req), + HWRM_CMD_TIMEOUT); if (rc) { rc = -EIO; goto hwrm_func_resc_qcaps_exit; @@ -6220,7 +6441,7 @@ int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all) if (bp->flags & BNXT_FLAG_CHIP_P5) { u16 max_msix = le16_to_cpu(resp->max_msix); - hw_resc->max_irqs = min_t(u16, hw_resc->max_irqs, max_msix); + hw_resc->max_nqs = max_msix; hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings; } @@ -6442,6 +6663,13 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp) 
(dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD; + if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL; + + if (dev_caps_cfg & + VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE; + hwrm_ver_get_exit: mutex_unlock(&bp->hwrm_cmd_lock); return rc; @@ -6488,6 +6716,7 @@ static int bnxt_hwrm_port_qstats(struct bnxt *bp) static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp) { struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_queue_pri2cos_qcfg_input req2 = {0}; struct hwrm_port_qstats_ext_input req = {0}; struct bnxt_pf_info *pf = &bp->pf; int rc; @@ -6510,6 +6739,34 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp) bp->fw_rx_stats_ext_size = 0; bp->fw_tx_stats_ext_size = 0; } + if (bp->fw_tx_stats_ext_size <= + offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) { + mutex_unlock(&bp->hwrm_cmd_lock); + bp->pri2cos_valid = 0; + return rc; + } + + bnxt_hwrm_cmd_hdr_init(bp, &req2, HWRM_QUEUE_PRI2COS_QCFG, -1, -1); + req2.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN); + + rc = _hwrm_send_message(bp, &req2, sizeof(req2), HWRM_CMD_TIMEOUT); + if (!rc) { + struct hwrm_queue_pri2cos_qcfg_output *resp2; + u8 *pri2cos; + int i, j; + + resp2 = bp->hwrm_cmd_resp_addr; + pri2cos = &resp2->pri0_cos_queue_id; + for (i = 0; i < 8; i++) { + u8 queue_id = pri2cos[i]; + + for (j = 0; j < bp->max_q; j++) { + if (bp->q_ids[j] == queue_id) + bp->pri2cos[i] = j; + } + } + bp->pri2cos_valid = 1; + } mutex_unlock(&bp->hwrm_cmd_lock); return rc; } @@ -7034,17 +7291,12 @@ unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp) return bp->hw_resc.max_stat_ctxs; } -void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max) -{ - bp->hw_resc.max_stat_ctxs = max; -} - unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp) { return bp->hw_resc.max_cp_rings; } -unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp) +static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp) { unsigned int cp = bp->hw_resc.max_cp_rings; @@ -7058,6 +7310,9 @@ static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp) { struct bnxt_hw_resc *hw_resc = &bp->hw_resc; + if (bp->flags & BNXT_FLAG_CHIP_P5) + return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs); + return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings); } @@ -7066,6 +7321,26 @@ static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs) bp->hw_resc.max_irqs = max_irqs; } +unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp) +{ + unsigned int cp; + + cp = bnxt_get_max_func_cp_rings_for_en(bp); + if (bp->flags & BNXT_FLAG_CHIP_P5) + return cp - bp->rx_nr_rings - bp->tx_nr_rings; + else + return cp - bp->cp_nr_rings; +} + +unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp) +{ + unsigned int stat; + + stat = bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_ulp_stat_ctxs(bp); + stat -= bp->cp_nr_rings; + return stat; +} + int bnxt_get_avail_msix(struct bnxt *bp, int num) { int max_cp = bnxt_get_max_func_cp_rings(bp); @@ -7203,23 +7478,26 @@ static void bnxt_clear_int_mode(struct bnxt *bp) int bnxt_reserve_rings(struct bnxt *bp) { int tcs = netdev_get_num_tc(bp->dev); + bool reinit_irq = false; int rc; if (!bnxt_need_reserve_rings(bp)) return 0; - rc = __bnxt_reserve_rings(bp); - if (rc) { - netdev_err(bp->dev, "ring reservation failure rc: %d\n", rc); - return rc; - } if (BNXT_NEW_RM(bp) && 
(bnxt_get_num_msix(bp) != bp->total_irqs)) { bnxt_ulp_irq_stop(bp); bnxt_clear_int_mode(bp); - rc = bnxt_init_int_mode(bp); + reinit_irq = true; + } + rc = __bnxt_reserve_rings(bp); + if (reinit_irq) { + if (!rc) + rc = bnxt_init_int_mode(bp); bnxt_ulp_irq_restart(bp, rc); - if (rc) - return rc; + } + if (rc) { + netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc); + return rc; } if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) { netdev_err(bp->dev, "tx ring reservation failure\n"); @@ -7227,7 +7505,6 @@ int bnxt_reserve_rings(struct bnxt *bp) bp->tx_nr_rings_per_tc = bp->tx_nr_rings; return -ENOMEM; } - bp->num_stat_ctxs = bp->cp_nr_rings; return 0; } @@ -7821,6 +8098,7 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) rc = bnxt_hwrm_func_resc_qcaps(bp, true); hw_resc->resv_cp_rings = 0; + hw_resc->resv_stat_ctxs = 0; hw_resc->resv_irqs = 0; hw_resc->resv_tx_rings = 0; hw_resc->resv_rx_rings = 0; @@ -8260,6 +8538,9 @@ static bool bnxt_drv_busy(struct bnxt *bp) test_bit(BNXT_STATE_READ_STATS, &bp->state)); } +static void bnxt_get_ring_stats(struct bnxt *bp, + struct rtnl_link_stats64 *stats); + static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) { @@ -8285,6 +8566,9 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init, del_timer_sync(&bp->timer); bnxt_free_skbs(bp); + /* Save ring stats before shutdown */ + if (bp->bnapi) + bnxt_get_ring_stats(bp, &bp->net_stats_prev); if (irq_re_init) { bnxt_free_irq(bp); bnxt_del_napi(bp); @@ -8346,23 +8630,12 @@ static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) return -EOPNOTSUPP; } -static void -bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) +static void bnxt_get_ring_stats(struct bnxt *bp, + struct rtnl_link_stats64 *stats) { - u32 i; - struct bnxt *bp = netdev_priv(dev); + int i; - set_bit(BNXT_STATE_READ_STATS, &bp->state); - /* Make sure bnxt_close_nic() sees that we are reading stats before - * we check the BNXT_STATE_OPEN flag. - */ - smp_mb__after_atomic(); - if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { - clear_bit(BNXT_STATE_READ_STATS, &bp->state); - return; - } - /* TODO check if we need to synchronize with bnxt_close path */ for (i = 0; i < bp->cp_nr_rings; i++) { struct bnxt_napi *bnapi = bp->bnapi[i]; struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; @@ -8391,6 +8664,40 @@ bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts); } +} + +static void bnxt_add_prev_stats(struct bnxt *bp, + struct rtnl_link_stats64 *stats) +{ + struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev; + + stats->rx_packets += prev_stats->rx_packets; + stats->tx_packets += prev_stats->tx_packets; + stats->rx_bytes += prev_stats->rx_bytes; + stats->tx_bytes += prev_stats->tx_bytes; + stats->rx_missed_errors += prev_stats->rx_missed_errors; + stats->multicast += prev_stats->multicast; + stats->tx_dropped += prev_stats->tx_dropped; +} + +static void +bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) +{ + struct bnxt *bp = netdev_priv(dev); + + set_bit(BNXT_STATE_READ_STATS, &bp->state); + /* Make sure bnxt_close_nic() sees that we are reading stats before + * we check the BNXT_STATE_OPEN flag. 
+ */ + smp_mb__after_atomic(); + if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { + clear_bit(BNXT_STATE_READ_STATS, &bp->state); + *stats = bp->net_stats_prev; + return; + } + + bnxt_get_ring_stats(bp, stats); + bnxt_add_prev_stats(bp, stats); if (bp->flags & BNXT_FLAG_PORT_STATS) { struct rx_port_stats *rx = bp->hw_rx_port_stats; @@ -8626,12 +8933,12 @@ static bool bnxt_rfs_capable(struct bnxt *bp) if (vnics == bp->hw_resc.resv_vnics) return true; - bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, vnics); + bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics); if (vnics <= bp->hw_resc.resv_vnics) return true; netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n"); - bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 1); + bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1); return false; #else return false; @@ -9042,7 +9349,7 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, int tx_xdp) { int max_rx, max_tx, tx_sets = 1; - int tx_rings_needed; + int tx_rings_needed, stats; int rx_rings = rx; int cp, vnics, rc; @@ -9067,10 +9374,13 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, if (bp->flags & BNXT_FLAG_AGG_RINGS) rx_rings <<= 1; cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx; - if (BNXT_NEW_RM(bp)) + stats = cp; + if (BNXT_NEW_RM(bp)) { cp += bnxt_get_ulp_msix_num(bp); + stats += bnxt_get_ulp_stat_ctxs(bp); + } return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp, - vnics); + stats, vnics); } static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev) @@ -9106,7 +9416,7 @@ static void bnxt_init_dflt_coal(struct bnxt *bp) * 1 coal_buf x bufs_per_record = 1 completion record. */ coal = &bp->rx_coal; - coal->coal_ticks = 14; + coal->coal_ticks = 10; coal->coal_bufs = 30; coal->coal_ticks_irq = 1; coal->coal_bufs_irq = 2; @@ -9294,7 +9604,6 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc) bp->tx_nr_rings += bp->tx_nr_rings_xdp; bp->cp_nr_rings = sh ? 
max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) : bp->tx_nr_rings + bp->rx_nr_rings; - bp->num_stat_ctxs = bp->cp_nr_rings; if (netif_running(bp->dev)) return bnxt_open_nic(bp, true, false); @@ -9617,7 +9926,7 @@ static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, } static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, - u16 flags) + u16 flags, struct netlink_ext_ack *extack) { struct bnxt *bp = netdev_priv(dev); struct nlattr *attr, *br_spec; @@ -9760,6 +10069,7 @@ static void bnxt_remove_one(struct pci_dev *pdev) kfree(bp->ctx); bp->ctx = NULL; bnxt_cleanup_pci(bp); + bnxt_free_port_stats(bp); free_netdev(dev); } @@ -9834,7 +10144,7 @@ static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, *max_cp = bnxt_get_max_func_cp_rings_for_en(bp); max_irq = min_t(int, bnxt_get_max_func_irqs(bp) - bnxt_get_ulp_msix_num(bp), - bnxt_get_max_func_stat_ctxs(bp)); + hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp)); if (!(bp->flags & BNXT_FLAG_CHIP_P5)) *max_cp = min_t(int, *max_cp, max_irq); max_ring_grps = hw_resc->max_hw_ring_grps; @@ -9965,7 +10275,6 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh) netdev_warn(bp->dev, "2nd rings reservation failed.\n"); bp->tx_nr_rings_per_tc = bp->tx_nr_rings; } - bp->num_stat_ctxs = bp->cp_nr_rings; if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { bp->rx_nr_rings++; bp->cp_nr_rings++; @@ -10099,6 +10408,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (rc) goto init_err_pci_clean; + if (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL) { + rc = bnxt_alloc_kong_hwrm_resources(bp); + if (rc) + bp->fw_cap &= ~BNXT_FW_CAP_KONG_MB_CHNL; + } + if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) || bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) { rc = bnxt_alloc_hwrm_short_cmd_req(bp); |