Diffstat (limited to 'drivers/net/ethernet/broadcom/bnxt/bnxt.c')
-rw-r--r-- | drivers/net/ethernet/broadcom/bnxt/bnxt.c | 286
1 file changed, 190 insertions, 96 deletions
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 4ec4934a4edd..b6f844cac80e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1531,7 +1531,7 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
 	if (TPA_START_IS_IPV6(tpa_start1))
 		tpa_info->gso_type = SKB_GSO_TCPV6;
 	/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
-	else if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP &&
+	else if (!BNXT_CHIP_P4_PLUS(bp) &&
 		 TPA_START_HASH_TYPE(tpa_start) == 3)
 		tpa_info->gso_type = SKB_GSO_TCPV6;
 	tpa_info->rss_hash =
@@ -2226,15 +2226,13 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 		if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
 			type = bnxt_rss_ext_op(bp, rxcmp);
 		} else {
-			u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
+			u32 itypes = RX_CMP_ITYPES(rxcmp);
 
-			/* RSS profiles 1 and 3 with extract code 0 for inner
-			 * 4-tuple
-			 */
-			if (hash_type != 1 && hash_type != 3)
-				type = PKT_HASH_TYPE_L3;
-			else
+			if (itypes == RX_CMP_FLAGS_ITYPE_TCP ||
+			    itypes == RX_CMP_FLAGS_ITYPE_UDP)
 				type = PKT_HASH_TYPE_L4;
+			else
+				type = PKT_HASH_TYPE_L3;
 		}
 		skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
 	}
@@ -2899,6 +2897,13 @@ static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
 	return 0;
 }
 
+static bool bnxt_vnic_is_active(struct bnxt *bp)
+{
+	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+
+	return vnic->fw_vnic_id != INVALID_HW_RING_ID && vnic->mru > 0;
+}
+
 static irqreturn_t bnxt_msix(int irq, void *dev_instance)
 {
 	struct bnxt_napi *bnapi = dev_instance;
@@ -3166,7 +3171,7 @@ static int bnxt_poll(struct napi_struct *napi, int budget)
 			break;
 		}
 	}
-	if (bp->flags & BNXT_FLAG_DIM) {
+	if ((bp->flags & BNXT_FLAG_DIM) && bnxt_vnic_is_active(bp)) {
 		struct dim_sample dim_sample = {};
 
 		dim_update_sample(cpr->event_ctr,
@@ -3297,7 +3302,7 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget)
 poll_done:
 	cpr_rx = &cpr->cp_ring_arr[0];
 	if (cpr_rx->cp_ring_type == BNXT_NQ_HDL_TYPE_RX &&
-	    (bp->flags & BNXT_FLAG_DIM)) {
+	    (bp->flags & BNXT_FLAG_DIM) && bnxt_vnic_is_active(bp)) {
 		struct dim_sample dim_sample = {};
 
 		dim_update_sample(cpr->event_ctr,
@@ -3421,15 +3426,11 @@ static void bnxt_free_one_rx_agg_ring(struct bnxt *bp, struct bnxt_rx_ring_info
 	}
 }
 
-static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
+static void bnxt_free_one_tpa_info_data(struct bnxt *bp,
+					struct bnxt_rx_ring_info *rxr)
 {
-	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
-	struct bnxt_tpa_idx_map *map;
 	int i;
 
-	if (!rxr->rx_tpa)
-		goto skip_rx_tpa_free;
-
 	for (i = 0; i < bp->max_tpa; i++) {
 		struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
 		u8 *data = tpa_info->data;
@@ -3440,6 +3441,17 @@ static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
 		tpa_info->data = NULL;
 		page_pool_free_va(rxr->head_pool, data, false);
 	}
+}
+
+static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp,
+				       struct bnxt_rx_ring_info *rxr)
+{
+	struct bnxt_tpa_idx_map *map;
+
+	if (!rxr->rx_tpa)
+		goto skip_rx_tpa_free;
+
+	bnxt_free_one_tpa_info_data(bp, rxr);
 
 skip_rx_tpa_free:
 	if (!rxr->rx_buf_ring)
@@ -3467,7 +3479,7 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
 		return;
 
 	for (i = 0; i < bp->rx_nr_rings; i++)
-		bnxt_free_one_rx_ring_skbs(bp, i);
+		bnxt_free_one_rx_ring_skbs(bp, &bp->rx_ring[i]);
 }
 
 static void bnxt_free_skbs(struct bnxt *bp)
@@ -3608,29 +3620,64 @@ static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
 	return 0;
 }
 
+static void bnxt_free_one_tpa_info(struct bnxt *bp,
+				   struct bnxt_rx_ring_info *rxr)
+{
+	int i;
+
+	kfree(rxr->rx_tpa_idx_map);
+	rxr->rx_tpa_idx_map = NULL;
+	if (rxr->rx_tpa) {
+		for (i = 0; i < bp->max_tpa; i++) {
+			kfree(rxr->rx_tpa[i].agg_arr);
+			rxr->rx_tpa[i].agg_arr = NULL;
+		}
+	}
+	kfree(rxr->rx_tpa);
+	rxr->rx_tpa = NULL;
+}
+
 static void bnxt_free_tpa_info(struct bnxt *bp)
 {
-	int i, j;
+	int i;
 
 	for (i = 0; i < bp->rx_nr_rings; i++) {
 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
 
-		kfree(rxr->rx_tpa_idx_map);
-		rxr->rx_tpa_idx_map = NULL;
-		if (rxr->rx_tpa) {
-			for (j = 0; j < bp->max_tpa; j++) {
-				kfree(rxr->rx_tpa[j].agg_arr);
-				rxr->rx_tpa[j].agg_arr = NULL;
-			}
-		}
-		kfree(rxr->rx_tpa);
-		rxr->rx_tpa = NULL;
+		bnxt_free_one_tpa_info(bp, rxr);
 	}
 }
 
+static int bnxt_alloc_one_tpa_info(struct bnxt *bp,
+				   struct bnxt_rx_ring_info *rxr)
+{
+	struct rx_agg_cmp *agg;
+	int i;
+
+	rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
+			      GFP_KERNEL);
+	if (!rxr->rx_tpa)
+		return -ENOMEM;
+
+	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
+		return 0;
+	for (i = 0; i < bp->max_tpa; i++) {
+		agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL);
+		if (!agg)
+			return -ENOMEM;
+		rxr->rx_tpa[i].agg_arr = agg;
+	}
+	rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
+				      GFP_KERNEL);
+	if (!rxr->rx_tpa_idx_map)
+		return -ENOMEM;
+
+	return 0;
+}
+
 static int bnxt_alloc_tpa_info(struct bnxt *bp)
 {
-	int i, j;
+	int i, rc;
 
 	bp->max_tpa = MAX_TPA;
 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
@@ -3641,25 +3688,10 @@ static int bnxt_alloc_tpa_info(struct bnxt *bp)
 
 	for (i = 0; i < bp->rx_nr_rings; i++) {
 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
-		struct rx_agg_cmp *agg;
 
-		rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
-				      GFP_KERNEL);
-		if (!rxr->rx_tpa)
-			return -ENOMEM;
-
-		if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
-			continue;
-		for (j = 0; j < bp->max_tpa; j++) {
-			agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL);
-			if (!agg)
-				return -ENOMEM;
-			rxr->rx_tpa[j].agg_arr = agg;
-		}
-		rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
-					      GFP_KERNEL);
-		if (!rxr->rx_tpa_idx_map)
-			return -ENOMEM;
+		rc = bnxt_alloc_one_tpa_info(bp, rxr);
+		if (rc)
+			return rc;
 	}
 	return 0;
 }
@@ -3683,7 +3715,7 @@ static void bnxt_free_rx_rings(struct bnxt *bp)
 		xdp_rxq_info_unreg(&rxr->xdp_rxq);
 
 		page_pool_destroy(rxr->page_pool);
-		if (rxr->page_pool != rxr->head_pool)
+		if (bnxt_separate_head_pool())
 			page_pool_destroy(rxr->head_pool);
 		rxr->page_pool = rxr->head_pool = NULL;
 
@@ -3737,6 +3769,19 @@ err_destroy_pp:
 	return PTR_ERR(pool);
 }
 
+static int bnxt_alloc_rx_agg_bmap(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
+{
+	u16 mem_size;
+
+	rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
+	mem_size = rxr->rx_agg_bmap_size / 8;
+	rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
+	if (!rxr->rx_agg_bmap)
+		return -ENOMEM;
+
+	return 0;
+}
+
 static int bnxt_alloc_rx_rings(struct bnxt *bp)
 {
 	int numa_node = dev_to_node(&bp->pdev->dev);
@@ -3781,19 +3826,15 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp)
 		ring->grp_idx = i;
 
 		if (agg_rings) {
-			u16 mem_size;
-
 			ring = &rxr->rx_agg_ring_struct;
 			rc = bnxt_alloc_ring(bp, &ring->ring_mem);
 			if (rc)
 				return rc;
 
 			ring->grp_idx = i;
-			rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
-			mem_size = rxr->rx_agg_bmap_size / 8;
-			rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
-			if (!rxr->rx_agg_bmap)
-				return -ENOMEM;
+			rc = bnxt_alloc_rx_agg_bmap(bp, rxr);
+			if (rc)
+				return rc;
 		}
 	}
 	if (bp->flags & BNXT_FLAG_TPA)
@@ -4268,10 +4309,31 @@ static void bnxt_alloc_one_rx_ring_page(struct bnxt *bp,
 	rxr->rx_agg_prod = prod;
 }
 
+static int bnxt_alloc_one_tpa_info_data(struct bnxt *bp,
+					struct bnxt_rx_ring_info *rxr)
+{
+	dma_addr_t mapping;
+	u8 *data;
+	int i;
+
+	for (i = 0; i < bp->max_tpa; i++) {
+		data = __bnxt_alloc_rx_frag(bp, &mapping, rxr,
+					    GFP_KERNEL);
+		if (!data)
+			return -ENOMEM;
+
+		rxr->rx_tpa[i].data = data;
+		rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
+		rxr->rx_tpa[i].mapping = mapping;
+	}
+
+	return 0;
+}
+
 static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
 {
 	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
-	int i;
+	int rc;
 
 	bnxt_alloc_one_rx_ring_skb(bp, rxr, ring_nr);
 
@@ -4281,19 +4343,9 @@ static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
 		bnxt_alloc_one_rx_ring_page(bp, rxr, ring_nr);
 
 	if (rxr->rx_tpa) {
-		dma_addr_t mapping;
-		u8 *data;
-
-		for (i = 0; i < bp->max_tpa; i++) {
-			data = __bnxt_alloc_rx_frag(bp, &mapping, rxr,
-						    GFP_KERNEL);
-			if (!data)
-				return -ENOMEM;
-
-			rxr->rx_tpa[i].data = data;
-			rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
-			rxr->rx_tpa[i].mapping = mapping;
-		}
+		rc = bnxt_alloc_one_tpa_info_data(bp, rxr);
+		if (rc)
+			return rc;
 	}
 	return 0;
 }
@@ -4656,7 +4708,7 @@ void bnxt_set_ring_params(struct bnxt *bp)
 /* Changing allocation mode of RX rings.
  * TODO: Update when extending xdp_rxq_info to support allocation modes.
  */
-int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
+static void __bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
 {
 	struct net_device *dev = bp->dev;
 
@@ -4677,15 +4729,30 @@ int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
 			bp->rx_skb_func = bnxt_rx_page_skb;
 		}
 		bp->rx_dir = DMA_BIDIRECTIONAL;
-		/* Disable LRO or GRO_HW */
-		netdev_update_features(dev);
 	} else {
 		dev->max_mtu = bp->max_mtu;
 		bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
 		bp->rx_dir = DMA_FROM_DEVICE;
 		bp->rx_skb_func = bnxt_rx_skb;
 	}
-	return 0;
+}
+
+void bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
+{
+	__bnxt_set_rx_skb_mode(bp, page_mode);
+
+	if (!page_mode) {
+		int rx, tx;
+
+		bnxt_get_max_rings(bp, &rx, &tx, true);
+		if (rx > 1) {
+			bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS;
+			bp->dev->hw_features |= NETIF_F_LRO;
+		}
+	}
+
+	/* Update LRO and GRO_HW availability */
+	netdev_update_features(bp->dev);
 }
 
 static void bnxt_free_vnic_attributes(struct bnxt *bp)
@@ -7221,6 +7288,26 @@ err_out:
 	return rc;
 }
 
+static void bnxt_cancel_dim(struct bnxt *bp)
+{
+	int i;
+
+	/* DIM work is initialized in bnxt_enable_napi(). Proceed only
+	 * if NAPI is enabled.
+	 */
+	if (!bp->bnapi || test_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
+		return;
+
+	/* Make sure NAPI sees that the VNIC is disabled */
+	synchronize_net();
+	for (i = 0; i < bp->rx_nr_rings; i++) {
+		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
+		struct bnxt_napi *bnapi = rxr->bnapi;
+
+		cancel_work_sync(&bnapi->cp_ring.dim.work);
+	}
+}
+
 static int hwrm_ring_free_send_msg(struct bnxt *bp,
 				   struct bnxt_ring_struct *ring,
 				   u32 ring_type, int cmpl_ring_id)
@@ -7321,6 +7408,7 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
 		}
 	}
 
+	bnxt_cancel_dim(bp);
 	for (i = 0; i < bp->rx_nr_rings; i++) {
 		bnxt_hwrm_rx_ring_free(bp, &bp->rx_ring[i], close_path);
 		bnxt_hwrm_rx_agg_ring_free(bp, &bp->rx_ring[i], close_path);
@@ -8320,7 +8408,7 @@ static int bnxt_alloc_all_ctx_pg_info(struct bnxt *bp, int ctx_max)
 		struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
 		int n = 1;
 
-		if (!ctxm->max_entries)
+		if (!ctxm->max_entries || ctxm->pg_info)
 			continue;
 
 		if (ctxm->instance_bmap)
@@ -8924,8 +9012,8 @@ static int bnxt_backing_store_cfg_v2(struct bnxt *bp, u32 ena)
 				continue;
 			}
 			bnxt_bs_trace_init(bp, ctxm);
-			last_type = type;
 		}
+		last_type = type;
 	}
 
 	if (last_type == BNXT_CTX_INV) {
@@ -11264,8 +11352,6 @@ static void bnxt_disable_napi(struct bnxt *bp)
 		if (bnapi->in_reset)
 			cpr->sw_stats->rx.rx_resets++;
 		napi_disable(&bnapi->napi);
-		if (bnapi->rx_ring)
-			cancel_work_sync(&cpr->dim.work);
 	}
 }
 
@@ -13663,7 +13749,7 @@ static void bnxt_rx_ring_reset(struct bnxt *bp)
 			bnxt_reset_task(bp, true);
 			break;
 		}
-		bnxt_free_one_rx_ring_skbs(bp, i);
+		bnxt_free_one_rx_ring_skbs(bp, rxr);
 		rxr->rx_prod = 0;
 		rxr->rx_agg_prod = 0;
 		rxr->rx_sw_agg_prod = 0;
@@ -15293,19 +15379,6 @@ static const struct netdev_stat_ops bnxt_stat_ops = {
 	.get_base_stats		= bnxt_get_base_stats,
 };
 
-static int bnxt_alloc_rx_agg_bmap(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
-{
-	u16 mem_size;
-
-	rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
-	mem_size = rxr->rx_agg_bmap_size / 8;
-	rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
-	if (!rxr->rx_agg_bmap)
-		return -ENOMEM;
-
-	return 0;
-}
-
 static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx)
 {
 	struct bnxt_rx_ring_info *rxr, *clone;
@@ -15354,15 +15427,25 @@ static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx)
 		goto err_free_rx_agg_ring;
 	}
 
+	if (bp->flags & BNXT_FLAG_TPA) {
+		rc = bnxt_alloc_one_tpa_info(bp, clone);
+		if (rc)
+			goto err_free_tpa_info;
+	}
+
 	bnxt_init_one_rx_ring_rxbd(bp, clone);
 	bnxt_init_one_rx_agg_ring_rxbd(bp, clone);
 
 	bnxt_alloc_one_rx_ring_skb(bp, clone, idx);
 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
 		bnxt_alloc_one_rx_ring_page(bp, clone, idx);
+	if (bp->flags & BNXT_FLAG_TPA)
+		bnxt_alloc_one_tpa_info_data(bp, clone);
 
 	return 0;
 
+err_free_tpa_info:
+	bnxt_free_one_tpa_info(bp, clone);
 err_free_rx_agg_ring:
 	bnxt_free_ring(bp, &clone->rx_agg_ring_struct.ring_mem);
err_free_rx_ring:
@@ -15370,9 +15453,11 @@ err_free_rx_ring:
 err_rxq_info_unreg:
 	xdp_rxq_info_unreg(&clone->xdp_rxq);
 err_page_pool_destroy:
-	clone->page_pool->p.napi = NULL;
 	page_pool_destroy(clone->page_pool);
+	if (bnxt_separate_head_pool())
+		page_pool_destroy(clone->head_pool);
 	clone->page_pool = NULL;
+	clone->head_pool = NULL;
 	return rc;
 }
 
@@ -15382,13 +15467,15 @@ static void bnxt_queue_mem_free(struct net_device *dev, void *qmem)
 	struct bnxt *bp = netdev_priv(dev);
 	struct bnxt_ring_struct *ring;
 
-	bnxt_free_one_rx_ring(bp, rxr);
-	bnxt_free_one_rx_agg_ring(bp, rxr);
+	bnxt_free_one_rx_ring_skbs(bp, rxr);
 
 	xdp_rxq_info_unreg(&rxr->xdp_rxq);
 
 	page_pool_destroy(rxr->page_pool);
+	if (bnxt_separate_head_pool())
+		page_pool_destroy(rxr->head_pool);
 	rxr->page_pool = NULL;
+	rxr->head_pool = NULL;
 
 	ring = &rxr->rx_ring_struct;
 	bnxt_free_ring(bp, &ring->ring_mem);
@@ -15470,7 +15557,10 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
 	rxr->rx_agg_prod = clone->rx_agg_prod;
 	rxr->rx_sw_agg_prod = clone->rx_sw_agg_prod;
 	rxr->rx_next_cons = clone->rx_next_cons;
+	rxr->rx_tpa = clone->rx_tpa;
+	rxr->rx_tpa_idx_map = clone->rx_tpa_idx_map;
 	rxr->page_pool = clone->page_pool;
+	rxr->head_pool = clone->head_pool;
 	rxr->xdp_rxq = clone->xdp_rxq;
 
 	bnxt_copy_rx_ring(bp, rxr, clone);
@@ -15523,12 +15613,16 @@ static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
 		bnxt_hwrm_vnic_update(bp, vnic,
 				      VNIC_UPDATE_REQ_ENABLES_MRU_VALID);
 	}
-
+	/* Make sure NAPI sees that the VNIC is disabled */
+	synchronize_net();
 	rxr = &bp->rx_ring[idx];
+	cancel_work_sync(&rxr->bnapi->cp_ring.dim.work);
 	bnxt_hwrm_rx_ring_free(bp, rxr, false);
 	bnxt_hwrm_rx_agg_ring_free(bp, rxr, false);
 	rxr->rx_next_cons = 0;
 	page_pool_disable_direct_recycling(rxr->page_pool);
+	if (bnxt_separate_head_pool())
+		page_pool_disable_direct_recycling(rxr->head_pool);
 
 	memcpy(qmem, rxr, sizeof(*rxr));
 	bnxt_init_rx_ring_struct(bp, qmem);
@@ -16135,7 +16229,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (bp->max_fltr < BNXT_MAX_FLTR)
 		bp->max_fltr = BNXT_MAX_FLTR;
 	bnxt_init_l2_fltr_tbl(bp);
-	bnxt_set_rx_skb_mode(bp, false);
+	__bnxt_set_rx_skb_mode(bp, false);
 	bnxt_set_tpa_flags(bp);
 	bnxt_set_ring_params(bp);
 	bnxt_rdma_aux_device_init(bp);
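The DIM-related hunks above share one teardown pattern: the poller only samples DIM while bnxt_vnic_is_active() returns true, and teardown first makes that check fail, then calls synchronize_net(), and only then cancel_work_sync() on the dim work, so nothing can re-arm the work after it has been cancelled. Below is a minimal userspace C analogy of that ordering; it is not driver code, and every name in it is made up for illustration.

/* Userspace sketch only: mirrors the publish-inactive -> synchronize ->
 * cancel ordering used by bnxt_queue_stop()/bnxt_cancel_dim() above.
 * All names here are hypothetical; build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static atomic_bool vnic_active = true;  /* stand-in for bnxt_vnic_is_active() */
static atomic_bool work_queued = false; /* stand-in for the queued dim.work */

/* stand-in for the NAPI poll loop: may only (re)arm work while active */
static void *poller(void *arg)
{
	(void)arg;
	for (int i = 0; i < 1000; i++) {
		if (atomic_load(&vnic_active))
			atomic_store(&work_queued, true); /* "schedule_work()" */
		usleep(100);
	}
	return NULL;
}

int main(void)
{
	pthread_t thr;

	pthread_create(&thr, NULL, poller, NULL);
	usleep(10 * 1000);

	/* Teardown ordering:
	 * 1. make the poller see the VNIC as inactive,
	 * 2. wait for pollers already in flight (the synchronize_net() step,
	 *    crudely modelled here by joining the thread),
	 * 3. only then cancel the deferred work -- it cannot be re-armed now.
	 */
	atomic_store(&vnic_active, false);
	pthread_join(thr, NULL);
	atomic_store(&work_queued, false); /* "cancel_work_sync()" */

	printf("work queued after teardown: %d\n", (int)atomic_load(&work_queued));
	return 0;
}

The point of the ordering is that the cancel step only sticks because no concurrent poller can still observe the "active" state and re-queue the work afterwards.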