Diffstat (limited to 'drivers/net/ethernet/chelsio/cxgb4')
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h            |  15
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c    |  57
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c    |  54
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c     |  17
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c       | 204
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c  | 175
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h  |  15
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c  |   3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c        |  10
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h        |  58
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c              |  32
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c            |   2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_regs.h          |   8
13 files changed, 502 insertions(+), 148 deletions(-)
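[Editor's note] The change repeated throughout the diff below is replacing bare `#ifdef CONFIG_...` / `#if defined(CONFIG_...)` tests with `IS_ENABLED(CONFIG_...)`. The distinction matters once the TLS/IPsec offload drivers can be built as modules: `#ifdef CONFIG_FOO` is only true for built-in (=y) code, while `IS_ENABLED(CONFIG_FOO)` is true for both =y and =m, because Kconfig additionally defines `CONFIG_FOO_MODULE` for modular builds. A minimal userspace sketch of the mechanism, with the macros simplified from include/linux/kconfig.h (CONFIG_FOO here is defined by hand purely for the demo, not part of this patch):

    /* is_enabled_demo.c - how IS_ENABLED() sees =y vs =m options. */
    #include <stdio.h>

    #define CONFIG_FOO_MODULE 1  /* what Kconfig emits for CONFIG_FOO=m */

    /* Simplified from include/linux/kconfig.h: if the option macro is
     * defined as 1, the placeholder expands to "0," and the second
     * argument (1) is picked; otherwise the junk token shifts a 0 in.
     */
    #define __ARG_PLACEHOLDER_1 0,
    #define __take_second_arg(__ignored, val, ...) val
    #define __is_defined(x) ___is_defined(x)
    #define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
    #define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
    #define IS_BUILTIN(option) __is_defined(option)
    #define IS_MODULE(option) __is_defined(option##_MODULE)
    #define IS_ENABLED(option) (IS_BUILTIN(option) || IS_MODULE(option))

    int main(void)
    {
            /* #ifdef CONFIG_FOO would be false here, losing the =m case. */
            printf("IS_BUILTIN=%d IS_MODULE=%d IS_ENABLED=%d\n",
                   IS_BUILTIN(CONFIG_FOO), IS_MODULE(CONFIG_FOO),
                   IS_ENABLED(CONFIG_FOO)); /* prints 0 1 1 */
            return 0;
    }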
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 9cb8b229c1b3..3352dad6ca99 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -146,6 +146,11 @@ enum {
 	CXGB4_ETHTOOL_FLASH_BOOTCFG = 4
 };
 
+enum cxgb4_netdev_tls_ops {
+	CXGB4_TLSDEV_OPS = 1,
+	CXGB4_XFRMDEV_OPS
+};
+
 struct cxgb4_bootcfg_data {
 	__le16 signature;
 	__u8 reserved[2];
@@ -1196,6 +1201,12 @@ struct adapter {
 	struct cxgb4_tc_u32_table *tc_u32;
 	struct chcr_ktls chcr_ktls;
 	struct chcr_stats_debug chcr_stats;
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
+	struct ch_ktls_stats_debug ch_ktls_stats;
+#endif
+#if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
+	struct ch_ipsec_stats_debug ch_ipsec_stats;
+#endif
 
 	/* TC flower offload */
 	bool tc_flower_initialized;
@@ -2100,7 +2111,7 @@ void free_tx_desc(struct adapter *adap, struct sge_txq *q,
 void cxgb4_eosw_txq_free_desc(struct adapter *adap,
 			      struct sge_eosw_txq *txq, u32 ndesc);
 int cxgb4_ethofld_send_flowc(struct net_device *dev, u32 eotid, u32 tc);
-void cxgb4_ethofld_restart(unsigned long data);
+void cxgb4_ethofld_restart(struct tasklet_struct *t);
 int cxgb4_ethofld_rx_handler(struct sge_rspq *q, const __be64 *rsp,
			     const struct pkt_gl *si);
 void free_txq(struct adapter *adap, struct sge_txq *q);
@@ -2169,7 +2180,7 @@ void cxgb4_enable_rx(struct adapter *adap, struct sge_rspq *q);
 void cxgb4_quiesce_rx(struct sge_rspq *q);
 int cxgb4_port_mirror_alloc(struct net_device *dev);
 void cxgb4_port_mirror_free(struct net_device *dev);
-#ifdef CONFIG_CHELSIO_TLS_DEVICE
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
 int cxgb4_set_ktls_feature(struct adapter *adap, bool enable);
 #endif
 #endif /* __CXGB4_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 05f33b7e3677..0273f40b85f7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -3527,6 +3527,10 @@ DEFINE_SHOW_ATTRIBUTE(meminfo);
 
 static int chcr_stats_show(struct seq_file *seq, void *v)
 {
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
+	struct ch_ktls_port_stats_debug *ktls_port;
+	int i = 0;
+#endif
 	struct adapter *adap = seq->private;
 
 	seq_puts(seq, "Chelsio Crypto Accelerator Stats \n");
@@ -3542,52 +3546,45 @@ static int chcr_stats_show(struct seq_file *seq, void *v)
 		   atomic_read(&adap->chcr_stats.error));
 	seq_printf(seq, "Fallback: %10u \n",
 		   atomic_read(&adap->chcr_stats.fallback));
-	seq_printf(seq, "IPSec PDU: %10u\n",
-		   atomic_read(&adap->chcr_stats.ipsec_cnt));
 	seq_printf(seq, "TLS PDU Tx: %10u\n",
 		   atomic_read(&adap->chcr_stats.tls_pdu_tx));
 	seq_printf(seq, "TLS PDU Rx: %10u\n",
 		   atomic_read(&adap->chcr_stats.tls_pdu_rx));
 	seq_printf(seq, "TLS Keys (DDR) Count: %10u\n",
 		   atomic_read(&adap->chcr_stats.tls_key));
-#ifdef CONFIG_CHELSIO_TLS_DEVICE
+#if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
+	seq_puts(seq, "\nChelsio Inline IPsec Crypto Accelerator Stats\n");
+	seq_printf(seq, "IPSec PDU: %10u\n",
+		   atomic_read(&adap->ch_ipsec_stats.ipsec_cnt));
+#endif
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
 	seq_puts(seq, "\nChelsio KTLS Crypto Accelerator Stats\n");
 	seq_printf(seq, "Tx TLS offload refcount: %20u\n",
 		   refcount_read(&adap->chcr_ktls.ktls_refcount));
-	seq_printf(seq, "Tx HW offload contexts added: %20llu\n",
-		   atomic64_read(&adap->chcr_stats.ktls_tx_ctx));
-	seq_printf(seq, "Tx connection created: %20llu\n",
-		   atomic64_read(&adap->chcr_stats.ktls_tx_connection_open));
-	seq_printf(seq, "Tx connection failed: %20llu\n",
-		   atomic64_read(&adap->chcr_stats.ktls_tx_connection_fail));
-	seq_printf(seq, "Tx connection closed: %20llu\n",
-		   atomic64_read(&adap->chcr_stats.ktls_tx_connection_close));
-	seq_printf(seq, "Packets passed for encryption : %20llu\n",
-		   atomic64_read(&adap->chcr_stats.ktls_tx_encrypted_packets));
-	seq_printf(seq, "Bytes passed for encryption : %20llu\n",
-		   atomic64_read(&adap->chcr_stats.ktls_tx_encrypted_bytes));
 	seq_printf(seq, "Tx records send: %20llu\n",
-		   atomic64_read(&adap->chcr_stats.ktls_tx_send_records));
+		   atomic64_read(&adap->ch_ktls_stats.ktls_tx_send_records));
 	seq_printf(seq, "Tx partial start of records: %20llu\n",
-		   atomic64_read(&adap->chcr_stats.ktls_tx_start_pkts));
+		   atomic64_read(&adap->ch_ktls_stats.ktls_tx_start_pkts));
 	seq_printf(seq, "Tx partial middle of records: %20llu\n",
-		   atomic64_read(&adap->chcr_stats.ktls_tx_middle_pkts));
+		   atomic64_read(&adap->ch_ktls_stats.ktls_tx_middle_pkts));
 	seq_printf(seq, "Tx partial end of record: %20llu\n",
-		   atomic64_read(&adap->chcr_stats.ktls_tx_end_pkts));
+		   atomic64_read(&adap->ch_ktls_stats.ktls_tx_end_pkts));
 	seq_printf(seq, "Tx complete records: %20llu\n",
-		   atomic64_read(&adap->chcr_stats.ktls_tx_complete_pkts));
+		   atomic64_read(&adap->ch_ktls_stats.ktls_tx_complete_pkts));
 	seq_printf(seq, "TX trim pkts : %20llu\n",
-		   atomic64_read(&adap->chcr_stats.ktls_tx_trimmed_pkts));
-	seq_printf(seq, "Tx out of order packets: %20llu\n",
-		   atomic64_read(&adap->chcr_stats.ktls_tx_ooo));
-	seq_printf(seq, "Tx drop pkts before HW offload: %20llu\n",
-		   atomic64_read(&adap->chcr_stats.ktls_tx_skip_no_sync_data));
-	seq_printf(seq, "Tx drop not synced packets: %20llu\n",
-		   atomic64_read(&adap->chcr_stats.ktls_tx_drop_no_sync_data));
-	seq_printf(seq, "Tx drop bypass req: %20llu\n",
-		   atomic64_read(&adap->chcr_stats.ktls_tx_drop_bypass_req));
+		   atomic64_read(&adap->ch_ktls_stats.ktls_tx_trimmed_pkts));
+	while (i < MAX_NPORTS) {
+		ktls_port = &adap->ch_ktls_stats.ktls_port[i];
+		seq_printf(seq, "Port %d\n", i);
+		seq_printf(seq, "Tx connection created: %20llu\n",
+			   atomic64_read(&ktls_port->ktls_tx_connection_open));
+		seq_printf(seq, "Tx connection failed: %20llu\n",
+			   atomic64_read(&ktls_port->ktls_tx_connection_fail));
+		seq_printf(seq, "Tx connection closed: %20llu\n",
+			   atomic64_read(&ktls_port->ktls_tx_connection_close));
+		i++;
+	}
 #endif
-
 	return 0;
 }
 DEFINE_SHOW_ATTRIBUTE(chcr_stats);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 9f3173f86eed..61ea3ec5c3fc 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -117,15 +117,7 @@ static const char stats_strings[][ETH_GSTRING_LEN] = {
 	"vlan_insertions ",
 	"gro_packets ",
 	"gro_merged ",
-};
-
-static char adapter_stats_strings[][ETH_GSTRING_LEN] = {
-	"db_drop ",
-	"db_full ",
-	"db_empty ",
-	"write_coal_success ",
-	"write_coal_fail ",
-#ifdef CONFIG_CHELSIO_TLS_DEVICE
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
 	"tx_tls_encrypted_packets",
 	"tx_tls_encrypted_bytes ",
 	"tx_tls_ctx ",
@@ -136,6 +128,14 @@ static char adapter_stats_strings[][ETH_GSTRING_LEN] = {
 #endif
 };
 
+static char adapter_stats_strings[][ETH_GSTRING_LEN] = {
+	"db_drop ",
+	"db_full ",
+	"db_empty ",
+	"write_coal_success ",
+	"write_coal_fail ",
+};
+
 static char loopback_stats_strings[][ETH_GSTRING_LEN] = {
 	"-------Loopback----------- ",
 	"octets_ok ",
@@ -257,15 +257,7 @@ struct queue_port_stats {
 	u64 vlan_ins;
 	u64 gro_pkts;
 	u64 gro_merged;
-};
-
-struct adapter_stats {
-	u64 db_drop;
-	u64 db_full;
-	u64 db_empty;
-	u64 wc_success;
-	u64 wc_fail;
-#ifdef CONFIG_CHELSIO_TLS_DEVICE
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
 	u64 tx_tls_encrypted_packets;
 	u64 tx_tls_encrypted_bytes;
 	u64 tx_tls_ctx;
@@ -276,12 +268,23 @@ struct adapter_stats {
 #endif
 };
 
+struct adapter_stats {
+	u64 db_drop;
+	u64 db_full;
+	u64 db_empty;
+	u64 wc_success;
+	u64 wc_fail;
+};
+
 static void collect_sge_port_stats(const struct adapter *adap,
 				   const struct port_info *p,
 				   struct queue_port_stats *s)
 {
 	const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
 	const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
+	const struct ch_ktls_port_stats_debug *ktls_stats;
+#endif
 	struct sge_eohw_txq *eohw_tx;
 	unsigned int i;
 
@@ -306,6 +309,21 @@ static void collect_sge_port_stats(const struct adapter *adap,
 			s->vlan_ins += eohw_tx->vlan_ins;
 		}
 	}
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
+	ktls_stats = &adap->ch_ktls_stats.ktls_port[p->port_id];
+	s->tx_tls_encrypted_packets =
+		atomic64_read(&ktls_stats->ktls_tx_encrypted_packets);
+	s->tx_tls_encrypted_bytes =
+		atomic64_read(&ktls_stats->ktls_tx_encrypted_bytes);
+	s->tx_tls_ctx = atomic64_read(&ktls_stats->ktls_tx_ctx);
+	s->tx_tls_ooo = atomic64_read(&ktls_stats->ktls_tx_ooo);
+	s->tx_tls_skip_no_sync_data =
+		atomic64_read(&ktls_stats->ktls_tx_skip_no_sync_data);
+	s->tx_tls_drop_no_sync_data =
+		atomic64_read(&ktls_stats->ktls_tx_drop_no_sync_data);
+	s->tx_tls_drop_bypass_req =
+		atomic64_read(&ktls_stats->ktls_tx_drop_bypass_req);
+#endif
 }
 
 static void collect_adapter_stats(struct adapter *adap, struct adapter_stats *s)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
index 481498585ead..6ec5f2f26f05 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
@@ -604,17 +604,14 @@ int cxgb4_get_free_ftid(struct net_device *dev, u8 family, bool hash_en,
 			/* If the new rule wants to get inserted into
 			 * HPFILTER region, but its prio is greater
 			 * than the rule with the highest prio in HASH
-			 * region, then reject the rule.
-			 */
-			if (t->tc_hash_tids_max_prio &&
-			    tc_prio > t->tc_hash_tids_max_prio)
-				break;
-
-			/* If there's not enough slots available
-			 * in HPFILTER region, then move on to
-			 * normal FILTER region immediately.
+			 * region, or if there's not enough slots
+			 * available in HPFILTER region, then skip
+			 * trying to insert this rule into HPFILTER
+			 * region and directly go to the next region.
 			 */
-			if (ftid + n > t->nhpftids) {
+			if ((t->tc_hash_tids_max_prio &&
+			     tc_prio > t->tc_hash_tids_max_prio) ||
+			    (ftid + n) > t->nhpftids) {
 				ftid = t->nhpftids;
 				continue;
 			}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index de078a5bf23e..a952fe198eb9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -66,7 +66,7 @@
 #include <linux/crash_dump.h>
 #include <net/udp_tunnel.h>
 #include <net/xfrm.h>
-#if defined(CONFIG_CHELSIO_TLS_DEVICE)
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
 #include <net/tls.h>
 #endif
 
@@ -6396,7 +6396,50 @@ static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
 }
 #endif /* CONFIG_PCI_IOV */
 
-#if defined(CONFIG_CHELSIO_TLS_DEVICE)
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE) || IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
+
+static int chcr_offload_state(struct adapter *adap,
+			      enum cxgb4_netdev_tls_ops op_val)
+{
+	switch (op_val) {
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
+	case CXGB4_TLSDEV_OPS:
+		if (!adap->uld[CXGB4_ULD_KTLS].handle) {
+			dev_dbg(adap->pdev_dev, "ch_ktls driver is not loaded\n");
+			return -EOPNOTSUPP;
+		}
+		if (!adap->uld[CXGB4_ULD_KTLS].tlsdev_ops) {
+			dev_dbg(adap->pdev_dev,
+				"ch_ktls driver has no registered tlsdev_ops\n");
+			return -EOPNOTSUPP;
+		}
+		break;
+#endif /* CONFIG_CHELSIO_TLS_DEVICE */
+#if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
+	case CXGB4_XFRMDEV_OPS:
+		if (!adap->uld[CXGB4_ULD_IPSEC].handle) {
+			dev_dbg(adap->pdev_dev, "chipsec driver is not loaded\n");
+			return -EOPNOTSUPP;
+		}
+		if (!adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops) {
+			dev_dbg(adap->pdev_dev,
+				"chipsec driver has no registered xfrmdev_ops\n");
+			return -EOPNOTSUPP;
+		}
+		break;
+#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
+	default:
+		dev_dbg(adap->pdev_dev,
+			"driver has no support for offload %d\n", op_val);
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
+#endif /* CONFIG_CHELSIO_TLS_DEVICE || CONFIG_CHELSIO_IPSEC_INLINE */
+
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
 
 static int cxgb4_ktls_dev_add(struct net_device *netdev, struct sock *sk,
 			      enum tls_offload_ctx_dir direction,
@@ -6404,30 +6447,21 @@ static int cxgb4_ktls_dev_add(struct net_device *netdev, struct sock *sk,
 			      u32 tcp_sn)
 {
 	struct adapter *adap = netdev2adap(netdev);
-	int ret = 0;
+	int ret;
 
 	mutex_lock(&uld_mutex);
-	if (!adap->uld[CXGB4_ULD_CRYPTO].handle) {
-		dev_err(adap->pdev_dev, "chcr driver is not loaded\n");
-		ret = -EOPNOTSUPP;
-		goto out_unlock;
-	}
-
-	if (!adap->uld[CXGB4_ULD_CRYPTO].tlsdev_ops) {
-		dev_err(adap->pdev_dev,
-			"chcr driver has no registered tlsdev_ops()\n");
-		ret = -EOPNOTSUPP;
+	ret = chcr_offload_state(adap, CXGB4_TLSDEV_OPS);
+	if (ret)
 		goto out_unlock;
-	}
 
 	ret = cxgb4_set_ktls_feature(adap, FW_PARAMS_PARAM_DEV_KTLS_HW_ENABLE);
 	if (ret)
 		goto out_unlock;
 
-	ret = adap->uld[CXGB4_ULD_CRYPTO].tlsdev_ops->tls_dev_add(netdev, sk,
-								  direction,
-								  crypto_info,
-								  tcp_sn);
+	ret = adap->uld[CXGB4_ULD_KTLS].tlsdev_ops->tls_dev_add(netdev, sk,
+								direction,
+								crypto_info,
+								tcp_sn);
 	/* if there is a failure, clear the refcount */
 	if (ret)
 		cxgb4_set_ktls_feature(adap,
@@ -6444,19 +6478,11 @@ static void cxgb4_ktls_dev_del(struct net_device *netdev,
 	struct adapter *adap = netdev2adap(netdev);
 
 	mutex_lock(&uld_mutex);
-	if (!adap->uld[CXGB4_ULD_CRYPTO].handle) {
-		dev_err(adap->pdev_dev, "chcr driver is not loaded\n");
+	if (chcr_offload_state(adap, CXGB4_TLSDEV_OPS))
 		goto out_unlock;
-	}
-
-	if (!adap->uld[CXGB4_ULD_CRYPTO].tlsdev_ops) {
-		dev_err(adap->pdev_dev,
-			"chcr driver has no registered tlsdev_ops\n");
-		goto out_unlock;
-	}
 
-	adap->uld[CXGB4_ULD_CRYPTO].tlsdev_ops->tls_dev_del(netdev, tls_ctx,
-							    direction);
+	adap->uld[CXGB4_ULD_KTLS].tlsdev_ops->tls_dev_del(netdev, tls_ctx,
+							  direction);
 	cxgb4_set_ktls_feature(adap, FW_PARAMS_PARAM_DEV_KTLS_HW_DISABLE);
 
 out_unlock:
@@ -6469,6 +6495,114 @@ static const struct tlsdev_ops cxgb4_ktls_ops = {
 };
 #endif /* CONFIG_CHELSIO_TLS_DEVICE */
 
+#if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
+
+static int cxgb4_xfrm_add_state(struct xfrm_state *x)
+{
+	struct adapter *adap = netdev2adap(x->xso.dev);
+	int ret;
+
+	if (!mutex_trylock(&uld_mutex)) {
+		dev_dbg(adap->pdev_dev,
+			"crypto uld critical resource is under use\n");
+		return -EBUSY;
+	}
+	ret = chcr_offload_state(adap, CXGB4_XFRMDEV_OPS);
+	if (ret)
+		goto out_unlock;
+
+	ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_add(x);
+
+out_unlock:
+	mutex_unlock(&uld_mutex);
+
+	return ret;
+}
+
+static void cxgb4_xfrm_del_state(struct xfrm_state *x)
+{
+	struct adapter *adap = netdev2adap(x->xso.dev);
+
+	if (!mutex_trylock(&uld_mutex)) {
+		dev_dbg(adap->pdev_dev,
+			"crypto uld critical resource is under use\n");
+		return;
+	}
+	if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS))
+		goto out_unlock;
+
+	adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_delete(x);
+
+out_unlock:
+	mutex_unlock(&uld_mutex);
+}
+
+static void cxgb4_xfrm_free_state(struct xfrm_state *x)
+{
+	struct adapter *adap = netdev2adap(x->xso.dev);
+
+	if (!mutex_trylock(&uld_mutex)) {
+		dev_dbg(adap->pdev_dev,
+			"crypto uld critical resource is under use\n");
+		return;
+	}
+	if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS))
+		goto out_unlock;
+
+	adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_free(x);
+
+out_unlock:
+	mutex_unlock(&uld_mutex);
+}
+
+static bool cxgb4_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
+{
+	struct adapter *adap = netdev2adap(x->xso.dev);
+	bool ret = false;
+
+	if (!mutex_trylock(&uld_mutex)) {
+		dev_dbg(adap->pdev_dev,
+			"crypto uld critical resource is under use\n");
+		return ret;
+	}
+	if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS))
+		goto out_unlock;
+
+	ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_offload_ok(skb, x);
+
+out_unlock:
+	mutex_unlock(&uld_mutex);
+	return ret;
+}
+
+static void cxgb4_advance_esn_state(struct xfrm_state *x)
+{
+	struct adapter *adap = netdev2adap(x->xso.dev);
+
+	if (!mutex_trylock(&uld_mutex)) {
+		dev_dbg(adap->pdev_dev,
+			"crypto uld critical resource is under use\n");
+		return;
+	}
+	if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS))
+		goto out_unlock;
+
+	adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_advance_esn(x);
+
+out_unlock:
+	mutex_unlock(&uld_mutex);
+}
+
+static const struct xfrmdev_ops cxgb4_xfrmdev_ops = {
+	.xdo_dev_state_add = cxgb4_xfrm_add_state,
+	.xdo_dev_state_delete = cxgb4_xfrm_del_state,
+	.xdo_dev_state_free = cxgb4_xfrm_free_state,
+	.xdo_dev_offload_ok = cxgb4_ipsec_offload_ok,
+	.xdo_dev_state_advance_esn = cxgb4_advance_esn_state,
+};
+
+#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
+
 static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	struct net_device *netdev;
@@ -6721,14 +6855,22 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		netdev->hw_features |= NETIF_F_HIGHDMA;
 		netdev->features |= netdev->hw_features;
 		netdev->vlan_features = netdev->features & VLAN_FEAT;
-#if defined(CONFIG_CHELSIO_TLS_DEVICE)
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
 		if (pi->adapter->params.crypto & FW_CAPS_CONFIG_TLS_HW) {
 			netdev->hw_features |= NETIF_F_HW_TLS_TX;
 			netdev->tlsdev_ops = &cxgb4_ktls_ops;
 			/* initialize the refcount */
 			refcount_set(&pi->adapter->chcr_ktls.ktls_refcount, 0);
 		}
-#endif
+#endif /* CONFIG_CHELSIO_TLS_DEVICE */
+#if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
+		if (pi->adapter->params.crypto & FW_CAPS_CONFIG_IPSEC_INLINE) {
+			netdev->hw_enc_features |= NETIF_F_HW_ESP;
+			netdev->features |= NETIF_F_HW_ESP;
+			netdev->xfrmdev_ops = &cxgb4_xfrmdev_ops;
+		}
+#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
+
 		netdev->priv_flags |= IFF_UNICAST_FLT;
 
 		/* MTU range: 81 - 9600 */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
index f642c1b475c4..1b88bd1c2dbe 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -60,6 +60,89 @@ static struct ch_tc_pedit_fields pedits[] = {
 	PEDIT_FIELDS(IP6_, DST_127_96, 4, nat_lip, 12),
 };
 
+static const struct cxgb4_natmode_config cxgb4_natmode_config_array[] = {
+	/* Default supported NAT modes */
+	{
+		.chip = CHELSIO_T5,
+		.flags = CXGB4_ACTION_NATMODE_NONE,
+		.natmode = NAT_MODE_NONE,
+	},
+	{
+		.chip = CHELSIO_T5,
+		.flags = CXGB4_ACTION_NATMODE_DIP,
+		.natmode = NAT_MODE_DIP,
+	},
+	{
+		.chip = CHELSIO_T5,
+		.flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_DPORT,
+		.natmode = NAT_MODE_DIP_DP,
+	},
+	{
+		.chip = CHELSIO_T5,
+		.flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_DPORT |
+			 CXGB4_ACTION_NATMODE_SIP,
+		.natmode = NAT_MODE_DIP_DP_SIP,
+	},
+	{
+		.chip = CHELSIO_T5,
+		.flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_DPORT |
+			 CXGB4_ACTION_NATMODE_SPORT,
+		.natmode = NAT_MODE_DIP_DP_SP,
+	},
+	{
+		.chip = CHELSIO_T5,
+		.flags = CXGB4_ACTION_NATMODE_SIP | CXGB4_ACTION_NATMODE_SPORT,
+		.natmode = NAT_MODE_SIP_SP,
+	},
+	{
+		.chip = CHELSIO_T5,
+		.flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SIP |
+			 CXGB4_ACTION_NATMODE_SPORT,
+		.natmode = NAT_MODE_DIP_SIP_SP,
+	},
+	{
+		.chip = CHELSIO_T5,
+		.flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SIP |
+			 CXGB4_ACTION_NATMODE_DPORT |
+			 CXGB4_ACTION_NATMODE_SPORT,
+		.natmode = NAT_MODE_ALL,
+	},
+	/* T6+ can ignore L4 ports when they're disabled. */
+	{
+		.chip = CHELSIO_T6,
+		.flags = CXGB4_ACTION_NATMODE_SIP,
+		.natmode = NAT_MODE_SIP_SP,
+	},
+	{
+		.chip = CHELSIO_T6,
+		.flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SPORT,
+		.natmode = NAT_MODE_DIP_DP_SP,
+	},
+	{
+		.chip = CHELSIO_T6,
+		.flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SIP,
+		.natmode = NAT_MODE_ALL,
+	},
+};
+
+static void cxgb4_action_natmode_tweak(struct ch_filter_specification *fs,
+				       u8 natmode_flags)
+{
+	u8 i = 0;
+
+	/* Translate the enabled NAT 4-tuple fields to one of the
+	 * hardware supported NAT mode configurations. This ensures
+	 * that we pick a valid combination, where the disabled fields
+	 * do not get overwritten to 0.
+	 */
+	for (i = 0; i < ARRAY_SIZE(cxgb4_natmode_config_array); i++) {
+		if (cxgb4_natmode_config_array[i].flags == natmode_flags) {
+			fs->nat_mode = cxgb4_natmode_config_array[i].natmode;
+			return;
+		}
+	}
+}
+
 static struct ch_tc_flower_entry *allocate_flower_entry(void)
 {
 	struct ch_tc_flower_entry *new = kzalloc(sizeof(*new), GFP_KERNEL);
@@ -289,7 +372,8 @@ static void offload_pedit(struct ch_filter_specification *fs, u32 val, u32 mask,
 }
 
 static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
-				u32 mask, u32 offset, u8 htype)
+				u32 mask, u32 offset, u8 htype,
+				u8 *natmode_flags)
 {
 	switch (htype) {
 	case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
@@ -314,60 +398,94 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
 		switch (offset) {
 		case PEDIT_IP4_SRC:
 			offload_pedit(fs, val, mask, IP4_SRC);
+			*natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
 			break;
 		case PEDIT_IP4_DST:
 			offload_pedit(fs, val, mask, IP4_DST);
+			*natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
 		}
-		fs->nat_mode = NAT_MODE_ALL;
 		break;
 	case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
 		switch (offset) {
 		case PEDIT_IP6_SRC_31_0:
 			offload_pedit(fs, val, mask, IP6_SRC_31_0);
+			*natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
 			break;
 		case PEDIT_IP6_SRC_63_32:
 			offload_pedit(fs, val, mask, IP6_SRC_63_32);
+			*natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
 			break;
 		case PEDIT_IP6_SRC_95_64:
 			offload_pedit(fs, val, mask, IP6_SRC_95_64);
+			*natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
 			break;
 		case PEDIT_IP6_SRC_127_96:
 			offload_pedit(fs, val, mask, IP6_SRC_127_96);
+			*natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
 			break;
 		case PEDIT_IP6_DST_31_0:
 			offload_pedit(fs, val, mask, IP6_DST_31_0);
+			*natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
 			break;
 		case PEDIT_IP6_DST_63_32:
 			offload_pedit(fs, val, mask, IP6_DST_63_32);
+			*natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
 			break;
 		case PEDIT_IP6_DST_95_64:
 			offload_pedit(fs, val, mask, IP6_DST_95_64);
+			*natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
 			break;
 		case PEDIT_IP6_DST_127_96:
 			offload_pedit(fs, val, mask, IP6_DST_127_96);
+			*natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
 		}
-		fs->nat_mode = NAT_MODE_ALL;
 		break;
 	case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
 		switch (offset) {
 		case PEDIT_TCP_SPORT_DPORT:
-			if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
+			if (~mask & PEDIT_TCP_UDP_SPORT_MASK) {
 				fs->nat_fport = val;
-			else
+				*natmode_flags |= CXGB4_ACTION_NATMODE_SPORT;
+			} else {
 				fs->nat_lport = val >> 16;
+				*natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
+			}
 		}
-		fs->nat_mode = NAT_MODE_ALL;
 		break;
 	case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
 		switch (offset) {
 		case PEDIT_UDP_SPORT_DPORT:
-			if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
+			if (~mask & PEDIT_TCP_UDP_SPORT_MASK) {
 				fs->nat_fport = val;
-			else
+				*natmode_flags |= CXGB4_ACTION_NATMODE_SPORT;
+			} else {
 				fs->nat_lport = val >> 16;
+				*natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
+			}
 		}
-		fs->nat_mode = NAT_MODE_ALL;
+		break;
+	}
+}
+
+static int cxgb4_action_natmode_validate(struct adapter *adap, u8 natmode_flags,
+					 struct netlink_ext_ack *extack)
+{
+	u8 i = 0;
+
+	/* Extract the NAT mode to enable based on what 4-tuple fields
+	 * are enabled to be overwritten. This ensures that the
+	 * disabled fields don't get overwritten to 0.
+	 */
+	for (i = 0; i < ARRAY_SIZE(cxgb4_natmode_config_array); i++) {
+		const struct cxgb4_natmode_config *c;
+
+		c = &cxgb4_natmode_config_array[i];
+		if (CHELSIO_CHIP_VERSION(adap->params.chip) >= c->chip &&
+		    natmode_flags == c->flags)
+			return 0;
 	}
+	NL_SET_ERR_MSG_MOD(extack, "Unsupported NAT mode 4-tuple combination");
+	return -EOPNOTSUPP;
 }
 
void cxgb4_process_flow_actions(struct net_device *in,
@@ -375,6 +493,7 @@ void cxgb4_process_flow_actions(struct net_device *in,
 			       struct ch_filter_specification *fs)
 {
 	struct flow_action_entry *act;
+	u8 natmode_flags = 0;
 	int i;
 
 	flow_action_for_each(i, act, actions) {
@@ -426,7 +545,8 @@ void cxgb4_process_flow_actions(struct net_device *in,
 				val = act->mangle.val;
 				offset = act->mangle.offset;
 
-				process_pedit_field(fs, val, mask, offset, htype);
+				process_pedit_field(fs, val, mask, offset, htype,
+						    &natmode_flags);
 			}
 			break;
 		case FLOW_ACTION_QUEUE:
@@ -438,6 +558,9 @@ void cxgb4_process_flow_actions(struct net_device *in,
 			break;
 		}
 	}
+	if (natmode_flags)
+		cxgb4_action_natmode_tweak(fs, natmode_flags);
+
 }
 
 static bool valid_l4_mask(u32 mask)
@@ -454,7 +577,8 @@ static bool valid_l4_mask(u32 mask)
 }
 
 static bool valid_pedit_action(struct net_device *dev,
-			       const struct flow_action_entry *act)
+			       const struct flow_action_entry *act,
+			       u8 *natmode_flags)
 {
 	u32 mask, offset;
 	u8 htype;
@@ -479,7 +603,10 @@ static bool valid_pedit_action(struct net_device *dev,
 	case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
 		switch (offset) {
 		case PEDIT_IP4_SRC:
+			*natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
+			break;
 		case PEDIT_IP4_DST:
+			*natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
 			break;
 		default:
 			netdev_err(dev, "%s: Unsupported pedit field\n",
@@ -493,10 +620,13 @@ static bool valid_pedit_action(struct net_device *dev,
 		case PEDIT_IP6_SRC_63_32:
 		case PEDIT_IP6_SRC_95_64:
 		case PEDIT_IP6_SRC_127_96:
+			*natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
+			break;
 		case PEDIT_IP6_DST_31_0:
 		case PEDIT_IP6_DST_63_32:
 		case PEDIT_IP6_DST_95_64:
 		case PEDIT_IP6_DST_127_96:
+			*natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
 			break;
 		default:
 			netdev_err(dev, "%s: Unsupported pedit field\n",
@@ -512,6 +642,10 @@ static bool valid_pedit_action(struct net_device *dev,
 				   __func__);
 			return false;
 		}
+		if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
+			*natmode_flags |= CXGB4_ACTION_NATMODE_SPORT;
+		else
+			*natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
 		break;
 	default:
 		netdev_err(dev, "%s: Unsupported pedit field\n",
@@ -527,6 +661,10 @@ static bool valid_pedit_action(struct net_device *dev,
 				   __func__);
 			return false;
 		}
+		if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
+			*natmode_flags |= CXGB4_ACTION_NATMODE_SPORT;
+		else
+			*natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
 		break;
 	default:
 		netdev_err(dev, "%s: Unsupported pedit field\n",
@@ -546,10 +684,12 @@ int cxgb4_validate_flow_actions(struct net_device *dev,
 				struct netlink_ext_ack *extack,
 				u8 matchall_filter)
 {
+	struct adapter *adap = netdev2adap(dev);
 	struct flow_action_entry *act;
 	bool act_redir = false;
 	bool act_pedit = false;
 	bool act_vlan = false;
+	u8 natmode_flags = 0;
 	int i;
 
 	if (!flow_action_basic_hw_stats_check(actions, extack))
@@ -563,7 +703,6 @@ int cxgb4_validate_flow_actions(struct net_device *dev,
 			break;
 		case FLOW_ACTION_MIRRED:
 		case FLOW_ACTION_REDIRECT: {
-			struct adapter *adap = netdev2adap(dev);
 			struct net_device *n_dev, *target_dev;
 			bool found = false;
 			unsigned int i;
@@ -620,7 +759,8 @@ int cxgb4_validate_flow_actions(struct net_device *dev,
 			}
 			break;
 		case FLOW_ACTION_MANGLE: {
-			bool pedit_valid = valid_pedit_action(dev, act);
+			bool pedit_valid = valid_pedit_action(dev, act,
+							      &natmode_flags);
 
 			if (!pedit_valid)
 				return -EOPNOTSUPP;
@@ -642,6 +782,15 @@ int cxgb4_validate_flow_actions(struct net_device *dev,
 		return -EINVAL;
 	}
 
+	if (act_pedit) {
+		int ret;
+
+		ret = cxgb4_action_natmode_validate(adap, natmode_flags,
+						    extack);
+		if (ret)
+			return ret;
+	}
+
 	return 0;
 }
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h
index 6296e1d5a12b..3a2fa00c8cde 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h
@@ -108,6 +108,21 @@ struct ch_tc_pedit_fields {
 #define PEDIT_TCP_SPORT_DPORT 0x0
 #define PEDIT_UDP_SPORT_DPORT 0x0
 
+enum cxgb4_action_natmode_flags {
+	CXGB4_ACTION_NATMODE_NONE = 0,
+	CXGB4_ACTION_NATMODE_DIP = (1 << 0),
+	CXGB4_ACTION_NATMODE_SIP = (1 << 1),
+	CXGB4_ACTION_NATMODE_DPORT = (1 << 2),
+	CXGB4_ACTION_NATMODE_SPORT = (1 << 3),
+};
+
+/* TC PEDIT action to NATMODE translation entry */
+struct cxgb4_natmode_config {
+	enum chip_type chip;
+	u8 flags;
+	u8 natmode;
+};
+
 void cxgb4_process_flow_actions(struct net_device *in,
 				struct flow_action *actions,
 				struct ch_filter_specification *fs);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
index ae7123a9de8e..6c259de96f96 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
@@ -114,8 +114,7 @@ static int cxgb4_init_eosw_txq(struct net_device *dev,
 	eosw_txq->cred = adap->params.ofldq_wr_cred;
 	eosw_txq->hwqid = hwqid;
 	eosw_txq->netdev = dev;
-	tasklet_init(&eosw_txq->qresume_tsk, cxgb4_ethofld_restart,
-		     (unsigned long)eosw_txq);
+	tasklet_setup(&eosw_txq->qresume_tsk, cxgb4_ethofld_restart);
 	return 0;
 }
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index 08439e215efe..743af9e654aa 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -663,7 +663,7 @@ static int uld_attach(struct adapter *adap, unsigned int uld)
 	return 0;
 }
 
-#ifdef CONFIG_CHELSIO_TLS_DEVICE
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
 static bool cxgb4_uld_in_use(struct adapter *adap)
 {
 	const struct tid_info *t = &adap->tids;
@@ -690,8 +690,8 @@ int cxgb4_set_ktls_feature(struct adapter *adap, bool enable)
 		 * ULD is/are already active, return failure.
 		 */
 		if (cxgb4_uld_in_use(adap)) {
-			dev_warn(adap->pdev_dev,
-				 "ULD connections (tid/stid) active. Can't enable kTLS\n");
+			dev_dbg(adap->pdev_dev,
+				"ULD connections (tid/stid) active. Can't enable kTLS\n");
 			return -EINVAL;
 		}
 		ret = t4_set_params(adap, adap->mbox, adap->pf,
@@ -699,7 +699,7 @@ int cxgb4_set_ktls_feature(struct adapter *adap, bool enable)
 		if (ret)
 			return ret;
 		refcount_set(&adap->chcr_ktls.ktls_refcount, 1);
-		pr_info("kTLS has been enabled. Restrictions placed on ULD support\n");
+		pr_debug("kTLS has been enabled. Restrictions placed on ULD support\n");
 	} else {
 		/* ktls settings already up, just increment refcount. */
 		refcount_inc(&adap->chcr_ktls.ktls_refcount);
@@ -716,7 +716,7 @@ int cxgb4_set_ktls_feature(struct adapter *adap, bool enable)
 				    0, 1, &params, &params);
 		if (ret)
 			return ret;
-		pr_info("kTLS is disabled. Restrictions on ULD support removed\n");
+		pr_debug("kTLS is disabled. Restrictions on ULD support removed\n");
 	}
 }
 
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index a963fd0b4540..b169776ab484 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -40,9 +40,11 @@
 #include <linux/skbuff.h>
 #include <linux/inetdevice.h>
 #include <linux/atomic.h>
+#include <net/tls.h>
 #include "cxgb4.h"
 
 #define MAX_ULD_QSETS 16
+#define MAX_ULD_NPORTS 4
 
 /* CPL message priority levels */
 enum {
@@ -302,7 +304,9 @@ enum cxgb4_uld {
 	CXGB4_ULD_ISCSI,
 	CXGB4_ULD_ISCSIT,
 	CXGB4_ULD_CRYPTO,
+	CXGB4_ULD_IPSEC,
 	CXGB4_ULD_TLS,
+	CXGB4_ULD_KTLS,
 	CXGB4_ULD_MAX
 };
 
@@ -361,28 +365,11 @@ struct cxgb4_virt_res { /* virtualized HW resources */
 	struct cxgb4_range ppod_edram;
 };
 
-struct chcr_stats_debug {
-	atomic_t cipher_rqst;
-	atomic_t digest_rqst;
-	atomic_t aead_rqst;
-	atomic_t complete;
-	atomic_t error;
-	atomic_t fallback;
-	atomic_t ipsec_cnt;
-	atomic_t tls_pdu_tx;
-	atomic_t tls_pdu_rx;
-	atomic_t tls_key;
-#ifdef CONFIG_CHELSIO_TLS_DEVICE
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
+struct ch_ktls_port_stats_debug {
 	atomic64_t ktls_tx_connection_open;
 	atomic64_t ktls_tx_connection_fail;
 	atomic64_t ktls_tx_connection_close;
-	atomic64_t ktls_tx_send_records;
-	atomic64_t ktls_tx_end_pkts;
-	atomic64_t ktls_tx_start_pkts;
-	atomic64_t ktls_tx_middle_pkts;
-	atomic64_t ktls_tx_retransmit_pkts;
-	atomic64_t ktls_tx_complete_pkts;
-	atomic64_t ktls_tx_trimmed_pkts;
 	atomic64_t ktls_tx_encrypted_packets;
 	atomic64_t ktls_tx_encrypted_bytes;
 	atomic64_t ktls_tx_ctx;
@@ -390,10 +377,38 @@ struct chcr_stats_debug {
 	atomic64_t ktls_tx_skip_no_sync_data;
 	atomic64_t ktls_tx_drop_no_sync_data;
 	atomic64_t ktls_tx_drop_bypass_req;
+};
 
+struct ch_ktls_stats_debug {
+	struct ch_ktls_port_stats_debug ktls_port[MAX_ULD_NPORTS];
+	atomic64_t ktls_tx_send_records;
+	atomic64_t ktls_tx_end_pkts;
+	atomic64_t ktls_tx_start_pkts;
+	atomic64_t ktls_tx_middle_pkts;
+	atomic64_t ktls_tx_retransmit_pkts;
+	atomic64_t ktls_tx_complete_pkts;
+	atomic64_t ktls_tx_trimmed_pkts;
+};
 #endif
+
+struct chcr_stats_debug {
+	atomic_t cipher_rqst;
+	atomic_t digest_rqst;
+	atomic_t aead_rqst;
+	atomic_t complete;
+	atomic_t error;
+	atomic_t fallback;
+	atomic_t tls_pdu_tx;
+	atomic_t tls_pdu_rx;
+	atomic_t tls_key;
 };
 
+#if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
+struct ch_ipsec_stats_debug {
+	atomic_t ipsec_cnt;
+};
+#endif
+
 #define OCQ_WIN_OFFSET(pdev, vres) \
 	(pci_resource_len((pdev), 2) - roundup_pow_of_two((vres)->ocq.size))
@@ -470,9 +485,12 @@ struct cxgb4_uld_info {
 			      struct napi_struct *napi);
 	void (*lro_flush)(struct t4_lro_mgr *);
 	int (*tx_handler)(struct sk_buff *skb, struct net_device *dev);
-#if IS_ENABLED(CONFIG_TLS_DEVICE)
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
 	const struct tlsdev_ops *tlsdev_ops;
 #endif
+#if IS_ENABLED(CONFIG_XFRM_OFFLOAD)
+	const struct xfrmdev_ops *xfrmdev_ops;
+#endif
 };
 
 void cxgb4_uld_enable(struct adapter *adap);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 869431a1eedd..a9e9c7ae565d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -1416,14 +1416,14 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 	pi = netdev_priv(dev);
 	adap = pi->adapter;
 	ssi = skb_shinfo(skb);
-#ifdef CONFIG_CHELSIO_IPSEC_INLINE
+#if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
 	if (xfrm_offload(skb) && !ssi->gso_size)
-		return adap->uld[CXGB4_ULD_CRYPTO].tx_handler(skb, dev);
+		return adap->uld[CXGB4_ULD_IPSEC].tx_handler(skb, dev);
 #endif /* CHELSIO_IPSEC_INLINE */
 
-#ifdef CONFIG_CHELSIO_TLS_DEVICE
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
 	if (skb->decrypted)
-		return adap->uld[CXGB4_ULD_CRYPTO].tx_handler(skb, dev);
+		return adap->uld[CXGB4_ULD_KTLS].tx_handler(skb, dev);
 #endif /* CHELSIO_TLS_DEVICE */
 
 	qidx = skb_get_queue_mapping(skb);
@@ -2660,15 +2660,15 @@ static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb)
 
 /**
  *	restart_ctrlq - restart a suspended control queue
- *	@data: the control queue to restart
+ *	@t: pointer to the tasklet associated with this handler
  *
  *	Resumes transmission on a suspended Tx control queue.
  */
-static void restart_ctrlq(unsigned long data)
+static void restart_ctrlq(struct tasklet_struct *t)
 {
 	struct sk_buff *skb;
 	unsigned int written = 0;
-	struct sge_ctrl_txq *q = (struct sge_ctrl_txq *)data;
+	struct sge_ctrl_txq *q = from_tasklet(q, t, qresume_tsk);
 
 	spin_lock(&q->sendq.lock);
 	reclaim_completed_tx_imm(&q->q);
@@ -2961,13 +2961,13 @@ static int ofld_xmit(struct sge_uld_txq *q, struct sk_buff *skb)
 
 /**
  *	restart_ofldq - restart a suspended offload queue
- *	@data: the offload queue to restart
+ *	@t: pointer to the tasklet associated with this handler
 *
 *	Resumes transmission on a suspended Tx offload queue.
 */
-static void restart_ofldq(unsigned long data)
+static void restart_ofldq(struct tasklet_struct *t)
 {
-	struct sge_uld_txq *q = (struct sge_uld_txq *)data;
+	struct sge_uld_txq *q = from_tasklet(q, t, qresume_tsk);
 
 	spin_lock(&q->sendq.lock);
 	q->full = 0; /* the queue actually is completely empty now */
@@ -3887,9 +3887,10 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
 	return work_done;
 }
 
-void cxgb4_ethofld_restart(unsigned long data)
+void cxgb4_ethofld_restart(struct tasklet_struct *t)
 {
-	struct sge_eosw_txq *eosw_txq = (struct sge_eosw_txq *)data;
+	struct sge_eosw_txq *eosw_txq = from_tasklet(eosw_txq, t,
+						     qresume_tsk);
 	int pktcount;
 
 	spin_lock(&eosw_txq->lock);
@@ -4580,7 +4581,7 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
 	init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_G(ntohl(c.cmpliqid_eqid)));
 	txq->adap = adap;
 	skb_queue_head_init(&txq->sendq);
-	tasklet_init(&txq->qresume_tsk, restart_ctrlq, (unsigned long)txq);
+	tasklet_setup(&txq->qresume_tsk, restart_ctrlq);
 	txq->full = 0;
 	return 0;
 }
@@ -4670,7 +4671,7 @@ int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq,
 	txq->q.q_type = CXGB4_TXQ_ULD;
 	txq->adap = adap;
 	skb_queue_head_init(&txq->sendq);
-	tasklet_init(&txq->qresume_tsk, restart_ofldq, (unsigned long)txq);
+	tasklet_setup(&txq->qresume_tsk, restart_ofldq);
 	txq->full = 0;
 	txq->mapping_err = 0;
 	return 0;
@@ -4872,9 +4873,6 @@ void t4_sge_stop(struct adapter *adap)
 	int i;
 	struct sge *s = &adap->sge;
 
-	if (in_interrupt()) /* actions below require waiting */
-		return;
-
 	if (s->rx_timer.function)
 		del_timer_sync(&s->rx_timer);
 	if (s->tx_timer.function)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index fa3367966f4b..98d01a7497ec 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -4745,9 +4745,11 @@ static void le_intr_handler(struct adapter *adap)
 	static struct intr_info t6_le_intr_info[] = {
 		{ T6_LIPMISS_F, "LE LIP miss", -1, 0 },
 		{ T6_LIP0_F, "LE 0 LIP error", -1, 0 },
+		{ CMDTIDERR_F, "LE cmd tid error", -1, 1 },
 		{ TCAMINTPERR_F, "LE parity error", -1, 1 },
 		{ T6_UNKNOWNCMD_F, "LE unknown command", -1, 1 },
 		{ SSRAMINTPERR_F, "LE request queue parity error", -1, 1 },
+		{ HASHTBLMEMCRCERR_F, "LE hash table mem crc error", -1, 0 },
 		{ 0 }
 	};
 
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index 065c01c654ff..b11a172b5174 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -3017,6 +3017,14 @@
 #define REV_V(x) ((x) << REV_S)
 #define REV_G(x) (((x) >> REV_S) & REV_M)
 
+#define HASHTBLMEMCRCERR_S 27
+#define HASHTBLMEMCRCERR_V(x) ((x) << HASHTBLMEMCRCERR_S)
+#define HASHTBLMEMCRCERR_F HASHTBLMEMCRCERR_V(1U)
+
+#define CMDTIDERR_S 22
+#define CMDTIDERR_V(x) ((x) << CMDTIDERR_S)
+#define CMDTIDERR_F CMDTIDERR_V(1U)
+
 #define T6_UNKNOWNCMD_S 3
 #define T6_UNKNOWNCMD_V(x) ((x) << T6_UNKNOWNCMD_S)
 #define T6_UNKNOWNCMD_F T6_UNKNOWNCMD_V(1U)