Diffstat (limited to 'drivers/net/ethernet/freescale/fec_main.c')
-rw-r--r-- | drivers/net/ethernet/freescale/fec_main.c | 1236 |
1 file changed, 935 insertions(+), 301 deletions(-)
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 4f87dffcb9b2..87975b5dda94 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -57,21 +57,19 @@ #include <linux/regulator/consumer.h> #include <linux/if_vlan.h> #include <linux/pinctrl/consumer.h> +#include <linux/prefetch.h> #include <asm/cacheflush.h> #include "fec.h" static void set_multicast_list(struct net_device *ndev); - -#if defined(CONFIG_ARM) -#define FEC_ALIGNMENT 0xf -#else -#define FEC_ALIGNMENT 0x3 -#endif +static void fec_enet_itr_coal_init(struct net_device *ndev); #define DRIVER_NAME "fec" +#define FEC_ENET_GET_QUQUE(_x) ((_x == 0) ? 1 : ((_x == 1) ? 2 : 0)) + /* Pause frame feild and FIFO threshold */ #define FEC_ENET_FCE (1 << 5) #define FEC_ENET_RSEM_V 0x84 @@ -104,6 +102,22 @@ static void set_multicast_list(struct net_device *ndev); * ENET_TDAR[TDAR]. */ #define FEC_QUIRK_ERR006358 (1 << 7) +/* ENET IP hw AVB + * + * i.MX6SX ENET IP add Audio Video Bridging (AVB) feature support. + * - Two class indicators on receive with configurable priority + * - Two class indicators and line speed timer on transmit allowing + * implementation class credit based shapers externally + * - Additional DMA registers provisioned to allow managing up to 3 + * independent rings + */ +#define FEC_QUIRK_HAS_AVB (1 << 8) +/* There is a TDAR race condition for mutliQ when the software sets TDAR + * and the UDMA clears TDAR simultaneously or in a small window (2-4 cycles). + * This will cause the udma_tx and udma_tx_arbiter state machines to hang. + * The issue exist at i.MX6SX enet IP. + */ +#define FEC_QUIRK_ERR007885 (1 << 9) static struct platform_device_id fec_devtype[] = { { @@ -128,6 +142,12 @@ static struct platform_device_id fec_devtype[] = { .name = "mvf600-fec", .driver_data = FEC_QUIRK_ENET_MAC, }, { + .name = "imx6sx-fec", + .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | + FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | + FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB | + FEC_QUIRK_ERR007885, + }, { /* sentinel */ } }; @@ -139,6 +159,7 @@ enum imx_fec_type { IMX28_FEC, IMX6Q_FEC, MVF600_FEC, + IMX6SX_FEC, }; static const struct of_device_id fec_dt_ids[] = { @@ -147,6 +168,7 @@ static const struct of_device_id fec_dt_ids[] = { { .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], }, { .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], }, { .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], }, + { .compatible = "fsl,imx6sx-fec", .data = &fec_devtype[IMX6SX_FEC], }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, fec_dt_ids); @@ -175,21 +197,6 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address"); #endif #endif /* CONFIG_M5272 */ -/* Interrupt events/masks. 
*/ -#define FEC_ENET_HBERR ((uint)0x80000000) /* Heartbeat error */ -#define FEC_ENET_BABR ((uint)0x40000000) /* Babbling receiver */ -#define FEC_ENET_BABT ((uint)0x20000000) /* Babbling transmitter */ -#define FEC_ENET_GRA ((uint)0x10000000) /* Graceful stop complete */ -#define FEC_ENET_TXF ((uint)0x08000000) /* Full frame transmitted */ -#define FEC_ENET_TXB ((uint)0x04000000) /* A buffer was transmitted */ -#define FEC_ENET_RXF ((uint)0x02000000) /* Full frame received */ -#define FEC_ENET_RXB ((uint)0x01000000) /* A buffer was received */ -#define FEC_ENET_MII ((uint)0x00800000) /* MII interrupt */ -#define FEC_ENET_EBERR ((uint)0x00400000) /* SDMA bus error */ - -#define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII) -#define FEC_RX_DISABLED_IMASK (FEC_DEFAULT_IMASK & (~FEC_ENET_RXF)) - /* The FEC stores dest/src/type/vlan, data, and checksum for receive packets. */ #define PKT_MAXBUF_SIZE 1522 @@ -230,6 +237,8 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address"); #define FEC_PAUSE_FLAG_AUTONEG 0x1 #define FEC_PAUSE_FLAG_ENABLE 0x2 +#define COPYBREAK_DEFAULT 256 + #define TSO_HEADER_SIZE 128 /* Max number of allowed TCP segments for software TSO */ #define FEC_MAX_TSO_SEGS 100 @@ -242,22 +251,26 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address"); static int mii_cnt; static inline -struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, struct fec_enet_private *fep) +struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, + struct fec_enet_private *fep, + int queue_id) { struct bufdesc *new_bd = bdp + 1; struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp + 1; + struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue_id]; + struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue_id]; struct bufdesc_ex *ex_base; struct bufdesc *base; int ring_size; - if (bdp >= fep->tx_bd_base) { - base = fep->tx_bd_base; - ring_size = fep->tx_ring_size; - ex_base = (struct bufdesc_ex *)fep->tx_bd_base; + if (bdp >= txq->tx_bd_base) { + base = txq->tx_bd_base; + ring_size = txq->tx_ring_size; + ex_base = (struct bufdesc_ex *)txq->tx_bd_base; } else { - base = fep->rx_bd_base; - ring_size = fep->rx_ring_size; - ex_base = (struct bufdesc_ex *)fep->rx_bd_base; + base = rxq->rx_bd_base; + ring_size = rxq->rx_ring_size; + ex_base = (struct bufdesc_ex *)rxq->rx_bd_base; } if (fep->bufdesc_ex) @@ -269,22 +282,26 @@ struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, struct fec_enet_priva } static inline -struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, struct fec_enet_private *fep) +struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, + struct fec_enet_private *fep, + int queue_id) { struct bufdesc *new_bd = bdp - 1; struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp - 1; + struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue_id]; + struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue_id]; struct bufdesc_ex *ex_base; struct bufdesc *base; int ring_size; - if (bdp >= fep->tx_bd_base) { - base = fep->tx_bd_base; - ring_size = fep->tx_ring_size; - ex_base = (struct bufdesc_ex *)fep->tx_bd_base; + if (bdp >= txq->tx_bd_base) { + base = txq->tx_bd_base; + ring_size = txq->tx_ring_size; + ex_base = (struct bufdesc_ex *)txq->tx_bd_base; } else { - base = fep->rx_bd_base; - ring_size = fep->rx_ring_size; - ex_base = (struct bufdesc_ex *)fep->rx_bd_base; + base = rxq->rx_bd_base; + ring_size = rxq->rx_ring_size; + ex_base = (struct bufdesc_ex *)rxq->rx_bd_base; } if (fep->bufdesc_ex) @@ -300,14 +317,15 @@ static int fec_enet_get_bd_index(struct bufdesc *base, 
struct bufdesc *bdp, return ((const char *)bdp - (const char *)base) / fep->bufdesc_size; } -static int fec_enet_get_free_txdesc_num(struct fec_enet_private *fep) +static int fec_enet_get_free_txdesc_num(struct fec_enet_private *fep, + struct fec_enet_priv_tx_q *txq) { int entries; - entries = ((const char *)fep->dirty_tx - - (const char *)fep->cur_tx) / fep->bufdesc_size - 1; + entries = ((const char *)txq->dirty_tx - + (const char *)txq->cur_tx) / fep->bufdesc_size - 1; - return entries > 0 ? entries : entries + fep->tx_ring_size; + return entries > 0 ? entries : entries + txq->tx_ring_size; } static void *swap_buffer(void *bufaddr, int len) @@ -324,22 +342,26 @@ static void *swap_buffer(void *bufaddr, int len) static void fec_dump(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); - struct bufdesc *bdp = fep->tx_bd_base; - unsigned int index = 0; + struct bufdesc *bdp; + struct fec_enet_priv_tx_q *txq; + int index = 0; netdev_info(ndev, "TX ring dump\n"); pr_info("Nr SC addr len SKB\n"); + txq = fep->tx_queue[0]; + bdp = txq->tx_bd_base; + do { pr_info("%3u %c%c 0x%04x 0x%08lx %4u %p\n", index, - bdp == fep->cur_tx ? 'S' : ' ', - bdp == fep->dirty_tx ? 'H' : ' ', + bdp == txq->cur_tx ? 'S' : ' ', + bdp == txq->dirty_tx ? 'H' : ' ', bdp->cbd_sc, bdp->cbd_bufaddr, bdp->cbd_datlen, - fep->tx_skbuff[index]); - bdp = fec_enet_get_nextdesc(bdp, fep); + txq->tx_skbuff[index]); + bdp = fec_enet_get_nextdesc(bdp, fep, 0); index++; - } while (bdp != fep->tx_bd_base); + } while (bdp != txq->tx_bd_base); } static inline bool is_ipv4_pkt(struct sk_buff *skb) @@ -365,14 +387,17 @@ fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev) } static int -fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev) +fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq, + struct sk_buff *skb, + struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); const struct platform_device_id *id_entry = platform_get_device_id(fep->pdev); - struct bufdesc *bdp = fep->cur_tx; + struct bufdesc *bdp = txq->cur_tx; struct bufdesc_ex *ebdp; int nr_frags = skb_shinfo(skb)->nr_frags; + unsigned short queue = skb_get_queue_mapping(skb); int frag, frag_len; unsigned short status; unsigned int estatus = 0; @@ -384,7 +409,7 @@ fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev) for (frag = 0; frag < nr_frags; frag++) { this_frag = &skb_shinfo(skb)->frags[frag]; - bdp = fec_enet_get_nextdesc(bdp, fep); + bdp = fec_enet_get_nextdesc(bdp, fep, queue); ebdp = (struct bufdesc_ex *)bdp; status = bdp->cbd_sc; @@ -404,6 +429,8 @@ fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev) } if (fep->bufdesc_ex) { + if (id_entry->driver_data & FEC_QUIRK_HAS_AVB) + estatus |= FEC_TX_BD_FTYPE(queue); if (skb->ip_summed == CHECKSUM_PARTIAL) estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; ebdp->cbd_bdu = 0; @@ -412,11 +439,11 @@ fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev) bufaddr = page_address(this_frag->page.p) + this_frag->page_offset; - index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep); - if (((unsigned long) bufaddr) & FEC_ALIGNMENT || + index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); + if (((unsigned long) bufaddr) & fep->tx_align || id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) { - memcpy(fep->tx_bounce[index], bufaddr, frag_len); - bufaddr = fep->tx_bounce[index]; + memcpy(txq->tx_bounce[index], bufaddr, frag_len); + bufaddr = txq->tx_bounce[index]; if (id_entry->driver_data 
& FEC_QUIRK_SWAP_FRAME) swap_buffer(bufaddr, frag_len); @@ -436,21 +463,22 @@ fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev) bdp->cbd_sc = status; } - fep->cur_tx = bdp; + txq->cur_tx = bdp; return 0; dma_mapping_error: - bdp = fep->cur_tx; + bdp = txq->cur_tx; for (i = 0; i < frag; i++) { - bdp = fec_enet_get_nextdesc(bdp, fep); + bdp = fec_enet_get_nextdesc(bdp, fep, queue); dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, bdp->cbd_datlen, DMA_TO_DEVICE); } return NETDEV_TX_OK; } -static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev) +static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, + struct sk_buff *skb, struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); const struct platform_device_id *id_entry = @@ -461,12 +489,13 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev) dma_addr_t addr; unsigned short status; unsigned short buflen; + unsigned short queue; unsigned int estatus = 0; unsigned int index; int entries_free; int ret; - entries_free = fec_enet_get_free_txdesc_num(fep); + entries_free = fec_enet_get_free_txdesc_num(fep, txq); if (entries_free < MAX_SKB_FRAGS + 1) { dev_kfree_skb_any(skb); if (net_ratelimit()) @@ -481,7 +510,7 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev) } /* Fill in a Tx ring entry */ - bdp = fep->cur_tx; + bdp = txq->cur_tx; status = bdp->cbd_sc; status &= ~BD_ENET_TX_STATS; @@ -489,11 +518,12 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev) bufaddr = skb->data; buflen = skb_headlen(skb); - index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep); - if (((unsigned long) bufaddr) & FEC_ALIGNMENT || + queue = skb_get_queue_mapping(skb); + index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); + if (((unsigned long) bufaddr) & fep->tx_align || id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) { - memcpy(fep->tx_bounce[index], skb->data, buflen); - bufaddr = fep->tx_bounce[index]; + memcpy(txq->tx_bounce[index], skb->data, buflen); + bufaddr = txq->tx_bounce[index]; if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) swap_buffer(bufaddr, buflen); @@ -509,7 +539,7 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev) } if (nr_frags) { - ret = fec_enet_txq_submit_frag_skb(skb, ndev); + ret = fec_enet_txq_submit_frag_skb(txq, skb, ndev); if (ret) return ret; } else { @@ -530,6 +560,9 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev) fep->hwts_tx_en)) skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + if (id_entry->driver_data & FEC_QUIRK_HAS_AVB) + estatus |= FEC_TX_BD_FTYPE(queue); + if (skb->ip_summed == CHECKSUM_PARTIAL) estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; @@ -537,10 +570,10 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev) ebdp->cbd_esc = estatus; } - last_bdp = fep->cur_tx; - index = fec_enet_get_bd_index(fep->tx_bd_base, last_bdp, fep); + last_bdp = txq->cur_tx; + index = fec_enet_get_bd_index(txq->tx_bd_base, last_bdp, fep); /* Save skb pointer */ - fep->tx_skbuff[index] = skb; + txq->tx_skbuff[index] = skb; bdp->cbd_datlen = buflen; bdp->cbd_bufaddr = addr; @@ -552,27 +585,29 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev) bdp->cbd_sc = status; /* If this was the last BD in the ring, start at the beginning again. 
*/ - bdp = fec_enet_get_nextdesc(last_bdp, fep); + bdp = fec_enet_get_nextdesc(last_bdp, fep, queue); skb_tx_timestamp(skb); - fep->cur_tx = bdp; + txq->cur_tx = bdp; /* Trigger transmission start */ - writel(0, fep->hwp + FEC_X_DES_ACTIVE); + writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue)); return 0; } static int -fec_enet_txq_put_data_tso(struct sk_buff *skb, struct net_device *ndev, - struct bufdesc *bdp, int index, char *data, - int size, bool last_tcp, bool is_last) +fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb, + struct net_device *ndev, + struct bufdesc *bdp, int index, char *data, + int size, bool last_tcp, bool is_last) { struct fec_enet_private *fep = netdev_priv(ndev); const struct platform_device_id *id_entry = platform_get_device_id(fep->pdev); - struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; + struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc); + unsigned short queue = skb_get_queue_mapping(skb); unsigned short status; unsigned int estatus = 0; dma_addr_t addr; @@ -582,10 +617,10 @@ fec_enet_txq_put_data_tso(struct sk_buff *skb, struct net_device *ndev, status |= (BD_ENET_TX_TC | BD_ENET_TX_READY); - if (((unsigned long) data) & FEC_ALIGNMENT || + if (((unsigned long) data) & fep->tx_align || id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) { - memcpy(fep->tx_bounce[index], data, size); - data = fep->tx_bounce[index]; + memcpy(txq->tx_bounce[index], data, size); + data = txq->tx_bounce[index]; if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) swap_buffer(data, size); @@ -603,6 +638,8 @@ fec_enet_txq_put_data_tso(struct sk_buff *skb, struct net_device *ndev, bdp->cbd_bufaddr = addr; if (fep->bufdesc_ex) { + if (id_entry->driver_data & FEC_QUIRK_HAS_AVB) + estatus |= FEC_TX_BD_FTYPE(queue); if (skb->ip_summed == CHECKSUM_PARTIAL) estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; ebdp->cbd_bdu = 0; @@ -624,14 +661,16 @@ fec_enet_txq_put_data_tso(struct sk_buff *skb, struct net_device *ndev, } static int -fec_enet_txq_put_hdr_tso(struct sk_buff *skb, struct net_device *ndev, - struct bufdesc *bdp, int index) +fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq, + struct sk_buff *skb, struct net_device *ndev, + struct bufdesc *bdp, int index) { struct fec_enet_private *fep = netdev_priv(ndev); const struct platform_device_id *id_entry = platform_get_device_id(fep->pdev); int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); - struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; + struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc); + unsigned short queue = skb_get_queue_mapping(skb); void *bufaddr; unsigned long dmabuf; unsigned short status; @@ -641,12 +680,12 @@ fec_enet_txq_put_hdr_tso(struct sk_buff *skb, struct net_device *ndev, status &= ~BD_ENET_TX_STATS; status |= (BD_ENET_TX_TC | BD_ENET_TX_READY); - bufaddr = fep->tso_hdrs + index * TSO_HEADER_SIZE; - dmabuf = fep->tso_hdrs_dma + index * TSO_HEADER_SIZE; - if (((unsigned long) bufaddr) & FEC_ALIGNMENT || + bufaddr = txq->tso_hdrs + index * TSO_HEADER_SIZE; + dmabuf = txq->tso_hdrs_dma + index * TSO_HEADER_SIZE; + if (((unsigned long)bufaddr) & fep->tx_align || id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) { - memcpy(fep->tx_bounce[index], skb->data, hdr_len); - bufaddr = fep->tx_bounce[index]; + memcpy(txq->tx_bounce[index], skb->data, hdr_len); + bufaddr = txq->tx_bounce[index]; if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) swap_buffer(bufaddr, hdr_len); @@ -665,6 +704,8 @@ fec_enet_txq_put_hdr_tso(struct sk_buff *skb, struct net_device *ndev, 
bdp->cbd_datlen = hdr_len; if (fep->bufdesc_ex) { + if (id_entry->driver_data & FEC_QUIRK_HAS_AVB) + estatus |= FEC_TX_BD_FTYPE(queue); if (skb->ip_summed == CHECKSUM_PARTIAL) estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; ebdp->cbd_bdu = 0; @@ -676,17 +717,22 @@ fec_enet_txq_put_hdr_tso(struct sk_buff *skb, struct net_device *ndev, return 0; } -static int fec_enet_txq_submit_tso(struct sk_buff *skb, struct net_device *ndev) +static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq, + struct sk_buff *skb, + struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); int total_len, data_left; - struct bufdesc *bdp = fep->cur_tx; + struct bufdesc *bdp = txq->cur_tx; + unsigned short queue = skb_get_queue_mapping(skb); struct tso_t tso; unsigned int index = 0; int ret; + const struct platform_device_id *id_entry = + platform_get_device_id(fep->pdev); - if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(fep)) { + if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(fep, txq)) { dev_kfree_skb_any(skb); if (net_ratelimit()) netdev_err(ndev, "NOT enough BD for TSO!\n"); @@ -706,14 +752,14 @@ static int fec_enet_txq_submit_tso(struct sk_buff *skb, struct net_device *ndev) while (total_len > 0) { char *hdr; - index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep); + index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len); total_len -= data_left; /* prepare packet headers: MAC + IP + TCP */ - hdr = fep->tso_hdrs + index * TSO_HEADER_SIZE; + hdr = txq->tso_hdrs + index * TSO_HEADER_SIZE; tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0); - ret = fec_enet_txq_put_hdr_tso(skb, ndev, bdp, index); + ret = fec_enet_txq_put_hdr_tso(txq, skb, ndev, bdp, index); if (ret) goto err_release; @@ -721,10 +767,13 @@ static int fec_enet_txq_submit_tso(struct sk_buff *skb, struct net_device *ndev) int size; size = min_t(int, tso.size, data_left); - bdp = fec_enet_get_nextdesc(bdp, fep); - index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep); - ret = fec_enet_txq_put_data_tso(skb, ndev, bdp, index, tso.data, - size, size == data_left, + bdp = fec_enet_get_nextdesc(bdp, fep, queue); + index = fec_enet_get_bd_index(txq->tx_bd_base, + bdp, fep); + ret = fec_enet_txq_put_data_tso(txq, skb, ndev, + bdp, index, + tso.data, size, + size == data_left, total_len == 0); if (ret) goto err_release; @@ -733,17 +782,22 @@ static int fec_enet_txq_submit_tso(struct sk_buff *skb, struct net_device *ndev) tso_build_data(skb, &tso, size); } - bdp = fec_enet_get_nextdesc(bdp, fep); + bdp = fec_enet_get_nextdesc(bdp, fep, queue); } /* Save skb pointer */ - fep->tx_skbuff[index] = skb; + txq->tx_skbuff[index] = skb; skb_tx_timestamp(skb); - fep->cur_tx = bdp; + txq->cur_tx = bdp; /* Trigger transmission start */ - writel(0, fep->hwp + FEC_X_DES_ACTIVE); + if (!(id_entry->driver_data & FEC_QUIRK_ERR007885) || + !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) || + !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) || + !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) || + !readl(fep->hwp + FEC_X_DES_ACTIVE(queue))) + writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue)); return 0; @@ -757,18 +811,25 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); int entries_free; + unsigned short queue; + struct fec_enet_priv_tx_q *txq; + struct netdev_queue *nq; int ret; + queue = skb_get_queue_mapping(skb); + txq = fep->tx_queue[queue]; + nq 
= netdev_get_tx_queue(ndev, queue); + if (skb_is_gso(skb)) - ret = fec_enet_txq_submit_tso(skb, ndev); + ret = fec_enet_txq_submit_tso(txq, skb, ndev); else - ret = fec_enet_txq_submit_skb(skb, ndev); + ret = fec_enet_txq_submit_skb(txq, skb, ndev); if (ret) return ret; - entries_free = fec_enet_get_free_txdesc_num(fep); - if (entries_free <= fep->tx_stop_threshold) - netif_stop_queue(ndev); + entries_free = fec_enet_get_free_txdesc_num(fep, txq); + if (entries_free <= txq->tx_stop_threshold) + netif_tx_stop_queue(nq); return NETDEV_TX_OK; } @@ -778,46 +839,111 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) static void fec_enet_bd_init(struct net_device *dev) { struct fec_enet_private *fep = netdev_priv(dev); + struct fec_enet_priv_tx_q *txq; + struct fec_enet_priv_rx_q *rxq; struct bufdesc *bdp; unsigned int i; + unsigned int q; - /* Initialize the receive buffer descriptors. */ - bdp = fep->rx_bd_base; - for (i = 0; i < fep->rx_ring_size; i++) { + for (q = 0; q < fep->num_rx_queues; q++) { + /* Initialize the receive buffer descriptors. */ + rxq = fep->rx_queue[q]; + bdp = rxq->rx_bd_base; - /* Initialize the BD for every fragment in the page. */ - if (bdp->cbd_bufaddr) - bdp->cbd_sc = BD_ENET_RX_EMPTY; - else + for (i = 0; i < rxq->rx_ring_size; i++) { + + /* Initialize the BD for every fragment in the page. */ + if (bdp->cbd_bufaddr) + bdp->cbd_sc = BD_ENET_RX_EMPTY; + else + bdp->cbd_sc = 0; + bdp = fec_enet_get_nextdesc(bdp, fep, q); + } + + /* Set the last buffer to wrap */ + bdp = fec_enet_get_prevdesc(bdp, fep, q); + bdp->cbd_sc |= BD_SC_WRAP; + + rxq->cur_rx = rxq->rx_bd_base; + } + + for (q = 0; q < fep->num_tx_queues; q++) { + /* ...and the same for transmit */ + txq = fep->tx_queue[q]; + bdp = txq->tx_bd_base; + txq->cur_tx = bdp; + + for (i = 0; i < txq->tx_ring_size; i++) { + /* Initialize the BD for every fragment in the page. */ bdp->cbd_sc = 0; - bdp = fec_enet_get_nextdesc(bdp, fep); + if (txq->tx_skbuff[i]) { + dev_kfree_skb_any(txq->tx_skbuff[i]); + txq->tx_skbuff[i] = NULL; + } + bdp->cbd_bufaddr = 0; + bdp = fec_enet_get_nextdesc(bdp, fep, q); + } + + /* Set the last buffer to wrap */ + bdp = fec_enet_get_prevdesc(bdp, fep, q); + bdp->cbd_sc |= BD_SC_WRAP; + txq->dirty_tx = bdp; } +} - /* Set the last buffer to wrap */ - bdp = fec_enet_get_prevdesc(bdp, fep); - bdp->cbd_sc |= BD_SC_WRAP; +static void fec_enet_active_rxring(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + int i; - fep->cur_rx = fep->rx_bd_base; + for (i = 0; i < fep->num_rx_queues; i++) + writel(0, fep->hwp + FEC_R_DES_ACTIVE(i)); +} - /* ...and the same for transmit */ - bdp = fep->tx_bd_base; - fep->cur_tx = bdp; - for (i = 0; i < fep->tx_ring_size; i++) { +static void fec_enet_enable_ring(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + struct fec_enet_priv_tx_q *txq; + struct fec_enet_priv_rx_q *rxq; + int i; - /* Initialize the BD for every fragment in the page. 
*/ - bdp->cbd_sc = 0; - if (fep->tx_skbuff[i]) { - dev_kfree_skb_any(fep->tx_skbuff[i]); - fep->tx_skbuff[i] = NULL; - } - bdp->cbd_bufaddr = 0; - bdp = fec_enet_get_nextdesc(bdp, fep); + for (i = 0; i < fep->num_rx_queues; i++) { + rxq = fep->rx_queue[i]; + writel(rxq->bd_dma, fep->hwp + FEC_R_DES_START(i)); + + /* enable DMA1/2 */ + if (i) + writel(RCMR_MATCHEN | RCMR_CMP(i), + fep->hwp + FEC_RCMR(i)); } - /* Set the last buffer to wrap */ - bdp = fec_enet_get_prevdesc(bdp, fep); - bdp->cbd_sc |= BD_SC_WRAP; - fep->dirty_tx = bdp; + for (i = 0; i < fep->num_tx_queues; i++) { + txq = fep->tx_queue[i]; + writel(txq->bd_dma, fep->hwp + FEC_X_DES_START(i)); + + /* enable DMA1/2 */ + if (i) + writel(DMA_CLASS_EN | IDLE_SLOPE(i), + fep->hwp + FEC_DMA_CFG(i)); + } +} + +static void fec_enet_reset_skb(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + struct fec_enet_priv_tx_q *txq; + int i, j; + + for (i = 0; i < fep->num_tx_queues; i++) { + txq = fep->tx_queue[i]; + + for (j = 0; j < txq->tx_ring_size; j++) { + if (txq->tx_skbuff[j]) { + dev_kfree_skb_any(txq->tx_skbuff[j]); + txq->tx_skbuff[j] = NULL; + } + } + } } /* @@ -831,15 +957,21 @@ fec_restart(struct net_device *ndev) struct fec_enet_private *fep = netdev_priv(ndev); const struct platform_device_id *id_entry = platform_get_device_id(fep->pdev); - int i; u32 val; u32 temp_mac[2]; u32 rcntl = OPT_FRAME_SIZE | 0x04; u32 ecntl = 0x2; /* ETHEREN */ - /* Whack a reset. We should wait for this. */ - writel(1, fep->hwp + FEC_ECNTRL); - udelay(10); + /* Whack a reset. We should wait for this. + * For i.MX6SX SOC, enet use AXI bus, we use disable MAC + * instead of reset MAC itself. + */ + if (id_entry && id_entry->driver_data & FEC_QUIRK_HAS_AVB) { + writel(0, fep->hwp + FEC_ECNTRL); + } else { + writel(1, fep->hwp + FEC_ECNTRL); + udelay(10); + } /* * enet-mac reset will reset mac address registers too, @@ -859,22 +991,10 @@ fec_restart(struct net_device *ndev) fec_enet_bd_init(ndev); - /* Set receive and transmit descriptor base. */ - writel(fep->bd_dma, fep->hwp + FEC_R_DES_START); - if (fep->bufdesc_ex) - writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc_ex) - * fep->rx_ring_size, fep->hwp + FEC_X_DES_START); - else - writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) - * fep->rx_ring_size, fep->hwp + FEC_X_DES_START); - + fec_enet_enable_ring(ndev); - for (i = 0; i <= TX_RING_MOD_MASK; i++) { - if (fep->tx_skbuff[i]) { - dev_kfree_skb_any(fep->tx_skbuff[i]); - fep->tx_skbuff[i] = NULL; - } - } + /* Reset tx SKB buffers. */ + fec_enet_reset_skb(ndev); /* Enable MII mode */ if (fep->full_duplex == DUPLEX_FULL) { @@ -996,13 +1116,17 @@ fec_restart(struct net_device *ndev) /* And last, enable the transmit and receive processing */ writel(ecntl, fep->hwp + FEC_ECNTRL); - writel(0, fep->hwp + FEC_R_DES_ACTIVE); + fec_enet_active_rxring(ndev); if (fep->bufdesc_ex) fec_ptp_start_cyclecounter(ndev); /* Enable interrupts we wish to service */ writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); + + /* Init the interrupt coalescing */ + fec_enet_itr_coal_init(ndev); + } static void @@ -1021,9 +1145,16 @@ fec_stop(struct net_device *ndev) netdev_err(ndev, "Graceful transmit stop did not complete!\n"); } - /* Whack a reset. We should wait for this. */ - writel(1, fep->hwp + FEC_ECNTRL); - udelay(10); + /* Whack a reset. We should wait for this. + * For i.MX6SX SOC, enet use AXI bus, we use disable MAC + * instead of reset MAC itself. 
+ */ + if (id_entry && id_entry->driver_data & FEC_QUIRK_HAS_AVB) { + writel(0, fep->hwp + FEC_ECNTRL); + } else { + writel(1, fep->hwp + FEC_ECNTRL); + udelay(10); + } writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); @@ -1081,37 +1212,45 @@ fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts, } static void -fec_enet_tx(struct net_device *ndev) +fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) { struct fec_enet_private *fep; struct bufdesc *bdp; unsigned short status; struct sk_buff *skb; + struct fec_enet_priv_tx_q *txq; + struct netdev_queue *nq; int index = 0; int entries_free; fep = netdev_priv(ndev); - bdp = fep->dirty_tx; + + queue_id = FEC_ENET_GET_QUQUE(queue_id); + + txq = fep->tx_queue[queue_id]; + /* get next bdp of dirty_tx */ + nq = netdev_get_tx_queue(ndev, queue_id); + bdp = txq->dirty_tx; /* get next bdp of dirty_tx */ - bdp = fec_enet_get_nextdesc(bdp, fep); + bdp = fec_enet_get_nextdesc(bdp, fep, queue_id); while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) { /* current queue is empty */ - if (bdp == fep->cur_tx) + if (bdp == txq->cur_tx) break; - index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep); + index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); - skb = fep->tx_skbuff[index]; - fep->tx_skbuff[index] = NULL; - if (!IS_TSO_HEADER(fep, bdp->cbd_bufaddr)) + skb = txq->tx_skbuff[index]; + txq->tx_skbuff[index] = NULL; + if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr)) dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, bdp->cbd_datlen, DMA_TO_DEVICE); bdp->cbd_bufaddr = 0; if (!skb) { - bdp = fec_enet_get_nextdesc(bdp, fep); + bdp = fec_enet_get_nextdesc(bdp, fep, queue_id); continue; } @@ -1153,23 +1292,81 @@ fec_enet_tx(struct net_device *ndev) /* Free the sk buffer associated with this last transmit */ dev_kfree_skb_any(skb); - fep->dirty_tx = bdp; + txq->dirty_tx = bdp; /* Update pointer to next buffer descriptor to be transmitted */ - bdp = fec_enet_get_nextdesc(bdp, fep); + bdp = fec_enet_get_nextdesc(bdp, fep, queue_id); /* Since we have freed up a buffer, the ring is no longer full */ if (netif_queue_stopped(ndev)) { - entries_free = fec_enet_get_free_txdesc_num(fep); - if (entries_free >= fep->tx_wake_threshold) - netif_wake_queue(ndev); + entries_free = fec_enet_get_free_txdesc_num(fep, txq); + if (entries_free >= txq->tx_wake_threshold) + netif_tx_wake_queue(nq); } } /* ERR006538: Keep the transmitter going */ - if (bdp != fep->cur_tx && readl(fep->hwp + FEC_X_DES_ACTIVE) == 0) - writel(0, fep->hwp + FEC_X_DES_ACTIVE); + if (bdp != txq->cur_tx && + readl(fep->hwp + FEC_X_DES_ACTIVE(queue_id)) == 0) + writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue_id)); +} + +static void +fec_enet_tx(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + u16 queue_id; + /* First process class A queue, then Class B and Best Effort queue */ + for_each_set_bit(queue_id, &fep->work_tx, FEC_ENET_MAX_TX_QS) { + clear_bit(queue_id, &fep->work_tx); + fec_enet_tx_queue(ndev, queue_id); + } + return; +} + +static int +fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff *skb) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + int off; + + off = ((unsigned long)skb->data) & fep->rx_align; + if (off) + skb_reserve(skb, fep->rx_align + 1 - off); + + bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data, + FEC_ENET_RX_FRSIZE - fep->rx_align, + DMA_FROM_DEVICE); + if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) { + if (net_ratelimit()) + 
netdev_err(ndev, "Rx DMA memory map failed\n"); + return -ENOMEM; + } + + return 0; +} + +static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb, + struct bufdesc *bdp, u32 length) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + struct sk_buff *new_skb; + + if (length > fep->rx_copybreak) + return false; + + new_skb = netdev_alloc_skb(ndev, length); + if (!new_skb) + return false; + + dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr, + FEC_ENET_RX_FRSIZE - fep->rx_align, + DMA_FROM_DEVICE); + memcpy(new_skb->data, (*skb)->data, length); + *skb = new_skb; + + return true; } /* During a receive, the cur_rx points to the current incoming buffer. @@ -1178,14 +1375,16 @@ fec_enet_tx(struct net_device *ndev) * effectively tossing the packet. */ static int -fec_enet_rx(struct net_device *ndev, int budget) +fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) { struct fec_enet_private *fep = netdev_priv(ndev); const struct platform_device_id *id_entry = platform_get_device_id(fep->pdev); + struct fec_enet_priv_rx_q *rxq; struct bufdesc *bdp; unsigned short status; - struct sk_buff *skb; + struct sk_buff *skb_new = NULL; + struct sk_buff *skb; ushort pkt_len; __u8 *data; int pkt_received = 0; @@ -1193,15 +1392,18 @@ fec_enet_rx(struct net_device *ndev, int budget) bool vlan_packet_rcvd = false; u16 vlan_tag; int index = 0; + bool is_copybreak; #ifdef CONFIG_M532x flush_cache_all(); #endif + queue_id = FEC_ENET_GET_QUQUE(queue_id); + rxq = fep->rx_queue[queue_id]; /* First, grab all of the stats for the incoming packet. * These get messed up if we get called due to a busy condition. */ - bdp = fep->cur_rx; + bdp = rxq->cur_rx; while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) { @@ -1215,7 +1417,6 @@ fec_enet_rx(struct net_device *ndev, int budget) if ((status & BD_ENET_RX_LAST) == 0) netdev_err(ndev, "rcv is not +last\n"); - writel(FEC_ENET_RXF, fep->hwp + FEC_IEVENT); /* Check for errors. */ if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO | @@ -1248,11 +1449,28 @@ fec_enet_rx(struct net_device *ndev, int budget) pkt_len = bdp->cbd_datlen; ndev->stats.rx_bytes += pkt_len; - index = fec_enet_get_bd_index(fep->rx_bd_base, bdp, fep); - data = fep->rx_skbuff[index]->data; - dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr, - FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); + index = fec_enet_get_bd_index(rxq->rx_bd_base, bdp, fep); + skb = rxq->rx_skbuff[index]; + /* The packet length includes FCS, but we don't want to + * include that when passing upstream as it messes up + * bridging applications. 
+ */ + is_copybreak = fec_enet_copybreak(ndev, &skb, bdp, pkt_len - 4); + if (!is_copybreak) { + skb_new = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE); + if (unlikely(!skb_new)) { + ndev->stats.rx_dropped++; + goto rx_processing_done; + } + dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, + FEC_ENET_RX_FRSIZE - fep->rx_align, + DMA_FROM_DEVICE); + } + + prefetch(skb->data - NET_IP_ALIGN); + skb_put(skb, pkt_len - 4); + data = skb->data; if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) swap_buffer(data, pkt_len); @@ -1264,66 +1482,53 @@ fec_enet_rx(struct net_device *ndev, int budget) /* If this is a VLAN packet remove the VLAN Tag */ vlan_packet_rcvd = false; if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) && - fep->bufdesc_ex && (ebdp->cbd_esc & BD_ENET_RX_VLAN)) { + fep->bufdesc_ex && (ebdp->cbd_esc & BD_ENET_RX_VLAN)) { /* Push and remove the vlan tag */ struct vlan_hdr *vlan_header = (struct vlan_hdr *) (data + ETH_HLEN); vlan_tag = ntohs(vlan_header->h_vlan_TCI); - pkt_len -= VLAN_HLEN; vlan_packet_rcvd = true; + + skb_copy_to_linear_data_offset(skb, VLAN_HLEN, + data, (2 * ETH_ALEN)); + skb_pull(skb, VLAN_HLEN); } - /* This does 16 byte alignment, exactly what we need. - * The packet length includes FCS, but we don't want to - * include that when passing upstream as it messes up - * bridging applications. - */ - skb = netdev_alloc_skb(ndev, pkt_len - 4 + NET_IP_ALIGN); + skb->protocol = eth_type_trans(skb, ndev); - if (unlikely(!skb)) { - ndev->stats.rx_dropped++; - } else { - int payload_offset = (2 * ETH_ALEN); - skb_reserve(skb, NET_IP_ALIGN); - skb_put(skb, pkt_len - 4); /* Make room */ - - /* Extract the frame data without the VLAN header. */ - skb_copy_to_linear_data(skb, data, (2 * ETH_ALEN)); - if (vlan_packet_rcvd) - payload_offset = (2 * ETH_ALEN) + VLAN_HLEN; - skb_copy_to_linear_data_offset(skb, (2 * ETH_ALEN), - data + payload_offset, - pkt_len - 4 - (2 * ETH_ALEN)); - - skb->protocol = eth_type_trans(skb, ndev); - - /* Get receive timestamp from the skb */ - if (fep->hwts_rx_en && fep->bufdesc_ex) - fec_enet_hwtstamp(fep, ebdp->ts, - skb_hwtstamps(skb)); - - if (fep->bufdesc_ex && - (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) { - if (!(ebdp->cbd_esc & FLAG_RX_CSUM_ERROR)) { - /* don't check it */ - skb->ip_summed = CHECKSUM_UNNECESSARY; - } else { - skb_checksum_none_assert(skb); - } + /* Get receive timestamp from the skb */ + if (fep->hwts_rx_en && fep->bufdesc_ex) + fec_enet_hwtstamp(fep, ebdp->ts, + skb_hwtstamps(skb)); + + if (fep->bufdesc_ex && + (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) { + if (!(ebdp->cbd_esc & FLAG_RX_CSUM_ERROR)) { + /* don't check it */ + skb->ip_summed = CHECKSUM_UNNECESSARY; + } else { + skb_checksum_none_assert(skb); } + } + + /* Handle received VLAN packets */ + if (vlan_packet_rcvd) + __vlan_hwaccel_put_tag(skb, + htons(ETH_P_8021Q), + vlan_tag); - /* Handle received VLAN packets */ - if (vlan_packet_rcvd) - __vlan_hwaccel_put_tag(skb, - htons(ETH_P_8021Q), - vlan_tag); + napi_gro_receive(&fep->napi, skb); - napi_gro_receive(&fep->napi, skb); + if (is_copybreak) { + dma_sync_single_for_device(&fep->pdev->dev, bdp->cbd_bufaddr, + FEC_ENET_RX_FRSIZE - fep->rx_align, + DMA_FROM_DEVICE); + } else { + rxq->rx_skbuff[index] = skb_new; + fec_enet_new_rxbdp(ndev, bdp, skb_new); } - dma_sync_single_for_device(&fep->pdev->dev, bdp->cbd_bufaddr, - FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); rx_processing_done: /* Clear the status flags for this buffer */ status &= ~BD_ENET_RX_STATS; @@ -1341,19 +1546,56 @@ rx_processing_done: } /* Update BD pointer to 
next entry */ - bdp = fec_enet_get_nextdesc(bdp, fep); + bdp = fec_enet_get_nextdesc(bdp, fep, queue_id); /* Doing this here will keep the FEC running while we process * incoming frames. On a heavily loaded network, we should be * able to keep up at the expense of system resources. */ - writel(0, fep->hwp + FEC_R_DES_ACTIVE); + writel(0, fep->hwp + FEC_R_DES_ACTIVE(queue_id)); } - fep->cur_rx = bdp; + rxq->cur_rx = bdp; + return pkt_received; +} +static int +fec_enet_rx(struct net_device *ndev, int budget) +{ + int pkt_received = 0; + u16 queue_id; + struct fec_enet_private *fep = netdev_priv(ndev); + + for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) { + clear_bit(queue_id, &fep->work_rx); + pkt_received += fec_enet_rx_queue(ndev, + budget - pkt_received, queue_id); + } return pkt_received; } +static bool +fec_enet_collect_events(struct fec_enet_private *fep, uint int_events) +{ + if (int_events == 0) + return false; + + if (int_events & FEC_ENET_RXF) + fep->work_rx |= (1 << 2); + if (int_events & FEC_ENET_RXF_1) + fep->work_rx |= (1 << 0); + if (int_events & FEC_ENET_RXF_2) + fep->work_rx |= (1 << 1); + + if (int_events & FEC_ENET_TXF) + fep->work_tx |= (1 << 2); + if (int_events & FEC_ENET_TXF_1) + fep->work_tx |= (1 << 0); + if (int_events & FEC_ENET_TXF_2) + fep->work_tx |= (1 << 1); + + return true; +} + static irqreturn_t fec_enet_interrupt(int irq, void *dev_id) { @@ -1365,6 +1607,7 @@ fec_enet_interrupt(int irq, void *dev_id) int_events = readl(fep->hwp + FEC_IEVENT); writel(int_events & ~napi_mask, fep->hwp + FEC_IEVENT); + fec_enet_collect_events(fep, int_events); if (int_events & napi_mask) { ret = IRQ_HANDLED; @@ -1611,20 +1854,41 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable) goto failed_clk_enet_out; } if (fep->clk_ptp) { + mutex_lock(&fep->ptp_clk_mutex); ret = clk_prepare_enable(fep->clk_ptp); - if (ret) + if (ret) { + mutex_unlock(&fep->ptp_clk_mutex); goto failed_clk_ptp; + } else { + fep->ptp_clk_on = true; + } + mutex_unlock(&fep->ptp_clk_mutex); + } + if (fep->clk_ref) { + ret = clk_prepare_enable(fep->clk_ref); + if (ret) + goto failed_clk_ref; } } else { clk_disable_unprepare(fep->clk_ahb); clk_disable_unprepare(fep->clk_ipg); if (fep->clk_enet_out) clk_disable_unprepare(fep->clk_enet_out); - if (fep->clk_ptp) + if (fep->clk_ptp) { + mutex_lock(&fep->ptp_clk_mutex); clk_disable_unprepare(fep->clk_ptp); + fep->ptp_clk_on = false; + mutex_unlock(&fep->ptp_clk_mutex); + } + if (fep->clk_ref) + clk_disable_unprepare(fep->clk_ref); } return 0; + +failed_clk_ref: + if (fep->clk_ref) + clk_disable_unprepare(fep->clk_ref); failed_clk_ptp: if (fep->clk_enet_out) clk_disable_unprepare(fep->clk_enet_out); @@ -1664,13 +1928,13 @@ static int fec_enet_mii_probe(struct net_device *ndev) continue; if (dev_id--) continue; - strncpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE); + strlcpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE); break; } if (phy_id >= PHY_MAX_ADDR) { netdev_info(ndev, "no PHY, assuming direct connection to switch\n"); - strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); + strlcpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); phy_id = 0; } @@ -2052,12 +2316,179 @@ static int fec_enet_nway_reset(struct net_device *dev) return genphy_restart_aneg(phydev); } +/* ITR clock source is enet system clock (clk_ahb). 
+ * TCTT unit is cycle_ns * 64 cycle + * So, the ICTT value = X us / (cycle_ns * 64) + */ +static int fec_enet_us_to_itr_clock(struct net_device *ndev, int us) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + + return us * (fep->itr_clk_rate / 64000) / 1000; +} + +/* Set threshold for interrupt coalescing */ +static void fec_enet_itr_coal_set(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + const struct platform_device_id *id_entry = + platform_get_device_id(fep->pdev); + int rx_itr, tx_itr; + + if (!(id_entry->driver_data & FEC_QUIRK_HAS_AVB)) + return; + + /* Must be greater than zero to avoid unpredictable behavior */ + if (!fep->rx_time_itr || !fep->rx_pkts_itr || + !fep->tx_time_itr || !fep->tx_pkts_itr) + return; + + /* Select enet system clock as Interrupt Coalescing + * timer Clock Source + */ + rx_itr = FEC_ITR_CLK_SEL; + tx_itr = FEC_ITR_CLK_SEL; + + /* set ICFT and ICTT */ + rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr); + rx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr)); + tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr); + tx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr)); + + rx_itr |= FEC_ITR_EN; + tx_itr |= FEC_ITR_EN; + + writel(tx_itr, fep->hwp + FEC_TXIC0); + writel(rx_itr, fep->hwp + FEC_RXIC0); + writel(tx_itr, fep->hwp + FEC_TXIC1); + writel(rx_itr, fep->hwp + FEC_RXIC1); + writel(tx_itr, fep->hwp + FEC_TXIC2); + writel(rx_itr, fep->hwp + FEC_RXIC2); +} + +static int +fec_enet_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + const struct platform_device_id *id_entry = + platform_get_device_id(fep->pdev); + + if (!(id_entry->driver_data & FEC_QUIRK_HAS_AVB)) + return -EOPNOTSUPP; + + ec->rx_coalesce_usecs = fep->rx_time_itr; + ec->rx_max_coalesced_frames = fep->rx_pkts_itr; + + ec->tx_coalesce_usecs = fep->tx_time_itr; + ec->tx_max_coalesced_frames = fep->tx_pkts_itr; + + return 0; +} + +static int +fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + const struct platform_device_id *id_entry = + platform_get_device_id(fep->pdev); + + unsigned int cycle; + + if (!(id_entry->driver_data & FEC_QUIRK_HAS_AVB)) + return -EOPNOTSUPP; + + if (ec->rx_max_coalesced_frames > 255) { + pr_err("Rx coalesced frames exceed hardware limiation"); + return -EINVAL; + } + + if (ec->tx_max_coalesced_frames > 255) { + pr_err("Tx coalesced frame exceed hardware limiation"); + return -EINVAL; + } + + cycle = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr); + if (cycle > 0xFFFF) { + pr_err("Rx coalesed usec exceeed hardware limiation"); + return -EINVAL; + } + + cycle = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr); + if (cycle > 0xFFFF) { + pr_err("Rx coalesed usec exceeed hardware limiation"); + return -EINVAL; + } + + fep->rx_time_itr = ec->rx_coalesce_usecs; + fep->rx_pkts_itr = ec->rx_max_coalesced_frames; + + fep->tx_time_itr = ec->tx_coalesce_usecs; + fep->tx_pkts_itr = ec->tx_max_coalesced_frames; + + fec_enet_itr_coal_set(ndev); + + return 0; +} + +static void fec_enet_itr_coal_init(struct net_device *ndev) +{ + struct ethtool_coalesce ec; + + ec.rx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT; + ec.rx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT; + + ec.tx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT; + ec.tx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT; + + fec_enet_set_coalesce(ndev, &ec); +} + +static int fec_enet_get_tunable(struct net_device *netdev, + const 
struct ethtool_tunable *tuna, + void *data) +{ + struct fec_enet_private *fep = netdev_priv(netdev); + int ret = 0; + + switch (tuna->id) { + case ETHTOOL_RX_COPYBREAK: + *(u32 *)data = fep->rx_copybreak; + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int fec_enet_set_tunable(struct net_device *netdev, + const struct ethtool_tunable *tuna, + const void *data) +{ + struct fec_enet_private *fep = netdev_priv(netdev); + int ret = 0; + + switch (tuna->id) { + case ETHTOOL_RX_COPYBREAK: + fep->rx_copybreak = *(u32 *)data; + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + static const struct ethtool_ops fec_enet_ethtool_ops = { .get_settings = fec_enet_get_settings, .set_settings = fec_enet_set_settings, .get_drvinfo = fec_enet_get_drvinfo, .nway_reset = fec_enet_nway_reset, .get_link = ethtool_op_get_link, + .get_coalesce = fec_enet_get_coalesce, + .set_coalesce = fec_enet_set_coalesce, #ifndef CONFIG_M5272 .get_pauseparam = fec_enet_get_pauseparam, .set_pauseparam = fec_enet_set_pauseparam, @@ -2066,6 +2497,8 @@ static const struct ethtool_ops fec_enet_ethtool_ops = { .get_sset_count = fec_enet_get_sset_count, #endif .get_ts_info = fec_enet_get_ts_info, + .get_tunable = fec_enet_get_tunable, + .set_tunable = fec_enet_set_tunable, }; static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) @@ -2095,55 +2528,136 @@ static void fec_enet_free_buffers(struct net_device *ndev) unsigned int i; struct sk_buff *skb; struct bufdesc *bdp; + struct fec_enet_priv_tx_q *txq; + struct fec_enet_priv_rx_q *rxq; + unsigned int q; + + for (q = 0; q < fep->num_rx_queues; q++) { + rxq = fep->rx_queue[q]; + bdp = rxq->rx_bd_base; + for (i = 0; i < rxq->rx_ring_size; i++) { + skb = rxq->rx_skbuff[i]; + rxq->rx_skbuff[i] = NULL; + if (skb) { + dma_unmap_single(&fep->pdev->dev, + bdp->cbd_bufaddr, + FEC_ENET_RX_FRSIZE - fep->rx_align, + DMA_FROM_DEVICE); + dev_kfree_skb(skb); + } + bdp = fec_enet_get_nextdesc(bdp, fep, q); + } + } - bdp = fep->rx_bd_base; - for (i = 0; i < fep->rx_ring_size; i++) { - skb = fep->rx_skbuff[i]; - fep->rx_skbuff[i] = NULL; - if (skb) { - dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, - FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); + for (q = 0; q < fep->num_tx_queues; q++) { + txq = fep->tx_queue[q]; + bdp = txq->tx_bd_base; + for (i = 0; i < txq->tx_ring_size; i++) { + kfree(txq->tx_bounce[i]); + txq->tx_bounce[i] = NULL; + skb = txq->tx_skbuff[i]; + txq->tx_skbuff[i] = NULL; dev_kfree_skb(skb); } - bdp = fec_enet_get_nextdesc(bdp, fep); + } +} + +static void fec_enet_free_queue(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + int i; + struct fec_enet_priv_tx_q *txq; + + for (i = 0; i < fep->num_tx_queues; i++) + if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) { + txq = fep->tx_queue[i]; + dma_free_coherent(NULL, + txq->tx_ring_size * TSO_HEADER_SIZE, + txq->tso_hdrs, + txq->tso_hdrs_dma); + } + + for (i = 0; i < fep->num_rx_queues; i++) + if (fep->rx_queue[i]) + kfree(fep->rx_queue[i]); + + for (i = 0; i < fep->num_tx_queues; i++) + if (fep->tx_queue[i]) + kfree(fep->tx_queue[i]); +} + +static int fec_enet_alloc_queue(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + int i; + int ret = 0; + struct fec_enet_priv_tx_q *txq; + + for (i = 0; i < fep->num_tx_queues; i++) { + txq = kzalloc(sizeof(*txq), GFP_KERNEL); + if (!txq) { + ret = -ENOMEM; + goto alloc_failed; + } + + fep->tx_queue[i] = txq; + txq->tx_ring_size = TX_RING_SIZE; + 
fep->total_tx_ring_size += fep->tx_queue[i]->tx_ring_size; + + txq->tx_stop_threshold = FEC_MAX_SKB_DESCS; + txq->tx_wake_threshold = + (txq->tx_ring_size - txq->tx_stop_threshold) / 2; + + txq->tso_hdrs = dma_alloc_coherent(NULL, + txq->tx_ring_size * TSO_HEADER_SIZE, + &txq->tso_hdrs_dma, + GFP_KERNEL); + if (!txq->tso_hdrs) { + ret = -ENOMEM; + goto alloc_failed; + } } - bdp = fep->tx_bd_base; - for (i = 0; i < fep->tx_ring_size; i++) { - kfree(fep->tx_bounce[i]); - fep->tx_bounce[i] = NULL; - skb = fep->tx_skbuff[i]; - fep->tx_skbuff[i] = NULL; - dev_kfree_skb(skb); + for (i = 0; i < fep->num_rx_queues; i++) { + fep->rx_queue[i] = kzalloc(sizeof(*fep->rx_queue[i]), + GFP_KERNEL); + if (!fep->rx_queue[i]) { + ret = -ENOMEM; + goto alloc_failed; + } + + fep->rx_queue[i]->rx_ring_size = RX_RING_SIZE; + fep->total_rx_ring_size += fep->rx_queue[i]->rx_ring_size; } + return ret; + +alloc_failed: + fec_enet_free_queue(ndev); + return ret; } -static int fec_enet_alloc_buffers(struct net_device *ndev) +static int +fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue) { struct fec_enet_private *fep = netdev_priv(ndev); unsigned int i; struct sk_buff *skb; struct bufdesc *bdp; + struct fec_enet_priv_rx_q *rxq; - bdp = fep->rx_bd_base; - for (i = 0; i < fep->rx_ring_size; i++) { - dma_addr_t addr; - + rxq = fep->rx_queue[queue]; + bdp = rxq->rx_bd_base; + for (i = 0; i < rxq->rx_ring_size; i++) { skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE); if (!skb) goto err_alloc; - addr = dma_map_single(&fep->pdev->dev, skb->data, - FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); - if (dma_mapping_error(&fep->pdev->dev, addr)) { + if (fec_enet_new_rxbdp(ndev, bdp, skb)) { dev_kfree_skb(skb); - if (net_ratelimit()) - netdev_err(ndev, "Rx DMA memory map failed\n"); goto err_alloc; } - fep->rx_skbuff[i] = skb; - bdp->cbd_bufaddr = addr; + rxq->rx_skbuff[i] = skb; bdp->cbd_sc = BD_ENET_RX_EMPTY; if (fep->bufdesc_ex) { @@ -2151,17 +2665,32 @@ static int fec_enet_alloc_buffers(struct net_device *ndev) ebdp->cbd_esc = BD_ENET_RX_INT; } - bdp = fec_enet_get_nextdesc(bdp, fep); + bdp = fec_enet_get_nextdesc(bdp, fep, queue); } /* Set the last buffer to wrap. */ - bdp = fec_enet_get_prevdesc(bdp, fep); + bdp = fec_enet_get_prevdesc(bdp, fep, queue); bdp->cbd_sc |= BD_SC_WRAP; + return 0; - bdp = fep->tx_bd_base; - for (i = 0; i < fep->tx_ring_size; i++) { - fep->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL); - if (!fep->tx_bounce[i]) + err_alloc: + fec_enet_free_buffers(ndev); + return -ENOMEM; +} + +static int +fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + unsigned int i; + struct bufdesc *bdp; + struct fec_enet_priv_tx_q *txq; + + txq = fep->tx_queue[queue]; + bdp = txq->tx_bd_base; + for (i = 0; i < txq->tx_ring_size; i++) { + txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL); + if (!txq->tx_bounce[i]) goto err_alloc; bdp->cbd_sc = 0; @@ -2172,11 +2701,11 @@ static int fec_enet_alloc_buffers(struct net_device *ndev) ebdp->cbd_esc = BD_ENET_TX_INT; } - bdp = fec_enet_get_nextdesc(bdp, fep); + bdp = fec_enet_get_nextdesc(bdp, fep, queue); } /* Set the last buffer to wrap. 
*/ - bdp = fec_enet_get_prevdesc(bdp, fep); + bdp = fec_enet_get_prevdesc(bdp, fep, queue); bdp->cbd_sc |= BD_SC_WRAP; return 0; @@ -2186,6 +2715,21 @@ static int fec_enet_alloc_buffers(struct net_device *ndev) return -ENOMEM; } +static int fec_enet_alloc_buffers(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + unsigned int i; + + for (i = 0; i < fep->num_rx_queues; i++) + if (fec_enet_alloc_rxq_buffers(ndev, i)) + return -ENOMEM; + + for (i = 0; i < fep->num_tx_queues; i++) + if (fec_enet_alloc_txq_buffers(ndev, i)) + return -ENOMEM; + return 0; +} + static int fec_enet_open(struct net_device *ndev) { @@ -2203,20 +2747,26 @@ fec_enet_open(struct net_device *ndev) ret = fec_enet_alloc_buffers(ndev); if (ret) - return ret; + goto err_enet_alloc; /* Probe and connect to PHY when open the interface */ ret = fec_enet_mii_probe(ndev); - if (ret) { - fec_enet_free_buffers(ndev); - return ret; - } + if (ret) + goto err_enet_mii_probe; fec_restart(ndev); napi_enable(&fep->napi); phy_start(fep->phy_dev); - netif_start_queue(ndev); + netif_tx_start_all_queues(ndev); + return 0; + +err_enet_mii_probe: + fec_enet_free_buffers(ndev); +err_enet_alloc: + fec_enet_clk_enable(ndev, false); + pinctrl_pm_select_sleep_state(&fep->pdev->dev); + return ret; } static int @@ -2389,7 +2939,7 @@ static int fec_set_features(struct net_device *netdev, /* Resume the device after updates */ if (netif_running(netdev) && changed & FEATURES_NEED_QUIESCE) { fec_restart(netdev); - netif_wake_queue(netdev); + netif_tx_wake_all_queues(netdev); netif_tx_unlock_bh(netdev); napi_enable(&fep->napi); } @@ -2422,39 +2972,38 @@ static int fec_enet_init(struct net_device *ndev) struct fec_enet_private *fep = netdev_priv(ndev); const struct platform_device_id *id_entry = platform_get_device_id(fep->pdev); + struct fec_enet_priv_tx_q *txq; + struct fec_enet_priv_rx_q *rxq; struct bufdesc *cbd_base; + dma_addr_t bd_dma; int bd_size; + unsigned int i; - /* init the tx & rx ring size */ - fep->tx_ring_size = TX_RING_SIZE; - fep->rx_ring_size = RX_RING_SIZE; +#if defined(CONFIG_ARM) + fep->rx_align = 0xf; + fep->tx_align = 0xf; +#else + fep->rx_align = 0x3; + fep->tx_align = 0x3; +#endif - fep->tx_stop_threshold = FEC_MAX_SKB_DESCS; - fep->tx_wake_threshold = (fep->tx_ring_size - fep->tx_stop_threshold) / 2; + fec_enet_alloc_queue(ndev); if (fep->bufdesc_ex) fep->bufdesc_size = sizeof(struct bufdesc_ex); else fep->bufdesc_size = sizeof(struct bufdesc); - bd_size = (fep->tx_ring_size + fep->rx_ring_size) * + bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * fep->bufdesc_size; /* Allocate memory for buffer descriptors. */ - cbd_base = dma_alloc_coherent(NULL, bd_size, &fep->bd_dma, + cbd_base = dma_alloc_coherent(NULL, bd_size, &bd_dma, GFP_KERNEL); - if (!cbd_base) - return -ENOMEM; - - fep->tso_hdrs = dma_alloc_coherent(NULL, fep->tx_ring_size * TSO_HEADER_SIZE, - &fep->tso_hdrs_dma, GFP_KERNEL); - if (!fep->tso_hdrs) { - dma_free_coherent(NULL, bd_size, cbd_base, fep->bd_dma); + if (!cbd_base) { return -ENOMEM; } - memset(cbd_base, 0, PAGE_SIZE); - - fep->netdev = ndev; + memset(cbd_base, 0, bd_size); /* Get the Ethernet address */ fec_get_mac(ndev); @@ -2462,12 +3011,36 @@ static int fec_enet_init(struct net_device *ndev) fec_set_mac_address(ndev, NULL); /* Set receive and transmit descriptor base. 
*/ - fep->rx_bd_base = cbd_base; - if (fep->bufdesc_ex) - fep->tx_bd_base = (struct bufdesc *) - (((struct bufdesc_ex *)cbd_base) + fep->rx_ring_size); - else - fep->tx_bd_base = cbd_base + fep->rx_ring_size; + for (i = 0; i < fep->num_rx_queues; i++) { + rxq = fep->rx_queue[i]; + rxq->index = i; + rxq->rx_bd_base = (struct bufdesc *)cbd_base; + rxq->bd_dma = bd_dma; + if (fep->bufdesc_ex) { + bd_dma += sizeof(struct bufdesc_ex) * rxq->rx_ring_size; + cbd_base = (struct bufdesc *) + (((struct bufdesc_ex *)cbd_base) + rxq->rx_ring_size); + } else { + bd_dma += sizeof(struct bufdesc) * rxq->rx_ring_size; + cbd_base += rxq->rx_ring_size; + } + } + + for (i = 0; i < fep->num_tx_queues; i++) { + txq = fep->tx_queue[i]; + txq->index = i; + txq->tx_bd_base = (struct bufdesc *)cbd_base; + txq->bd_dma = bd_dma; + if (fep->bufdesc_ex) { + bd_dma += sizeof(struct bufdesc_ex) * txq->tx_ring_size; + cbd_base = (struct bufdesc *) + (((struct bufdesc_ex *)cbd_base) + txq->tx_ring_size); + } else { + bd_dma += sizeof(struct bufdesc) * txq->tx_ring_size; + cbd_base += txq->tx_ring_size; + } + } + /* The FEC Ethernet specific entries in the device structure */ ndev->watchdog_timeo = TX_TIMEOUT; @@ -2490,6 +3063,11 @@ static int fec_enet_init(struct net_device *ndev) fep->csum_flags |= FLAG_RX_CSUM_ENABLED; } + if (id_entry->driver_data & FEC_QUIRK_HAS_AVB) { + fep->tx_align = 0; + fep->rx_align = 0x3f; + } + ndev->hw_features = ndev->features; fec_restart(ndev); @@ -2535,6 +3113,42 @@ static void fec_reset_phy(struct platform_device *pdev) } #endif /* CONFIG_OF */ +static void +fec_enet_get_queue_num(struct platform_device *pdev, int *num_tx, int *num_rx) +{ + struct device_node *np = pdev->dev.of_node; + int err; + + *num_tx = *num_rx = 1; + + if (!np || !of_device_is_available(np)) + return; + + /* parse the num of tx and rx queues */ + err = of_property_read_u32(np, "fsl,num-tx-queues", num_tx); + if (err) + *num_tx = 1; + + err = of_property_read_u32(np, "fsl,num-rx-queues", num_rx); + if (err) + *num_rx = 1; + + if (*num_tx < 1 || *num_tx > FEC_ENET_MAX_TX_QS) { + dev_warn(&pdev->dev, "Invalid num_tx(=%d), fall back to 1\n", + *num_tx); + *num_tx = 1; + return; + } + + if (*num_rx < 1 || *num_rx > FEC_ENET_MAX_RX_QS) { + dev_warn(&pdev->dev, "Invalid num_rx(=%d), fall back to 1\n", + *num_rx); + *num_rx = 1; + return; + } + +} + static int fec_probe(struct platform_device *pdev) { @@ -2546,13 +3160,18 @@ fec_probe(struct platform_device *pdev) const struct of_device_id *of_id; static int dev_id; struct device_node *np = pdev->dev.of_node, *phy_node; + int num_tx_qs; + int num_rx_qs; of_id = of_match_device(fec_dt_ids, &pdev->dev); if (of_id) pdev->id_entry = of_id->data; + fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs); + /* Init network device */ - ndev = alloc_etherdev(sizeof(struct fec_enet_private)); + ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private), + num_tx_qs, num_rx_qs); if (!ndev) return -ENOMEM; @@ -2561,6 +3180,9 @@ fec_probe(struct platform_device *pdev) /* setup board info structure */ fep = netdev_priv(ndev); + fep->num_rx_queues = num_rx_qs; + fep->num_tx_queues = num_tx_qs; + #if !defined(CONFIG_M5272) /* default enable pause frame auto negotiation */ if (pdev->id_entry && @@ -2620,11 +3242,21 @@ fec_probe(struct platform_device *pdev) goto failed_clk; } + fep->itr_clk_rate = clk_get_rate(fep->clk_ahb); + /* enet_out is optional, depends on board */ fep->clk_enet_out = devm_clk_get(&pdev->dev, "enet_out"); if (IS_ERR(fep->clk_enet_out)) fep->clk_enet_out = NULL; + 
fep->ptp_clk_on = false; + mutex_init(&fep->ptp_clk_mutex); + + /* clk_ref is optional, depends on board */ + fep->clk_ref = devm_clk_get(&pdev->dev, "enet_clk_ref"); + if (IS_ERR(fep->clk_ref)) + fep->clk_ref = NULL; + fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp"); fep->bufdesc_ex = pdev->id_entry->driver_data & FEC_QUIRK_HAS_BUFDESC_EX; @@ -2672,6 +3304,7 @@ fec_probe(struct platform_device *pdev) goto failed_irq; } + init_completion(&fep->mdio_done); ret = fec_enet_mii_init(pdev); if (ret) goto failed_mii_init; @@ -2688,6 +3321,7 @@ fec_probe(struct platform_device *pdev) if (fep->bufdesc_ex && fep->ptp_clock) netdev_info(ndev, "registered PHC device %d\n", fep->dev_id); + fep->rx_copybreak = COPYBREAK_DEFAULT; INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work); return 0; @@ -2715,10 +3349,10 @@ fec_drv_remove(struct platform_device *pdev) struct net_device *ndev = platform_get_drvdata(pdev); struct fec_enet_private *fep = netdev_priv(ndev); + cancel_delayed_work_sync(&fep->time_keep); cancel_work_sync(&fep->tx_timeout_work); unregister_netdev(ndev); fec_enet_mii_remove(fep); - del_timer_sync(&fep->time_keep); if (fep->reg_phy) regulator_disable(fep->reg_phy); if (fep->ptp_clock) |
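
Note: with three DMA rings the driver now services them in AVB priority order. fec_enet_collect_events() latches the pending rings into the work_rx/work_tx bitmaps (ring 1 into bit 0, ring 2 into bit 1, ring 0 into bit 2), and fec_enet_rx()/fec_enet_tx() walk those bits in ascending order, converting each bit back to a hardware ring with FEC_ENET_GET_QUQUE(). Below is a stand-alone sketch of that ordering; only FEC_ENET_RXF's value appears in this patch, so the RXF_1/RXF_2 bit values are placeholders rather than the definitions from fec.h.

#include <stdio.h>

#define FEC_ENET_GET_QUQUE(_x) ((_x == 0) ? 1 : ((_x == 1) ? 2 : 0))

/* placeholder event bits; the real RXF_1/RXF_2 values live in fec.h */
#define RXF_0	(1u << 25)
#define RXF_1	(1u << 8)
#define RXF_2	(1u << 9)

static unsigned long collect_rx_events(unsigned int int_events)
{
	unsigned long work_rx = 0;

	if (int_events & RXF_0)
		work_rx |= 1 << 2;	/* best-effort ring serviced last */
	if (int_events & RXF_1)
		work_rx |= 1 << 0;	/* AVB class A ring serviced first */
	if (int_events & RXF_2)
		work_rx |= 1 << 1;	/* AVB class B ring serviced second */

	return work_rx;
}

int main(void)
{
	unsigned long work_rx = collect_rx_events(RXF_0 | RXF_1 | RXF_2);
	int bit;

	/* walk set bits in ascending order, as for_each_set_bit() does */
	for (bit = 0; bit < 3; bit++)
		if (work_rx & (1ul << bit))
			printf("service hw ring %d\n", FEC_ENET_GET_QUQUE(bit));
	/* prints rings 1, 2, 0: class A, class B, then best effort */
	return 0;
}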
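Note: fec_enet_get_free_txdesc_num() derives the free-entry count for a queue purely from the distance between txq->dirty_tx (last reclaimed descriptor) and txq->cur_tx (next descriptor to fill), minus one; a non-positive result wraps by adding the ring size. A minimal index-based version of the same arithmetic, with an illustrative ring size and made-up indices:

#include <stdio.h>

/* Same arithmetic as fec_enet_get_free_txdesc_num(), expressed on ring
 * indices instead of descriptor pointers.  The ring size is illustrative.
 */
#define TX_RING_SIZE 512

static int free_txdesc(int dirty_tx, int cur_tx)
{
	int entries = dirty_tx - cur_tx - 1;

	return entries > 0 ? entries : entries + TX_RING_SIZE;
}

int main(void)
{
	/* last reclaimed descriptor is index 5, next to fill is index 10:
	 * (5 - 10 - 1) + 512 = 506 entries reported free
	 */
	printf("%d free\n", free_txdesc(5, 10));
	return 0;
}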
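Note: the coalescing comment gives ICTT = X us / (cycle_ns * 64), which fec_enet_us_to_itr_clock() computes as us * (itr_clk_rate / 64000) / 1000 from the AHB clock rate cached at probe time; fec_enet_set_coalesce() then rejects values that overflow the 16-bit ICTT field. A small stand-alone version, assuming a 66 MHz clk_ahb purely for illustration:

#include <stdio.h>

/* Mirror of fec_enet_us_to_itr_clock(): one ICTT tick is 64 clk_ahb cycles,
 * so ticks = us / (cycle_ns * 64) = us * (rate / 64000) / 1000.
 */
static int us_to_itr_clock(unsigned long itr_clk_rate, int us)
{
	return us * (itr_clk_rate / 64000) / 1000;
}

int main(void)
{
	unsigned long rate = 66000000;	/* assumed clk_ahb rate, for illustration */
	int usecs = 1000;
	int ticks = us_to_itr_clock(rate, usecs);	/* 1031 */

	/* fec_enet_set_coalesce() refuses anything above 0xFFFF, which at
	 * this clock rate is roughly 63 ms of coalescing delay.
	 */
	printf("%d us -> ICTT %d (%s)\n", usecs, ticks,
	       ticks > 0xFFFF ? "too large" : "ok");
	return 0;
}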
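Note: the reworked receive path avoids a DMA unmap/remap for small frames. fec_enet_copybreak() copies frames no longer than fep->rx_copybreak (initialised to COPYBREAK_DEFAULT, 256 bytes, and tunable via ETHTOOL_RX_COPYBREAK) into a freshly allocated skb after dma_sync_single_for_cpu(), leaving the original buffer installed in the ring; larger frames swap buffers as before. A user-space sketch of that decision, with malloc()/memcpy() standing in for netdev_alloc_skb() and the DMA sync:

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

/* Sketch of the copybreak decision: frames at or below the threshold are
 * duplicated into a right-sized buffer so the original (large) DMA buffer
 * stays mapped and can be handed straight back to the hardware.
 */
static bool copybreak(unsigned char **buf, unsigned int len,
		      unsigned int threshold)
{
	unsigned char *copy;

	if (len > threshold)
		return false;		/* large frame: swap buffers instead */

	copy = malloc(len);		/* stands in for netdev_alloc_skb() */
	if (!copy)
		return false;

	memcpy(copy, *buf, len);	/* the driver does a dma_sync first */
	*buf = copy;
	return true;
}

int main(void)
{
	unsigned char frame[1500] = { 0 };
	unsigned char *buf = frame;

	if (copybreak(&buf, 64, 256))	/* small frame: buf now points at a copy */
		free(buf);
	return 0;
}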
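Note: fec_enet_init() still allocates one contiguous descriptor pool with dma_alloc_coherent(), but now slices it per ring: every receive ring gets its slice first, then every transmit ring, and each slice's DMA address is recorded in rxq->bd_dma/txq->bd_dma so fec_enet_enable_ring() can program FEC_R_DES_START(i)/FEC_X_DES_START(i). An illustrative sketch of that carve-up arithmetic; the descriptor size, ring sizes and base address below are assumptions, not the driver's configuration:

#include <stdio.h>
#include <stdint.h>

struct ring_slice {
	uint32_t dma;		/* what would be written to *_DES_START */
	unsigned int entries;
};

int main(void)
{
	const unsigned int desc_size = 32;	/* e.g. an extended descriptor */
	const unsigned int rx_entries[3] = { 512, 512, 512 };
	const unsigned int tx_entries[3] = { 512, 512, 512 };
	uint32_t bd_dma = 0x80000000;		/* assumed pool base address */
	struct ring_slice rxq[3], txq[3];
	unsigned int i;

	for (i = 0; i < 3; i++) {		/* receive rings first ... */
		rxq[i].dma = bd_dma;
		rxq[i].entries = rx_entries[i];
		bd_dma += desc_size * rx_entries[i];
	}
	for (i = 0; i < 3; i++) {		/* ... then transmit rings */
		txq[i].dma = bd_dma;
		txq[i].entries = tx_entries[i];
		bd_dma += desc_size * tx_entries[i];
	}

	for (i = 0; i < 3; i++)
		printf("rxq%u @ 0x%08x  txq%u @ 0x%08x\n",
		       i, rxq[i].dma, i, txq[i].dma);
	return 0;
}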