| author | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2012-10-15 07:41:27 +0200 |
|---|---|---|
| committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2012-10-15 07:41:27 +0200 |
| commit | c362495586e8a3a6487a318fcd82eaf15ffe2142 (patch) | |
| tree | 86f7b195d36ba198f24f86be327f21a8d24ec248 /drivers/net | |
| parent | tty: serial: sccnxp: Fix bug with unterminated platform_id list (diff) | |
| parent | Linux 3.7-rc1 (diff) | |
Merge 3.7-rc1 into tty-linus
This syncs up the tty-linus branch to the latest in Linus's tree to get all of
the UAPI stuff needed for the next set of patches to merge.
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers/net')
111 files changed, 1238 insertions, 713 deletions
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 7858c58df4a3..b721902bb6b4 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -4826,6 +4826,7 @@ static int bond_check_params(struct bond_params *params) static struct lock_class_key bonding_netdev_xmit_lock_key; static struct lock_class_key bonding_netdev_addr_lock_key; +static struct lock_class_key bonding_tx_busylock_key; static void bond_set_lockdep_class_one(struct net_device *dev, struct netdev_queue *txq, @@ -4840,6 +4841,7 @@ static void bond_set_lockdep_class(struct net_device *dev) lockdep_set_class(&dev->addr_list_lock, &bonding_netdev_addr_lock_key); netdev_for_each_tx_queue(dev, bond_set_lockdep_class_one, NULL); + dev->qdisc_tx_busylock = &bonding_tx_busylock_key; } /* diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c index c975999bb055..799c354083c4 100644 --- a/drivers/net/can/mscan/mpc5xxx_can.c +++ b/drivers/net/can/mscan/mpc5xxx_can.c @@ -247,7 +247,7 @@ static u32 __devinit mpc512x_can_get_clock(struct platform_device *ofdev, } #endif /* CONFIG_PPC_MPC512x */ -static struct of_device_id mpc5xxx_can_table[]; +static const struct of_device_id mpc5xxx_can_table[]; static int __devinit mpc5xxx_can_probe(struct platform_device *ofdev) { const struct of_device_id *match; @@ -380,17 +380,17 @@ static int mpc5xxx_can_resume(struct platform_device *ofdev) } #endif -static const struct mpc5xxx_can_data __devinitdata mpc5200_can_data = { +static const struct mpc5xxx_can_data __devinitconst mpc5200_can_data = { .type = MSCAN_TYPE_MPC5200, .get_clock = mpc52xx_can_get_clock, }; -static const struct mpc5xxx_can_data __devinitdata mpc5121_can_data = { +static const struct mpc5xxx_can_data __devinitconst mpc5121_can_data = { .type = MSCAN_TYPE_MPC5121, .get_clock = mpc512x_can_get_clock, }; -static struct of_device_id __devinitdata mpc5xxx_can_table[] = { +static const struct of_device_id __devinitconst mpc5xxx_can_table[] = { { .compatible = "fsl,mpc5200-mscan", .data = &mpc5200_can_data, }, /* Note that only MPC5121 Rev. 
2 (and later) is supported */ { .compatible = "fsl,mpc5121-mscan", .data = &mpc5121_can_data, }, diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c index f0a12962f7b6..f5b82aeb2540 100644 --- a/drivers/net/can/sja1000/peak_pci.c +++ b/drivers/net/can/sja1000/peak_pci.c @@ -583,12 +583,14 @@ static int __devinit peak_pci_probe(struct pci_dev *pdev, cfg_base = pci_iomap(pdev, 0, PEAK_PCI_CFG_SIZE); if (!cfg_base) { dev_err(&pdev->dev, "failed to map PCI resource #0\n"); + err = -ENOMEM; goto failure_release_regions; } reg_base = pci_iomap(pdev, 1, PEAK_PCI_CHAN_SIZE * channels); if (!reg_base) { dev_err(&pdev->dev, "failed to map PCI resource #1\n"); + err = -ENOMEM; goto failure_unmap_cfg_base; } diff --git a/drivers/net/can/sja1000/peak_pcmcia.c b/drivers/net/can/sja1000/peak_pcmcia.c index ec6bd9d1b2af..272a85f32b14 100644 --- a/drivers/net/can/sja1000/peak_pcmcia.c +++ b/drivers/net/can/sja1000/peak_pcmcia.c @@ -686,8 +686,10 @@ static int __devinit pcan_probe(struct pcmcia_device *pdev) /* detect available channels */ pcan_add_channels(card); - if (!card->chan_count) + if (!card->chan_count) { + err = -ENOMEM; goto probe_err_4; + } /* init the timer which controls the leds */ init_timer(&card->led_timer); diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c index 034c16b60e96..adc3708d8829 100644 --- a/drivers/net/can/slcan.c +++ b/drivers/net/can/slcan.c @@ -56,7 +56,7 @@ #include <linux/kernel.h> #include <linux/can.h> -static __initdata const char banner[] = +static __initconst const char banner[] = KERN_INFO "slcan: serial line CAN interface driver\n"; MODULE_ALIAS_LDISC(N_SLCAN); diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c index 4f93c0be0053..0a2a5ee79a17 100644 --- a/drivers/net/can/vcan.c +++ b/drivers/net/can/vcan.c @@ -49,7 +49,7 @@ #include <linux/slab.h> #include <net/rtnetlink.h> -static __initdata const char banner[] = +static __initconst const char banner[] = KERN_INFO "vcan: Virtual CAN interface driver\n"; MODULE_DESCRIPTION("virtual CAN interface"); diff --git a/drivers/net/ethernet/8390/ne3210.c b/drivers/net/ethernet/8390/ne3210.c index a2f8b2b8e27c..e3f57427d5c5 100644 --- a/drivers/net/ethernet/8390/ne3210.c +++ b/drivers/net/ethernet/8390/ne3210.c @@ -81,7 +81,7 @@ static void ne3210_block_output(struct net_device *dev, int count, const unsigne static unsigned char irq_map[] __initdata = {15, 12, 11, 10, 9, 7, 5, 3}; static unsigned int shmem_map[] __initdata = {0xff0, 0xfe0, 0xfff0, 0xd8, 0xffe0, 0xffc0, 0xd0, 0x0}; -static const char *ifmap[] __initdata = {"UTP", "?", "BNC", "AUI"}; +static const char * const ifmap[] __initconst = {"UTP", "?", "BNC", "AUI"}; static int ifmap_val[] __initdata = { IF_PORT_10BASET, IF_PORT_UNKNOWN, diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c index d920a529ba22..5b65992c2a0a 100644 --- a/drivers/net/ethernet/adaptec/starfire.c +++ b/drivers/net/ethernet/adaptec/starfire.c @@ -295,7 +295,7 @@ MODULE_DEVICE_TABLE(pci, starfire_pci_tbl); static const struct chip_info { const char *name; int drv_flags; -} netdrv_tbl[] __devinitdata = { +} netdrv_tbl[] __devinitconst = { { "Adaptec Starfire 6915", CanHaveMII }, }; diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c index 64d0d9c1afa2..3491d4312fc9 100644 --- a/drivers/net/ethernet/amd/amd8111e.c +++ b/drivers/net/ethernet/amd/amd8111e.c @@ -1845,6 +1845,7 @@ static int __devinit amd8111e_probe_one(struct pci_dev *pdev, if((pm_cap = 
pci_find_capability(pdev, PCI_CAP_ID_PM))==0){ printk(KERN_ERR "amd8111e: No Power Management capability, " "exiting.\n"); + err = -ENODEV; goto err_free_reg; } @@ -1852,6 +1853,7 @@ static int __devinit amd8111e_probe_one(struct pci_dev *pdev, if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) < 0) { printk(KERN_ERR "amd8111e: DMA not supported," "exiting.\n"); + err = -ENODEV; goto err_free_reg; } diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c index 397596b078d9..f195acfa2df7 100644 --- a/drivers/net/ethernet/amd/au1000_eth.c +++ b/drivers/net/ethernet/amd/au1000_eth.c @@ -1174,8 +1174,10 @@ static int __devinit au1000_probe(struct platform_device *pdev) snprintf(aup->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", pdev->name, aup->mac_id); aup->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); - if (aup->mii_bus->irq == NULL) + if (aup->mii_bus->irq == NULL) { + err = -ENOMEM; goto err_out; + } for (i = 0; i < PHY_MAX_ADDR; ++i) aup->mii_bus->irq[i] = PHY_POLL; @@ -1190,7 +1192,8 @@ static int __devinit au1000_probe(struct platform_device *pdev) goto err_mdiobus_reg; } - if (au1000_mii_probe(dev) != 0) + err = au1000_mii_probe(dev); + if (err != 0) goto err_out; pDBfree = NULL; @@ -1205,6 +1208,7 @@ static int __devinit au1000_probe(struct platform_device *pdev) } aup->pDBfree = pDBfree; + err = -ENODEV; for (i = 0; i < NUM_RX_DMA; i++) { pDB = au1000_GetFreeDB(aup); if (!pDB) @@ -1213,6 +1217,8 @@ static int __devinit au1000_probe(struct platform_device *pdev) aup->rx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr; aup->rx_db_inuse[i] = pDB; } + + err = -ENODEV; for (i = 0; i < NUM_TX_DMA; i++) { pDB = au1000_GetFreeDB(aup); if (!pDB) diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index 55a2e3795055..d19f82f7597a 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c @@ -702,7 +702,7 @@ struct atl1c_platform_patch { u32 patch_flag; #define ATL1C_LINK_PATCH 0x1 }; -static const struct atl1c_platform_patch plats[] __devinitdata = { +static const struct atl1c_platform_patch plats[] __devinitconst = { {0x2060, 0xC1, 0x1019, 0x8152, 0x1}, {0x2060, 0xC1, 0x1019, 0x2060, 0x1}, {0x2060, 0xC1, 0x1019, 0xE000, 0x1}, diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c index 57d64b80fd72..623dd8635c46 100644 --- a/drivers/net/ethernet/atheros/atlx/atl2.c +++ b/drivers/net/ethernet/atheros/atlx/atl2.c @@ -2845,7 +2845,7 @@ static void atl2_force_ps(struct atl2_hw *hw) */ #define ATL2_PARAM(X, desc) \ - static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \ + static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \ MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \ MODULE_PARM_DESC(X, desc); #else diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.h b/drivers/net/ethernet/broadcom/bcm63xx_enet.h index 0e3048b788c2..133d5857b9e2 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.h +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.h @@ -10,6 +10,7 @@ #include <bcm63xx_regs.h> #include <bcm63xx_irq.h> #include <bcm63xx_io.h> +#include <bcm63xx_iudma.h> /* default number of descriptor */ #define BCMENET_DEF_RX_DESC 64 @@ -31,35 +32,6 @@ #define BCMENET_MAX_MTU 2046 /* - * rx/tx dma descriptor - */ -struct bcm_enet_desc { - u32 len_stat; - u32 address; -}; - -#define DMADESC_LENGTH_SHIFT 16 -#define DMADESC_LENGTH_MASK (0xfff << 
DMADESC_LENGTH_SHIFT) -#define DMADESC_OWNER_MASK (1 << 15) -#define DMADESC_EOP_MASK (1 << 14) -#define DMADESC_SOP_MASK (1 << 13) -#define DMADESC_ESOP_MASK (DMADESC_EOP_MASK | DMADESC_SOP_MASK) -#define DMADESC_WRAP_MASK (1 << 12) - -#define DMADESC_UNDER_MASK (1 << 9) -#define DMADESC_APPEND_CRC (1 << 8) -#define DMADESC_OVSIZE_MASK (1 << 4) -#define DMADESC_RXER_MASK (1 << 2) -#define DMADESC_CRC_MASK (1 << 1) -#define DMADESC_OV_MASK (1 << 0) -#define DMADESC_ERR_MASK (DMADESC_UNDER_MASK | \ - DMADESC_OVSIZE_MASK | \ - DMADESC_RXER_MASK | \ - DMADESC_CRC_MASK | \ - DMADESC_OV_MASK) - - -/* * MIB Counters register definitions */ #define ETH_MIB_TX_GD_OCTETS 0 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 30f04a389227..24220992413f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -3523,15 +3523,18 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index) } else #endif if (!bp->rx_ring_size) { - u32 cfg = SHMEM_RD(bp, - dev_info.port_hw_config[BP_PORT(bp)].default_cfg); - rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp); - /* Dercease ring size for 1G functions */ - if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) == - PORT_HW_CFG_NET_SERDES_IF_SGMII) - rx_ring_size /= 10; + if (CHIP_IS_E3(bp)) { + u32 cfg = SHMEM_RD(bp, + dev_info.port_hw_config[BP_PORT(bp)]. + default_cfg); + + /* Decrease ring size for 1G functions */ + if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) == + PORT_HW_CFG_NET_SERDES_IF_SGMII) + rx_ring_size /= 10; + } /* allocate at least number of buffers required by FW */ rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA : diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index f7ed122f4071..d5648fc666bd 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -3052,9 +3052,8 @@ static void bnx2x_drv_info_ether_stat(struct bnx2x *bp) struct eth_stats_info *ether_stat = &bp->slowpath->drv_info_to_mcp.ether_stat; - /* leave last char as NULL */ - memcpy(ether_stat->version, DRV_MODULE_VERSION, - ETH_STAT_INFO_VERSION_LEN - 1); + strlcpy(ether_stat->version, DRV_MODULE_VERSION, + ETH_STAT_INFO_VERSION_LEN); bp->sp_objs[0].mac_obj.get_n_elements(bp, &bp->sp_objs[0].mac_obj, DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED, diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 46280ba4c5d4..a8800ac10df9 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -782,7 +782,8 @@ static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us) return i == timeout_us / 10; } -int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off, u32 len) +static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off, + u32 len) { int err; u32 i, bufoff, msgoff, maxlen, apedata; @@ -7763,7 +7764,7 @@ static int tg3_alloc_consistent(struct tg3 *tp) sblk = tnapi->hw_status; if (tg3_flag(tp, ENABLE_RSS)) { - u16 *prodptr = 0; + u16 *prodptr = NULL; /* * When RSS is enabled, the status block format changes @@ -8103,11 +8104,11 @@ static int tg3_chip_reset(struct tg3 *tp) u16 val16; if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) { - int i; + int j; u32 cfg_val; /* Wait for link training to complete. 
*/ - for (i = 0; i < 5000; i++) + for (j = 0; j < 5000; j++) udelay(100); pci_read_config_dword(tp->pdev, 0xc4, &cfg_val); @@ -10206,7 +10207,7 @@ static u32 tg3_irq_count(struct tg3 *tp) static bool tg3_enable_msix(struct tg3 *tp) { int i, rc; - struct msix_entry msix_ent[tp->irq_max]; + struct msix_entry msix_ent[TG3_IRQ_MAX_VECS]; tp->txq_cnt = tp->txq_req; tp->rxq_cnt = tp->rxq_req; diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c index 2b4b4f529ab4..16814b34d4b6 100644 --- a/drivers/net/ethernet/calxeda/xgmac.c +++ b/drivers/net/ethernet/calxeda/xgmac.c @@ -375,7 +375,6 @@ struct xgmac_priv { unsigned int tx_tail; void __iomem *base; - struct sk_buff_head rx_recycle; unsigned int dma_buf_sz; dma_addr_t dma_rx_phy; dma_addr_t dma_tx_phy; @@ -672,9 +671,7 @@ static void xgmac_rx_refill(struct xgmac_priv *priv) p = priv->dma_rx + entry; if (priv->rx_skbuff[entry] == NULL) { - skb = __skb_dequeue(&priv->rx_recycle); - if (skb == NULL) - skb = netdev_alloc_skb(priv->dev, priv->dma_buf_sz); + skb = netdev_alloc_skb(priv->dev, priv->dma_buf_sz); if (unlikely(skb == NULL)) break; @@ -887,17 +884,7 @@ static void xgmac_tx_complete(struct xgmac_priv *priv) desc_get_buf_len(p), DMA_TO_DEVICE); } - /* - * If there's room in the queue (limit it to size) - * we add this skb back into the pool, - * if it's the right size. - */ - if ((skb_queue_len(&priv->rx_recycle) < - DMA_RX_RING_SZ) && - skb_recycle_check(skb, priv->dma_buf_sz)) - __skb_queue_head(&priv->rx_recycle, skb); - else - dev_kfree_skb(skb); + dev_kfree_skb(skb); } if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) > @@ -1016,7 +1003,6 @@ static int xgmac_open(struct net_device *dev) dev->dev_addr); } - skb_queue_head_init(&priv->rx_recycle); memset(&priv->xstats, 0, sizeof(struct xgmac_extra_stats)); /* Initialize the XGMAC and descriptors */ @@ -1053,7 +1039,6 @@ static int xgmac_stop(struct net_device *dev) napi_disable(&priv->napi); writel(0, priv->base + XGMAC_DMA_INTR_ENA); - skb_queue_purge(&priv->rx_recycle); /* Disable the MAC core */ xgmac_mac_disable(priv->base); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 745a1f53361f..a4da893ac1e1 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -43,6 +43,7 @@ #include <linux/pci.h> #include <linux/spinlock.h> #include <linux/timer.h> +#include <linux/vmalloc.h> #include <asm/io.h> #include "cxgb4_uld.h" #include "t4_hw.h" @@ -695,6 +696,7 @@ int t4_seeprom_wp(struct adapter *adapter, bool enable); int get_vpd_params(struct adapter *adapter, struct vpd_params *p); int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size); unsigned int t4_flash_cfg_addr(struct adapter *adapter); +int t4_load_cfg(struct adapter *adapter, const u8 *cfg_data, unsigned int size); int t4_check_fw_version(struct adapter *adapter); int t4_prep_adapter(struct adapter *adapter); int t4_port_init(struct adapter *adap, int mbox, int pf, int vf); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 6b9f6bb2f7ed..604f4f87f550 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -443,7 +443,10 @@ int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */ module_param(dbfifo_int_thresh, int, 0644); MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold"); -int dbfifo_drain_delay = 1000; 
/* usecs to sleep while draining the dbfifo */ +/* + * usecs to sleep while draining the dbfifo + */ +static int dbfifo_drain_delay = 1000; module_param(dbfifo_drain_delay, int, 0644); MODULE_PARM_DESC(dbfifo_drain_delay, "usecs to sleep while draining the dbfifo"); @@ -636,7 +639,7 @@ static void name_msix_vecs(struct adapter *adap) static int request_msix_queue_irqs(struct adapter *adap) { struct sge *s = &adap->sge; - int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi = 2; + int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi_index = 2; err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0, adap->msix_info[1].desc, &s->fw_evtq); @@ -644,56 +647,60 @@ static int request_msix_queue_irqs(struct adapter *adap) return err; for_each_ethrxq(s, ethqidx) { - err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0, - adap->msix_info[msi].desc, + err = request_irq(adap->msix_info[msi_index].vec, + t4_sge_intr_msix, 0, + adap->msix_info[msi_index].desc, &s->ethrxq[ethqidx].rspq); if (err) goto unwind; - msi++; + msi_index++; } for_each_ofldrxq(s, ofldqidx) { - err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0, - adap->msix_info[msi].desc, + err = request_irq(adap->msix_info[msi_index].vec, + t4_sge_intr_msix, 0, + adap->msix_info[msi_index].desc, &s->ofldrxq[ofldqidx].rspq); if (err) goto unwind; - msi++; + msi_index++; } for_each_rdmarxq(s, rdmaqidx) { - err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0, - adap->msix_info[msi].desc, + err = request_irq(adap->msix_info[msi_index].vec, + t4_sge_intr_msix, 0, + adap->msix_info[msi_index].desc, &s->rdmarxq[rdmaqidx].rspq); if (err) goto unwind; - msi++; + msi_index++; } return 0; unwind: while (--rdmaqidx >= 0) - free_irq(adap->msix_info[--msi].vec, + free_irq(adap->msix_info[--msi_index].vec, &s->rdmarxq[rdmaqidx].rspq); while (--ofldqidx >= 0) - free_irq(adap->msix_info[--msi].vec, + free_irq(adap->msix_info[--msi_index].vec, &s->ofldrxq[ofldqidx].rspq); while (--ethqidx >= 0) - free_irq(adap->msix_info[--msi].vec, &s->ethrxq[ethqidx].rspq); + free_irq(adap->msix_info[--msi_index].vec, + &s->ethrxq[ethqidx].rspq); free_irq(adap->msix_info[1].vec, &s->fw_evtq); return err; } static void free_msix_queue_irqs(struct adapter *adap) { - int i, msi = 2; + int i, msi_index = 2; struct sge *s = &adap->sge; free_irq(adap->msix_info[1].vec, &s->fw_evtq); for_each_ethrxq(s, i) - free_irq(adap->msix_info[msi++].vec, &s->ethrxq[i].rspq); + free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq); for_each_ofldrxq(s, i) - free_irq(adap->msix_info[msi++].vec, &s->ofldrxq[i].rspq); + free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq); for_each_rdmarxq(s, i) - free_irq(adap->msix_info[msi++].vec, &s->rdmarxq[i].rspq); + free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq); } /** @@ -2535,9 +2542,8 @@ static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx) ret = t4_mem_win_read_len(adap, addr, (__be32 *)&indices, 8); if (!ret) { - indices = be64_to_cpu(indices); - *cidx = (indices >> 25) & 0xffff; - *pidx = (indices >> 9) & 0xffff; + *cidx = (be64_to_cpu(indices) >> 25) & 0xffff; + *pidx = (be64_to_cpu(indices) >> 9) & 0xffff; } return ret; } @@ -3634,10 +3640,10 @@ static int adap_init0_no_config(struct adapter *adapter, int reset) * field selections will fit in the 36-bit budget. 
*/ if (tp_vlan_pri_map != TP_VLAN_PRI_MAP_DEFAULT) { - int i, bits = 0; + int j, bits = 0; - for (i = TP_VLAN_PRI_MAP_FIRST; i <= TP_VLAN_PRI_MAP_LAST; i++) - switch (tp_vlan_pri_map & (1 << i)) { + for (j = TP_VLAN_PRI_MAP_FIRST; j <= TP_VLAN_PRI_MAP_LAST; j++) + switch (tp_vlan_pri_map & (1 << j)) { case 0: /* compressed filter field not enabled */ break; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 35b81d8b59e9..32eec15fe4c2 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -380,9 +380,11 @@ static int t4_mem_win_rw(struct adapter *adap, u32 addr, __be32 *data, int dir) /* Collecting data 4 bytes at a time upto MEMWIN0_APERTURE */ for (i = 0; i < MEMWIN0_APERTURE; i = i+0x4) { if (dir) - *data++ = t4_read_reg(adap, (MEMWIN0_BASE + i)); + *data++ = (__force __be32) t4_read_reg(adap, + (MEMWIN0_BASE + i)); else - t4_write_reg(adap, (MEMWIN0_BASE + i), *data++); + t4_write_reg(adap, (MEMWIN0_BASE + i), + (__force u32) *data++); } return 0; @@ -408,7 +410,8 @@ static int t4_memory_rw(struct adapter *adap, int mtype, u32 addr, u32 len, __be32 *buf, int dir) { u32 pos, start, end, offset, memoffset; - int ret; + int ret = 0; + __be32 *data; /* * Argument sanity checks ... @@ -416,6 +419,10 @@ static int t4_memory_rw(struct adapter *adap, int mtype, u32 addr, u32 len, if ((addr & 0x3) || (len & 0x3)) return -EINVAL; + data = vmalloc(MEMWIN0_APERTURE); + if (!data) + return -ENOMEM; + /* * Offset into the region of memory which is being accessed * MEM_EDC0 = 0 @@ -438,7 +445,6 @@ static int t4_memory_rw(struct adapter *adap, int mtype, u32 addr, u32 len, offset = (addr - start)/sizeof(__be32); for (pos = start; pos < end; pos += MEMWIN0_APERTURE, offset = 0) { - __be32 data[MEMWIN0_APERTURE/sizeof(__be32)]; /* * If we're writing, copy the data from the caller's memory @@ -452,7 +458,7 @@ static int t4_memory_rw(struct adapter *adap, int mtype, u32 addr, u32 len, if (offset || len < MEMWIN0_APERTURE) { ret = t4_mem_win_rw(adap, pos, data, 1); if (ret) - return ret; + break; } while (offset < (MEMWIN0_APERTURE/sizeof(__be32)) && len > 0) { @@ -466,7 +472,7 @@ static int t4_memory_rw(struct adapter *adap, int mtype, u32 addr, u32 len, */ ret = t4_mem_win_rw(adap, pos, data, dir); if (ret) - return ret; + break; /* * If we're reading, copy the data into the caller's memory @@ -480,7 +486,8 @@ static int t4_memory_rw(struct adapter *adap, int mtype, u32 addr, u32 len, } } - return 0; + vfree(data); + return ret; } int t4_memory_write(struct adapter *adap, int mtype, u32 addr, u32 len, @@ -519,16 +526,21 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p) u32 cclk_param, cclk_val; int i, ret; int ec, sn; - u8 vpd[VPD_LEN], csum; + u8 *vpd, csum; unsigned int vpdr_len, kw_offset, id_len; - ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd); + vpd = vmalloc(VPD_LEN); + if (!vpd) + return -ENOMEM; + + ret = pci_read_vpd(adapter->pdev, VPD_BASE, VPD_LEN, vpd); if (ret < 0) - return ret; + goto out; if (vpd[0] != PCI_VPD_LRDT_ID_STRING) { dev_err(adapter->pdev_dev, "missing VPD ID string\n"); - return -EINVAL; + ret = -EINVAL; + goto out; } id_len = pci_vpd_lrdt_size(vpd); @@ -538,21 +550,24 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p) i = pci_vpd_find_tag(vpd, 0, VPD_LEN, PCI_VPD_LRDT_RO_DATA); if (i < 0) { dev_err(adapter->pdev_dev, "missing VPD-R section\n"); - return -EINVAL; + ret = -EINVAL; + goto out; } vpdr_len = 
pci_vpd_lrdt_size(&vpd[i]); kw_offset = i + PCI_VPD_LRDT_TAG_SIZE; if (vpdr_len + kw_offset > VPD_LEN) { dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len); - return -EINVAL; + ret = -EINVAL; + goto out; } #define FIND_VPD_KW(var, name) do { \ var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \ if (var < 0) { \ dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \ - return -EINVAL; \ + ret = -EINVAL; \ + goto out; \ } \ var += PCI_VPD_INFO_FLD_HDR_SIZE; \ } while (0) @@ -564,7 +579,8 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p) if (csum) { dev_err(adapter->pdev_dev, "corrupted VPD EEPROM, actual csum %u\n", csum); - return -EINVAL; + ret = -EINVAL; + goto out; } FIND_VPD_KW(ec, "EC"); @@ -587,6 +603,9 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p) FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK)); ret = t4_query_params(adapter, adapter->mbox, 0, 0, 1, &cclk_param, &cclk_val); + +out: + vfree(vpd); if (ret) return ret; p->cclk = cclk_val; @@ -727,7 +746,7 @@ static int t4_read_flash(struct adapter *adapter, unsigned int addr, if (ret) return ret; if (byte_oriented) - *data = htonl(*data); + *data = (__force __u32) (htonl(*data)); } return 0; } @@ -975,7 +994,7 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size) int ret, addr; unsigned int i; u8 first_page[SF_PAGE_SIZE]; - const u32 *p = (const u32 *)fw_data; + const __be32 *p = (const __be32 *)fw_data; const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data; unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec; unsigned int fw_img_start = adap->params.sf_fw_start; @@ -2298,7 +2317,8 @@ int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len) t4_read_reg(adap, PCIE_MEM_ACCESS_OFFSET); for (i = 0; i < len; i += 4) - *data++ = t4_read_reg(adap, (MEMWIN0_BASE + off + i)); + *data++ = (__force __be32) t4_read_reg(adap, + (MEMWIN0_BASE + off + i)); return 0; } diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c index 61cc09342865..77335853ac36 100644 --- a/drivers/net/ethernet/dec/tulip/de2104x.c +++ b/drivers/net/ethernet/dec/tulip/de2104x.c @@ -661,9 +661,6 @@ static netdev_tx_t de_start_xmit (struct sk_buff *skb, new frame, not around filling de->setup_frame. This is non-deterministic when re-entered but still correct. */ -#undef set_bit_le -#define set_bit_le(i,p) do { ((char *)(p))[(i)/8] |= (1<<((i)%8)); } while(0) - static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev) { struct de_private *de = netdev_priv(dev); @@ -673,12 +670,12 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev) u16 *eaddrs; memset(hash_table, 0, sizeof(hash_table)); - set_bit_le(255, hash_table); /* Broadcast entry */ + __set_bit_le(255, hash_table); /* Broadcast entry */ /* This should work on big-endian machines as well. 
*/ netdev_for_each_mc_addr(ha, dev) { int index = ether_crc_le(ETH_ALEN, ha->addr) & 0x1ff; - set_bit_le(index, hash_table); + __set_bit_le(index, hash_table); } for (i = 0; i < 32; i++) { diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c index 4d6fe604fa64..d23755ea9bc7 100644 --- a/drivers/net/ethernet/dec/tulip/dmfe.c +++ b/drivers/net/ethernet/dec/tulip/dmfe.c @@ -446,13 +446,17 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev, /* Allocate Tx/Rx descriptor memory */ db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr); - if (!db->desc_pool_ptr) + if (!db->desc_pool_ptr) { + err = -ENOMEM; goto err_out_res; + } db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, &db->buf_pool_dma_ptr); - if (!db->buf_pool_ptr) + if (!db->buf_pool_ptr) { + err = -ENOMEM; goto err_out_free_desc; + } db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr; db->first_tx_desc_dma = db->desc_pool_dma_ptr; @@ -462,8 +466,10 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev, db->chip_id = ent->driver_data; /* IO type range. */ db->ioaddr = pci_iomap(pdev, 0, 0); - if (!db->ioaddr) + if (!db->ioaddr) { + err = -ENOMEM; goto err_out_free_buf; + } db->chip_revision = pdev->revision; db->wol_mode = 0; diff --git a/drivers/net/ethernet/dec/tulip/eeprom.c b/drivers/net/ethernet/dec/tulip/eeprom.c index ed7d1dcd9566..44f7e8e82d85 100644 --- a/drivers/net/ethernet/dec/tulip/eeprom.c +++ b/drivers/net/ethernet/dec/tulip/eeprom.c @@ -79,7 +79,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = { {NULL}}; -static const char *block_name[] __devinitdata = { +static const char *const block_name[] __devinitconst = { "21140 non-MII", "21140 MII PHY", "21142 Serial PHY", diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c index c4f37aca2269..885700a19978 100644 --- a/drivers/net/ethernet/dec/tulip/tulip_core.c +++ b/drivers/net/ethernet/dec/tulip/tulip_core.c @@ -1010,9 +1010,6 @@ static int private_ioctl (struct net_device *dev, struct ifreq *rq, int cmd) new frame, not around filling tp->setup_frame. This is non-deterministic when re-entered but still correct. */ -#undef set_bit_le -#define set_bit_le(i,p) do { ((char *)(p))[(i)/8] |= (1<<((i)%8)); } while(0) - static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev) { struct tulip_private *tp = netdev_priv(dev); @@ -1022,12 +1019,12 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev) u16 *eaddrs; memset(hash_table, 0, sizeof(hash_table)); - set_bit_le(255, hash_table); /* Broadcast entry */ + __set_bit_le(255, hash_table); /* Broadcast entry */ /* This should work on big-endian machines as well. */ netdev_for_each_mc_addr(ha, dev) { int index = ether_crc_le(ETH_ALEN, ha->addr) & 0x1ff; - set_bit_le(index, hash_table); + __set_bit_le(index, hash_table); } for (i = 0; i < 32; i++) { *setup_frm++ = hash_table[i]; diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c index 4d1ffca83c82..7c1ec4d7920b 100644 --- a/drivers/net/ethernet/dec/tulip/winbond-840.c +++ b/drivers/net/ethernet/dec/tulip/winbond-840.c @@ -236,7 +236,7 @@ struct pci_id_info { int drv_flags; /* Driver use, intended as capability flags. 
*/ }; -static const struct pci_id_info pci_id_tbl[] __devinitdata = { +static const struct pci_id_info pci_id_tbl[] __devinitconst = { { /* Sometime a Level-One switch card. */ "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII}, { "Winbond W89c840", CanHaveMII | HasBrokenTx}, diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c index d7bb52a7bda1..3b83588e51f6 100644 --- a/drivers/net/ethernet/dlink/sundance.c +++ b/drivers/net/ethernet/dlink/sundance.c @@ -218,7 +218,7 @@ enum { struct pci_id_info { const char *name; }; -static const struct pci_id_info pci_id_tbl[] __devinitdata = { +static const struct pci_id_info pci_id_tbl[] __devinitconst = { {"D-Link DFE-550TX FAST Ethernet Adapter"}, {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"}, {"D-Link DFE-580TX 4 port Server Adapter"}, diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index eb3f2cb3b93b..d1b6cc587639 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -2129,8 +2129,11 @@ void be_detect_error(struct be_adapter *adapter) ue_hi = (ue_hi & ~ue_hi_mask); } - if (ue_lo || ue_hi || - sliport_status & SLIPORT_STATUS_ERR_MASK) { + /* On certain platforms BE hardware can indicate spurious UEs. + * Allow the h/w to stop working completely in case of a real UE. + * Hence not setting the hw_error for UE detection. + */ + if (sliport_status & SLIPORT_STATUS_ERR_MASK) { adapter->hw_error = true; dev_err(&adapter->pdev->dev, "Error detected in the card\n"); diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c index 9d71c9cc300b..0e4a0ac86aa8 100644 --- a/drivers/net/ethernet/fealnx.c +++ b/drivers/net/ethernet/fealnx.c @@ -150,7 +150,7 @@ struct chip_info { int flags; }; -static const struct chip_info skel_netdrv_tbl[] __devinitdata = { +static const struct chip_info skel_netdrv_tbl[] __devinitconst = { { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR }, { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR }, { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR }, diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index a1b52ec3b930..1d03dcdd5e56 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -1765,7 +1765,6 @@ static void free_skb_resources(struct gfar_private *priv) sizeof(struct rxbd8) * priv->total_rx_ring_size, priv->tx_queue[0]->tx_bd_base, priv->tx_queue[0]->tx_bd_dma_base); - skb_queue_purge(&priv->rx_recycle); } void gfar_start(struct net_device *dev) @@ -1943,8 +1942,6 @@ static int gfar_enet_open(struct net_device *dev) enable_napi(priv); - skb_queue_head_init(&priv->rx_recycle); - /* Initialize a bunch of registers */ init_registers(dev); @@ -2533,16 +2530,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) bytes_sent += skb->len; - /* If there's room in the queue (limit it to rx_buffer_size) - * we add this skb back into the pool, if it's the right size - */ - if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size && - skb_recycle_check(skb, priv->rx_buffer_size + - RXBUF_ALIGNMENT)) { - gfar_align_skb(skb); - skb_queue_head(&priv->rx_recycle, skb); - } else - dev_kfree_skb_any(skb); + dev_kfree_skb_any(skb); tx_queue->tx_skbuff[skb_dirtytx] = NULL; @@ -2608,7 +2596,7 @@ static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, static struct sk_buff *gfar_alloc_skb(struct net_device *dev) { struct gfar_private *priv 
= netdev_priv(dev); - struct sk_buff *skb = NULL; + struct sk_buff *skb; skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT); if (!skb) @@ -2621,14 +2609,7 @@ static struct sk_buff *gfar_alloc_skb(struct net_device *dev) struct sk_buff *gfar_new_skb(struct net_device *dev) { - struct gfar_private *priv = netdev_priv(dev); - struct sk_buff *skb = NULL; - - skb = skb_dequeue(&priv->rx_recycle); - if (!skb) - skb = gfar_alloc_skb(dev); - - return skb; + return gfar_alloc_skb(dev); } static inline void count_errors(unsigned short status, struct net_device *dev) @@ -2787,7 +2768,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) if (unlikely(!newskb)) newskb = skb; else if (skb) - skb_queue_head(&priv->rx_recycle, skb); + dev_kfree_skb(skb); } else { /* Increment the number of packets */ rx_queue->stats.rx_packets++; diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h index 4141ef2ddafc..22eabc13ca99 100644 --- a/drivers/net/ethernet/freescale/gianfar.h +++ b/drivers/net/ethernet/freescale/gianfar.h @@ -1080,8 +1080,6 @@ struct gfar_private { u32 cur_filer_idx; - struct sk_buff_head rx_recycle; - /* RX queue filer rule set*/ struct ethtool_rx_list rx_list; struct mutex rx_queue_access; diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index 164288439220..0a70bb55d1b0 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -209,14 +209,12 @@ static struct list_head *dequeue(struct list_head *lh) static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth, u8 __iomem *bd) { - struct sk_buff *skb = NULL; + struct sk_buff *skb; - skb = __skb_dequeue(&ugeth->rx_recycle); + skb = netdev_alloc_skb(ugeth->ndev, + ugeth->ug_info->uf_info.max_rx_buf_length + + UCC_GETH_RX_DATA_BUF_ALIGNMENT); if (!skb) - skb = netdev_alloc_skb(ugeth->ndev, - ugeth->ug_info->uf_info.max_rx_buf_length + - UCC_GETH_RX_DATA_BUF_ALIGNMENT); - if (skb == NULL) return NULL; /* We need the data buffer to be aligned properly. We will reserve @@ -2020,8 +2018,6 @@ static void ucc_geth_memclean(struct ucc_geth_private *ugeth) iounmap(ugeth->ug_regs); ugeth->ug_regs = NULL; } - - skb_queue_purge(&ugeth->rx_recycle); } static void ucc_geth_set_multi(struct net_device *dev) @@ -2230,8 +2226,6 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth) return -ENOMEM; } - skb_queue_head_init(&ugeth->rx_recycle); - return 0; } @@ -3274,12 +3268,7 @@ static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit if (netif_msg_rx_err(ugeth)) ugeth_err("%s, %d: ERROR!!! 
skb - 0x%08x", __func__, __LINE__, (u32) skb); - if (skb) { - skb->data = skb->head + NET_SKB_PAD; - skb->len = 0; - skb_reset_tail_pointer(skb); - __skb_queue_head(&ugeth->rx_recycle, skb); - } + dev_kfree_skb(skb); ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = NULL; dev->stats.rx_dropped++; @@ -3349,13 +3338,7 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ) dev->stats.tx_packets++; - if (skb_queue_len(&ugeth->rx_recycle) < RX_BD_RING_LEN && - skb_recycle_check(skb, - ugeth->ug_info->uf_info.max_rx_buf_length + - UCC_GETH_RX_DATA_BUF_ALIGNMENT)) - __skb_queue_head(&ugeth->rx_recycle, skb); - else - dev_kfree_skb(skb); + dev_kfree_skb(skb); ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL; ugeth->skb_dirtytx[txQ] = diff --git a/drivers/net/ethernet/freescale/ucc_geth.h b/drivers/net/ethernet/freescale/ucc_geth.h index f71b3e7b12de..75f337163ce3 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.h +++ b/drivers/net/ethernet/freescale/ucc_geth.h @@ -1214,8 +1214,6 @@ struct ucc_geth_private { /* index of the first skb which hasn't been transmitted yet. */ u16 skb_dirtytx[NUM_TX_QUEUES]; - struct sk_buff_head rx_recycle; - struct ugeth_mii_info *mii_info; struct phy_device *phydev; phy_interface_t phy_interface; diff --git a/drivers/net/ethernet/ibm/ehea/ehea.h b/drivers/net/ethernet/ibm/ehea/ehea.h index b8e46cc31e53..6be7b9839f35 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea.h +++ b/drivers/net/ethernet/ibm/ehea/ehea.h @@ -35,7 +35,6 @@ #include <linux/if_vlan.h> #include <asm/ibmebus.h> -#include <asm/abs_addr.h> #include <asm/io.h> #define DRV_NAME "ehea" diff --git a/drivers/net/ethernet/ibm/ehea/ehea_phyp.c b/drivers/net/ethernet/ibm/ehea/ehea_phyp.c index 30f903332e92..d3a130ccdcc8 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_phyp.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_phyp.c @@ -141,7 +141,7 @@ u64 ehea_h_query_ehea_qp(const u64 adapter_handle, const u8 qp_category, qp_category, /* R5 */ qp_handle, /* R6 */ sel_mask, /* R7 */ - virt_to_abs(cb_addr), /* R8 */ + __pa(cb_addr), /* R8 */ 0, 0); } @@ -415,7 +415,7 @@ u64 ehea_h_modify_ehea_qp(const u64 adapter_handle, const u8 cat, (u64) cat, /* R5 */ qp_handle, /* R6 */ sel_mask, /* R7 */ - virt_to_abs(cb_addr), /* R8 */ + __pa(cb_addr), /* R8 */ 0, 0, 0, 0); /* R9-R12 */ *inv_attr_id = outs[0]; @@ -528,7 +528,7 @@ u64 ehea_h_query_ehea(const u64 adapter_handle, void *cb_addr) { u64 hret, cb_logaddr; - cb_logaddr = virt_to_abs(cb_addr); + cb_logaddr = __pa(cb_addr); hret = ehea_plpar_hcall_norets(H_QUERY_HEA, adapter_handle, /* R4 */ @@ -545,7 +545,7 @@ u64 ehea_h_query_ehea_port(const u64 adapter_handle, const u16 port_num, void *cb_addr) { u64 port_info; - u64 cb_logaddr = virt_to_abs(cb_addr); + u64 cb_logaddr = __pa(cb_addr); u64 arr_index = 0; port_info = EHEA_BMASK_SET(H_MEHEAPORT_CAT, cb_cat) @@ -567,7 +567,7 @@ u64 ehea_h_modify_ehea_port(const u64 adapter_handle, const u16 port_num, unsigned long outs[PLPAR_HCALL9_BUFSIZE]; u64 port_info; u64 arr_index = 0; - u64 cb_logaddr = virt_to_abs(cb_addr); + u64 cb_logaddr = __pa(cb_addr); port_info = EHEA_BMASK_SET(H_MEHEAPORT_CAT, cb_cat) | EHEA_BMASK_SET(H_MEHEAPORT_PN, port_num); @@ -621,6 +621,6 @@ u64 ehea_h_error_data(const u64 adapter_handle, const u64 ressource_handle, return ehea_plpar_hcall_norets(H_ERROR_DATA, adapter_handle, /* R4 */ ressource_handle, /* R5 */ - virt_to_abs(rblock), /* R6 */ + __pa(rblock), /* R6 */ 0, 0, 0, 0); /* R7-R12 */ } diff --git a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c index 
cb66f574dc97..27f881758d16 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c @@ -163,7 +163,7 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter, goto out_kill_hwq; } - rpage = virt_to_abs(vpage); + rpage = __pa(vpage); hret = ehea_h_register_rpage(adapter->handle, 0, EHEA_CQ_REGISTER_ORIG, cq->fw_handle, rpage, 1); @@ -290,7 +290,7 @@ struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter, goto out_kill_hwq; } - rpage = virt_to_abs(vpage); + rpage = __pa(vpage); hret = ehea_h_register_rpage(adapter->handle, 0, EHEA_EQ_REGISTER_ORIG, @@ -395,7 +395,7 @@ static int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue, pr_err("hw_qpageit_get_inc failed\n"); goto out_kill_hwq; } - rpage = virt_to_abs(vpage); + rpage = __pa(vpage); hret = ehea_h_register_rpage(adapter->handle, 0, h_call_q_selector, qp->fw_handle, rpage, 1); @@ -790,7 +790,7 @@ u64 ehea_map_vaddr(void *caddr) if (!ehea_bmap) return EHEA_INVAL_ADDR; - index = virt_to_abs(caddr) >> SECTION_SIZE_BITS; + index = __pa(caddr) >> SECTION_SIZE_BITS; top = (index >> EHEA_TOP_INDEX_SHIFT) & EHEA_INDEX_MASK; if (!ehea_bmap->top[top]) return EHEA_INVAL_ADDR; @@ -812,7 +812,7 @@ static inline void *ehea_calc_sectbase(int top, int dir, int idx) unsigned long ret = idx; ret |= dir << EHEA_DIR_INDEX_SHIFT; ret |= top << EHEA_TOP_INDEX_SHIFT; - return abs_to_virt(ret << SECTION_SIZE_BITS); + return __va(ret << SECTION_SIZE_BITS); } static u64 ehea_reg_mr_section(int top, int dir, int idx, u64 *pt, @@ -822,7 +822,7 @@ static u64 ehea_reg_mr_section(int top, int dir, int idx, u64 *pt, void *pg; u64 j, m, hret; unsigned long k = 0; - u64 pt_abs = virt_to_abs(pt); + u64 pt_abs = __pa(pt); void *sectbase = ehea_calc_sectbase(top, dir, idx); @@ -830,7 +830,7 @@ static u64 ehea_reg_mr_section(int top, int dir, int idx, u64 *pt, for (m = 0; m < EHEA_MAX_RPAGE; m++) { pg = sectbase + ((k++) * EHEA_PAGESIZE); - pt[m] = virt_to_abs(pg); + pt[m] = __pa(pg); } hret = ehea_h_register_rpage_mr(adapter->handle, mr->handle, 0, 0, pt_abs, EHEA_MAX_RPAGE); diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index cb3356c9af80..04668b47a1df 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -175,13 +175,13 @@ struct e1000_info; /* * in the case of WTHRESH, it appears at least the 82571/2 hardware * writes back 4 descriptors when WTHRESH=5, and 3 descriptors when - * WTHRESH=4, and since we want 64 bytes at a time written back, set - * it to 5 + * WTHRESH=4, so a setting of 5 gives the most efficient bus + * utilization but to avoid possible Tx stalls, set it to 1 */ #define E1000_TXDCTL_DMA_BURST_ENABLE \ (E1000_TXDCTL_GRAN | /* set descriptor granularity */ \ E1000_TXDCTL_COUNT_DESC | \ - (5 << 16) | /* wthresh must be +1 more than desired */\ + (1 << 16) | /* wthresh must be +1 more than desired */\ (1 << 8) | /* hthresh */ \ 0x1f) /* pthresh */ diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h index ed5b40985edb..d37bfd96c987 100644 --- a/drivers/net/ethernet/intel/e1000e/hw.h +++ b/drivers/net/ethernet/intel/e1000e/hw.h @@ -412,6 +412,8 @@ enum e1e_registers { #define E1000_DEV_ID_PCH2_LV_V 0x1503 #define E1000_DEV_ID_PCH_LPT_I217_LM 0x153A #define E1000_DEV_ID_PCH_LPT_I217_V 0x153B +#define E1000_DEV_ID_PCH_LPTLP_I218_LM 0x155A +#define E1000_DEV_ID_PCH_LPTLP_I218_V 0x1559 #define E1000_REVISION_4 4 diff --git 
a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index fb659dd8db03..f444eb0b76d8 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -2831,7 +2831,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) * set up some performance related parameters to encourage the * hardware to use the bus more efficiently in bursts, depends * on the tx_int_delay to be enabled, - * wthresh = 5 ==> burst write a cacheline (64 bytes) at a time + * wthresh = 1 ==> burst write is disabled to avoid Tx stalls * hthresh = 1 ==> prefetch when one or more available * pthresh = 0x1f ==> prefetch if internal cache 31 or less * BEWARE: this seems to work but should be considered first if @@ -6558,6 +6558,8 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = { { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_LM), board_pch_lpt }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_V), board_pch_lpt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_LM), board_pch_lpt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_V), board_pch_lpt }, { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */ }; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 5bd26763554c..30efc9f0f47a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -410,7 +410,7 @@ static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring) #define IXGBE_TX_CTXTDESC(R, i) \ (&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i])) -#define IXGBE_MAX_JUMBO_FRAME_SIZE 16128 +#define IXGBE_MAX_JUMBO_FRAME_SIZE 9728 /* Maximum Supported Size 9.5KB */ #ifdef IXGBE_FCOE /* Use 3K as the baby jumbo frame size for FCoE */ #define IXGBE_FCOE_JUMBO_FRAME_SIZE 3072 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index 18bf08c9d7a4..1077cb2b38db 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -1099,7 +1099,7 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw) if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & IXGBE_FDIRCTRL_INIT_DONE) break; - udelay(10); + usleep_range(1000, 2000); } if (i >= IXGBE_FDIR_INIT_DONE_POLL) { hw_dbg(hw, "Flow Director Signature poll time exceeded!\n"); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 90e41db3cb69..dbf37e4a45fd 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -70,6 +70,7 @@ static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) switch (hw->device_id) { case IXGBE_DEV_ID_X540T: + case IXGBE_DEV_ID_X540T1: return 0; case IXGBE_DEV_ID_82599_T3_LOM: return 0; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 4104ea25d818..56b20d17d0e4 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -2690,10 +2690,7 @@ static int ixgbe_get_ts_info(struct net_device *dev, (1 << HWTSTAMP_FILTER_NONE) | (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) | - (1 << HWTSTAMP_FILTER_SOME); + (1 << HWTSTAMP_FILTER_PTP_V2_EVENT); break; #endif /* CONFIG_IXGBE_PTP */ default: diff --git 
a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 868af6938219..fa3d552e1f4a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -114,6 +114,7 @@ static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = { {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 }, /* required last entry */ {0, } }; @@ -2322,6 +2323,12 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues, default: break; } + +#ifdef CONFIG_IXGBE_PTP + if (adapter->hw.mac.type == ixgbe_mac_X540) + mask |= IXGBE_EIMS_TIMESYNC; +#endif + if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) && !(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT)) mask |= IXGBE_EIMS_FLOW_DIR; @@ -2385,8 +2392,10 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data) } ixgbe_check_fan_failure(adapter, eicr); + #ifdef CONFIG_IXGBE_PTP - ixgbe_ptp_check_pps_event(adapter, eicr); + if (unlikely(eicr & IXGBE_EICR_TIMESYNC)) + ixgbe_ptp_check_pps_event(adapter, eicr); #endif /* re-enable the original interrupt state, no lsc, no queues */ @@ -2580,7 +2589,8 @@ static irqreturn_t ixgbe_intr(int irq, void *data) ixgbe_check_fan_failure(adapter, eicr); #ifdef CONFIG_IXGBE_PTP - ixgbe_ptp_check_pps_event(adapter, eicr); + if (unlikely(eicr & IXGBE_EICR_TIMESYNC)) + ixgbe_ptp_check_pps_event(adapter, eicr); #endif /* would disable interrupts here but EIAM disabled it */ @@ -7045,6 +7055,7 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, is_wol_supported = 1; break; case IXGBE_DEV_ID_X540T: + case IXGBE_DEV_ID_X540T1: /* check eeprom to see if enabled wol */ if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) || ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) && diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c index 39881cb17a4b..d9291316ee9f 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c @@ -106,6 +106,83 @@ static struct sock_filter ptp_filter[] = { }; /** + * ixgbe_ptp_setup_sdp + * @hw: the hardware private structure + * + * this function enables or disables the clock out feature on SDP0 for + * the X540 device. It will create a 1second periodic output that can + * be used as the PPS (via an interrupt). + * + * It calculates when the systime will be on an exact second, and then + * aligns the start of the PPS signal to that value. The shift is + * necessary because it can change based on the link speed. 
+ */ +static void ixgbe_ptp_setup_sdp(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + int shift = adapter->cc.shift; + u32 esdp, tsauxc, clktiml, clktimh, trgttiml, trgttimh, rem; + u64 ns = 0, clock_edge = 0; + + if ((adapter->flags2 & IXGBE_FLAG2_PTP_PPS_ENABLED) && + (hw->mac.type == ixgbe_mac_X540)) { + + /* disable the pin first */ + IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, 0x0); + IXGBE_WRITE_FLUSH(hw); + + esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + + /* + * enable the SDP0 pin as output, and connected to the + * native function for Timesync (ClockOut) + */ + esdp |= (IXGBE_ESDP_SDP0_DIR | + IXGBE_ESDP_SDP0_NATIVE); + + /* + * enable the Clock Out feature on SDP0, and allow + * interrupts to occur when the pin changes + */ + tsauxc = (IXGBE_TSAUXC_EN_CLK | + IXGBE_TSAUXC_SYNCLK | + IXGBE_TSAUXC_SDP0_INT); + + /* clock period (or pulse length) */ + clktiml = (u32)(NSECS_PER_SEC << shift); + clktimh = (u32)((NSECS_PER_SEC << shift) >> 32); + + /* + * Account for the cyclecounter wrap-around value by + * using the converted ns value of the current time to + * check for when the next aligned second would occur. + */ + clock_edge |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIML); + clock_edge |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) << 32; + ns = timecounter_cyc2time(&adapter->tc, clock_edge); + + div_u64_rem(ns, NSECS_PER_SEC, &rem); + clock_edge += ((NSECS_PER_SEC - (u64)rem) << shift); + + /* specify the initial clock start time */ + trgttiml = (u32)clock_edge; + trgttimh = (u32)(clock_edge >> 32); + + IXGBE_WRITE_REG(hw, IXGBE_CLKTIML, clktiml); + IXGBE_WRITE_REG(hw, IXGBE_CLKTIMH, clktimh); + IXGBE_WRITE_REG(hw, IXGBE_TRGTTIML0, trgttiml); + IXGBE_WRITE_REG(hw, IXGBE_TRGTTIMH0, trgttimh); + + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc); + } else { + IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, 0x0); + } + + IXGBE_WRITE_FLUSH(hw); +} + +/** * ixgbe_ptp_read - read raw cycle counter (to be used by time counter) * @cc: the cyclecounter structure * @@ -198,6 +275,9 @@ static int ixgbe_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) now); spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + ixgbe_ptp_setup_sdp(adapter); + return 0; } @@ -251,6 +331,7 @@ static int ixgbe_ptp_settime(struct ptp_clock_info *ptp, timecounter_init(&adapter->tc, &adapter->cc, ns); spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + ixgbe_ptp_setup_sdp(adapter); return 0; } @@ -281,8 +362,9 @@ static int ixgbe_ptp_enable(struct ptp_clock_info *ptp, if (on) adapter->flags2 |= IXGBE_FLAG2_PTP_PPS_ENABLED; else - adapter->flags2 &= - ~IXGBE_FLAG2_PTP_PPS_ENABLED; + adapter->flags2 &= ~IXGBE_FLAG2_PTP_PPS_ENABLED; + + ixgbe_ptp_setup_sdp(adapter); return 0; default: break; @@ -305,109 +387,15 @@ void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr) struct ixgbe_hw *hw = &adapter->hw; struct ptp_clock_event event; - event.type = PTP_CLOCK_PPS; - - /* Make sure ptp clock is valid, and PPS event enabled */ - if (!adapter->ptp_clock || - !(adapter->flags2 & IXGBE_FLAG2_PTP_PPS_ENABLED)) - return; - - if (unlikely(eicr & IXGBE_EICR_TIMESYNC)) { - switch (hw->mac.type) { - case ixgbe_mac_X540: - ptp_clock_event(adapter->ptp_clock, &event); - break; - default: - break; - } - } -} - -/** - * ixgbe_ptp_enable_sdp - * @hw: the hardware private structure - * @shift: the clock shift for calculating nanoseconds - * - * this function enables the clock out feature on the sdp0 for the - * X540 device. 
It will create a 1second periodic output that can be - * used as the PPS (via an interrupt). - * - * It calculates when the systime will be on an exact second, and then - * aligns the start of the PPS signal to that value. The shift is - * necessary because it can change based on the link speed. - */ -static void ixgbe_ptp_enable_sdp(struct ixgbe_hw *hw, int shift) -{ - u32 esdp, tsauxc, clktiml, clktimh, trgttiml, trgttimh; - u64 clock_edge = 0; - u32 rem; - switch (hw->mac.type) { case ixgbe_mac_X540: - esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); - - /* - * enable the SDP0 pin as output, and connected to the native - * function for Timesync (ClockOut) - */ - esdp |= (IXGBE_ESDP_SDP0_DIR | - IXGBE_ESDP_SDP0_NATIVE); - - /* - * enable the Clock Out feature on SDP0, and allow interrupts - * to occur when the pin changes - */ - tsauxc = (IXGBE_TSAUXC_EN_CLK | - IXGBE_TSAUXC_SYNCLK | - IXGBE_TSAUXC_SDP0_INT); - - /* clock period (or pulse length) */ - clktiml = (u32)(NSECS_PER_SEC << shift); - clktimh = (u32)((NSECS_PER_SEC << shift) >> 32); - - clock_edge |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIML); - clock_edge |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) << 32; - - /* - * account for the fact that we can't do u64 division - * with remainder, by converting the clock values into - * nanoseconds first - */ - clock_edge >>= shift; - div_u64_rem(clock_edge, NSECS_PER_SEC, &rem); - clock_edge += (NSECS_PER_SEC - rem); - clock_edge <<= shift; - - /* specify the initial clock start time */ - trgttiml = (u32)clock_edge; - trgttimh = (u32)(clock_edge >> 32); - - IXGBE_WRITE_REG(hw, IXGBE_CLKTIML, clktiml); - IXGBE_WRITE_REG(hw, IXGBE_CLKTIMH, clktimh); - IXGBE_WRITE_REG(hw, IXGBE_TRGTTIML0, trgttiml); - IXGBE_WRITE_REG(hw, IXGBE_TRGTTIMH0, trgttimh); - - IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); - IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc); - - IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_TIMESYNC); + ptp_clock_event(adapter->ptp_clock, &event); break; default: break; } } -/** - * ixgbe_ptp_disable_sdp - * @hw: the private hardware structure - * - * this function disables the auxiliary SDP clock out feature - */ -static void ixgbe_ptp_disable_sdp(struct ixgbe_hw *hw) -{ - IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_TIMESYNC); - IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, 0); -} /** * ixgbe_ptp_overflow_check - delayed work to detect SYSTIME overflow @@ -822,9 +810,6 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter) if (adapter->cycle_speed == cycle_speed && timinca) return; - /* disable the SDP clock out */ - ixgbe_ptp_disable_sdp(hw); - /** * Scale the NIC cycle counter by a large factor so that * relatively small corrections to the frequency can be added @@ -877,10 +862,6 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x00000000); IXGBE_WRITE_FLUSH(hw); - /* now that the shift has been calculated and the systime - * registers reset, (re-)enable the Clock out feature*/ - ixgbe_ptp_enable_sdp(hw, shift); - /* store the new cycle speed */ adapter->cycle_speed = cycle_speed; @@ -901,6 +882,12 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter) ktime_to_ns(ktime_get_real())); spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + /* + * Now that the shift has been calculated and the systime + * registers reset, (re-)enable the Clock out feature + */ + ixgbe_ptp_setup_sdp(adapter); } /** @@ -979,10 +966,11 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter) */ void ixgbe_ptp_stop(struct ixgbe_adapter *adapter) { - 
ixgbe_ptp_disable_sdp(&adapter->hw); - /* stop the overflow check task */ - adapter->flags2 &= ~IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED; + adapter->flags2 &= ~(IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED | + IXGBE_FLAG2_PTP_PPS_ENABLED); + + ixgbe_ptp_setup_sdp(adapter); if (adapter->ptp_clock) { ptp_clock_unregister(adapter->ptp_clock); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 400f86a31174..0722f3368092 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -65,6 +65,7 @@ #define IXGBE_DEV_ID_82599_LS 0x154F #define IXGBE_DEV_ID_X540T 0x1528 #define IXGBE_DEV_ID_82599_SFP_SF_QP 0x154A +#define IXGBE_DEV_ID_X540T1 0x1560 /* VF Device IDs */ #define IXGBE_DEV_ID_82599_VF 0x10ED diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index 383b4e1cd175..4a9c9c285685 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -175,7 +175,7 @@ struct ixgbevf_q_vector { #define IXGBEVF_TX_CTXTDESC(R, i) \ (&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i])) -#define IXGBE_MAX_JUMBO_FRAME_SIZE 16128 +#define IXGBE_MAX_JUMBO_FRAME_SIZE 9728 /* Maximum Supported Size 9.5KB */ #define OTHER_VECTOR 1 #define NON_Q_VECTORS (OTHER_VECTOR) diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 0ee9bd4819f4..de1ad506665d 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -1747,6 +1747,7 @@ err_tx_ring_allocation: **/ static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter) { + struct net_device *netdev = adapter->netdev; int err = 0; int vector, v_budget; @@ -1775,6 +1776,12 @@ static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter) ixgbevf_acquire_msix_vectors(adapter, v_budget); + err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues); + if (err) + goto out; + + err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues); + out: return err; } diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c index c911d883c27e..f8064df10cc4 100644 --- a/drivers/net/ethernet/jme.c +++ b/drivers/net/ethernet/jme.c @@ -27,6 +27,7 @@ #include <linux/module.h> #include <linux/kernel.h> #include <linux/pci.h> +#include <linux/pci-aspm.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> @@ -2973,6 +2974,9 @@ jme_init_one(struct pci_dev *pdev, /* * set up PCI device basics */ + pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | + PCIE_LINK_STATE_CLKPM); + rc = pci_enable_device(pdev); if (rc) { pr_err("Cannot enable PCI device\n"); diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 087b9e0669f1..84c13263c514 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -412,7 +412,6 @@ struct mv643xx_eth_private { u8 work_rx_refill; int skb_size; - struct sk_buff_head rx_recycle; /* * RX state. 
@@ -673,9 +672,7 @@ static int rxq_refill(struct rx_queue *rxq, int budget) struct rx_desc *rx_desc; int size; - skb = __skb_dequeue(&mp->rx_recycle); - if (skb == NULL) - skb = netdev_alloc_skb(mp->dev, mp->skb_size); + skb = netdev_alloc_skb(mp->dev, mp->skb_size); if (skb == NULL) { mp->oom = 1; @@ -989,14 +986,7 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force) desc->byte_cnt, DMA_TO_DEVICE); } - if (skb != NULL) { - if (skb_queue_len(&mp->rx_recycle) < - mp->rx_ring_size && - skb_recycle_check(skb, mp->skb_size)) - __skb_queue_head(&mp->rx_recycle, skb); - else - dev_kfree_skb(skb); - } + dev_kfree_skb(skb); } __netif_tx_unlock(nq); @@ -2349,8 +2339,6 @@ static int mv643xx_eth_open(struct net_device *dev) napi_enable(&mp->napi); - skb_queue_head_init(&mp->rx_recycle); - mp->int_mask = INT_EXT; for (i = 0; i < mp->rxq_count; i++) { @@ -2445,8 +2433,6 @@ static int mv643xx_eth_stop(struct net_device *dev) mib_counters_update(mp); del_timer_sync(&mp->mib_counters_timer); - skb_queue_purge(&mp->rx_recycle); - for (i = 0; i < mp->rxq_count; i++) rxq_deinit(mp->rxq + i); for (i = 0; i < mp->txq_count; i++) diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index 5a30bf823099..9b9c2ac5c4c2 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c @@ -3189,7 +3189,7 @@ static int skge_poll(struct napi_struct *napi, int to_do) if (work_done < to_do) { unsigned long flags; - napi_gro_flush(napi); + napi_gro_flush(napi, false); spin_lock_irqsave(&hw->hw_lock, flags); __napi_complete(napi); hw->intr_mask |= napimask[skge->port]; @@ -3945,8 +3945,10 @@ static int __devinit skge_probe(struct pci_dev *pdev, skge_board_name(hw), hw->chip_rev); dev = skge_devinit(hw, 0, using_dac); - if (!dev) + if (!dev) { + err = -ENOMEM; goto err_out_led_off; + } /* Some motherboards are broken and has zero in ROM. 
*/ if (!is_valid_ether_addr(dev->dev_addr)) @@ -4153,6 +4155,13 @@ static struct dmi_system_id skge_32bit_dma_boards[] = { DMI_MATCH(DMI_BOARD_NAME, "nForce"), }, }, + { + .ident = "ASUS P5NSLI", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), + DMI_MATCH(DMI_BOARD_NAME, "P5NSLI") + }, + }, {} }; diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index 2b0748dba8b8..78946feab4a2 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -4924,6 +4924,7 @@ static int __devinit sky2_probe(struct pci_dev *pdev, if (~reg == 0) { dev_err(&pdev->dev, "PCI configuration read error\n"); + err = -EIO; goto err_out; } @@ -4993,8 +4994,10 @@ static int __devinit sky2_probe(struct pci_dev *pdev, hw->st_size = hw->ports * roundup_pow_of_two(3*RX_MAX_PENDING + TX_MAX_PENDING); hw->st_le = pci_alloc_consistent(pdev, hw->st_size * sizeof(struct sky2_status_le), &hw->st_dma); - if (!hw->st_le) + if (!hw->st_le) { + err = -ENOMEM; goto err_out_reset; + } dev_info(&pdev->dev, "Yukon-2 %s chip revision %d\n", sky2_name(hw->chip_id, buf1, sizeof(buf1)), hw->chip_rev); diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index ba6506ff4abb..926c911c0ac4 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -3094,6 +3094,8 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave, if (validate_eth_header_mac(slave, rule_header, rlist)) return -EINVAL; break; + case MLX4_NET_TRANS_RULE_ID_IB: + break; case MLX4_NET_TRANS_RULE_ID_IPV4: case MLX4_NET_TRANS_RULE_ID_TCP: case MLX4_NET_TRANS_RULE_ID_UDP: diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c index 5b61d12f8b91..dbaaa99a0d43 100644 --- a/drivers/net/ethernet/natsemi/natsemi.c +++ b/drivers/net/ethernet/natsemi/natsemi.c @@ -947,8 +947,8 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev, i = register_netdev(dev); if (i) goto err_register_netdev; - - if (NATSEMI_CREATE_FILE(pdev, dspcfg_workaround)) + i = NATSEMI_CREATE_FILE(pdev, dspcfg_workaround); + if (i) goto err_create_file; if (netif_msg_drv(np)) { diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c index e01c0a07a93a..7dfe88398d7d 100644 --- a/drivers/net/ethernet/natsemi/xtsonic.c +++ b/drivers/net/ethernet/natsemi/xtsonic.c @@ -205,6 +205,7 @@ static int __init sonic_probe1(struct net_device *dev) if (lp->descriptors == NULL) { printk(KERN_ERR "%s: couldn't alloc DMA memory for " " descriptors.\n", dev_name(lp->device)); + err = -ENOMEM; goto out; } diff --git a/drivers/net/ethernet/octeon/octeon_mgmt.c b/drivers/net/ethernet/octeon/octeon_mgmt.c index a688a2ddcfd6..f97719c48516 100644 --- a/drivers/net/ethernet/octeon/octeon_mgmt.c +++ b/drivers/net/ethernet/octeon/octeon_mgmt.c @@ -3,13 +3,14 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. 
* - * Copyright (C) 2009 Cavium Networks + * Copyright (C) 2009-2012 Cavium, Inc */ #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/etherdevice.h> #include <linux/capability.h> +#include <linux/net_tstamp.h> #include <linux/interrupt.h> #include <linux/netdevice.h> #include <linux/spinlock.h> @@ -33,8 +34,7 @@ #define OCTEON_MGMT_NAPI_WEIGHT 16 -/* - * Ring sizes that are powers of two allow for more efficient modulo +/* Ring sizes that are powers of two allow for more efficient modulo * opertions. */ #define OCTEON_MGMT_RX_RING_SIZE 512 @@ -93,6 +93,7 @@ union mgmt_port_ring_entry { #define AGL_GMX_RX_ADR_CAM4 0x1a0 #define AGL_GMX_RX_ADR_CAM5 0x1a8 +#define AGL_GMX_TX_CLK 0x208 #define AGL_GMX_TX_STATS_CTL 0x268 #define AGL_GMX_TX_CTL 0x270 #define AGL_GMX_TX_STAT0 0x280 @@ -110,8 +111,10 @@ struct octeon_mgmt { struct net_device *netdev; u64 mix; u64 agl; + u64 agl_prt_ctl; int port; int irq; + bool has_rx_tstamp; u64 *tx_ring; dma_addr_t tx_ring_handle; unsigned int tx_next; @@ -131,6 +134,7 @@ struct octeon_mgmt { spinlock_t lock; unsigned int last_duplex; unsigned int last_link; + unsigned int last_speed; struct device *dev; struct napi_struct napi; struct tasklet_struct tx_clean_tasklet; @@ -140,6 +144,8 @@ struct octeon_mgmt { resource_size_t mix_size; resource_size_t agl_phys; resource_size_t agl_size; + resource_size_t agl_prt_ctl_phys; + resource_size_t agl_prt_ctl_size; }; static void octeon_mgmt_set_rx_irq(struct octeon_mgmt *p, int enable) @@ -166,22 +172,22 @@ static void octeon_mgmt_set_tx_irq(struct octeon_mgmt *p, int enable) spin_unlock_irqrestore(&p->lock, flags); } -static inline void octeon_mgmt_enable_rx_irq(struct octeon_mgmt *p) +static void octeon_mgmt_enable_rx_irq(struct octeon_mgmt *p) { octeon_mgmt_set_rx_irq(p, 1); } -static inline void octeon_mgmt_disable_rx_irq(struct octeon_mgmt *p) +static void octeon_mgmt_disable_rx_irq(struct octeon_mgmt *p) { octeon_mgmt_set_rx_irq(p, 0); } -static inline void octeon_mgmt_enable_tx_irq(struct octeon_mgmt *p) +static void octeon_mgmt_enable_tx_irq(struct octeon_mgmt *p) { octeon_mgmt_set_tx_irq(p, 1); } -static inline void octeon_mgmt_disable_tx_irq(struct octeon_mgmt *p) +static void octeon_mgmt_disable_tx_irq(struct octeon_mgmt *p) { octeon_mgmt_set_tx_irq(p, 0); } @@ -233,6 +239,28 @@ static void octeon_mgmt_rx_fill_ring(struct net_device *netdev) } } +static ktime_t ptp_to_ktime(u64 ptptime) +{ + ktime_t ktimebase; + u64 ptpbase; + unsigned long flags; + + local_irq_save(flags); + /* Fill the icache with the code */ + ktime_get_real(); + /* Flush all pending operations */ + mb(); + /* Read the time and PTP clock as close together as + * possible. 
It is important that this sequence take the same + * amount of time to reduce jitter + */ + ktimebase = ktime_get_real(); + ptpbase = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_HI); + local_irq_restore(flags); + + return ktime_sub_ns(ktimebase, ptpbase - ptptime); +} + static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p) { union cvmx_mixx_orcnt mix_orcnt; @@ -272,6 +300,20 @@ static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p) dma_unmap_single(p->dev, re.s.addr, re.s.len, DMA_TO_DEVICE); + + /* Read the hardware TX timestamp if one was recorded */ + if (unlikely(re.s.tstamp)) { + struct skb_shared_hwtstamps ts; + /* Read the timestamp */ + u64 ns = cvmx_read_csr(CVMX_MIXX_TSTAMP(p->port)); + /* Remove the timestamp from the FIFO */ + cvmx_write_csr(CVMX_MIXX_TSCTL(p->port), 0); + /* Tell the kernel about the timestamp */ + ts.syststamp = ptp_to_ktime(ns); + ts.hwtstamp = ns_to_ktime(ns); + skb_tstamp_tx(skb, &ts); + } + dev_kfree_skb_any(skb); cleaned++; @@ -372,14 +414,23 @@ static int octeon_mgmt_receive_one(struct octeon_mgmt *p) /* A good packet, send it up. */ skb_put(skb, re.s.len); good: + /* Process the RX timestamp if it was recorded */ + if (p->has_rx_tstamp) { + /* The first 8 bytes are the timestamp */ + u64 ns = *(u64 *)skb->data; + struct skb_shared_hwtstamps *ts; + ts = skb_hwtstamps(skb); + ts->hwtstamp = ns_to_ktime(ns); + ts->syststamp = ptp_to_ktime(ns); + __skb_pull(skb, 8); + } skb->protocol = eth_type_trans(skb, netdev); netdev->stats.rx_packets++; netdev->stats.rx_bytes += skb->len; netif_receive_skb(skb); rc = 0; } else if (re.s.code == RING_ENTRY_CODE_MORE) { - /* - * Packet split across skbs. This can happen if we + /* Packet split across skbs. This can happen if we * increase the MTU. Buffers that are already in the * rx ring can then end up being too small. As the rx * ring is refilled, buffers sized for the new MTU @@ -409,8 +460,7 @@ good: } else { /* Some other error, discard it. */ dev_kfree_skb_any(skb); - /* - * Error statistics are accumulated in + /* Error statistics are accumulated in * octeon_mgmt_update_rx_stats. */ } @@ -488,7 +538,7 @@ static void octeon_mgmt_reset_hw(struct octeon_mgmt *p) mix_ctl.s.reset = 1; cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64); cvmx_read_csr(p->mix + MIX_CTL); - cvmx_wait(64); + octeon_io_clk_delay(64); mix_bist.u64 = cvmx_read_csr(p->mix + MIX_BIST); if (mix_bist.u64) @@ -537,8 +587,7 @@ static void octeon_mgmt_set_rx_filtering(struct net_device *netdev) cam_mode = 0; available_cam_entries = 8; } else { - /* - * One CAM entry for the primary address, leaves seven + /* One CAM entry for the primary address, leaves seven * for the secondary addresses. */ available_cam_entries = 7 - netdev->uc.count; @@ -595,12 +644,10 @@ static void octeon_mgmt_set_rx_filtering(struct net_device *netdev) static int octeon_mgmt_set_mac_address(struct net_device *netdev, void *addr) { - struct sockaddr *sa = addr; + int r = eth_mac_addr(netdev, addr); - if (!is_valid_ether_addr(sa->sa_data)) - return -EADDRNOTAVAIL; - - memcpy(netdev->dev_addr, sa->sa_data, ETH_ALEN); + if (r) + return r; octeon_mgmt_set_rx_filtering(netdev); @@ -612,8 +659,7 @@ static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu) struct octeon_mgmt *p = netdev_priv(netdev); int size_without_fcs = new_mtu + OCTEON_MGMT_RX_HEADROOM; - /* - * Limit the MTU to make sure the ethernet packets are between + /* Limit the MTU to make sure the ethernet packets are between * 64 bytes and 16383 bytes. 
*/ if (size_without_fcs < 64 || size_without_fcs > 16383) { @@ -656,53 +702,258 @@ static irqreturn_t octeon_mgmt_interrupt(int cpl, void *dev_id) return IRQ_HANDLED; } +static int octeon_mgmt_ioctl_hwtstamp(struct net_device *netdev, + struct ifreq *rq, int cmd) +{ + struct octeon_mgmt *p = netdev_priv(netdev); + struct hwtstamp_config config; + union cvmx_mio_ptp_clock_cfg ptp; + union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl; + bool have_hw_timestamps = false; + + if (copy_from_user(&config, rq->ifr_data, sizeof(config))) + return -EFAULT; + + if (config.flags) /* reserved for future extensions */ + return -EINVAL; + + /* Check the status of hardware for tiemstamps */ + if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) { + /* Get the current state of the PTP clock */ + ptp.u64 = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_CFG); + if (!ptp.s.ext_clk_en) { + /* The clock has not been configured to use an + * external source. Program it to use the main clock + * reference. + */ + u64 clock_comp = (NSEC_PER_SEC << 32) / octeon_get_io_clock_rate(); + if (!ptp.s.ptp_en) + cvmx_write_csr(CVMX_MIO_PTP_CLOCK_COMP, clock_comp); + pr_info("PTP Clock: Using sclk reference at %lld Hz\n", + (NSEC_PER_SEC << 32) / clock_comp); + } else { + /* The clock is already programmed to use a GPIO */ + u64 clock_comp = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_COMP); + pr_info("PTP Clock: Using GPIO %d at %lld Hz\n", + ptp.s.ext_clk_in, + (NSEC_PER_SEC << 32) / clock_comp); + } + + /* Enable the clock if it wasn't done already */ + if (!ptp.s.ptp_en) { + ptp.s.ptp_en = 1; + cvmx_write_csr(CVMX_MIO_PTP_CLOCK_CFG, ptp.u64); + } + have_hw_timestamps = true; + } + + if (!have_hw_timestamps) + return -EINVAL; + + switch (config.tx_type) { + case HWTSTAMP_TX_OFF: + case HWTSTAMP_TX_ON: + break; + default: + return -ERANGE; + } + + switch (config.rx_filter) { + case HWTSTAMP_FILTER_NONE: + p->has_rx_tstamp = false; + rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL); + rxx_frm_ctl.s.ptp_mode = 0; + cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64); + break; + case HWTSTAMP_FILTER_ALL: + case HWTSTAMP_FILTER_SOME: + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + p->has_rx_tstamp = have_hw_timestamps; + config.rx_filter = HWTSTAMP_FILTER_ALL; + if (p->has_rx_tstamp) { + rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL); + rxx_frm_ctl.s.ptp_mode = 1; + cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64); + } + break; + default: + return -ERANGE; + } + + if (copy_to_user(rq->ifr_data, &config, sizeof(config))) + return -EFAULT; + + return 0; +} + static int octeon_mgmt_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) { struct octeon_mgmt *p = netdev_priv(netdev); - if (!netif_running(netdev)) + switch (cmd) { + case SIOCSHWTSTAMP: + return octeon_mgmt_ioctl_hwtstamp(netdev, rq, cmd); + default: + if (p->phydev) + return phy_mii_ioctl(p->phydev, rq, cmd); return -EINVAL; + } +} - if (!p->phydev) - return -EINVAL; +static void octeon_mgmt_disable_link(struct octeon_mgmt *p) +{ + union cvmx_agl_gmx_prtx_cfg prtx_cfg; - return phy_mii_ioctl(p->phydev, rq, cmd); + /* Disable GMX 
before we make any changes. */ + prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); + prtx_cfg.s.en = 0; + prtx_cfg.s.tx_en = 0; + prtx_cfg.s.rx_en = 0; + cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64); + + if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) { + int i; + for (i = 0; i < 10; i++) { + prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); + if (prtx_cfg.s.tx_idle == 1 || prtx_cfg.s.rx_idle == 1) + break; + mdelay(1); + i++; + } + } +} + +static void octeon_mgmt_enable_link(struct octeon_mgmt *p) +{ + union cvmx_agl_gmx_prtx_cfg prtx_cfg; + + /* Restore the GMX enable state only if link is set */ + prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); + prtx_cfg.s.tx_en = 1; + prtx_cfg.s.rx_en = 1; + prtx_cfg.s.en = 1; + cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64); +} + +static void octeon_mgmt_update_link(struct octeon_mgmt *p) +{ + union cvmx_agl_gmx_prtx_cfg prtx_cfg; + + prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); + + if (!p->phydev->link) + prtx_cfg.s.duplex = 1; + else + prtx_cfg.s.duplex = p->phydev->duplex; + + switch (p->phydev->speed) { + case 10: + prtx_cfg.s.speed = 0; + prtx_cfg.s.slottime = 0; + + if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) { + prtx_cfg.s.burst = 1; + prtx_cfg.s.speed_msb = 1; + } + break; + case 100: + prtx_cfg.s.speed = 0; + prtx_cfg.s.slottime = 0; + + if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) { + prtx_cfg.s.burst = 1; + prtx_cfg.s.speed_msb = 0; + } + break; + case 1000: + /* 1000 MBits is only supported on 6XXX chips */ + if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) { + prtx_cfg.s.speed = 1; + prtx_cfg.s.speed_msb = 0; + /* Only matters for half-duplex */ + prtx_cfg.s.slottime = 1; + prtx_cfg.s.burst = p->phydev->duplex; + } + break; + case 0: /* No link */ + default: + break; + } + + /* Write the new GMX setting with the port still disabled. */ + cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64); + + /* Read GMX CFG again to make sure the config is completed. */ + prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); + + if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) { + union cvmx_agl_gmx_txx_clk agl_clk; + union cvmx_agl_prtx_ctl prtx_ctl; + + prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl); + agl_clk.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_CLK); + /* MII (both speeds) and RGMII 1000 speed. 
*/ + agl_clk.s.clk_cnt = 1; + if (prtx_ctl.s.mode == 0) { /* RGMII mode */ + if (p->phydev->speed == 10) + agl_clk.s.clk_cnt = 50; + else if (p->phydev->speed == 100) + agl_clk.s.clk_cnt = 5; + } + cvmx_write_csr(p->agl + AGL_GMX_TX_CLK, agl_clk.u64); + } } static void octeon_mgmt_adjust_link(struct net_device *netdev) { struct octeon_mgmt *p = netdev_priv(netdev); - union cvmx_agl_gmx_prtx_cfg prtx_cfg; unsigned long flags; int link_changed = 0; + if (!p->phydev) + return; + spin_lock_irqsave(&p->lock, flags); - if (p->phydev->link) { - if (!p->last_link) - link_changed = 1; - if (p->last_duplex != p->phydev->duplex) { - p->last_duplex = p->phydev->duplex; - prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); - prtx_cfg.s.duplex = p->phydev->duplex; - cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64); - } - } else { - if (p->last_link) - link_changed = -1; + + + if (!p->phydev->link && p->last_link) + link_changed = -1; + + if (p->phydev->link + && (p->last_duplex != p->phydev->duplex + || p->last_link != p->phydev->link + || p->last_speed != p->phydev->speed)) { + octeon_mgmt_disable_link(p); + link_changed = 1; + octeon_mgmt_update_link(p); + octeon_mgmt_enable_link(p); } + p->last_link = p->phydev->link; + p->last_speed = p->phydev->speed; + p->last_duplex = p->phydev->duplex; + spin_unlock_irqrestore(&p->lock, flags); if (link_changed != 0) { if (link_changed > 0) { - netif_carrier_on(netdev); pr_info("%s: Link is up - %d/%s\n", netdev->name, p->phydev->speed, DUPLEX_FULL == p->phydev->duplex ? "Full" : "Half"); } else { - netif_carrier_off(netdev); pr_info("%s: Link is down\n", netdev->name); } } @@ -723,9 +974,7 @@ static int octeon_mgmt_init_phy(struct net_device *netdev) PHY_INTERFACE_MODE_MII); if (!p->phydev) - return -1; - - phy_start_aneg(p->phydev); + return -ENODEV; return 0; } @@ -733,12 +982,10 @@ static int octeon_mgmt_init_phy(struct net_device *netdev) static int octeon_mgmt_open(struct net_device *netdev) { struct octeon_mgmt *p = netdev_priv(netdev); - int port = p->port; union cvmx_mixx_ctl mix_ctl; union cvmx_agl_gmx_inf_mode agl_gmx_inf_mode; union cvmx_mixx_oring1 oring1; union cvmx_mixx_iring1 iring1; - union cvmx_agl_gmx_prtx_cfg prtx_cfg; union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl; union cvmx_mixx_irhwm mix_irhwm; union cvmx_mixx_orhwm mix_orhwm; @@ -785,9 +1032,30 @@ static int octeon_mgmt_open(struct net_device *netdev) } while (mix_ctl.s.reset); } - agl_gmx_inf_mode.u64 = 0; - agl_gmx_inf_mode.s.en = 1; - cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64); + if (OCTEON_IS_MODEL(OCTEON_CN5XXX)) { + agl_gmx_inf_mode.u64 = 0; + agl_gmx_inf_mode.s.en = 1; + cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64); + } + if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X) + || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) { + /* Force compensation values, as they are not + * determined properly by HW + */ + union cvmx_agl_gmx_drv_ctl drv_ctl; + + drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL); + if (p->port) { + drv_ctl.s.byp_en1 = 1; + drv_ctl.s.nctl1 = 6; + drv_ctl.s.pctl1 = 6; + } else { + drv_ctl.s.byp_en = 1; + drv_ctl.s.nctl = 6; + drv_ctl.s.pctl = 6; + } + cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64); + } oring1.u64 = 0; oring1.s.obase = p->tx_ring_handle >> 3; @@ -799,18 +1067,12 @@ static int octeon_mgmt_open(struct net_device *netdev) iring1.s.isize = OCTEON_MGMT_RX_RING_SIZE; cvmx_write_csr(p->mix + MIX_IRING1, iring1.u64); - /* Disable packet I/O. 
*/ - prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); - prtx_cfg.s.en = 0; - cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64); - memcpy(sa.sa_data, netdev->dev_addr, ETH_ALEN); octeon_mgmt_set_mac_address(netdev, &sa); octeon_mgmt_change_mtu(netdev, netdev->mtu); - /* - * Enable the port HW. Packets are not allowed until + /* Enable the port HW. Packets are not allowed until * cvmx_mgmt_port_enable() is called. */ mix_ctl.u64 = 0; @@ -819,27 +1081,70 @@ static int octeon_mgmt_open(struct net_device *netdev) mix_ctl.s.nbtarb = 0; /* Arbitration mode */ /* MII CB-request FIFO programmable high watermark */ mix_ctl.s.mrq_hwm = 1; +#ifdef __LITTLE_ENDIAN + mix_ctl.s.lendian = 1; +#endif cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64); - if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X) - || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) { - /* - * Force compensation values, as they are not - * determined properly by HW - */ - union cvmx_agl_gmx_drv_ctl drv_ctl; + /* Read the PHY to find the mode of the interface. */ + if (octeon_mgmt_init_phy(netdev)) { + dev_err(p->dev, "Cannot initialize PHY on MIX%d.\n", p->port); + goto err_noirq; + } - drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL); - if (port) { - drv_ctl.s.byp_en1 = 1; - drv_ctl.s.nctl1 = 6; - drv_ctl.s.pctl1 = 6; - } else { - drv_ctl.s.byp_en = 1; - drv_ctl.s.nctl = 6; - drv_ctl.s.pctl = 6; + /* Set the mode of the interface, RGMII/MII. */ + if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && p->phydev) { + union cvmx_agl_prtx_ctl agl_prtx_ctl; + int rgmii_mode = (p->phydev->supported & + (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)) != 0; + + agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl); + agl_prtx_ctl.s.mode = rgmii_mode ? 0 : 1; + cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64); + + /* MII clocks counts are based on the 125Mhz + * reference, which has an 8nS period. So our delays + * need to be multiplied by this factor. + */ +#define NS_PER_PHY_CLK 8 + + /* Take the DLL and clock tree out of reset */ + agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl); + agl_prtx_ctl.s.clkrst = 0; + if (rgmii_mode) { + agl_prtx_ctl.s.dllrst = 0; + agl_prtx_ctl.s.clktx_byp = 0; } - cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64); + cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64); + cvmx_read_csr(p->agl_prt_ctl); /* Force write out before wait */ + + /* Wait for the DLL to lock. External 125 MHz + * reference clock must be stable at this point. + */ + ndelay(256 * NS_PER_PHY_CLK); + + /* Enable the interface */ + agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl); + agl_prtx_ctl.s.enable = 1; + cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64); + + /* Read the value back to force the previous write */ + agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl); + + /* Enable the compensation controller */ + agl_prtx_ctl.s.comp = 1; + agl_prtx_ctl.s.drv_byp = 0; + cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64); + /* Force write out before wait. */ + cvmx_read_csr(p->agl_prt_ctl); + + /* For compensation state to lock. */ + ndelay(1040 * NS_PER_PHY_CLK); + + /* Some Ethernet switches cannot handle standard + * Interframe Gap, increase to 16 bytes. + */ + cvmx_write_csr(CVMX_AGL_GMX_TX_IFG, 0x88); } octeon_mgmt_rx_fill_ring(netdev); @@ -870,7 +1175,7 @@ static int octeon_mgmt_open(struct net_device *netdev) /* Interrupt when we have 1 or more packets to clean. 
*/ mix_orhwm.u64 = 0; - mix_orhwm.s.orhwm = 1; + mix_orhwm.s.orhwm = 0; cvmx_write_csr(p->mix + MIX_ORHWM, mix_orhwm.u64); /* Enable receive and transmit interrupts */ @@ -879,13 +1184,12 @@ static int octeon_mgmt_open(struct net_device *netdev) mix_intena.s.othena = 1; cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64); - /* Enable packet I/O. */ rxx_frm_ctl.u64 = 0; + rxx_frm_ctl.s.ptp_mode = p->has_rx_tstamp ? 1 : 0; rxx_frm_ctl.s.pre_align = 1; - /* - * When set, disables the length check for non-min sized pkts + /* When set, disables the length check for non-min sized pkts * with padding in the client data. */ rxx_frm_ctl.s.pad_len = 1; @@ -903,33 +1207,26 @@ static int octeon_mgmt_open(struct net_device *netdev) rxx_frm_ctl.s.ctl_drp = 1; /* Strip off the preamble */ rxx_frm_ctl.s.pre_strp = 1; - /* - * This port is configured to send PREAMBLE+SFD to begin every + /* This port is configured to send PREAMBLE+SFD to begin every * frame. GMX checks that the PREAMBLE is sent correctly. */ rxx_frm_ctl.s.pre_chk = 1; cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64); - /* Enable the AGL block */ - agl_gmx_inf_mode.u64 = 0; - agl_gmx_inf_mode.s.en = 1; - cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64); - - /* Configure the port duplex and enables */ - prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); - prtx_cfg.s.tx_en = 1; - prtx_cfg.s.rx_en = 1; - prtx_cfg.s.en = 1; - p->last_duplex = 1; - prtx_cfg.s.duplex = p->last_duplex; - cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64); + /* Configure the port duplex, speed and enables */ + octeon_mgmt_disable_link(p); + if (p->phydev) + octeon_mgmt_update_link(p); + octeon_mgmt_enable_link(p); p->last_link = 0; - netif_carrier_off(netdev); - - if (octeon_mgmt_init_phy(netdev)) { - dev_err(p->dev, "Cannot initialize PHY.\n"); - goto err_noirq; + p->last_speed = 0; + /* PHY is not present in simulator. The carrier is enabled + * while initializing the phy for simulator, leave it enabled. + */ + if (p->phydev) { + netif_carrier_off(netdev); + phy_start_aneg(p->phydev); } netif_wake_queue(netdev); @@ -959,6 +1256,7 @@ static int octeon_mgmt_stop(struct net_device *netdev) if (p->phydev) phy_disconnect(p->phydev); + p->phydev = NULL; netif_carrier_off(netdev); @@ -991,6 +1289,7 @@ static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev) int rv = NETDEV_TX_BUSY; re.d64 = 0; + re.s.tstamp = ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) != 0); re.s.len = skb->len; re.s.addr = dma_map_single(p->dev, skb->data, skb->len, @@ -1031,6 +1330,7 @@ static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev) /* Ring the bell. 
*/ cvmx_write_csr(p->mix + MIX_ORING2, 1); + netdev->trans_start = jiffies; rv = NETDEV_TX_OK; out: octeon_mgmt_update_tx_stats(netdev); @@ -1068,7 +1368,7 @@ static int octeon_mgmt_get_settings(struct net_device *netdev, if (p->phydev) return phy_ethtool_gset(p->phydev, cmd); - return -EINVAL; + return -EOPNOTSUPP; } static int octeon_mgmt_set_settings(struct net_device *netdev, @@ -1082,23 +1382,37 @@ static int octeon_mgmt_set_settings(struct net_device *netdev, if (p->phydev) return phy_ethtool_sset(p->phydev, cmd); - return -EINVAL; + return -EOPNOTSUPP; +} + +static int octeon_mgmt_nway_reset(struct net_device *dev) +{ + struct octeon_mgmt *p = netdev_priv(dev); + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (p->phydev) + return phy_start_aneg(p->phydev); + + return -EOPNOTSUPP; } static const struct ethtool_ops octeon_mgmt_ethtool_ops = { .get_drvinfo = octeon_mgmt_get_drvinfo, - .get_link = ethtool_op_get_link, .get_settings = octeon_mgmt_get_settings, - .set_settings = octeon_mgmt_set_settings + .set_settings = octeon_mgmt_set_settings, + .nway_reset = octeon_mgmt_nway_reset, + .get_link = ethtool_op_get_link, }; static const struct net_device_ops octeon_mgmt_ops = { .ndo_open = octeon_mgmt_open, .ndo_stop = octeon_mgmt_stop, .ndo_start_xmit = octeon_mgmt_xmit, - .ndo_set_rx_mode = octeon_mgmt_set_rx_filtering, + .ndo_set_rx_mode = octeon_mgmt_set_rx_filtering, .ndo_set_mac_address = octeon_mgmt_set_mac_address, - .ndo_do_ioctl = octeon_mgmt_ioctl, + .ndo_do_ioctl = octeon_mgmt_ioctl, .ndo_change_mtu = octeon_mgmt_change_mtu, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = octeon_mgmt_poll_controller, @@ -1113,6 +1427,7 @@ static int __devinit octeon_mgmt_probe(struct platform_device *pdev) const u8 *mac; struct resource *res_mix; struct resource *res_agl; + struct resource *res_agl_prt_ctl; int len; int result; @@ -1120,6 +1435,8 @@ static int __devinit octeon_mgmt_probe(struct platform_device *pdev) if (netdev == NULL) return -ENOMEM; + SET_NETDEV_DEV(netdev, &pdev->dev); + dev_set_drvdata(&pdev->dev, netdev); p = netdev_priv(netdev); netif_napi_add(netdev, &p->napi, octeon_mgmt_napi_poll, @@ -1127,6 +1444,7 @@ static int __devinit octeon_mgmt_probe(struct platform_device *pdev) p->netdev = netdev; p->dev = &pdev->dev; + p->has_rx_tstamp = false; data = of_get_property(pdev->dev.of_node, "cell-index", &len); if (data && len == sizeof(*data)) { @@ -1159,10 +1477,19 @@ static int __devinit octeon_mgmt_probe(struct platform_device *pdev) goto err; } + res_agl_prt_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 3); + if (res_agl_prt_ctl == NULL) { + dev_err(&pdev->dev, "no 'reg' resource\n"); + result = -ENXIO; + goto err; + } + p->mix_phys = res_mix->start; p->mix_size = resource_size(res_mix); p->agl_phys = res_agl->start; p->agl_size = resource_size(res_agl); + p->agl_prt_ctl_phys = res_agl_prt_ctl->start; + p->agl_prt_ctl_size = resource_size(res_agl_prt_ctl); if (!devm_request_mem_region(&pdev->dev, p->mix_phys, p->mix_size, @@ -1181,10 +1508,18 @@ static int __devinit octeon_mgmt_probe(struct platform_device *pdev) goto err; } + if (!devm_request_mem_region(&pdev->dev, p->agl_prt_ctl_phys, + p->agl_prt_ctl_size, res_agl_prt_ctl->name)) { + result = -ENXIO; + dev_err(&pdev->dev, "request_mem_region (%s) failed\n", + res_agl_prt_ctl->name); + goto err; + } p->mix = (u64)devm_ioremap(&pdev->dev, p->mix_phys, p->mix_size); p->agl = (u64)devm_ioremap(&pdev->dev, p->agl_phys, p->agl_size); - + p->agl_prt_ctl = (u64)devm_ioremap(&pdev->dev, p->agl_prt_ctl_phys, + 
p->agl_prt_ctl_size); spin_lock_init(&p->lock); skb_queue_head_init(&p->tx_list); @@ -1199,14 +1534,19 @@ static int __devinit octeon_mgmt_probe(struct platform_device *pdev) mac = of_get_mac_address(pdev->dev.of_node); - if (mac) - memcpy(netdev->dev_addr, mac, 6); + if (mac && is_valid_ether_addr(mac)) { + memcpy(netdev->dev_addr, mac, ETH_ALEN); + netdev->addr_assign_type &= ~NET_ADDR_RANDOM; + } else { + eth_hw_addr_random(netdev); + } p->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); pdev->dev.coherent_dma_mask = DMA_BIT_MASK(64); pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; + netif_carrier_off(netdev); result = register_netdev(netdev); if (result) goto err; diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig index bce01641ee6b..5296cc8d3cba 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig +++ b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig @@ -26,7 +26,10 @@ if PCH_GBE config PCH_PTP bool "PCH PTP clock support" default n - depends on PTP_1588_CLOCK_PCH + depends on EXPERIMENTAL + select PPS + select PTP_1588_CLOCK + select PTP_1588_CLOCK_PCH ---help--- Say Y here if you want to use Precision Time Protocol (PTP) in the driver. PTP is a method to precisely synchronize distributed clocks diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c index b528e52a8ee1..2a0c9dc48eb3 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c @@ -38,7 +38,7 @@ static inline void writeq(u64 val, void __iomem *addr) } #endif -static const struct crb_128M_2M_block_map +static struct crb_128M_2M_block_map crb_128M_2M_map[64] __cacheline_aligned_in_smp = { {{{0, 0, 0, 0} } }, /* 0: PCI */ {{{1, 0x0100000, 0x0102000, 0x120000}, /* 1: PCIE */ diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 473ce134ca63..24ad17ec7fcd 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -1601,7 +1601,8 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adapter->netdev = netdev; adapter->pdev = pdev; - if (qlcnic_alloc_adapter_resources(adapter)) + err = qlcnic_alloc_adapter_resources(adapter); + if (err) goto err_out_free_netdev; adapter->dev_rst_time = jiffies; diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c index 995d0cfc4c06..1c818254b7be 100644 --- a/drivers/net/ethernet/realtek/8139cp.c +++ b/drivers/net/ethernet/realtek/8139cp.c @@ -563,7 +563,7 @@ rx_next: if (cpr16(IntrStatus) & cp_rx_intr_mask) goto rx_status_loop; - napi_gro_flush(napi); + napi_gro_flush(napi, false); spin_lock_irqsave(&cp->lock, flags); __napi_complete(napi); cpw16_f(IntrMask, cp_intr_mask); diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c index 1d83565cc6af..3ed7add23c12 100644 --- a/drivers/net/ethernet/realtek/8139too.c +++ b/drivers/net/ethernet/realtek/8139too.c @@ -228,7 +228,7 @@ typedef enum { static const struct { const char *name; u32 hw_flags; -} board_info[] __devinitdata = { +} board_info[] __devinitconst = { { "RealTek RTL8139", RTL8139_CAPS }, { "RealTek RTL8129", RTL8129_CAPS }, }; diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index bad8f2eec9b4..c8bfea0524dd 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c 
@@ -2438,6 +2438,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev) rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1); if (!rtsu) { dev_err(&pdev->dev, "Not found TSU resource\n"); + ret = -ENODEV; goto out_release; } mdp->tsu_addr = ioremap(rtsu->start, diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 96bd980e828d..4f86d0cd516a 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -2019,14 +2019,14 @@ static void efx_set_rx_mode(struct net_device *net_dev) netdev_for_each_mc_addr(ha, net_dev) { crc = ether_crc_le(ETH_ALEN, ha->addr); bit = crc & (EFX_MCAST_HASH_ENTRIES - 1); - set_bit_le(bit, mc_hash->byte); + __set_bit_le(bit, mc_hash); } /* Broadcast packets go through the multicast hash filter. * ether_crc_le() of the broadcast address is 0xbe2612ff * so we always add bit 0xff to the mask. */ - set_bit_le(0xff, mc_hash->byte); + __set_bit_le(0xff, mc_hash); } if (efx->port_enabled) diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index c1a010cda89b..576a31091165 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -1101,18 +1101,6 @@ static inline struct efx_rx_buffer *efx_rx_buffer(struct efx_rx_queue *rx_queue, return &rx_queue->buffer[index]; } -/* Set bit in a little-endian bitfield */ -static inline void set_bit_le(unsigned nr, unsigned char *addr) -{ - addr[nr / 8] |= (1 << (nr % 8)); -} - -/* Clear bit in a little-endian bitfield */ -static inline void clear_bit_le(unsigned nr, unsigned char *addr) -{ - addr[nr / 8] &= ~(1 << (nr % 8)); -} - /** * EFX_MAX_FRAME_LEN - calculate maximum frame length diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c index cdff40b65729..aab7cacb2e34 100644 --- a/drivers/net/ethernet/sfc/nic.c +++ b/drivers/net/ethernet/sfc/nic.c @@ -472,9 +472,9 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue) efx_reado(efx, ®, FR_AA_TX_CHKSM_CFG); if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD) - clear_bit_le(tx_queue->queue, (void *)®); + __clear_bit_le(tx_queue->queue, ®); else - set_bit_le(tx_queue->queue, (void *)®); + __set_bit_le(tx_queue->queue, ®); efx_writeo(efx, ®, FR_AA_TX_CHKSM_CFG); } diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c index 5b3dd028ce85..0767043f44a4 100644 --- a/drivers/net/ethernet/sfc/ptp.c +++ b/drivers/net/ethernet/sfc/ptp.c @@ -640,8 +640,7 @@ static void efx_ptp_drop_time_expired_events(struct efx_nic *efx) evt = list_entry(cursor, struct efx_ptp_event_rx, link); if (time_after(jiffies, evt->expiry)) { - list_del(&evt->link); - list_add(&evt->link, &ptp->evt_free_list); + list_move(&evt->link, &ptp->evt_free_list); netif_warn(efx, hw, efx->net_dev, "PTP rx event dropped\n"); } @@ -684,8 +683,7 @@ static enum ptp_packet_state efx_ptp_match_rx(struct efx_nic *efx, match->state = PTP_PACKET_STATE_MATCHED; rc = PTP_PACKET_STATE_MATCHED; - list_del(&evt->link); - list_add(&evt->link, &ptp->evt_free_list); + list_move(&evt->link, &ptp->evt_free_list); break; } } @@ -820,8 +818,7 @@ static int efx_ptp_stop(struct efx_nic *efx) /* Drop any pending receive events */ spin_lock_bh(&efx->ptp_data->evt_lock); list_for_each_safe(cursor, next, &efx->ptp_data->evt_list) { - list_del(cursor); - list_add(cursor, &efx->ptp_data->evt_free_list); + list_move(cursor, &efx->ptp_data->evt_free_list); } spin_unlock_bh(&efx->ptp_data->evt_lock); diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c index 
4613591b43e7..d8166012b7d4 100644 --- a/drivers/net/ethernet/sis/sis190.c +++ b/drivers/net/ethernet/sis/sis190.c @@ -1618,7 +1618,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev, static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev, struct net_device *dev) { - static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 }; + static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 }; struct sis190_private *tp = netdev_priv(dev); struct pci_dev *isa_bridge; u8 reg, tmp8; diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c index 203d9c6ec23a..fb9f6b38511f 100644 --- a/drivers/net/ethernet/sis/sis900.c +++ b/drivers/net/ethernet/sis/sis900.c @@ -478,8 +478,10 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev, /* IO region. */ ioaddr = pci_iomap(pci_dev, 0, 0); - if (!ioaddr) + if (!ioaddr) { + ret = -ENOMEM; goto err_out_cleardev; + } sis_priv = netdev_priv(net_dev); sis_priv->ioaddr = ioaddr; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index e872e1da3137..7d51a65ab099 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -50,7 +50,6 @@ struct stmmac_priv { unsigned int dirty_rx; struct sk_buff **rx_skbuff; dma_addr_t *rx_skbuff_dma; - struct sk_buff_head rx_recycle; struct net_device *dev; dma_addr_t dma_rx_phy; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 3be88331d17a..c6cdbc4eb05e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -747,18 +747,7 @@ static void stmmac_tx(struct stmmac_priv *priv) priv->hw->ring->clean_desc3(p); if (likely(skb != NULL)) { - /* - * If there's room in the queue (limit it to size) - * we add this skb back into the pool, - * if it's the right size. 
- */ - if ((skb_queue_len(&priv->rx_recycle) < - priv->dma_rx_size) && - skb_recycle_check(skb, priv->dma_buf_sz)) - __skb_queue_head(&priv->rx_recycle, skb); - else - dev_kfree_skb(skb); - + dev_kfree_skb(skb); priv->tx_skbuff[entry] = NULL; } @@ -1169,7 +1158,6 @@ static int stmmac_open(struct net_device *dev) priv->eee_enabled = stmmac_eee_init(priv); napi_enable(&priv->napi); - skb_queue_head_init(&priv->rx_recycle); netif_start_queue(dev); return 0; @@ -1222,7 +1210,6 @@ static int stmmac_release(struct net_device *dev) kfree(priv->tm); #endif napi_disable(&priv->napi); - skb_queue_purge(&priv->rx_recycle); /* Free the IRQ lines */ free_irq(dev->irq, dev); @@ -1388,10 +1375,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv) if (likely(priv->rx_skbuff[entry] == NULL)) { struct sk_buff *skb; - skb = __skb_dequeue(&priv->rx_recycle); - if (skb == NULL) - skb = netdev_alloc_skb_ip_align(priv->dev, - bfsize); + skb = netdev_alloc_skb_ip_align(priv->dev, bfsize); if (unlikely(skb == NULL)) break; diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index 8419bf385e08..275b430aeb75 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -9788,6 +9788,7 @@ static int __devinit niu_pci_init_one(struct pci_dev *pdev, if (!pci_is_pcie(pdev)) { dev_err(&pdev->dev, "Cannot find PCI Express capability, aborting\n"); + err = -ENODEV; goto err_out_free_res; } diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c index 9ae12d0c9632..6c8695ec7cb9 100644 --- a/drivers/net/ethernet/sun/sungem.c +++ b/drivers/net/ethernet/sun/sungem.c @@ -2963,7 +2963,8 @@ static int __devinit gem_init_one(struct pci_dev *pdev, goto err_out_iounmap; } - if (gem_get_device_address(gp)) + err = gem_get_device_address(gp); + if (err) goto err_out_free_consistent; dev->netdev_ops = &gem_netdev_ops; diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c index d15c888e9df8..49956730cd8d 100644 --- a/drivers/net/ethernet/ti/davinci_cpdma.c +++ b/drivers/net/ethernet/ti/davinci_cpdma.c @@ -863,6 +863,7 @@ int cpdma_chan_stop(struct cpdma_chan *chan) next_dma = desc_read(desc, hw_next); chan->head = desc_from_phys(pool, next_dma); + chan->count--; chan->stats.teardown_dequeue++; /* issue callback without locks held */ diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c index 64783a0d545a..1450e33fc250 100644 --- a/drivers/net/hamradio/6pack.c +++ b/drivers/net/hamradio/6pack.c @@ -811,9 +811,9 @@ static struct tty_ldisc_ops sp_ldisc = { /* Initialize 6pack control device -- register 6pack line discipline */ -static const char msg_banner[] __initdata = KERN_INFO \ +static const char msg_banner[] __initconst = KERN_INFO \ "AX.25: 6pack driver, " SIXPACK_VERSION "\n"; -static const char msg_regfail[] __initdata = KERN_ERR \ +static const char msg_regfail[] __initconst = KERN_ERR \ "6pack: can't register line discipline (err = %d)\n"; static int __init sixpack_init_driver(void) @@ -829,7 +829,7 @@ static int __init sixpack_init_driver(void) return status; } -static const char msg_unregfail[] __exitdata = KERN_ERR \ +static const char msg_unregfail[] = KERN_ERR \ "6pack: can't unregister line discipline (err = %d)\n"; static void __exit sixpack_exit_driver(void) diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c index 76d54774ba82..c2e5497397d5 100644 --- a/drivers/net/hamradio/bpqether.c +++ b/drivers/net/hamradio/bpqether.c @@ -87,7 +87,7 @@ #include 
<linux/bpqether.h> -static const char banner[] __initdata = KERN_INFO \ +static const char banner[] __initconst = KERN_INFO \ "AX.25: bpqether driver version 004\n"; static char bcast_addr[6]={0xFF,0xFF,0xFF,0xFF,0xFF,0xFF}; diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c index 2c0894a92abd..8e01c457015b 100644 --- a/drivers/net/hamradio/mkiss.c +++ b/drivers/net/hamradio/mkiss.c @@ -997,9 +997,9 @@ static struct tty_ldisc_ops ax_ldisc = { .write_wakeup = mkiss_write_wakeup }; -static const char banner[] __initdata = KERN_INFO \ +static const char banner[] __initconst = KERN_INFO \ "mkiss: AX.25 Multikiss, Hans Albas PE1AYX\n"; -static const char msg_regfail[] __initdata = KERN_ERR \ +static const char msg_regfail[] __initconst = KERN_ERR \ "mkiss: can't register line discipline (err = %d)\n"; static int __init mkiss_init_driver(void) @@ -1015,7 +1015,7 @@ static int __init mkiss_init_driver(void) return status; } -static const char msg_unregfail[] __exitdata = KERN_ERR \ +static const char msg_unregfail[] = KERN_ERR \ "mkiss: can't unregister line discipline (err = %d)\n"; static void __exit mkiss_exit_driver(void) diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c index efc6c97163a7..1b4a47bd32b7 100644 --- a/drivers/net/hamradio/scc.c +++ b/drivers/net/hamradio/scc.c @@ -182,7 +182,7 @@ #include "z8530.h" -static const char banner[] __initdata = KERN_INFO \ +static const char banner[] __initconst = KERN_INFO \ "AX.25: Z8530 SCC driver version "VERSION".dl1bke\n"; static void t_dwait(unsigned long); diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c index 5a6412ecce73..c6645f1017af 100644 --- a/drivers/net/hamradio/yam.c +++ b/drivers/net/hamradio/yam.c @@ -76,7 +76,7 @@ /* --------------------------------------------------------------------- */ static const char yam_drvname[] = "yam"; -static const char yam_drvinfo[] __initdata = KERN_INFO \ +static const char yam_drvinfo[] __initconst = KERN_INFO \ "YAM driver version 0.8 by F1OAT/F6FBB\n"; /* --------------------------------------------------------------------- */ diff --git a/drivers/net/irda/irtty-sir.c b/drivers/net/irda/irtty-sir.c index 30087ca23a0f..6e4d4b62c9a8 100644 --- a/drivers/net/irda/irtty-sir.c +++ b/drivers/net/irda/irtty-sir.c @@ -459,8 +459,10 @@ static int irtty_open(struct tty_struct *tty) /* allocate private device info block */ priv = kzalloc(sizeof(*priv), GFP_KERNEL); - if (!priv) + if (!priv) { + ret = -ENOMEM; goto out_put; + } priv->magic = IRTTY_MAGIC; priv->tty = tty; diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c index 1a00b5990cb8..f07c340990da 100644 --- a/drivers/net/irda/mcs7780.c +++ b/drivers/net/irda/mcs7780.c @@ -920,8 +920,10 @@ static int mcs_probe(struct usb_interface *intf, ndev->netdev_ops = &mcs_netdev_ops; - if (!intf->cur_altsetting) + if (!intf->cur_altsetting) { + ret = -ENOMEM; goto error2; + } ret = mcs_find_endpoints(mcs, intf->cur_altsetting->endpoint, intf->cur_altsetting->desc.bNumEndpoints); diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c index 002a442bf73f..858de05bdb7d 100644 --- a/drivers/net/irda/pxaficp_ir.c +++ b/drivers/net/irda/pxaficp_ir.c @@ -846,8 +846,10 @@ static int pxa_irda_probe(struct platform_device *pdev) goto err_mem_2; dev = alloc_irdadev(sizeof(struct pxa_irda)); - if (!dev) + if (!dev) { + err = -ENOMEM; goto err_mem_3; + } SET_NETDEV_DEV(dev, &pdev->dev); si = netdev_priv(dev); diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c 
index e25067552b20..42fde9ed23e1 100644 --- a/drivers/net/irda/sa1100_ir.c +++ b/drivers/net/irda/sa1100_ir.c @@ -940,8 +940,10 @@ static int sa1100_irda_probe(struct platform_device *pdev) goto err_mem_3; dev = alloc_irdadev(sizeof(struct sa1100_irda)); - if (!dev) + if (!dev) { + err = -ENOMEM; goto err_mem_4; + } SET_NETDEV_DEV(dev, &pdev->dev); diff --git a/drivers/net/irda/sh_irda.c b/drivers/net/irda/sh_irda.c index eb315b8d07a3..4b746d9bd8e7 100644 --- a/drivers/net/irda/sh_irda.c +++ b/drivers/net/irda/sh_irda.c @@ -808,8 +808,8 @@ static int __devinit sh_irda_probe(struct platform_device *pdev) goto err_mem_4; platform_set_drvdata(pdev, ndev); - - if (request_irq(irq, sh_irda_irq, IRQF_DISABLED, "sh_irda", self)) { + err = request_irq(irq, sh_irda_irq, IRQF_DISABLED, "sh_irda", self); + if (err) { dev_warn(&pdev->dev, "Unable to attach sh_irda interrupt\n"); goto err_mem_4; } diff --git a/drivers/net/irda/sh_sir.c b/drivers/net/irda/sh_sir.c index 795109425568..624ac1939e85 100644 --- a/drivers/net/irda/sh_sir.c +++ b/drivers/net/irda/sh_sir.c @@ -741,6 +741,7 @@ static int __devinit sh_sir_probe(struct platform_device *pdev) self->clk = clk_get(&pdev->dev, clk_name); if (IS_ERR(self->clk)) { dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name); + err = -ENODEV; goto err_mem_3; } @@ -760,8 +761,8 @@ static int __devinit sh_sir_probe(struct platform_device *pdev) goto err_mem_4; platform_set_drvdata(pdev, ndev); - - if (request_irq(irq, sh_sir_irq, IRQF_DISABLED, "sh_sir", self)) { + err = request_irq(irq, sh_sir_irq, IRQF_DISABLED, "sh_sir", self); + if (err) { dev_warn(&pdev->dev, "Unable to attach sh_sir interrupt\n"); goto err_mem_4; } diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 170eb411ab5d..c1ef3000ea60 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -26,6 +26,7 @@ #include <linux/delay.h> #include <linux/device.h> #include <linux/of_device.h> +#include <linux/of_mdio.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c index 91d25888a1b9..d8b9b1e8ee02 100644 --- a/drivers/net/rionet.c +++ b/drivers/net/rionet.c @@ -26,7 +26,7 @@ #include <linux/ethtool.h> #define DRV_NAME "rionet" -#define DRV_VERSION "0.2" +#define DRV_VERSION "0.3" #define DRV_AUTHOR "Matt Porter <mporter@kernel.crashing.org>" #define DRV_DESC "Ethernet over RapidIO" @@ -47,8 +47,7 @@ MODULE_LICENSE("GPL"); #define RIONET_TX_RING_SIZE CONFIG_RIONET_TX_SIZE #define RIONET_RX_RING_SIZE CONFIG_RIONET_RX_SIZE - -static LIST_HEAD(rionet_peers); +#define RIONET_MAX_NETS 8 struct rionet_private { struct rio_mport *mport; @@ -69,16 +68,14 @@ struct rionet_peer { struct resource *res; }; -static int rionet_check = 0; -static int rionet_capable = 1; +struct rionet_net { + struct net_device *ndev; + struct list_head peers; + struct rio_dev **active; + int nact; /* number of active peers */ +}; -/* - * This is a fast lookup table for translating TX - * Ethernet packets into a destination RIO device. It - * could be made into a hash table to save memory depending - * on system trade-offs. 
- */ -static struct rio_dev **rionet_active; +static struct rionet_net nets[RIONET_MAX_NETS]; #define is_rionet_capable(src_ops, dst_ops) \ ((src_ops & RIO_SRC_OPS_DATA_MSG) && \ @@ -175,6 +172,7 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev) struct ethhdr *eth = (struct ethhdr *)skb->data; u16 destid; unsigned long flags; + int add_num = 1; local_irq_save(flags); if (!spin_trylock(&rnet->tx_lock)) { @@ -182,7 +180,10 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev) return NETDEV_TX_LOCKED; } - if ((rnet->tx_cnt + 1) > RIONET_TX_RING_SIZE) { + if (is_multicast_ether_addr(eth->h_dest)) + add_num = nets[rnet->mport->id].nact; + + if ((rnet->tx_cnt + add_num) > RIONET_TX_RING_SIZE) { netif_stop_queue(ndev); spin_unlock_irqrestore(&rnet->tx_lock, flags); printk(KERN_ERR "%s: BUG! Tx Ring full when queue awake!\n", @@ -191,15 +192,22 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev) } if (is_multicast_ether_addr(eth->h_dest)) { + int count = 0; + for (i = 0; i < RIO_MAX_ROUTE_ENTRIES(rnet->mport->sys_size); i++) - if (rionet_active[i]) + if (nets[rnet->mport->id].active[i]) { rionet_queue_tx_msg(skb, ndev, - rionet_active[i]); + nets[rnet->mport->id].active[i]); + if (count) + atomic_inc(&skb->users); + count++; + } } else if (RIONET_MAC_MATCH(eth->h_dest)) { destid = RIONET_GET_DESTID(eth->h_dest); - if (rionet_active[destid]) - rionet_queue_tx_msg(skb, ndev, rionet_active[destid]); + if (nets[rnet->mport->id].active[destid]) + rionet_queue_tx_msg(skb, ndev, + nets[rnet->mport->id].active[destid]); } spin_unlock_irqrestore(&rnet->tx_lock, flags); @@ -218,16 +226,21 @@ static void rionet_dbell_event(struct rio_mport *mport, void *dev_id, u16 sid, u printk(KERN_INFO "%s: doorbell sid %4.4x tid %4.4x info %4.4x", DRV_NAME, sid, tid, info); if (info == RIONET_DOORBELL_JOIN) { - if (!rionet_active[sid]) { - list_for_each_entry(peer, &rionet_peers, node) { - if (peer->rdev->destid == sid) - rionet_active[sid] = peer->rdev; + if (!nets[rnet->mport->id].active[sid]) { + list_for_each_entry(peer, + &nets[rnet->mport->id].peers, node) { + if (peer->rdev->destid == sid) { + nets[rnet->mport->id].active[sid] = + peer->rdev; + nets[rnet->mport->id].nact++; + } } rio_mport_send_doorbell(mport, sid, RIONET_DOORBELL_JOIN); } } else if (info == RIONET_DOORBELL_LEAVE) { - rionet_active[sid] = NULL; + nets[rnet->mport->id].active[sid] = NULL; + nets[rnet->mport->id].nact--; } else { if (netif_msg_intr(rnet)) printk(KERN_WARNING "%s: unhandled doorbell\n", @@ -321,7 +334,8 @@ static int rionet_open(struct net_device *ndev) netif_carrier_on(ndev); netif_start_queue(ndev); - list_for_each_entry_safe(peer, tmp, &rionet_peers, node) { + list_for_each_entry_safe(peer, tmp, + &nets[rnet->mport->id].peers, node) { if (!(peer->res = rio_request_outb_dbell(peer->rdev, RIONET_DOORBELL_JOIN, RIONET_DOORBELL_LEAVE))) @@ -346,7 +360,7 @@ static int rionet_close(struct net_device *ndev) int i; if (netif_msg_ifup(rnet)) - printk(KERN_INFO "%s: close\n", DRV_NAME); + printk(KERN_INFO "%s: close %s\n", DRV_NAME, ndev->name); netif_stop_queue(ndev); netif_carrier_off(ndev); @@ -354,10 +368,11 @@ static int rionet_close(struct net_device *ndev) for (i = 0; i < RIONET_RX_RING_SIZE; i++) kfree_skb(rnet->rx_skb[i]); - list_for_each_entry_safe(peer, tmp, &rionet_peers, node) { - if (rionet_active[peer->rdev->destid]) { + list_for_each_entry_safe(peer, tmp, + &nets[rnet->mport->id].peers, node) { + if 
(nets[rnet->mport->id].active[peer->rdev->destid]) { rio_send_doorbell(peer->rdev, RIONET_DOORBELL_LEAVE); - rionet_active[peer->rdev->destid] = NULL; + nets[rnet->mport->id].active[peer->rdev->destid] = NULL; } rio_release_outb_dbell(peer->rdev, peer->res); } @@ -373,17 +388,21 @@ static int rionet_close(struct net_device *ndev) static void rionet_remove(struct rio_dev *rdev) { struct net_device *ndev = rio_get_drvdata(rdev); + unsigned char netid = rdev->net->hport->id; struct rionet_peer *peer, *tmp; - free_pages((unsigned long)rionet_active, get_order(sizeof(void *) * - RIO_MAX_ROUTE_ENTRIES(rdev->net->hport->sys_size))); unregister_netdev(ndev); - free_netdev(ndev); - list_for_each_entry_safe(peer, tmp, &rionet_peers, node) { + free_pages((unsigned long)nets[netid].active, get_order(sizeof(void *) * + RIO_MAX_ROUTE_ENTRIES(rdev->net->hport->sys_size))); + nets[netid].active = NULL; + + list_for_each_entry_safe(peer, tmp, &nets[netid].peers, node) { list_del(&peer->node); kfree(peer); } + + free_netdev(ndev); } static void rionet_get_drvinfo(struct net_device *ndev, @@ -435,13 +454,13 @@ static int rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev) const size_t rionet_active_bytes = sizeof(void *) * RIO_MAX_ROUTE_ENTRIES(mport->sys_size); - rionet_active = (struct rio_dev **)__get_free_pages(GFP_KERNEL, - get_order(rionet_active_bytes)); - if (!rionet_active) { + nets[mport->id].active = (struct rio_dev **)__get_free_pages(GFP_KERNEL, + get_order(rionet_active_bytes)); + if (!nets[mport->id].active) { rc = -ENOMEM; goto out; } - memset((void *)rionet_active, 0, rionet_active_bytes); + memset((void *)nets[mport->id].active, 0, rionet_active_bytes); /* Set up private area */ rnet = netdev_priv(ndev); @@ -470,60 +489,62 @@ static int rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev) if (rc != 0) goto out; - printk("%s: %s %s Version %s, MAC %pM\n", + printk(KERN_INFO "%s: %s %s Version %s, MAC %pM, %s\n", ndev->name, DRV_NAME, DRV_DESC, DRV_VERSION, - ndev->dev_addr); + ndev->dev_addr, + mport->name); out: return rc; } -/* - * XXX Make multi-net safe - */ +static unsigned long net_table[RIONET_MAX_NETS/sizeof(unsigned long) + 1]; + static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id) { int rc = -ENODEV; u32 lsrc_ops, ldst_ops; struct rionet_peer *peer; struct net_device *ndev = NULL; + unsigned char netid = rdev->net->hport->id; + int oldnet; - /* If local device is not rionet capable, give up quickly */ - if (!rionet_capable) - goto out; + if (netid >= RIONET_MAX_NETS) + return rc; - /* Allocate our net_device structure */ - ndev = alloc_etherdev(sizeof(struct rionet_private)); - if (ndev == NULL) { - rc = -ENOMEM; - goto out; - } + oldnet = test_and_set_bit(netid, net_table); /* * First time through, make sure local device is rionet - * capable, setup netdev, and set flags so this is skipped - * on later probes + * capable, setup netdev (will be skipped on later probes) */ - if (!rionet_check) { + if (!oldnet) { rio_local_read_config_32(rdev->net->hport, RIO_SRC_OPS_CAR, &lsrc_ops); rio_local_read_config_32(rdev->net->hport, RIO_DST_OPS_CAR, &ldst_ops); if (!is_rionet_capable(lsrc_ops, ldst_ops)) { printk(KERN_ERR - "%s: local device is not network capable\n", - DRV_NAME); - rionet_check = 1; - rionet_capable = 0; + "%s: local device %s is not network capable\n", + DRV_NAME, rdev->net->hport->name); goto out; } + /* Allocate our net_device structure */ + ndev = alloc_etherdev(sizeof(struct rionet_private)); + if (ndev == 
NULL) { + rc = -ENOMEM; + goto out; + } + nets[netid].ndev = ndev; rc = rionet_setup_netdev(rdev->net->hport, ndev); - rionet_check = 1; - } + INIT_LIST_HEAD(&nets[netid].peers); + nets[netid].nact = 0; + } else if (nets[netid].ndev == NULL) + goto out; /* * If the remote device has mailbox/doorbell capabilities, @@ -535,10 +556,10 @@ static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id) goto out; } peer->rdev = rdev; - list_add_tail(&peer->node, &rionet_peers); + list_add_tail(&peer->node, &nets[netid].peers); } - rio_set_drvdata(rdev, ndev); + rio_set_drvdata(rdev, nets[netid].ndev); out: return rc; diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 5c7547c4f802..d44cca327588 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -1315,6 +1315,7 @@ static const struct team_option team_options[] = { static struct lock_class_key team_netdev_xmit_lock_key; static struct lock_class_key team_netdev_addr_lock_key; +static struct lock_class_key team_tx_busylock_key; static void team_set_lockdep_class_one(struct net_device *dev, struct netdev_queue *txq, @@ -1327,6 +1328,7 @@ static void team_set_lockdep_class(struct net_device *dev) { lockdep_set_class(&dev->addr_list_lock, &team_netdev_addr_lock_key); netdev_for_each_tx_queue(dev, team_set_lockdep_class_one, NULL); + dev->qdisc_tx_busylock = &team_tx_busylock_key; } static int team_init(struct net_device *dev) diff --git a/drivers/net/usb/cdc_eem.c b/drivers/net/usb/cdc_eem.c index 434d5af8e6fb..c81e278629ff 100644 --- a/drivers/net/usb/cdc_eem.c +++ b/drivers/net/usb/cdc_eem.c @@ -244,8 +244,12 @@ static int eem_rx_fixup(struct usbnet *dev, struct sk_buff *skb) * - suspend: peripheral ready to suspend * - response: suggest N millisec polling * - response complete: suggest N sec polling + * + * Suspend is reported and maybe heeded. 
*/ case 2: /* Suspend hint */ + usbnet_device_suggests_idle(dev); + continue; case 3: /* Response hint */ case 4: /* Response complete hint */ continue; diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c index c75e11e1b385..afb117c16d2d 100644 --- a/drivers/net/usb/kaweth.c +++ b/drivers/net/usb/kaweth.c @@ -424,7 +424,7 @@ static int kaweth_download_firmware(struct kaweth_device *kaweth, netdev_dbg(kaweth->net, "Downloading firmware at %p to kaweth device at %p\n", - fw->data, kaweth); + kaweth->firmware_buf, kaweth); netdev_dbg(kaweth->net, "Firmware length: %d\n", data_len); return kaweth_control(kaweth, diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c index 03c2d8d653df..cc7e72010ac3 100644 --- a/drivers/net/usb/mcs7830.c +++ b/drivers/net/usb/mcs7830.c @@ -117,6 +117,7 @@ enum { struct mcs7830_data { u8 multi_filter[8]; u8 config; + u8 link_counter; }; static const char driver_name[] = "MOSCHIP usb-ethernet driver"; @@ -632,20 +633,31 @@ static int mcs7830_rx_fixup(struct usbnet *dev, struct sk_buff *skb) static void mcs7830_status(struct usbnet *dev, struct urb *urb) { u8 *buf = urb->transfer_buffer; - bool link; + bool link, link_changed; + struct mcs7830_data *data = mcs7830_get_data(dev); if (urb->actual_length < 16) return; link = !(buf[1] & 0x20); - if (netif_carrier_ok(dev->net) != link) { - if (link) { - netif_carrier_on(dev->net); - usbnet_defer_kevent(dev, EVENT_LINK_RESET); - } else - netif_carrier_off(dev->net); - netdev_dbg(dev->net, "Link Status is: %d\n", link); - } + link_changed = netif_carrier_ok(dev->net) != link; + if (link_changed) { + data->link_counter++; + /* + track link state 20 times to guard against erroneous + link state changes reported sometimes by the chip + */ + if (data->link_counter > 20) { + data->link_counter = 0; + if (link) { + netif_carrier_on(dev->net); + usbnet_defer_kevent(dev, EVENT_LINK_RESET); + } else + netif_carrier_off(dev->net); + netdev_dbg(dev->net, "Link Status is: %d\n", link); + } + } else + data->link_counter = 0; } static const struct driver_info moschip_info = { diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index fc9f578a1e25..f9819d10b1f9 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -1588,10 +1588,27 @@ int usbnet_resume (struct usb_interface *intf) tasklet_schedule (&dev->bh); } } + + if (test_and_clear_bit(EVENT_DEVICE_REPORT_IDLE, &dev->flags)) + usb_autopm_get_interface_no_resume(intf); + return 0; } EXPORT_SYMBOL_GPL(usbnet_resume); +/* + * Either a subdriver implements manage_power, then it is assumed to always + * be ready to be suspended or it reports the readiness to be suspended + * explicitly + */ +void usbnet_device_suggests_idle(struct usbnet *dev) +{ + if (!test_and_set_bit(EVENT_DEVICE_REPORT_IDLE, &dev->flags)) { + dev->intf->needs_remote_wakeup = 1; + usb_autopm_put_interface_async(dev->intf); + } +} +EXPORT_SYMBOL(usbnet_device_suggests_idle); /*-------------------------------------------------------------------------*/ diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 51de9edb55f5..607976c00162 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -28,7 +28,6 @@ #include <linux/igmp.h> #include <linux/etherdevice.h> #include <linux/if_ether.h> -#include <linux/version.h> #include <linux/hash.h> #include <net/ip.h> #include <net/icmp.h> @@ -107,6 +106,8 @@ struct vxlan_dev { __be32 gaddr; /* multicast group */ __be32 saddr; /* source address */ unsigned int link; /* link to multicast over */ + __u16 port_min; /* 
source port range */ + __u16 port_max; __u8 tos; /* TOS override */ __u8 ttl; bool learn; @@ -229,9 +230,9 @@ static u32 eth_hash(const unsigned char *addr) /* only want 6 bytes */ #ifdef __BIG_ENDIAN - value <<= 16; -#else value >>= 16; +#else + value <<= 16; #endif return hash_64(value, FDB_HASH_BITS); } @@ -536,7 +537,6 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) } __skb_pull(skb, sizeof(struct vxlanhdr)); - skb_postpull_rcsum(skb, eth_hdr(skb), sizeof(struct vxlanhdr)); /* Is this VNI defined? */ vni = ntohl(vxh->vx_vni) >> 8; @@ -555,7 +555,6 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) /* Re-examine inner Ethernet packet */ oip = ip_hdr(skb); skb->protocol = eth_type_trans(skb, vxlan->dev); - skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); /* Ignore packet loops (and multicast echo) */ if (compare_ether_addr(eth_hdr(skb)->h_source, @@ -567,6 +566,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) __skb_tunnel_rx(skb, vxlan->dev); skb_reset_network_header(skb); + skb->ip_summed = CHECKSUM_NONE; err = IP_ECN_decapsulate(oip, skb); if (unlikely(err)) { @@ -622,46 +622,89 @@ static inline u8 vxlan_ecn_encap(u8 tos, return INET_ECN_encapsulate(tos, inner); } +static __be32 vxlan_find_dst(struct vxlan_dev *vxlan, struct sk_buff *skb) +{ + const struct ethhdr *eth = (struct ethhdr *) skb->data; + const struct vxlan_fdb *f; + + if (is_multicast_ether_addr(eth->h_dest)) + return vxlan->gaddr; + + f = vxlan_find_mac(vxlan, eth->h_dest); + if (f) + return f->remote_ip; + else + return vxlan->gaddr; + +} + +static void vxlan_sock_free(struct sk_buff *skb) +{ + sock_put(skb->sk); +} + +/* On transmit, associate with the tunnel socket */ +static void vxlan_set_owner(struct net_device *dev, struct sk_buff *skb) +{ + struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id); + struct sock *sk = vn->sock->sk; + + skb_orphan(skb); + sock_hold(sk); + skb->sk = sk; + skb->destructor = vxlan_sock_free; +} + +/* Compute source port for outgoing packet + * first choice to use L4 flow hash since it will spread + * better and maybe available from hardware + * secondary choice is to use jhash on the Ethernet header + */ +static u16 vxlan_src_port(const struct vxlan_dev *vxlan, struct sk_buff *skb) +{ + unsigned int range = (vxlan->port_max - vxlan->port_min) + 1; + u32 hash; + + hash = skb_get_rxhash(skb); + if (!hash) + hash = jhash(skb->data, 2 * ETH_ALEN, + (__force u32) skb->protocol); + + return (((u64) hash * range) >> 32) + vxlan->port_min; +} + /* Transmit local packets over Vxlan * * Outer IP header inherits ECN and DF from inner header. * Outer UDP destination is the VXLAN assigned port. 
- * source port is based on hash of flow if available - * otherwise use a random value + * source port is based on hash of flow */ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev) { struct vxlan_dev *vxlan = netdev_priv(dev); struct rtable *rt; - const struct ethhdr *eth; const struct iphdr *old_iph; struct iphdr *iph; struct vxlanhdr *vxh; struct udphdr *uh; struct flowi4 fl4; - struct vxlan_fdb *f; unsigned int pkt_len = skb->len; - u32 hash; __be32 dst; + __u16 src_port; __be16 df = 0; __u8 tos, ttl; int err; + dst = vxlan_find_dst(vxlan, skb); + if (!dst) + goto drop; + /* Need space for new headers (invalidates iph ptr) */ if (skb_cow_head(skb, VXLAN_HEADROOM)) goto drop; - eth = (void *)skb->data; old_iph = ip_hdr(skb); - if (!is_multicast_ether_addr(eth->h_dest) && - (f = vxlan_find_mac(vxlan, eth->h_dest))) - dst = f->remote_ip; - else if (vxlan->gaddr) { - dst = vxlan->gaddr; - } else - goto drop; - ttl = vxlan->ttl; if (!ttl && IN_MULTICAST(ntohl(dst))) ttl = 1; @@ -670,11 +713,15 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev) if (tos == 1) tos = vxlan_get_dsfield(old_iph, skb); - hash = skb_get_rxhash(skb); + src_port = vxlan_src_port(vxlan, skb); + + memset(&fl4, 0, sizeof(fl4)); + fl4.flowi4_oif = vxlan->link; + fl4.flowi4_tos = RT_TOS(tos); + fl4.daddr = dst; + fl4.saddr = vxlan->saddr; - rt = ip_route_output_gre(dev_net(dev), &fl4, dst, - vxlan->saddr, vxlan->vni, - RT_TOS(tos), vxlan->link); + rt = ip_route_output_key(dev_net(dev), &fl4); if (IS_ERR(rt)) { netdev_dbg(dev, "no route to %pI4\n", &dst); dev->stats.tx_carrier_errors++; @@ -703,7 +750,7 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev) uh = udp_hdr(skb); uh->dest = htons(vxlan_port); - uh->source = hash ? :random32(); + uh->source = htons(src_port); uh->len = htons(skb->len); uh->check = 0; @@ -716,10 +763,12 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev) iph->frag_off = df; iph->protocol = IPPROTO_UDP; iph->tos = vxlan_ecn_encap(tos, old_iph, skb); - iph->daddr = fl4.daddr; + iph->daddr = dst; iph->saddr = fl4.saddr; iph->ttl = ttl ? 
: ip4_dst_hoplimit(&rt->dst); + vxlan_set_owner(dev, skb); + /* See __IPTUNNEL_XMIT */ skb->ip_summed = CHECKSUM_NONE; ip_select_ident(iph, &rt->dst, NULL); @@ -929,9 +978,11 @@ static void vxlan_setup(struct net_device *dev) { struct vxlan_dev *vxlan = netdev_priv(dev); unsigned h; + int low, high; eth_hw_addr_random(dev); ether_setup(dev); + dev->hard_header_len = ETH_HLEN + VXLAN_HEADROOM; dev->netdev_ops = &vxlan_netdev_ops; dev->destructor = vxlan_free; @@ -948,6 +999,10 @@ static void vxlan_setup(struct net_device *dev) vxlan->age_timer.function = vxlan_cleanup; vxlan->age_timer.data = (unsigned long) vxlan; + inet_get_local_port_range(&low, &high); + vxlan->port_min = low; + vxlan->port_max = high; + vxlan->dev = dev; for (h = 0; h < FDB_HASH_SIZE; ++h) @@ -964,6 +1019,7 @@ static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = { [IFLA_VXLAN_LEARNING] = { .type = NLA_U8 }, [IFLA_VXLAN_AGEING] = { .type = NLA_U32 }, [IFLA_VXLAN_LIMIT] = { .type = NLA_U32 }, + [IFLA_VXLAN_PORT_RANGE] = { .len = sizeof(struct ifla_vxlan_port_range) }, }; static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[]) @@ -996,6 +1052,18 @@ static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[]) return -EADDRNOTAVAIL; } } + + if (data[IFLA_VXLAN_PORT_RANGE]) { + const struct ifla_vxlan_port_range *p + = nla_data(data[IFLA_VXLAN_PORT_RANGE]); + + if (ntohs(p->high) < ntohs(p->low)) { + pr_debug("port range %u .. %u not valid\n", + ntohs(p->low), ntohs(p->high)); + return -EINVAL; + } + } + return 0; } @@ -1022,14 +1090,18 @@ static int vxlan_newlink(struct net *net, struct net_device *dev, if (data[IFLA_VXLAN_LOCAL]) vxlan->saddr = nla_get_be32(data[IFLA_VXLAN_LOCAL]); - if (data[IFLA_VXLAN_LINK]) { - vxlan->link = nla_get_u32(data[IFLA_VXLAN_LINK]); + if (data[IFLA_VXLAN_LINK] && + (vxlan->link = nla_get_u32(data[IFLA_VXLAN_LINK]))) { + struct net_device *lowerdev + = __dev_get_by_index(net, vxlan->link); - if (!tb[IFLA_MTU]) { - struct net_device *lowerdev; - lowerdev = __dev_get_by_index(net, vxlan->link); - dev->mtu = lowerdev->mtu - VXLAN_HEADROOM; + if (!lowerdev) { + pr_info("ifindex %d does not exist\n", vxlan->link); + return -ENODEV; } + + if (!tb[IFLA_MTU]) + dev->mtu = lowerdev->mtu - VXLAN_HEADROOM; } if (data[IFLA_VXLAN_TOS]) @@ -1046,6 +1118,13 @@ static int vxlan_newlink(struct net *net, struct net_device *dev, if (data[IFLA_VXLAN_LIMIT]) vxlan->addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]); + if (data[IFLA_VXLAN_PORT_RANGE]) { + const struct ifla_vxlan_port_range *p + = nla_data(data[IFLA_VXLAN_PORT_RANGE]); + vxlan->port_min = ntohs(p->low); + vxlan->port_max = ntohs(p->high); + } + err = register_netdevice(dev); if (!err) hlist_add_head_rcu(&vxlan->hlist, vni_head(net, vxlan->vni)); @@ -1074,23 +1153,28 @@ static size_t vxlan_get_size(const struct net_device *dev) nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_LEARNING */ nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_AGEING */ nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LIMIT */ + nla_total_size(sizeof(struct ifla_vxlan_port_range)) + 0; } static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev) { const struct vxlan_dev *vxlan = netdev_priv(dev); + struct ifla_vxlan_port_range ports = { + .low = htons(vxlan->port_min), + .high = htons(vxlan->port_max), + }; if (nla_put_u32(skb, IFLA_VXLAN_ID, vxlan->vni)) goto nla_put_failure; - if (vxlan->gaddr && nla_put_u32(skb, IFLA_VXLAN_GROUP, vxlan->gaddr)) + if (vxlan->gaddr && nla_put_be32(skb, IFLA_VXLAN_GROUP, vxlan->gaddr)) goto 
nla_put_failure; if (vxlan->link && nla_put_u32(skb, IFLA_VXLAN_LINK, vxlan->link)) goto nla_put_failure; - if (vxlan->saddr && nla_put_u32(skb, IFLA_VXLAN_LOCAL, vxlan->saddr)) + if (vxlan->saddr && nla_put_be32(skb, IFLA_VXLAN_LOCAL, vxlan->saddr)) goto nla_put_failure; if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->ttl) || @@ -1100,6 +1184,9 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev) nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->addrmax)) goto nla_put_failure; + if (nla_put(skb, IFLA_VXLAN_PORT_RANGE, sizeof(ports), &ports)) + goto nla_put_failure; + return 0; nla_put_failure: diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c index 1a623183cbe5..b6271325f803 100644 --- a/drivers/net/wan/farsync.c +++ b/drivers/net/wan/farsync.c @@ -597,7 +597,7 @@ fst_q_work_item(u64 * queue, int card_index) * bottom half for the card. Note the limitation of 64 cards. * That ought to be enough */ - mask = 1 << card_index; + mask = (u64)1 << card_index; *queue |= mask; spin_unlock_irqrestore(&fst_work_q_lock, flags); } diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c index 0e5769061702..feacc3b994b7 100644 --- a/drivers/net/wan/z85230.c +++ b/drivers/net/wan/z85230.c @@ -1775,7 +1775,7 @@ EXPORT_SYMBOL(z8530_queue_xmit); /* * Module support */ -static const char banner[] __initdata = +static const char banner[] __initconst = KERN_INFO "Generic Z85C30/Z85230 interface driver v0.02\n"; static int __init z85230_init_driver(void) diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c index 9fd6d9a9942e..9f31cfa56cc0 100644 --- a/drivers/net/wireless/ath/ath5k/base.c +++ b/drivers/net/wireless/ath/ath5k/base.c @@ -1804,7 +1804,7 @@ ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { int ret; struct ath5k_hw *ah = hw->priv; - struct ath5k_vif *avf = (void *)vif->drv_priv; + struct ath5k_vif *avf; struct sk_buff *skb; if (WARN_ON(!vif)) { @@ -1819,6 +1819,7 @@ ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif) goto out; } + avf = (void *)vif->drv_priv; ath5k_txbuf_free_skb(ah, avf->bbuf); avf->bbuf->skb = skb; ret = ath5k_beacon_setup(ah, avf->bbuf); diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c index 76f07d8c272d..1b48414dca95 100644 --- a/drivers/net/wireless/ath/ath9k/beacon.c +++ b/drivers/net/wireless/ath/ath9k/beacon.c @@ -120,7 +120,7 @@ static void ath9k_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb) if (ath_tx_start(hw, skb, &txctl) != 0) { ath_dbg(common, XMIT, "CABQ TX failed\n"); - dev_kfree_skb_any(skb); + ieee80211_free_txskb(hw, skb); } } diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index f9a6ec5cf470..8e1559aba495 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -1450,9 +1450,14 @@ static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type) REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN | AR_RTC_FORCE_WAKE_ON_INT); + if (!ah->reset_power_on) + type = ATH9K_RESET_POWER_ON; + switch (type) { case ATH9K_RESET_POWER_ON: ret = ath9k_hw_set_reset_power_on(ah); + if (!ret) + ah->reset_power_on = true; break; case ATH9K_RESET_WARM: case ATH9K_RESET_COLD: diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h index 566a4ce4f156..dbc1b7a4cbfd 100644 --- a/drivers/net/wireless/ath/ath9k/hw.h +++ b/drivers/net/wireless/ath/ath9k/hw.h @@ -741,6 +741,7 @@ struct ath_hw { u32 
rfkill_polarity; u32 ah_flags; + bool reset_power_on; bool htc_reset_init; enum nl80211_iftype opmode; diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 31ab82e3ba85..dd45edfa6bae 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -639,8 +639,7 @@ static int ath9k_start(struct ieee80211_hw *hw) ath_err(common, "Unable to reset hardware; reset status %d (freq %u MHz)\n", r, curchan->center_freq); - spin_unlock_bh(&sc->sc_pcu_lock); - goto mutex_unlock; + ah->reset_power_on = false; } /* Setup our intr mask. */ @@ -665,11 +664,8 @@ static int ath9k_start(struct ieee80211_hw *hw) clear_bit(SC_OP_INVALID, &sc->sc_flags); sc->sc_ah->is_monitoring = false; - if (!ath_complete_reset(sc, false)) { - r = -EIO; - spin_unlock_bh(&sc->sc_pcu_lock); - goto mutex_unlock; - } + if (!ath_complete_reset(sc, false)) + ah->reset_power_on = false; if (ah->led_pin >= 0) { ath9k_hw_cfg_output(ah, ah->led_pin, @@ -688,12 +684,11 @@ static int ath9k_start(struct ieee80211_hw *hw) if (ah->caps.pcie_lcr_extsync_en && common->bus_ops->extn_synch_en) common->bus_ops->extn_synch_en(common); -mutex_unlock: mutex_unlock(&sc->mutex); ath9k_ps_restore(sc); - return r; + return 0; } static void ath9k_tx(struct ieee80211_hw *hw, @@ -770,7 +765,7 @@ static void ath9k_tx(struct ieee80211_hw *hw, return; exit: - dev_kfree_skb_any(skb); + ieee80211_free_txskb(hw, skb); } static void ath9k_stop(struct ieee80211_hw *hw) diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c index 0e630a99b68b..f088f4bf9a26 100644 --- a/drivers/net/wireless/ath/ath9k/pci.c +++ b/drivers/net/wireless/ath/ath9k/pci.c @@ -324,6 +324,10 @@ static int ath_pci_suspend(struct device *device) static int ath_pci_resume(struct device *device) { struct pci_dev *pdev = to_pci_dev(device); + struct ieee80211_hw *hw = pci_get_drvdata(pdev); + struct ath_softc *sc = hw->priv; + struct ath_hw *ah = sc->sc_ah; + struct ath_common *common = ath9k_hw_common(ah); u32 val; /* @@ -335,6 +339,9 @@ static int ath_pci_resume(struct device *device) if ((val & 0x0000ff00) != 0) pci_write_config_dword(pdev, 0x40, val & 0xffff00ff); + ath_pci_aspm_init(common); + ah->reset_power_on = false; + return 0; } diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 36618e3a5e60..378bd70256b2 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -66,8 +66,7 @@ static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid, static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc, struct ath_txq *txq, struct ath_atx_tid *tid, - struct sk_buff *skb, - bool dequeue); + struct sk_buff *skb); enum { MCS_HT20, @@ -176,7 +175,15 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid) fi = get_frame_info(skb); bf = fi->bf; - if (bf && fi->retries) { + if (!bf) { + bf = ath_tx_setup_buffer(sc, txq, tid, skb); + if (!bf) { + ieee80211_free_txskb(sc->hw, skb); + continue; + } + } + + if (fi->retries) { list_add_tail(&bf->list, &bf_head); ath_tx_update_baw(sc, tid, bf->bf_state.seqno); ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0); @@ -785,10 +792,13 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc, fi = get_frame_info(skb); bf = fi->bf; if (!fi->bf) - bf = ath_tx_setup_buffer(sc, txq, tid, skb, true); + bf = ath_tx_setup_buffer(sc, txq, tid, skb); - if (!bf) + if (!bf) { + __skb_unlink(skb, &tid->buf_q); + 
ieee80211_free_txskb(sc->hw, skb); continue; + } bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR; seqno = bf->bf_state.seqno; @@ -1731,9 +1741,11 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid, return; } - bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb, false); - if (!bf) + bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb); + if (!bf) { + ieee80211_free_txskb(sc->hw, skb); return; + } bf->bf_state.bf_type = BUF_AMPDU; INIT_LIST_HEAD(&bf_head); @@ -1757,11 +1769,6 @@ static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq, struct ath_buf *bf; bf = fi->bf; - if (!bf) - bf = ath_tx_setup_buffer(sc, txq, tid, skb, false); - - if (!bf) - return; INIT_LIST_HEAD(&bf_head); list_add_tail(&bf->list, &bf_head); @@ -1839,8 +1846,7 @@ u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate) static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc, struct ath_txq *txq, struct ath_atx_tid *tid, - struct sk_buff *skb, - bool dequeue) + struct sk_buff *skb) { struct ath_common *common = ath9k_hw_common(sc->sc_ah); struct ath_frame_info *fi = get_frame_info(skb); @@ -1852,7 +1858,7 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc, bf = ath_tx_get_buffer(sc); if (!bf) { ath_dbg(common, XMIT, "TX buffers are full\n"); - goto error; + return NULL; } ATH_TXBUF_RESET(bf); @@ -1881,18 +1887,12 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc, ath_err(ath9k_hw_common(sc->sc_ah), "dma_mapping_error() on TX\n"); ath_tx_return_buffer(sc, bf); - goto error; + return NULL; } fi->bf = bf; return bf; - -error: - if (dequeue) - __skb_unlink(skb, &tid->buf_q); - dev_kfree_skb_any(skb); - return NULL; } /* FIXME: tx power */ @@ -1921,9 +1921,14 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb, */ ath_tx_send_ampdu(sc, tid, skb, txctl); } else { - bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb, false); - if (!bf) + bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb); + if (!bf) { + if (txctl->paprd) + dev_kfree_skb_any(skb); + else + ieee80211_free_txskb(sc->hw, skb); return; + } bf->bf_state.bfs_paprd = txctl->paprd; diff --git a/drivers/net/wireless/ath/carl9170/carl9170.h b/drivers/net/wireless/ath/carl9170/carl9170.h index 2aa4a59c72c8..2df17f1e49ef 100644 --- a/drivers/net/wireless/ath/carl9170/carl9170.h +++ b/drivers/net/wireless/ath/carl9170/carl9170.h @@ -303,6 +303,7 @@ struct ar9170 { unsigned long queue_stop_timeout[__AR9170_NUM_TXQ]; unsigned long max_queue_stop_timeout[__AR9170_NUM_TXQ]; bool needs_full_reset; + bool force_usb_reset; atomic_t pending_restarts; /* interface mode settings */ diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c index 67997b39aba7..25a1e2f4f738 100644 --- a/drivers/net/wireless/ath/carl9170/main.c +++ b/drivers/net/wireless/ath/carl9170/main.c @@ -465,27 +465,26 @@ static void carl9170_restart_work(struct work_struct *work) { struct ar9170 *ar = container_of(work, struct ar9170, restart_work); - int err; + int err = -EIO; ar->usedkeys = 0; ar->filter_state = 0; carl9170_cancel_worker(ar); mutex_lock(&ar->mutex); - err = carl9170_usb_restart(ar); - if (net_ratelimit()) { - if (err) { - dev_err(&ar->udev->dev, "Failed to restart device " - " (%d).\n", err); - } else { - dev_info(&ar->udev->dev, "device restarted " - "successfully.\n"); + if (!ar->force_usb_reset) { + err = carl9170_usb_restart(ar); + if (net_ratelimit()) { + if (err) + dev_err(&ar->udev->dev, "Failed to restart device (%d).\n", err); + 
else + dev_info(&ar->udev->dev, "device restarted successfully.\n"); } } - carl9170_zap_queues(ar); mutex_unlock(&ar->mutex); - if (!err) { + + if (!err && !ar->force_usb_reset) { ar->restart_counter++; atomic_set(&ar->pending_restarts, 0); @@ -526,10 +525,10 @@ void carl9170_restart(struct ar9170 *ar, const enum carl9170_restart_reasons r) if (!ar->registered) return; - if (IS_ACCEPTING_CMD(ar) && !ar->needs_full_reset) - ieee80211_queue_work(ar->hw, &ar->restart_work); - else - carl9170_usb_reset(ar); + if (!IS_ACCEPTING_CMD(ar) || ar->needs_full_reset) + ar->force_usb_reset = true; + + ieee80211_queue_work(ar->hw, &ar->restart_work); /* * At this point, the device instance might have vanished/disabled. diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c index 2691620393ea..0679458a1bac 100644 --- a/drivers/net/wireless/mwifiex/cfg80211.c +++ b/drivers/net/wireless/mwifiex/cfg80211.c @@ -1596,8 +1596,9 @@ done: } } - if (mwifiex_bss_start(priv, bss, &req_ssid)) - return -EFAULT; + ret = mwifiex_bss_start(priv, bss, &req_ssid); + if (ret) + return ret; if (mode == NL80211_IFTYPE_ADHOC) { /* Inform the BSS information to kernel, otherwise @@ -1652,9 +1653,19 @@ done: "info: association to bssid %pM failed\n", priv->cfg_bssid); memset(priv->cfg_bssid, 0, ETH_ALEN); + + if (ret > 0) + cfg80211_connect_result(priv->netdev, priv->cfg_bssid, + NULL, 0, NULL, 0, ret, + GFP_KERNEL); + else + cfg80211_connect_result(priv->netdev, priv->cfg_bssid, + NULL, 0, NULL, 0, + WLAN_STATUS_UNSPECIFIED_FAILURE, + GFP_KERNEL); } - return ret; + return 0; } /* @@ -1802,7 +1813,7 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy, { struct net_device *dev = request->wdev->netdev; struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); - int i, offset; + int i, offset, ret; struct ieee80211_channel *chan; struct ieee_types_header *ie; @@ -1855,8 +1866,12 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy, priv->user_scan_cfg->chan_list[i].scan_time = 0; } - if (mwifiex_scan_networks(priv, priv->user_scan_cfg)) - return -EFAULT; + + ret = mwifiex_scan_networks(priv, priv->user_scan_cfg); + if (ret) { + dev_err(priv->adapter->dev, "scan failed: %d\n", ret); + return ret; + } if (request->ie && request->ie_len) { for (i = 0; i < MWIFIEX_MAX_VSIE_NUM; i++) { diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c index 82e63cee1e97..7b0858af8f5d 100644 --- a/drivers/net/wireless/mwifiex/join.c +++ b/drivers/net/wireless/mwifiex/join.c @@ -1180,16 +1180,18 @@ int mwifiex_ret_802_11_ad_hoc(struct mwifiex_private *priv, struct mwifiex_adapter *adapter = priv->adapter; struct host_cmd_ds_802_11_ad_hoc_result *adhoc_result; struct mwifiex_bssdescriptor *bss_desc; + u16 reason_code; adhoc_result = &resp->params.adhoc_result; bss_desc = priv->attempted_bss_desc; /* Join result code 0 --> SUCCESS */ - if (le16_to_cpu(resp->result)) { + reason_code = le16_to_cpu(resp->result); + if (reason_code) { dev_err(priv->adapter->dev, "ADHOC_RESP: failed\n"); if (priv->media_connected) - mwifiex_reset_connect_state(priv); + mwifiex_reset_connect_state(priv, reason_code); memset(&priv->curr_bss_params.bss_descriptor, 0x00, sizeof(struct mwifiex_bssdescriptor)); diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h index bfb3fa69805c..c2d0ab146af5 100644 --- a/drivers/net/wireless/mwifiex/main.h +++ b/drivers/net/wireless/mwifiex/main.h @@ -847,7 +847,7 @@ int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv, struct 
mwifiex_bssdescriptor *bss_desc); int mwifiex_ret_802_11_associate(struct mwifiex_private *priv, struct host_cmd_ds_command *resp); -void mwifiex_reset_connect_state(struct mwifiex_private *priv); +void mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason); u8 mwifiex_band_to_radio_type(u8 band); int mwifiex_deauthenticate(struct mwifiex_private *priv, u8 *mac); int mwifiex_adhoc_start(struct mwifiex_private *priv, diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c index e36a75988f87..00b658d3b6ec 100644 --- a/drivers/net/wireless/mwifiex/scan.c +++ b/drivers/net/wireless/mwifiex/scan.c @@ -1296,7 +1296,7 @@ mwifiex_radio_type_to_band(u8 radio_type) int mwifiex_scan_networks(struct mwifiex_private *priv, const struct mwifiex_user_scan_cfg *user_scan_in) { - int ret = 0; + int ret; struct mwifiex_adapter *adapter = priv->adapter; struct cmd_ctrl_node *cmd_node; union mwifiex_scan_cmd_config_tlv *scan_cfg_out; @@ -1309,25 +1309,26 @@ int mwifiex_scan_networks(struct mwifiex_private *priv, unsigned long flags; if (adapter->scan_processing) { - dev_dbg(adapter->dev, "cmd: Scan already in process...\n"); - return ret; + dev_err(adapter->dev, "cmd: Scan already in process...\n"); + return -EBUSY; } - spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags); - adapter->scan_processing = true; - spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags); - if (priv->scan_block) { - dev_dbg(adapter->dev, + dev_err(adapter->dev, "cmd: Scan is blocked during association...\n"); - return ret; + return -EBUSY; } + spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags); + adapter->scan_processing = true; + spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags); + scan_cfg_out = kzalloc(sizeof(union mwifiex_scan_cmd_config_tlv), GFP_KERNEL); if (!scan_cfg_out) { dev_err(adapter->dev, "failed to alloc scan_cfg_out\n"); - return -ENOMEM; + ret = -ENOMEM; + goto done; } buf_size = sizeof(struct mwifiex_chan_scan_param_set) * @@ -1336,7 +1337,8 @@ int mwifiex_scan_networks(struct mwifiex_private *priv, if (!scan_chan_list) { dev_err(adapter->dev, "failed to alloc scan_chan_list\n"); kfree(scan_cfg_out); - return -ENOMEM; + ret = -ENOMEM; + goto done; } mwifiex_config_scan(priv, user_scan_in, &scan_cfg_out->config, @@ -1364,14 +1366,16 @@ int mwifiex_scan_networks(struct mwifiex_private *priv, spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags); } - } else { - spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags); - adapter->scan_processing = true; - spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags); } kfree(scan_cfg_out); kfree(scan_chan_list); +done: + if (ret) { + spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags); + adapter->scan_processing = false; + spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags); + } return ret; } @@ -1430,8 +1434,8 @@ int mwifiex_check_network_compatibility(struct mwifiex_private *priv, ret = mwifiex_is_network_compatible(priv, bss_desc, priv->bss_mode); if (ret) - dev_err(priv->adapter->dev, "cannot find ssid " - "%s\n", bss_desc->ssid.ssid); + dev_err(priv->adapter->dev, + "Incompatible network settings\n"); break; default: ret = 0; diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c index e380171c4c5d..09e6a267f566 100644 --- a/drivers/net/wireless/mwifiex/sta_cmdresp.c +++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c @@ -545,7 +545,7 @@ static int mwifiex_ret_802_11_deauthenticate(struct mwifiex_private *priv, if (!memcmp(resp->params.deauth.mac_addr, 
&priv->curr_bss_params.bss_descriptor.mac_address, sizeof(resp->params.deauth.mac_addr))) - mwifiex_reset_connect_state(priv); + mwifiex_reset_connect_state(priv, WLAN_REASON_DEAUTH_LEAVING); return 0; } @@ -558,7 +558,7 @@ static int mwifiex_ret_802_11_deauthenticate(struct mwifiex_private *priv, static int mwifiex_ret_802_11_ad_hoc_stop(struct mwifiex_private *priv, struct host_cmd_ds_command *resp) { - mwifiex_reset_connect_state(priv); + mwifiex_reset_connect_state(priv, WLAN_REASON_DEAUTH_LEAVING); return 0; } diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c index aafde30e714a..8132119e1a21 100644 --- a/drivers/net/wireless/mwifiex/sta_event.c +++ b/drivers/net/wireless/mwifiex/sta_event.c @@ -41,7 +41,7 @@ * - Sends a disconnect event to upper layers/applications. */ void -mwifiex_reset_connect_state(struct mwifiex_private *priv) +mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code) { struct mwifiex_adapter *adapter = priv->adapter; @@ -117,10 +117,10 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv) priv->media_connected = false; dev_dbg(adapter->dev, "info: successfully disconnected from %pM: reason code %d\n", - priv->cfg_bssid, WLAN_REASON_DEAUTH_LEAVING); + priv->cfg_bssid, reason_code); if (priv->bss_mode == NL80211_IFTYPE_STATION) { - cfg80211_disconnected(priv->netdev, WLAN_REASON_DEAUTH_LEAVING, - NULL, 0, GFP_KERNEL); + cfg80211_disconnected(priv->netdev, reason_code, NULL, 0, + GFP_KERNEL); } memset(priv->cfg_bssid, 0, ETH_ALEN); @@ -186,7 +186,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) struct mwifiex_adapter *adapter = priv->adapter; int ret = 0; u32 eventcause = adapter->event_cause; - u16 ctrl; + u16 ctrl, reason_code; switch (eventcause) { case EVENT_DUMMY_HOST_WAKEUP_SIGNAL: @@ -204,22 +204,31 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) case EVENT_DEAUTHENTICATED: dev_dbg(adapter->dev, "event: Deauthenticated\n"); adapter->dbg.num_event_deauth++; - if (priv->media_connected) - mwifiex_reset_connect_state(priv); + if (priv->media_connected) { + reason_code = + le16_to_cpu(*(__le16 *)adapter->event_body); + mwifiex_reset_connect_state(priv, reason_code); + } break; case EVENT_DISASSOCIATED: dev_dbg(adapter->dev, "event: Disassociated\n"); adapter->dbg.num_event_disassoc++; - if (priv->media_connected) - mwifiex_reset_connect_state(priv); + if (priv->media_connected) { + reason_code = + le16_to_cpu(*(__le16 *)adapter->event_body); + mwifiex_reset_connect_state(priv, reason_code); + } break; case EVENT_LINK_LOST: dev_dbg(adapter->dev, "event: Link lost\n"); adapter->dbg.num_event_link_lost++; - if (priv->media_connected) - mwifiex_reset_connect_state(priv); + if (priv->media_connected) { + reason_code = + le16_to_cpu(*(__le16 *)adapter->event_body); + mwifiex_reset_connect_state(priv, reason_code); + } break; case EVENT_PS_SLEEP: diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index 540c94f8505a..01dc8891070c 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -2252,9 +2252,9 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev, */ if (rt2x00_rt(rt2x00dev, RT3352)) { rt2800_bbp_write(rt2x00dev, 27, 0x0); - rt2800_bbp_write(rt2x00dev, 62, 0x26 + rt2x00dev->lna_gain); + rt2800_bbp_write(rt2x00dev, 66, 0x26 + rt2x00dev->lna_gain); rt2800_bbp_write(rt2x00dev, 27, 0x20); - rt2800_bbp_write(rt2x00dev, 62, 0x26 + rt2x00dev->lna_gain); + 
rt2800_bbp_write(rt2x00dev, 66, 0x26 + rt2x00dev->lna_gain); } else { rt2800_bbp_write(rt2x00dev, 62, 0x37 - rt2x00dev->lna_gain); rt2800_bbp_write(rt2x00dev, 63, 0x37 - rt2x00dev->lna_gain); diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 05593d882023..f2d6b78d901d 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -40,6 +40,7 @@ #include <net/tcp.h> +#include <xen/xen.h> #include <xen/events.h> #include <xen/interface/memory.h> @@ -334,21 +335,35 @@ unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb) for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]); + unsigned long offset = skb_shinfo(skb)->frags[i].page_offset; unsigned long bytes; + + offset &= ~PAGE_MASK; + while (size > 0) { + BUG_ON(offset >= PAGE_SIZE); BUG_ON(copy_off > MAX_BUFFER_OFFSET); - if (start_new_rx_buffer(copy_off, size, 0)) { + bytes = PAGE_SIZE - offset; + + if (bytes > size) + bytes = size; + + if (start_new_rx_buffer(copy_off, bytes, 0)) { count++; copy_off = 0; } - bytes = size; if (copy_off + bytes > MAX_BUFFER_OFFSET) bytes = MAX_BUFFER_OFFSET - copy_off; copy_off += bytes; + + offset += bytes; size -= bytes; + + if (offset == PAGE_SIZE) + offset = 0; } } return count; @@ -402,14 +417,24 @@ static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb, unsigned long bytes; /* Data must not cross a page boundary. */ - BUG_ON(size + offset > PAGE_SIZE); + BUG_ON(size + offset > PAGE_SIZE<<compound_order(page)); meta = npo->meta + npo->meta_prod - 1; + /* Skip unused frames from start of page */ + page += offset >> PAGE_SHIFT; + offset &= ~PAGE_MASK; + while (size > 0) { + BUG_ON(offset >= PAGE_SIZE); BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET); - if (start_new_rx_buffer(npo->copy_off, size, *head)) { + bytes = PAGE_SIZE - offset; + + if (bytes > size) + bytes = size; + + if (start_new_rx_buffer(npo->copy_off, bytes, *head)) { /* * Netfront requires there to be some data in the head * buffer. @@ -419,7 +444,6 @@ static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb, meta = get_next_rx_buffer(vif, npo); } - bytes = size; if (npo->copy_off + bytes > MAX_BUFFER_OFFSET) bytes = MAX_BUFFER_OFFSET - npo->copy_off; @@ -452,6 +476,13 @@ static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb, offset += bytes; size -= bytes; + /* Next frame */ + if (offset == PAGE_SIZE && size) { + BUG_ON(!PageCompound(page)); + page++; + offset = 0; + } + /* Leave a gap for the GSO descriptor. */ if (*head && skb_shinfo(skb)->gso_size && !vif->gso_prefix) vif->rx.req_cons++; diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index c934fe8583f5..caa011008cd0 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -43,6 +43,7 @@ #include <linux/slab.h> #include <net/ip.h> +#include <asm/xen/page.h> #include <xen/xen.h> #include <xen/xenbus.h> #include <xen/events.h> |