author     Linus Torvalds <torvalds@linux-foundation.org>   2012-12-13 03:07:07 +0100
committer  Linus Torvalds <torvalds@linux-foundation.org>   2012-12-13 03:07:07 +0100
commit     6be35c700f742e911ecedd07fcc43d4439922334 (patch)
tree       ca9f37214d204465fcc2d79c82efd291e357c53c /drivers/net/ethernet/sfc/nic.c
parent     Merge tag 'for-linus-20121212' of git://git.kernel.org/pub/scm/linux/kernel/g... (diff)
parent     net/mlx4_en: Add support for destination MAC in steering rules (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking changes from David Miller:
1) Allow the bridge multicast database to be dumped, monitored, and
changed using netlink. From Cong Wang.
2) RFC 5961 TCP blind data injection attack mitigation, from Eric
Dumazet.
3) Networking user namespace support from Eric W. Biederman.
4) tuntap/virtio-net multiqueue support by Jason Wang.
5) Support for checksum offload of encapsulated packets (basically,
tunneled traffic can still be checksummed by HW). From Joseph
Gasparakis.
6) Allow BPF filter access to VLAN tags, from Eric Dumazet and
Daniel Borkmann (a hedged usage sketch follows this pull summary).
7) Bridge port parameters over netlink and BPDU blocking support
from Stephen Hemminger.
8) Improve data access patterns during inet socket demux by rearranging
socket layout, from Eric Dumazet.
9) TIPC protocol updates and cleanups from Ying Xue, Paul Gortmaker, and
Jon Maloy.
10) Update TCP socket hash sizing to be more in line with current day
realities. The existing heuristics were chosen a decade ago.
From Eric Dumazet.
11) Fix races, queue bloat, and excessive wakeups in ATM and
associated drivers, from Krzysztof Mazur and David Woodhouse.
12) Support DOVE (Distributed Overlay Virtual Ethernet) extensions
in VXLAN driver, from David Stevens.
13) Add "oops_only" mode to netconsole, from Amerigo Wang.
14) Support set and query of VEB/VEPA bridge mode via PF_BRIDGE, also
allow DCB netlink to work on namespaces other than the initial
namespace. From John Fastabend.
15) Support PTP in the Tigon3 driver, from Matt Carlson.
16) tun/vhost zero copy fixes and improvements, plus turn it on
by default, from Michael S. Tsirkin.
17) Support per-association statistics in SCTP, from Michele
Baldessari.
And many, many driver updates, cleanups, and improvements, too
numerous to mention individually.
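As a hedged illustration of item 6 above: the series adds the classic-BPF ancillary
offsets SKF_AD_VLAN_TAG and SKF_AD_VLAN_TAG_PRESENT, so a socket filter can match on
VLAN metadata that the NIC has already stripped out of the packet payload. The sketch
below is not taken from the patches; the VLAN ID (100), the socket setup, and the
program layout are purely illustrative, and attaching the filter requires CAP_NET_RAW.

/* Hedged sketch: attach a classic BPF filter that accepts only frames whose
 * hardware-stripped VLAN ID is 100, using the ancillary loads added in this
 * merge window.  Interface-independent; VLAN ID 100 is an arbitrary example.
 */
#include <stdio.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <linux/filter.h>
#include <linux/if_ether.h>

int main(void)
{
	struct sock_filter code[] = {
		/* A = 1 if an accelerated VLAN tag is present, else 0 */
		BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
			 SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT),
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0, 4, 0),	/* no tag -> drop */
		/* A = VLAN TCI (PCP/DEI in the top bits, VID in the low 12) */
		BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
			 SKF_AD_OFF + SKF_AD_VLAN_TAG),
		BPF_STMT(BPF_ALU | BPF_AND | BPF_K, 0x0fff),	/* keep the VID */
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 100, 0, 1),
		BPF_STMT(BPF_RET | BPF_K, 0xffffffff),		/* accept frame */
		BPF_STMT(BPF_RET | BPF_K, 0),			/* drop frame */
	};
	struct sock_fprog prog = {
		.len = sizeof(code) / sizeof(code[0]),
		.filter = code,
	};
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));

	if (fd < 0 || setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
				 &prog, sizeof(prog)) < 0) {
		perror("attach VLAN filter");
		return 1;
	}
	printf("filter attached; only VLAN 100 frames will be delivered\n");
	return 0;
}

Masking the TCI with 0x0fff extracts the VLAN ID; the upper bits carry the PCP and
DEI fields and would otherwise make the comparison fail for prioritised traffic.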
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1722 commits)
net/mlx4_en: Add support for destination MAC in steering rules
net/mlx4_en: Use generic etherdevice.h functions.
net: ethtool: Add destination MAC address to flow steering API
bridge: add support of adding and deleting mdb entries
bridge: notify mdb changes via netlink
ndisc: Unexport ndisc_{build,send}_skb().
uapi: add missing netconf.h to export list
pkt_sched: avoid requeues if possible
solos-pci: fix double-free of TX skb in DMA mode
bnx2: Fix accidental reversions.
bna: Driver Version Updated to 3.1.2.1
bna: Firmware update
bna: Add RX State
bna: Rx Page Based Allocation
bna: TX Intr Coalescing Fix
bna: Tx and Rx Optimizations
bna: Code Cleanup and Enhancements
ath9k: check pdata variable before dereferencing it
ath5k: RX timestamp is reported at end of frame
ath9k_htc: RX timestamp is reported at end of frame
...
Diffstat (limited to 'drivers/net/ethernet/sfc/nic.c')
-rw-r--r--  drivers/net/ethernet/sfc/nic.c | 81
1 file changed, 66 insertions(+), 15 deletions(-)
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index aab7cacb2e34..0ad790cc473c 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -73,6 +73,8 @@
 	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN,			\
 			   (_tx_queue)->queue)
 
+static void efx_magic_event(struct efx_channel *channel, u32 magic);
+
 /**************************************************************************
  *
  * Solarstorm hardware access
@@ -255,9 +257,6 @@ static int efx_alloc_special_buffer(struct efx_nic *efx,
 	buffer->entries = len / EFX_BUF_SIZE;
 	BUG_ON(buffer->dma_addr & (EFX_BUF_SIZE - 1));
 
-	/* All zeros is a potentially valid event so memset to 0xff */
-	memset(buffer->addr, 0xff, len);
-
 	/* Select new buffer ID */
 	buffer->index = efx->next_buffer_table;
 	efx->next_buffer_table += buffer->entries;
@@ -494,6 +493,9 @@ static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
 	struct efx_nic *efx = tx_queue->efx;
 	efx_oword_t tx_flush_descq;
 
+	WARN_ON(atomic_read(&tx_queue->flush_outstanding));
+	atomic_set(&tx_queue->flush_outstanding, 1);
+
 	EFX_POPULATE_OWORD_2(tx_flush_descq,
 			     FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
 			     FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
@@ -669,6 +671,47 @@ static bool efx_flush_wake(struct efx_nic *efx)
 		 && atomic_read(&efx->rxq_flush_pending) > 0));
 }
 
+static bool efx_check_tx_flush_complete(struct efx_nic *efx)
+{
+	bool i = true;
+	efx_oword_t txd_ptr_tbl;
+	struct efx_channel *channel;
+	struct efx_tx_queue *tx_queue;
+
+	efx_for_each_channel(channel, efx) {
+		efx_for_each_channel_tx_queue(tx_queue, channel) {
+			efx_reado_table(efx, &txd_ptr_tbl,
+					FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue);
+			if (EFX_OWORD_FIELD(txd_ptr_tbl,
+					    FRF_AZ_TX_DESCQ_FLUSH) ||
+			    EFX_OWORD_FIELD(txd_ptr_tbl,
+					    FRF_AZ_TX_DESCQ_EN)) {
+				netif_dbg(efx, hw, efx->net_dev,
+					  "flush did not complete on TXQ %d\n",
+					  tx_queue->queue);
+				i = false;
+			} else if (atomic_cmpxchg(&tx_queue->flush_outstanding,
+						  1, 0)) {
+				/* The flush is complete, but we didn't
+				 * receive a flush completion event
+				 */
+				netif_dbg(efx, hw, efx->net_dev,
+					  "flush complete on TXQ %d, so drain "
+					  "the queue\n", tx_queue->queue);
+				/* Don't need to increment drain_pending as it
+				 * has already been incremented for the queues
+				 * which did not drain
+				 */
+				efx_magic_event(channel,
+						EFX_CHANNEL_MAGIC_TX_DRAIN(
							tx_queue));
+			}
+		}
+	}
+
+	return i;
+}
+
 /* Flush all the transmit queues, and continue flushing receive queues until
  * they're all flushed. Wait for the DRAIN events to be recieved so that there
  * are no more RX and TX events left on any channel. */
@@ -680,7 +723,6 @@ int efx_nic_flush_queues(struct efx_nic *efx)
 	struct efx_tx_queue *tx_queue;
 	int rc = 0;
 
-	efx->fc_disable++;
 	efx->type->prepare_flush(efx);
 
 	efx_for_each_channel(channel, efx) {
@@ -730,7 +772,8 @@ int efx_nic_flush_queues(struct efx_nic *efx)
 					     timeout);
 	}
 
-	if (atomic_read(&efx->drain_pending)) {
+	if (atomic_read(&efx->drain_pending) &&
+	    !efx_check_tx_flush_complete(efx)) {
 		netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
 			  "(rx %d+%d)\n", atomic_read(&efx->drain_pending),
 			  atomic_read(&efx->rxq_flush_outstanding),
@@ -742,7 +785,7 @@ int efx_nic_flush_queues(struct efx_nic *efx)
 		atomic_set(&efx->rxq_flush_outstanding, 0);
 	}
 
-	efx->fc_disable--;
+	efx->type->finish_flush(efx);
 
 	return rc;
 }
@@ -766,8 +809,13 @@ void efx_nic_eventq_read_ack(struct efx_channel *channel)
 
 	EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
 			     channel->eventq_read_ptr & channel->eventq_mask);
-	efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base,
-			 channel->channel);
+
+	/* For Falcon A1, EVQ_RPTR_KER is documented as having a step size
+	 * of 4 bytes, but it is really 16 bytes just like later revisions.
+	 */
+	efx_writed(efx, &reg,
+		   efx->type->evq_rptr_tbl_base +
+		   FR_BZ_EVQ_RPTR_STEP * channel->channel);
 }
 
 /* Use HW to insert a SW defined event */
@@ -1017,9 +1065,10 @@ efx_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
 	if (qid < EFX_TXQ_TYPES * efx->n_tx_channels) {
 		tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES,
 					    qid % EFX_TXQ_TYPES);
-
-		efx_magic_event(tx_queue->channel,
-				EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
+		if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
+			efx_magic_event(tx_queue->channel,
+					EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
+		}
 	}
 }
@@ -1565,7 +1614,9 @@ void efx_nic_push_rx_indir_table(struct efx_nic *efx)
 	for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
 		EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
 				     efx->rx_indir_table[i]);
-		efx_writed_table(efx, &dword, FR_BZ_RX_INDIRECTION_TBL, i);
+		efx_writed(efx, &dword,
+			   FR_BZ_RX_INDIRECTION_TBL +
+			   FR_BZ_RX_INDIRECTION_TBL_STEP * i);
 	}
 }
@@ -2029,15 +2080,15 @@ void efx_nic_get_regs(struct efx_nic *efx, void *buf)
 
 		for (i = 0; i < table->rows; i++) {
 			switch (table->step) {
-			case 4: /* 32-bit register or SRAM */
-				efx_readd_table(efx, buf, table->offset, i);
+			case 4: /* 32-bit SRAM */
+				efx_readd(efx, buf, table->offset + 4 * i);
 				break;
 			case 8: /* 64-bit SRAM */
 				efx_sram_readq(efx,
 					       efx->membase + table->offset,
 					       buf, i);
 				break;
-			case 16: /* 128-bit register */
+			case 16: /* 128-bit-readable register */
 				efx_reado_table(efx, buf, table->offset, i);
 				break;
 			case 32: /* 128-bit register, interleaved */
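For readers following the sfc hunks above: the core of the change is the
flush_outstanding flag, set to 1 when the TX flush is issued and cleared by an atomic
compare-and-exchange in whichever path notices completion first, so the drain event is
injected exactly once. Below is a minimal user-space sketch of that handshake; it uses
C11 atomics in place of the kernel's atomic_t, and the type and function names are made
up for illustration, not taken from the driver.

/* Hedged sketch of the flush_outstanding handshake used in the patch: the flag
 * is set before the flush is requested, and exactly one of the two completion
 * paths (event handler vs. register poll) gets to act on it.  The kernel code
 * uses atomic_t and atomic_cmpxchg(); this sketch uses C11 atomics instead.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_tx_queue {
	atomic_int flush_outstanding;	/* 1 while a flush is pending */
};

static void start_flush(struct fake_tx_queue *txq)
{
	atomic_store(&txq->flush_outstanding, 1);
	/* ...the flush request would be written to the hardware here... */
}

/* Returns true if this caller "won" and must emit the drain event;
 * equivalent to atomic_cmpxchg(&flush_outstanding, 1, 0) returning 1.
 */
static bool claim_flush_completion(struct fake_tx_queue *txq)
{
	int expected = 1;

	return atomic_compare_exchange_strong(&txq->flush_outstanding,
					      &expected, 0);
}

int main(void)
{
	struct fake_tx_queue txq = { .flush_outstanding = 0 };

	start_flush(&txq);

	/* Path A: the flush-done event arrives and claims the completion. */
	if (claim_flush_completion(&txq))
		printf("event path drains the queue\n");

	/* Path B: the timeout poll later sees the queue disabled, but the
	 * flag is already 0, so it does not drain the queue a second time.
	 */
	if (!claim_flush_completion(&txq))
		printf("poll path skips the duplicate drain\n");

	return 0;
}

In the driver the two competing paths are efx_handle_tx_flush_done(), which runs when
the flush-completion event actually arrives, and efx_check_tx_flush_complete(), which
reads the TX_DESC_PTR_TBL register after the flush timeout; the cmpxchg ensures only
one of them emits the EFX_CHANNEL_MAGIC_TX_DRAIN event.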