| author | Martin Habets <habetsm.xilinx@gmail.com> | 2022-05-09 17:31:55 +0200 |
|---|---|---|
| committer | Jakub Kicinski <kuba@kernel.org> | 2022-05-11 00:38:14 +0200 |
| commit | 956f2d86cb37dc6dae8174001a668cbc8b9bbd1f | |
| tree | 1d27ce379352ffe65cda8afeecb8f74d51ae01f7 /drivers/net/ethernet/sfc/siena/tx.c | |
| parent | sfc: Copy shared files needed for Siena (part 2) | |
sfc/siena: Remove build references to missing functionality
Functionality not supported or needed on Siena includes:
- Anything for EF100
- EF10 specifics such as register access, PIO and TSO offload.
Also only bind to Siena NICs.
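Binding only to Siena NICs amounts to pruning the driver's PCI ID table down to the Siena parts. A minimal sketch, assuming the SFC9020 (0x0803) and SFL9021 (0x0813) device IDs the sfc driver uses for Siena; this is an illustration, not the literal hunk from this patch:

```c
#include <linux/module.h>
#include <linux/pci.h>

/* Sketch only: Siena-class devices, with all EF10/EF100 entries dropped.
 * The real table lives in the Siena probe code and may name the entries
 * differently.
 */
static const struct pci_device_id siena_pci_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0803) },	/* SFC9020 */
	{ PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0813) },	/* SFL9021 */
	{ 0 }	/* end of list */
};
MODULE_DEVICE_TABLE(pci, siena_pci_table);
```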
Remove EF10 specifics from nic.h.
The functions that start with efx_farch_ will be removed from sfc.ko
in a subsequent patch.
Add the efx_ prefix to siena_prepare_flush() to make it consistent
with the other APIs.
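A minimal before/after sketch of that rename, assuming the function keeps its current signature (the exact declaration lives in the Siena headers):

```c
/* Before: bare name, inconsistent with the rest of the exported API. */
void siena_prepare_flush(struct efx_nic *efx);

/* After: carries the efx_ prefix like the driver's other entry points. */
void efx_siena_prepare_flush(struct efx_nic *efx);
```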
Signed-off-by: Martin Habets <habetsm.xilinx@gmail.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Diffstat (limited to 'drivers/net/ethernet/sfc/siena/tx.c')
-rw-r--r-- | drivers/net/ethernet/sfc/siena/tx.c | 209 |
1 file changed, 5 insertions(+), 204 deletions(-)
```diff
diff --git a/drivers/net/ethernet/sfc/siena/tx.c b/drivers/net/ethernet/sfc/siena/tx.c
index 138bca611341..9e68dc434832 100644
--- a/drivers/net/ethernet/sfc/siena/tx.c
+++ b/drivers/net/ethernet/sfc/siena/tx.c
@@ -22,14 +22,6 @@
 #include "tx.h"
 #include "tx_common.h"
 #include "workarounds.h"
-#include "ef10_regs.h"
-
-#ifdef EFX_USE_PIO
-
-#define EFX_PIOBUF_SIZE_DEF ALIGN(256, L1_CACHE_BYTES)
-unsigned int efx_piobuf_size __read_mostly = EFX_PIOBUF_SIZE_DEF;
-
-#endif /* EFX_USE_PIO */
 
 static inline u8 *efx_tx_get_copy_buffer(struct efx_tx_queue *tx_queue,
 					 struct efx_tx_buffer *buffer)
@@ -123,173 +115,6 @@ static int efx_enqueue_skb_copy(struct efx_tx_queue *tx_queue,
 	return rc;
 }
 
-#ifdef EFX_USE_PIO
-
-struct efx_short_copy_buffer {
-	int used;
-	u8 buf[L1_CACHE_BYTES];
-};
-
-/* Copy to PIO, respecting that writes to PIO buffers must be dword aligned.
- * Advances piobuf pointer. Leaves additional data in the copy buffer.
- */
-static void efx_memcpy_toio_aligned(struct efx_nic *efx, u8 __iomem **piobuf,
-				    u8 *data, int len,
-				    struct efx_short_copy_buffer *copy_buf)
-{
-	int block_len = len & ~(sizeof(copy_buf->buf) - 1);
-
-	__iowrite64_copy(*piobuf, data, block_len >> 3);
-	*piobuf += block_len;
-	len -= block_len;
-
-	if (len) {
-		data += block_len;
-		BUG_ON(copy_buf->used);
-		BUG_ON(len > sizeof(copy_buf->buf));
-		memcpy(copy_buf->buf, data, len);
-		copy_buf->used = len;
-	}
-}
-
-/* Copy to PIO, respecting dword alignment, popping data from copy buffer first.
- * Advances piobuf pointer. Leaves additional data in the copy buffer.
- */
-static void efx_memcpy_toio_aligned_cb(struct efx_nic *efx, u8 __iomem **piobuf,
-				       u8 *data, int len,
-				       struct efx_short_copy_buffer *copy_buf)
-{
-	if (copy_buf->used) {
-		/* if the copy buffer is partially full, fill it up and write */
-		int copy_to_buf =
-			min_t(int, sizeof(copy_buf->buf) - copy_buf->used, len);
-
-		memcpy(copy_buf->buf + copy_buf->used, data, copy_to_buf);
-		copy_buf->used += copy_to_buf;
-
-		/* if we didn't fill it up then we're done for now */
-		if (copy_buf->used < sizeof(copy_buf->buf))
-			return;
-
-		__iowrite64_copy(*piobuf, copy_buf->buf,
-				 sizeof(copy_buf->buf) >> 3);
-		*piobuf += sizeof(copy_buf->buf);
-		data += copy_to_buf;
-		len -= copy_to_buf;
-		copy_buf->used = 0;
-	}
-
-	efx_memcpy_toio_aligned(efx, piobuf, data, len, copy_buf);
-}
-
-static void efx_flush_copy_buffer(struct efx_nic *efx, u8 __iomem *piobuf,
-				  struct efx_short_copy_buffer *copy_buf)
-{
-	/* if there's anything in it, write the whole buffer, including junk */
-	if (copy_buf->used)
-		__iowrite64_copy(piobuf, copy_buf->buf,
-				 sizeof(copy_buf->buf) >> 3);
-}
-
-/* Traverse skb structure and copy fragments in to PIO buffer.
- * Advances piobuf pointer.
- */
-static void efx_skb_copy_bits_to_pio(struct efx_nic *efx, struct sk_buff *skb,
-				     u8 __iomem **piobuf,
-				     struct efx_short_copy_buffer *copy_buf)
-{
-	int i;
-
-	efx_memcpy_toio_aligned(efx, piobuf, skb->data, skb_headlen(skb),
-				copy_buf);
-
-	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
-		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
-		u8 *vaddr;
-
-		vaddr = kmap_atomic(skb_frag_page(f));
-
-		efx_memcpy_toio_aligned_cb(efx, piobuf, vaddr + skb_frag_off(f),
-					   skb_frag_size(f), copy_buf);
-		kunmap_atomic(vaddr);
-	}
-
-	EFX_WARN_ON_ONCE_PARANOID(skb_shinfo(skb)->frag_list);
-}
-
-static int efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue,
-			       struct sk_buff *skb)
-{
-	struct efx_tx_buffer *buffer =
-		efx_tx_queue_get_insert_buffer(tx_queue);
-	u8 __iomem *piobuf = tx_queue->piobuf;
-
-	/* Copy to PIO buffer. Ensure the writes are padded to the end
-	 * of a cache line, as this is required for write-combining to be
-	 * effective on at least x86.
-	 */
-
-	if (skb_shinfo(skb)->nr_frags) {
-		/* The size of the copy buffer will ensure all writes
-		 * are the size of a cache line.
-		 */
-		struct efx_short_copy_buffer copy_buf;
-
-		copy_buf.used = 0;
-
-		efx_skb_copy_bits_to_pio(tx_queue->efx, skb,
-					 &piobuf, &copy_buf);
-		efx_flush_copy_buffer(tx_queue->efx, piobuf, &copy_buf);
-	} else {
-		/* Pad the write to the size of a cache line.
-		 * We can do this because we know the skb_shared_info struct is
-		 * after the source, and the destination buffer is big enough.
-		 */
-		BUILD_BUG_ON(L1_CACHE_BYTES >
-			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
-		__iowrite64_copy(tx_queue->piobuf, skb->data,
-				 ALIGN(skb->len, L1_CACHE_BYTES) >> 3);
-	}
-
-	buffer->skb = skb;
-	buffer->flags = EFX_TX_BUF_SKB | EFX_TX_BUF_OPTION;
-
-	EFX_POPULATE_QWORD_5(buffer->option,
-			     ESF_DZ_TX_DESC_IS_OPT, 1,
-			     ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_PIO,
-			     ESF_DZ_TX_PIO_CONT, 0,
-			     ESF_DZ_TX_PIO_BYTE_CNT, skb->len,
-			     ESF_DZ_TX_PIO_BUF_ADDR,
-			     tx_queue->piobuf_offset);
-	++tx_queue->insert_count;
-	return 0;
-}
-
-/* Decide whether we can use TX PIO, ie. write packet data directly into
- * a buffer on the device. This can reduce latency at the expense of
- * throughput, so we only do this if both hardware and software TX rings
- * are empty, including all queues for the channel. This also ensures that
- * only one packet at a time can be using the PIO buffer. If the xmit_more
- * flag is set then we don't use this - there'll be another packet along
- * shortly and we want to hold off the doorbell.
- */
-static bool efx_tx_may_pio(struct efx_tx_queue *tx_queue)
-{
-	struct efx_channel *channel = tx_queue->channel;
-
-	if (!tx_queue->piobuf)
-		return false;
-
-	EFX_WARN_ON_ONCE_PARANOID(!channel->efx->type->option_descriptors);
-
-	efx_for_each_channel_tx_queue(tx_queue, channel)
-		if (!efx_nic_tx_is_empty(tx_queue, tx_queue->packet_write_count))
-			return false;
-
-	return true;
-}
-#endif /* EFX_USE_PIO */
-
 /* Send any pending traffic for a channel. xmit_more is shared across all
  * queues for a channel, so we must check all of them.
  */
@@ -338,35 +163,11 @@ netdev_tx_t __efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb
 	 * size limit.
 	 */
 	if (segments) {
-		switch (tx_queue->tso_version) {
-		case 1:
-			rc = efx_enqueue_skb_tso(tx_queue, skb, &data_mapped);
-			break;
-		case 2:
-			rc = efx_ef10_tx_tso_desc(tx_queue, skb, &data_mapped);
-			break;
-		case 0: /* No TSO on this queue, SW fallback needed */
-		default:
-			rc = -EINVAL;
-			break;
-		}
-		if (rc == -EINVAL) {
-			rc = efx_tx_tso_fallback(tx_queue, skb);
-			tx_queue->tso_fallbacks++;
-			if (rc == 0)
-				return 0;
-		}
-		if (rc)
-			goto err;
-#ifdef EFX_USE_PIO
-	} else if (skb_len <= efx_piobuf_size && !xmit_more &&
-		   efx_tx_may_pio(tx_queue)) {
-		/* Use PIO for short packets with an empty queue. */
-		if (efx_enqueue_skb_pio(tx_queue, skb))
-			goto err;
-		tx_queue->pio_packets++;
-		data_mapped = true;
-#endif
+		rc = efx_tx_tso_fallback(tx_queue, skb);
+		tx_queue->tso_fallbacks++;
+		if (rc == 0)
+			return 0;
+		goto err;
 	} else if (skb->data_len && skb_len <= EFX_TX_CB_SIZE) {
 		/* Pad short packets or coalesce short fragmented packets. */
 		if (efx_enqueue_skb_copy(tx_queue, skb))
```
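With the EF10 TSO descriptor paths gone, every GSO skb on Siena now goes through efx_tx_tso_fallback() in the final hunk above. A minimal sketch of how such a software fallback is typically structured, using the core skb_gso_segment() API; example_tso_fallback and xmit_one are illustrative names, not the driver's exact code:

```c
#include <linux/err.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Segment a GSO skb in software and enqueue the resulting frames one by
 * one; "xmit_one" stands in for the driver's normal per-skb enqueue path.
 */
static int example_tso_fallback(struct net_device *dev, struct sk_buff *skb,
				int (*xmit_one)(struct net_device *dev,
						struct sk_buff *skb))
{
	struct sk_buff *segments, *next;

	/* Ask the core stack to split the skb into wire-sized frames. */
	segments = skb_gso_segment(skb, dev->features & ~NETIF_F_GSO_MASK);
	if (IS_ERR(segments))
		return PTR_ERR(segments);

	/* The payload now lives in the segment list; drop the original. */
	dev_consume_skb_any(skb);

	skb_list_walk_safe(segments, skb, next) {
		skb_mark_not_on_list(skb);
		xmit_one(dev, skb);
	}
	return 0;
}
```

The driver's own efx_tx_tso_fallback() follows this general pattern, so dropping the EF10 TSO descriptors trades some CPU time for the same on-wire behaviour.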