2008-04-27 18:55:59 +07:00
|
|
|
/****************************************************************************
|
2013-08-30 05:32:48 +07:00
|
|
|
* Driver for Solarflare network controllers and boards
|
2008-04-27 18:55:59 +07:00
|
|
|
* Copyright 2005-2006 Fen Systems Ltd.
|
2013-08-30 05:32:48 +07:00
|
|
|
* Copyright 2005-2013 Solarflare Communications Inc.
|
2008-04-27 18:55:59 +07:00
|
|
|
*
|
|
|
|
* This program is free software; you can redistribute it and/or modify it
|
|
|
|
* under the terms of the GNU General Public License version 2 as published
|
|
|
|
* by the Free Software Foundation, incorporated herein by reference.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/pci.h>
|
|
|
|
#include <linux/tcp.h>
|
|
|
|
#include <linux/ip.h>
|
|
|
|
#include <linux/in.h>
|
2009-11-29 22:16:05 +07:00
|
|
|
#include <linux/ipv6.h>
|
include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit slab.h inclusion from percpu.h
percpu.h is included by sched.h and module.h and thus ends up being
included when building most .c files. percpu.h includes slab.h which
in turn includes gfp.h making everything defined by the two files
universally available and complicating inclusion dependencies.
percpu.h -> slab.h dependency is about to be removed. Prepare for
this change by updating users of gfp and slab facilities include those
headers directly instead of assuming availability. As this conversion
needs to touch large number of source files, the following script is
used as the basis of conversion.
http://userweb.kernel.org/~tj/misc/slabh-sweep.py
The script does the following.
* Scan files for gfp and slab usages and update includes such that
only the necessary includes are there. ie. if only gfp is used,
gfp.h, if slab is used, slab.h.
* When the script inserts a new include, it looks at the include
blocks and try to put the new include such that its order conforms
to its surrounding. It's put in the include block which contains
core kernel includes, in the same order that the rest are ordered -
alphabetical, Christmas tree, rev-Xmas-tree or at the end if there
doesn't seem to be any matching order.
* If the script can't find a place to put a new include (mostly
because the file doesn't have fitting include block), it prints out
an error message indicating which .h file needs to be added to the
file.
The conversion was done in the following steps.
1. The initial automatic conversion of all .c files updated slightly
over 4000 files, deleting around 700 includes and adding ~480 gfp.h
and ~3000 slab.h inclusions. The script emitted errors for ~400
files.
2. Each error was manually checked. Some didn't need the inclusion,
some needed manual addition while adding it to implementation .h or
embedding .c file was more appropriate for others. This step added
inclusions to around 150 files.
3. The script was run again and the output was compared to the edits
from #2 to make sure no file was left behind.
4. Several build tests were done and a couple of problems were fixed.
e.g. lib/decompress_*.c used malloc/free() wrappers around slab
APIs requiring slab.h to be added manually.
5. The script was run on all .h files but without automatically
editing them as sprinkling gfp.h and slab.h inclusions around .h
files could easily lead to inclusion dependency hell. Most gfp.h
inclusion directives were ignored as stuff from gfp.h was usually
wildly available and often used in preprocessor macros. Each
slab.h inclusion directive was examined and added manually as
necessary.
6. percpu.h was updated not to include slab.h.
7. Build test were done on the following configurations and failures
were fixed. CONFIG_GCOV_KERNEL was turned off for all tests (as my
distributed build env didn't work with gcov compiles) and a few
more options had to be turned off depending on archs to make things
build (like ipr on powerpc/64 which failed due to missing writeq).
* x86 and x86_64 UP and SMP allmodconfig and a custom test config.
* powerpc and powerpc64 SMP allmodconfig
* sparc and sparc64 SMP allmodconfig
* ia64 SMP allmodconfig
* s390 SMP allmodconfig
* alpha SMP allmodconfig
* um on x86_64 SMP allmodconfig
8. percpu.h modifications were reverted so that it could be applied as
a separate patch and serve as bisection point.
Given the fact that I had only a couple of failures from tests on step
6, I'm fairly confident about the coverage of this conversion patch.
If there is a breakage, it's likely to be something in one of the arch
headers which should be easily discoverable easily on most builds of
the specific arch.
Signed-off-by: Tejun Heo <tj@kernel.org>
Guess-its-ok-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
2010-03-24 15:04:11 +07:00
|
|
|
#include <linux/slab.h>
|
2009-11-29 22:16:05 +07:00
|
|
|
#include <net/ipv6.h>
|
2008-04-27 18:55:59 +07:00
|
|
|
#include <linux/if_ether.h>
|
|
|
|
#include <linux/highmem.h>
|
2013-06-29 03:47:12 +07:00
|
|
|
#include <linux/cache.h>
|
2008-04-27 18:55:59 +07:00
|
|
|
#include "net_driver.h"
|
|
|
|
#include "efx.h"
|
2013-06-29 03:47:12 +07:00
|
|
|
#include "io.h"
|
2009-11-29 22:12:08 +07:00
|
|
|
#include "nic.h"
|
2016-11-17 17:51:54 +07:00
|
|
|
#include "tx.h"
|
2008-04-27 18:55:59 +07:00
|
|
|
#include "workarounds.h"
|
2013-03-09 04:20:09 +07:00
|
|
|
#include "ef10_regs.h"
|
2008-04-27 18:55:59 +07:00
|
|
|
|
2013-06-29 03:47:12 +07:00
|
|
|
#ifdef EFX_USE_PIO
|
|
|
|
|
|
|
|
#define EFX_PIOBUF_SIZE_DEF ALIGN(256, L1_CACHE_BYTES)
|
|
|
|
unsigned int efx_piobuf_size __read_mostly = EFX_PIOBUF_SIZE_DEF;
|
|
|
|
|
|
|
|
#endif /* EFX_USE_PIO */
|
|
|
|
|
2016-11-17 17:51:54 +07:00
|
|
|
/* Return a pointer into a per-queue copy buffer suitable for the current
 * insert position, and point @buffer's DMA address at the same bytes.
 * Returns NULL if the backing page cannot be allocated.
 */
static inline u8 *efx_tx_get_copy_buffer(struct efx_tx_queue *tx_queue,
					 struct efx_tx_buffer *buffer)
{
	/* Each page of cb_page[] holds several (1 << EFX_TX_CB_ORDER)-byte
	 * copy buffers; derive which page and which in-page offset this
	 * insert index maps to.  NET_IP_ALIGN keeps the IP header aligned.
	 */
	unsigned int index = efx_tx_queue_get_insert_index(tx_queue);
	struct efx_buffer *page_buf =
		&tx_queue->cb_page[index >> (PAGE_SHIFT - EFX_TX_CB_ORDER)];
	unsigned int offset =
		((index << EFX_TX_CB_ORDER) + NET_IP_ALIGN) & (PAGE_SIZE - 1);

	/* Lazily allocate the backing DMA-coherent page on first use.
	 * GFP_ATOMIC because we may be on the hot xmit path.
	 */
	if (unlikely(!page_buf->addr) &&
	    efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
				 GFP_ATOMIC))
		return NULL;

	buffer->dma_addr = page_buf->dma_addr + offset;
	/* Copy buffers are long-lived; nothing to unmap per-packet. */
	buffer->unmap_len = 0;
	return (u8 *)page_buf->addr + offset;
}
|
|
|
|
|
2016-11-17 17:51:54 +07:00
|
|
|
/* Size-checked wrapper around efx_tx_get_copy_buffer(): only hands out
 * a copy buffer when @len fits within one EFX_TX_CB_SIZE slot.
 */
u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue,
				   struct efx_tx_buffer *buffer, size_t len)
{
	if (len <= EFX_TX_CB_SIZE)
		return efx_tx_get_copy_buffer(tx_queue, buffer);

	/* Request does not fit in one copy buffer. */
	return NULL;
}
|
|
|
|
|
2008-09-01 18:47:12 +07:00
|
|
|
/* Release one completed (or unwound) TX buffer: undo its DMA mapping,
 * complete the skb if this buffer carries one, then reset the buffer for
 * reuse.  *pkts_compl / *bytes_compl are incremented on skb completion
 * so the caller can update byte-queue-limit accounting.
 */
static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
			       struct efx_tx_buffer *buffer,
			       unsigned int *pkts_compl,
			       unsigned int *bytes_compl)
{
	if (buffer->unmap_len) {
		struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
		/* dma_offset is the distance from the start of the original
		 * mapping to this buffer's own DMA address.
		 */
		dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset;
		if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
			dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
				       DMA_TO_DEVICE);
		buffer->unmap_len = 0;
	}

	if (buffer->flags & EFX_TX_BUF_SKB) {
		struct sk_buff *skb = (struct sk_buff *)buffer->skb;

		EFX_WARN_ON_PARANOID(!pkts_compl || !bytes_compl);
		(*pkts_compl)++;
		(*bytes_compl) += skb->len;
		/* Deliver a hardware TX timestamp if one was captured on
		 * this queue since the last completion.
		 */
		if (tx_queue->timestamping &&
		    (tx_queue->completed_timestamp_major ||
		     tx_queue->completed_timestamp_minor)) {
			struct skb_shared_hwtstamps hwtstamp;

			hwtstamp.hwtstamp =
				efx_ptp_nic_to_kernel_time(tx_queue);
			skb_tstamp_tx(skb, &hwtstamp);

			/* Consume the stored timestamp. */
			tx_queue->completed_timestamp_major = 0;
			tx_queue->completed_timestamp_minor = 0;
		}
		dev_consume_skb_any((struct sk_buff *)buffer->skb);
		netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
			   "TX queue %d transmission id %x complete\n",
			   tx_queue->queue, tx_queue->read_count);
	}

	/* Mark the slot empty for reuse. */
	buffer->len = 0;
	buffer->flags = 0;
}
|
|
|
|
|
2012-07-30 22:57:44 +07:00
|
|
|
unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
|
|
|
|
{
|
|
|
|
/* Header and payload descriptor for each output segment, plus
|
|
|
|
* one for every input fragment boundary within a segment
|
|
|
|
*/
|
|
|
|
unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;
|
|
|
|
|
sfc: separate out SFC4000 ("Falcon") support into new sfc-falcon driver
Rationale: The differences between Falcon and Siena are in many ways larger
than those between Siena and EF10 (despite Siena being nominally "Falcon-
architecture"); for instance, Falcon has no MCPU, so there is no MCDI.
Removing Falcon support from the sfc driver should simplify the latter,
and avoid the possibility of Falcon support being broken by changes to sfc
(which are rarely if ever tested on Falcon, it being end-of-lifed hardware).
The sfc-falcon driver created in this changeset is essentially a copy of the
sfc driver, but with Siena- and EF10-specific code, including MCDI, removed
and with the "efx_" identifier prefix changed to "ef4_" (for "EFX 4000-
series") to avoid collisions when both drivers are built-in.
This changeset removes Falcon from the sfc driver's PCI ID table; then in
sfc I've removed obvious Falcon-related code: I removed the Falcon NIC
functions, Falcon PHY code, and EFX_REV_FALCON_*, then fixed up everything
that referenced them.
Also, increment minor version of both drivers (to 4.1).
For now, CONFIG_SFC selects CONFIG_SFC_FALCON, so that updating old configs
doesn't cause Falcon support to disappear; but that should be undone at
some point in the future.
Signed-off-by: Edward Cree <ecree@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2016-11-29 01:55:34 +07:00
|
|
|
/* Possibly one more per segment for option descriptors */
|
|
|
|
if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
|
2012-07-30 22:57:44 +07:00
|
|
|
max_descs += EFX_TSO_MAX_SEGS;
|
|
|
|
|
|
|
|
/* Possibly more for PCIe page boundaries within input fragments */
|
|
|
|
if (PAGE_SIZE > EFX_PAGE_SIZE)
|
|
|
|
max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
|
|
|
|
DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));
|
|
|
|
|
|
|
|
return max_descs;
|
|
|
|
}
|
|
|
|
|
2012-05-22 07:27:58 +07:00
|
|
|
/* Stop the core TX queue when the hardware queue pair backing it nears
 * its threshold, then re-check with a fresh read of read_count under a
 * memory barrier so a racing completion cannot leave the queue stopped
 * forever (lost-wakeup avoidance).
 */
static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
{
	/* We need to consider both queues that the net core sees as one */
	struct efx_tx_queue *txq2 = efx_tx_queue_partner(txq1);
	struct efx_nic *efx = txq1->efx;
	unsigned int fill_level;

	/* Cheap check first: use the cached (possibly stale) read counts. */
	fill_level = max(txq1->insert_count - txq1->old_read_count,
			 txq2->insert_count - txq2->old_read_count);
	if (likely(fill_level < efx->txq_stop_thresh))
		return;

	/* We used the stale old_read_count above, which gives us a
	 * pessimistic estimate of the fill level (which may even
	 * validly be >= efx->txq_entries).  Now try again using
	 * read_count (more likely to be a cache miss).
	 *
	 * If we read read_count and then conditionally stop the
	 * queue, it is possible for the completion path to race with
	 * us and complete all outstanding descriptors in the middle,
	 * after which there will be no more completions to wake it.
	 * Therefore we stop the queue first, then read read_count
	 * (with a memory barrier to ensure the ordering), then
	 * restart the queue if the fill level turns out to be low
	 * enough.
	 */
	netif_tx_stop_queue(txq1->core_txq);
	smp_mb();
	txq1->old_read_count = READ_ONCE(txq1->read_count);
	txq2->old_read_count = READ_ONCE(txq2->read_count);

	/* Recompute with the freshly-read counters. */
	fill_level = max(txq1->insert_count - txq1->old_read_count,
			 txq2->insert_count - txq2->old_read_count);
	EFX_WARN_ON_ONCE_PARANOID(fill_level >= efx->txq_entries);
	if (likely(fill_level < efx->txq_stop_thresh)) {
		smp_mb();
		if (likely(!efx->loopback_selftest))
			netif_tx_start_queue(txq1->core_txq);
	}
}
|
|
|
|
|
2016-11-17 17:51:54 +07:00
|
|
|
static int efx_enqueue_skb_copy(struct efx_tx_queue *tx_queue,
|
|
|
|
struct sk_buff *skb)
|
|
|
|
{
|
|
|
|
unsigned int copy_len = skb->len;
|
|
|
|
struct efx_tx_buffer *buffer;
|
|
|
|
u8 *copy_buffer;
|
|
|
|
int rc;
|
|
|
|
|
2016-12-02 22:51:33 +07:00
|
|
|
EFX_WARN_ON_ONCE_PARANOID(copy_len > EFX_TX_CB_SIZE);
|
2016-11-17 17:51:54 +07:00
|
|
|
|
|
|
|
buffer = efx_tx_queue_get_insert_buffer(tx_queue);
|
|
|
|
|
|
|
|
copy_buffer = efx_tx_get_copy_buffer(tx_queue, buffer);
|
|
|
|
if (unlikely(!copy_buffer))
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
rc = skb_copy_bits(skb, 0, copy_buffer, copy_len);
|
|
|
|
EFX_WARN_ON_PARANOID(rc);
|
sfc: separate out SFC4000 ("Falcon") support into new sfc-falcon driver
Rationale: The differences between Falcon and Siena are in many ways larger
than those between Siena and EF10 (despite Siena being nominally "Falcon-
architecture"); for instance, Falcon has no MCPU, so there is no MCDI.
Removing Falcon support from the sfc driver should simplify the latter,
and avoid the possibility of Falcon support being broken by changes to sfc
(which are rarely if ever tested on Falcon, it being end-of-lifed hardware).
The sfc-falcon driver created in this changeset is essentially a copy of the
sfc driver, but with Siena- and EF10-specific code, including MCDI, removed
and with the "efx_" identifier prefix changed to "ef4_" (for "EFX 4000-
series") to avoid collisions when both drivers are built-in.
This changeset removes Falcon from the sfc driver's PCI ID table; then in
sfc I've removed obvious Falcon-related code: I removed the Falcon NIC
functions, Falcon PHY code, and EFX_REV_FALCON_*, then fixed up everything
that referenced them.
Also, increment minor version of both drivers (to 4.1).
For now, CONFIG_SFC selects CONFIG_SFC_FALCON, so that updating old configs
doesn't cause Falcon support to disappear; but that should be undone at
some point in the future.
Signed-off-by: Edward Cree <ecree@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2016-11-29 01:55:34 +07:00
|
|
|
buffer->len = copy_len;
|
2016-11-17 17:51:54 +07:00
|
|
|
|
|
|
|
buffer->skb = skb;
|
|
|
|
buffer->flags = EFX_TX_BUF_SKB;
|
|
|
|
|
|
|
|
++tx_queue->insert_count;
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
2013-09-03 00:24:29 +07:00
|
|
|
#ifdef EFX_USE_PIO
|
|
|
|
|
|
|
|
/* Staging area for sub-cache-line leftovers, so that every write to a
 * PIO buffer can be a whole cache-line-sized burst (needed for
 * write-combining to be effective).
 */
struct efx_short_copy_buffer {
	int used;		/* number of valid bytes in buf[] */
	u8 buf[L1_CACHE_BYTES];
};
|
|
|
|
|
|
|
|
/* Copy to PIO, respecting that writes to PIO buffers must be dword aligned.
 * Advances piobuf pointer. Leaves additional data in the copy buffer.
 */
static void efx_memcpy_toio_aligned(struct efx_nic *efx, u8 __iomem **piobuf,
				    u8 *data, int len,
				    struct efx_short_copy_buffer *copy_buf)
{
	/* Largest prefix of @len that is a whole number of cache lines. */
	int block_len = len & ~(sizeof(copy_buf->buf) - 1);

	__iowrite64_copy(*piobuf, data, block_len >> 3);
	*piobuf += block_len;
	len -= block_len;

	/* Stage any sub-cache-line tail for a later aligned write. */
	if (len) {
		data += block_len;
		/* Caller must have drained the copy buffer already. */
		BUG_ON(copy_buf->used);
		BUG_ON(len > sizeof(copy_buf->buf));
		memcpy(copy_buf->buf, data, len);
		copy_buf->used = len;
	}
}
|
|
|
|
|
|
|
|
/* Copy to PIO, respecting dword alignment, popping data from copy buffer first.
 * Advances piobuf pointer. Leaves additional data in the copy buffer.
 */
static void efx_memcpy_toio_aligned_cb(struct efx_nic *efx, u8 __iomem **piobuf,
				       u8 *data, int len,
				       struct efx_short_copy_buffer *copy_buf)
{
	if (copy_buf->used) {
		/* if the copy buffer is partially full, fill it up and write */
		int copy_to_buf =
			min_t(int, sizeof(copy_buf->buf) - copy_buf->used, len);

		memcpy(copy_buf->buf + copy_buf->used, data, copy_to_buf);
		copy_buf->used += copy_to_buf;

		/* if we didn't fill it up then we're done for now */
		if (copy_buf->used < sizeof(copy_buf->buf))
			return;

		/* Buffer is now exactly one cache line: flush it to PIO. */
		__iowrite64_copy(*piobuf, copy_buf->buf,
				 sizeof(copy_buf->buf) >> 3);
		*piobuf += sizeof(copy_buf->buf);
		data += copy_to_buf;
		len -= copy_to_buf;
		copy_buf->used = 0;
	}

	/* Write the remainder, staging any new sub-line tail. */
	efx_memcpy_toio_aligned(efx, piobuf, data, len, copy_buf);
}
|
|
|
|
|
|
|
|
/* Flush any bytes remaining in the staging buffer out to PIO. */
static void efx_flush_copy_buffer(struct efx_nic *efx, u8 __iomem *piobuf,
				  struct efx_short_copy_buffer *copy_buf)
{
	/* Nothing staged: no write required. */
	if (!copy_buf->used)
		return;

	/* Write the whole buffer, including any trailing junk, so the
	 * burst stays cache-line sized.
	 */
	__iowrite64_copy(piobuf, copy_buf->buf,
			 sizeof(copy_buf->buf) >> 3);
}
|
|
|
|
|
|
|
|
/* Traverse skb structure and copy fragments in to PIO buffer.
 * Advances piobuf pointer.
 */
static void efx_skb_copy_bits_to_pio(struct efx_nic *efx, struct sk_buff *skb,
				     u8 __iomem **piobuf,
				     struct efx_short_copy_buffer *copy_buf)
{
	int i;

	/* Linear header area first... */
	efx_memcpy_toio_aligned(efx, piobuf, skb->data, skb_headlen(skb),
				copy_buf);

	/* ...then each paged fragment, temporarily mapped. */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
		u8 *vaddr;

		vaddr = kmap_atomic(skb_frag_page(f));

		efx_memcpy_toio_aligned_cb(efx, piobuf, vaddr + f->page_offset,
					   skb_frag_size(f), copy_buf);
		kunmap_atomic(vaddr);
	}

	/* frag_list skbs are never sent via PIO. */
	EFX_WARN_ON_ONCE_PARANOID(skb_shinfo(skb)->frag_list);
}
|
|
|
|
|
2016-11-17 17:51:54 +07:00
|
|
|
/* Transmit a whole packet by programmed I/O: copy its contents into the
 * queue's PIO buffer, then post a single PIO option descriptor referring
 * to that buffer.  Always returns 0; the caller checked the size limit.
 */
static int efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb)
{
	struct efx_tx_buffer *buffer =
		efx_tx_queue_get_insert_buffer(tx_queue);
	u8 __iomem *piobuf = tx_queue->piobuf;

	/* Copy to PIO buffer. Ensure the writes are padded to the end
	 * of a cache line, as this is required for write-combining to be
	 * effective on at least x86.
	 */

	if (skb_shinfo(skb)->nr_frags) {
		/* The size of the copy buffer will ensure all writes
		 * are the size of a cache line.
		 */
		struct efx_short_copy_buffer copy_buf;

		copy_buf.used = 0;

		efx_skb_copy_bits_to_pio(tx_queue->efx, skb,
					 &piobuf, &copy_buf);
		efx_flush_copy_buffer(tx_queue->efx, piobuf, &copy_buf);
	} else {
		/* Pad the write to the size of a cache line.
		 * We can do this because we know the skb_shared_info struct is
		 * after the source, and the destination buffer is big enough.
		 */
		BUILD_BUG_ON(L1_CACHE_BYTES >
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
		__iowrite64_copy(tx_queue->piobuf, skb->data,
				 ALIGN(skb->len, L1_CACHE_BYTES) >> 3);
	}

	/* Tag the buffer so the completion path knows it carries an skb
	 * and an option descriptor rather than a DMA mapping.
	 */
	buffer->skb = skb;
	buffer->flags = EFX_TX_BUF_SKB | EFX_TX_BUF_OPTION;

	EFX_POPULATE_QWORD_5(buffer->option,
			     ESF_DZ_TX_DESC_IS_OPT, 1,
			     ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_PIO,
			     ESF_DZ_TX_PIO_CONT, 0,
			     ESF_DZ_TX_PIO_BYTE_CNT, skb->len,
			     ESF_DZ_TX_PIO_BUF_ADDR,
			     tx_queue->piobuf_offset);
	++tx_queue->insert_count;
	return 0;
}
|
|
|
|
#endif /* EFX_USE_PIO */
|
|
|
|
|
2016-11-17 17:51:54 +07:00
|
|
|
/* Create as many descriptors as needed to cover @len bytes starting at
 * @dma_addr, honouring the NIC's per-descriptor DMA length limit.
 * Returns the final buffer of the run, which the caller tags with
 * skb/unmap information.  The do/while posts at least one descriptor.
 */
static struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue,
					      dma_addr_t dma_addr,
					      size_t len)
{
	const struct efx_nic_type *nic_type = tx_queue->efx->type;
	struct efx_tx_buffer *buffer;
	unsigned int dma_len;

	/* Map the fragment taking account of NIC-dependent DMA limits. */
	do {
		buffer = efx_tx_queue_get_insert_buffer(tx_queue);
		/* tx_limit_len clamps len to the NIC's maximum descriptor */
		dma_len = nic_type->tx_limit_len(tx_queue, dma_addr, len);

		buffer->len = dma_len;
		buffer->dma_addr = dma_addr;
		buffer->flags = EFX_TX_BUF_CONT;
		len -= dma_len;
		dma_addr += dma_len;
		++tx_queue->insert_count;
	} while (len);

	return buffer;
}
|
2009-03-20 20:25:39 +07:00
|
|
|
|
2016-11-17 17:51:54 +07:00
|
|
|
/* Map all data from an SKB for DMA and create descriptors on the queue.
 * For TSO (segment_count != 0) the protocol headers get descriptors of
 * their own.  Returns 0 on success, -EIO on DMA mapping failure, in
 * which case the caller must unwind with efx_enqueue_unwind().
 */
static int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
			   unsigned int segment_count)
{
	struct efx_nic *efx = tx_queue->efx;
	struct device *dma_dev = &efx->pci_dev->dev;
	unsigned int frag_index, nr_frags;
	dma_addr_t dma_addr, unmap_addr;
	unsigned short dma_flags;
	size_t len, unmap_len;

	nr_frags = skb_shinfo(skb)->nr_frags;
	frag_index = 0;

	/* Map header data. */
	len = skb_headlen(skb);
	dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);
	dma_flags = EFX_TX_BUF_MAP_SINGLE;
	unmap_len = len;
	unmap_addr = dma_addr;

	if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
		return -EIO;

	if (segment_count) {
		/* For TSO we need to put the header in to a separate
		 * descriptor. Map this separately if necessary.
		 */
		size_t header_len = skb_transport_header(skb) - skb->data +
				(tcp_hdr(skb)->doff << 2u);

		if (header_len != len) {
			tx_queue->tso_long_headers++;
			efx_tx_map_chunk(tx_queue, dma_addr, header_len);
			len -= header_len;
			dma_addr += header_len;
		}
	}

	/* Add descriptors for each fragment. */
	do {
		struct efx_tx_buffer *buffer;
		skb_frag_t *fragment;

		buffer = efx_tx_map_chunk(tx_queue, dma_addr, len);

		/* The final descriptor for a fragment is responsible for
		 * unmapping the whole fragment.
		 */
		buffer->flags = EFX_TX_BUF_CONT | dma_flags;
		buffer->unmap_len = unmap_len;
		buffer->dma_offset = buffer->dma_addr - unmap_addr;

		if (frag_index >= nr_frags) {
			/* Store SKB details with the final buffer for
			 * the completion.
			 */
			buffer->skb = skb;
			buffer->flags = EFX_TX_BUF_SKB | dma_flags;
			return 0;
		}

		/* Move on to the next fragment. */
		fragment = &skb_shinfo(skb)->frags[frag_index++];
		len = skb_frag_size(fragment);
		dma_addr = skb_frag_dma_map(dma_dev, fragment,
					    0, len, DMA_TO_DEVICE);
		/* Fragments are page-mapped, not single-mapped. */
		dma_flags = 0;
		unmap_len = len;
		unmap_addr = dma_addr;

		if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
			return -EIO;
	} while (1);
}
|
|
|
|
|
2018-05-24 16:14:00 +07:00
|
|
|
/* Remove buffers put into a tx_queue for the current packet.
|
|
|
|
* None of the buffers must have an skb attached.
|
2016-11-17 17:51:54 +07:00
|
|
|
*/
|
2018-05-24 16:14:00 +07:00
|
|
|
static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
|
|
|
|
unsigned int insert_count)
|
2016-11-17 17:51:54 +07:00
|
|
|
{
|
|
|
|
struct efx_tx_buffer *buffer;
|
2017-12-08 00:18:58 +07:00
|
|
|
unsigned int bytes_compl = 0;
|
|
|
|
unsigned int pkts_compl = 0;
|
2016-11-17 17:51:54 +07:00
|
|
|
|
|
|
|
/* Work backwards until we hit the original insert pointer value */
|
2018-05-24 16:14:00 +07:00
|
|
|
while (tx_queue->insert_count != insert_count) {
|
2016-11-17 17:51:54 +07:00
|
|
|
--tx_queue->insert_count;
|
|
|
|
buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
|
2017-12-08 00:18:58 +07:00
|
|
|
efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
|
2008-04-27 18:55:59 +07:00
|
|
|
}
|
2016-11-17 17:51:54 +07:00
|
|
|
}
|
|
|
|
|
2016-11-17 17:52:36 +07:00
|
|
|
/*
|
|
|
|
* Fallback to software TSO.
|
|
|
|
*
|
|
|
|
* This is used if we are unable to send a GSO packet through hardware TSO.
|
|
|
|
* This should only ever happen due to per-queue restrictions - unsupported
|
|
|
|
* packets should first be filtered by the feature flags.
|
|
|
|
*
|
|
|
|
* Returns 0 on success, error code otherwise.
|
|
|
|
*/
|
|
|
|
static int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue,
|
|
|
|
struct sk_buff *skb)
|
2016-11-17 17:51:54 +07:00
|
|
|
{
|
2016-11-17 17:52:36 +07:00
|
|
|
struct sk_buff *segments, *next;
|
|
|
|
|
|
|
|
segments = skb_gso_segment(skb, 0);
|
|
|
|
if (IS_ERR(segments))
|
|
|
|
return PTR_ERR(segments);
|
|
|
|
|
2019-02-14 13:42:13 +07:00
|
|
|
dev_consume_skb_any(skb);
|
2016-11-17 17:52:36 +07:00
|
|
|
skb = segments;
|
|
|
|
|
|
|
|
while (skb) {
|
|
|
|
next = skb->next;
|
|
|
|
skb->next = NULL;
|
|
|
|
|
|
|
|
efx_enqueue_skb(tx_queue, skb);
|
|
|
|
skb = next;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
2016-11-17 17:51:54 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Add a socket buffer to a TX queue
|
|
|
|
*
|
|
|
|
* This maps all fragments of a socket buffer for DMA and adds them to
|
|
|
|
* the TX queue. The queue's insert pointer will be incremented by
|
|
|
|
* the number of fragments in the socket buffer.
|
|
|
|
*
|
|
|
|
* If any DMA mapping fails, any mapped fragments will be unmapped,
|
|
|
|
* the queue's insert pointer will be restored to its original value.
|
|
|
|
*
|
|
|
|
* This function is split out from efx_hard_start_xmit to allow the
|
|
|
|
* loopback test to direct packets via specific TX queues.
|
|
|
|
*
|
|
|
|
* Returns NETDEV_TX_OK.
|
|
|
|
* You must hold netif_tx_lock() to call this function.
|
|
|
|
*/
|
|
|
|
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
	/* Snapshot of the insert pointer, so a failed enqueue can unwind
	 * every descriptor this call added.
	 */
	unsigned int old_insert_count = tx_queue->insert_count;
	bool xmit_more = netdev_xmit_more();
	bool data_mapped = false;
	unsigned int segments;
	unsigned int skb_len;
	int rc;

	skb_len = skb->len;
	segments = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 0;
	if (segments == 1)
		segments = 0; /* Don't use TSO for a single segment. */

	/* Handle TSO first - it's *possible* (although unlikely) that we might
	 * be passed a packet to segment that's smaller than the copybreak/PIO
	 * size limit.
	 */
	if (segments) {
		EFX_WARN_ON_ONCE_PARANOID(!tx_queue->handle_tso);
		rc = tx_queue->handle_tso(tx_queue, skb, &data_mapped);
		if (rc == -EINVAL) {
			/* NIC-specific TSO handler rejected this skb; fall
			 * back to software GSO (which consumes the skb on
			 * success).
			 */
			rc = efx_tx_tso_fallback(tx_queue, skb);
			tx_queue->tso_fallbacks++;
			if (rc == 0)
				return 0;
		}
		if (rc)
			goto err;
#ifdef EFX_USE_PIO
	} else if (skb_len <= efx_piobuf_size && !xmit_more &&
		   efx_nic_may_tx_pio(tx_queue)) {
		/* Use PIO for short packets with an empty queue. */
		if (efx_enqueue_skb_pio(tx_queue, skb))
			goto err;
		tx_queue->pio_packets++;
		data_mapped = true;
#endif
	} else if (skb->data_len && skb_len <= EFX_TX_CB_SIZE) {
		/* Pad short packets or coalesce short fragmented packets. */
		if (efx_enqueue_skb_copy(tx_queue, skb))
			goto err;
		tx_queue->cb_packets++;
		data_mapped = true;
	}

	/* Map for DMA and create descriptors if we haven't done so already. */
	if (!data_mapped && (efx_tx_map_data(tx_queue, skb, segments)))
		goto err;

	efx_tx_maybe_stop_queue(tx_queue);

	/* Pass off to hardware */
	if (__netdev_tx_sent_queue(tx_queue->core_txq, skb_len, xmit_more)) {
		struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);

		/* There could be packets left on the partner queue if
		 * xmit_more was set. If we do not push those they
		 * could be left for a long time and cause a netdev watchdog.
		 */
		if (txq2->xmit_more_available)
			efx_nic_push_buffers(txq2);

		efx_nic_push_buffers(tx_queue);
	} else {
		/* BQL says more packets are coming; defer the doorbell and
		 * remember that this queue has unpushed descriptors.
		 */
		tx_queue->xmit_more_available = xmit_more;
	}

	if (segments) {
		tx_queue->tso_bursts++;
		tx_queue->tso_packets += segments;
		tx_queue->tx_packets += segments;
	} else {
		tx_queue->tx_packets++;
	}

	return NETDEV_TX_OK;


err:
	/* Roll back any descriptors added by this call and drop the skb. */
	efx_enqueue_unwind(tx_queue, old_insert_count);
	dev_kfree_skb_any(skb);

	/* If we're not expecting another transmit and we had something to push
	 * on this queue or a partner queue then we need to push here to get the
	 * previous packets out.
	 */
	if (!xmit_more) {
		struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);

		if (txq2->xmit_more_available)
			efx_nic_push_buffers(txq2);

		efx_nic_push_buffers(tx_queue);
	}

	return NETDEV_TX_OK;
}
|
|
|
|
|
|
|
|
/* Remove packets from the TX queue
|
|
|
|
*
|
|
|
|
* This removes packets from the TX queue, up to and including the
|
|
|
|
* specified index.
|
|
|
|
*/
|
2008-09-01 18:47:12 +07:00
|
|
|
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
|
2011-11-28 23:33:43 +07:00
|
|
|
unsigned int index,
|
|
|
|
unsigned int *pkts_compl,
|
|
|
|
unsigned int *bytes_compl)
|
2008-04-27 18:55:59 +07:00
|
|
|
{
|
|
|
|
struct efx_nic *efx = tx_queue->efx;
|
|
|
|
unsigned int stop_index, read_ptr;
|
|
|
|
|
2010-09-10 13:42:22 +07:00
|
|
|
stop_index = (index + 1) & tx_queue->ptr_mask;
|
|
|
|
read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
|
2008-04-27 18:55:59 +07:00
|
|
|
|
|
|
|
while (read_ptr != stop_index) {
|
|
|
|
struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
|
2013-01-09 06:43:19 +07:00
|
|
|
|
|
|
|
if (!(buffer->flags & EFX_TX_BUF_OPTION) &&
|
|
|
|
unlikely(buffer->len == 0)) {
|
2010-06-23 18:30:07 +07:00
|
|
|
netif_err(efx, tx_err, efx->net_dev,
|
|
|
|
"TX queue %d spurious TX completion id %x\n",
|
|
|
|
tx_queue->queue, read_ptr);
|
2008-04-27 18:55:59 +07:00
|
|
|
efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2011-11-28 23:33:43 +07:00
|
|
|
efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);
|
2008-04-27 18:55:59 +07:00
|
|
|
|
|
|
|
++tx_queue->read_count;
|
2010-09-10 13:42:22 +07:00
|
|
|
read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
|
2008-04-27 18:55:59 +07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Initiate a packet transmission. We use one channel per CPU
|
|
|
|
* (sharing when we have more CPUs than channels). On Falcon, the TX
|
|
|
|
* completion events will be directed back to the CPU that transmitted
|
|
|
|
* the packet, which should be cache-efficient.
|
|
|
|
*
|
|
|
|
* Context: non-blocking.
|
|
|
|
* Note that returning anything other than NETDEV_TX_OK will cause the
|
|
|
|
* OS to free the skb.
|
|
|
|
*/
|
2009-09-01 02:50:58 +07:00
|
|
|
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
|
2012-02-17 07:10:45 +07:00
|
|
|
struct net_device *net_dev)
|
2008-04-27 18:55:59 +07:00
|
|
|
{
|
2008-09-01 18:43:14 +07:00
|
|
|
struct efx_nic *efx = netdev_priv(net_dev);
|
2008-09-01 18:44:59 +07:00
|
|
|
struct efx_tx_queue *tx_queue;
|
2011-01-11 04:18:20 +07:00
|
|
|
unsigned index, type;
|
2008-09-01 18:44:59 +07:00
|
|
|
|
2011-05-17 00:51:24 +07:00
|
|
|
EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));
|
2009-03-04 16:52:37 +07:00
|
|
|
|
2012-09-03 17:09:36 +07:00
|
|
|
/* PTP "event" packet */
|
|
|
|
if (unlikely(efx_xmit_with_hwtstamp(skb)) &&
|
|
|
|
unlikely(efx_ptp_is_ptp_tx(efx, skb))) {
|
|
|
|
return efx_ptp_tx(efx, skb);
|
|
|
|
}
|
|
|
|
|
2011-01-11 04:18:20 +07:00
|
|
|
index = skb_get_queue_mapping(skb);
|
|
|
|
type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
|
|
|
|
if (index >= efx->n_tx_channels) {
|
|
|
|
index -= efx->n_tx_channels;
|
|
|
|
type |= EFX_TXQ_TYPE_HIGHPRI;
|
|
|
|
}
|
|
|
|
tx_queue = efx_get_tx_queue(efx, index, type);
|
2008-09-01 18:44:59 +07:00
|
|
|
|
2009-11-23 23:07:05 +07:00
|
|
|
return efx_enqueue_skb(tx_queue, skb);
|
2008-04-27 18:55:59 +07:00
|
|
|
}
|
|
|
|
|
2011-01-13 01:39:40 +07:00
|
|
|
void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
|
|
|
|
{
|
2011-01-11 04:18:20 +07:00
|
|
|
struct efx_nic *efx = tx_queue->efx;
|
|
|
|
|
2011-01-13 01:39:40 +07:00
|
|
|
/* Must be inverse of queue lookup in efx_hard_start_xmit() */
|
2011-01-11 04:18:20 +07:00
|
|
|
tx_queue->core_txq =
|
|
|
|
netdev_get_tx_queue(efx->net_dev,
|
|
|
|
tx_queue->queue / EFX_TXQ_TYPES +
|
|
|
|
((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
|
|
|
|
efx->n_tx_channels : 0));
|
|
|
|
}
|
|
|
|
|
2017-08-07 15:15:17 +07:00
|
|
|
/* ndo_setup_tc handler: configure mqprio traffic-class offload by mapping
 * each TC onto a bank of n_tx_channels high-priority hardware queues.
 */
int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
		 void *type_data)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct tc_mqprio_qopt *mqprio = type_data;
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	unsigned tc, num_tc;
	int rc;

	if (type != TC_SETUP_QDISC_MQPRIO)
		return -EOPNOTSUPP;

	num_tc = mqprio->num_tc;

	if (num_tc > EFX_MAX_TX_TC)
		return -EINVAL;

	/* Tell the TC core the offload is handled in hardware. */
	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;

	if (num_tc == net_dev->num_tc)
		return 0;

	/* Each TC gets a contiguous range of n_tx_channels core queues. */
	for (tc = 0; tc < num_tc; tc++) {
		net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
		net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
	}

	if (num_tc > net_dev->num_tc) {
		/* Initialise high-priority queues as necessary */
		efx_for_each_channel(channel, efx) {
			efx_for_each_possible_channel_tx_queue(tx_queue,
							       channel) {
				if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI))
					continue;
				if (!tx_queue->buffer) {
					rc = efx_probe_tx_queue(tx_queue);
					if (rc)
						return rc;
				}
				if (!tx_queue->initialised)
					efx_init_tx_queue(tx_queue);
				efx_init_tx_queue_core_txq(tx_queue);
			}
		}
	} else {
		/* Reduce number of classes before number of queues */
		net_dev->num_tc = num_tc;
	}

	rc = netif_set_real_num_tx_queues(net_dev,
					  max_t(int, num_tc, 1) *
					  efx->n_tx_channels);
	if (rc)
		return rc;

	/* Do not destroy high-priority queues when they become
	 * unused. We would have to flush them first, and it is
	 * fairly difficult to flush a subset of TX queues. Leave
	 * it to efx_fini_channels().
	 */

	/* When growing, num_tc is only raised after the queues exist. */
	net_dev->num_tc = num_tc;
	return 0;
}
|
|
|
|
|
2008-04-27 18:55:59 +07:00
|
|
|
/* TX completion handler: release descriptors up to and including @index,
 * wake the core netif queue if it has drained below the threshold, and
 * record whether the hardware queue is now empty (enables TX push).
 */
void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
	unsigned fill_level;
	struct efx_nic *efx = tx_queue->efx;
	struct efx_tx_queue *txq2;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	EFX_WARN_ON_ONCE_PARANOID(index > tx_queue->ptr_mask);

	efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
	tx_queue->pkts_compl += pkts_compl;
	tx_queue->bytes_compl += bytes_compl;

	/* Completing more than one packet per event means the hardware
	 * merged completions.
	 */
	if (pkts_compl > 1)
		++tx_queue->merge_events;

	/* See if we need to restart the netif queue. This memory
	 * barrier ensures that we write read_count (inside
	 * efx_dequeue_buffers()) before reading the queue status.
	 */
	smp_mb();
	if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
	    likely(efx->port_enabled) &&
	    likely(netif_device_present(efx->net_dev))) {
		/* Fill level is the worse of this queue and its partner,
		 * since both map to the same core TX queue.
		 */
		txq2 = efx_tx_queue_partner(tx_queue);
		fill_level = max(tx_queue->insert_count - tx_queue->read_count,
				 txq2->insert_count - txq2->read_count);
		if (fill_level <= efx->txq_wake_thresh)
			netif_tx_wake_queue(tx_queue->core_txq);
	}

	/* Check whether the hardware queue is now empty */
	if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
		tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
		if (tx_queue->read_count == tx_queue->old_write_count) {
			/* Barrier pairs with the transmit path's read of
			 * empty_read_count; see the TX push protocol.
			 */
			smp_mb();
			tx_queue->empty_read_count =
				tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
		}
	}
}
|
|
|
|
|
2016-11-17 17:51:54 +07:00
|
|
|
static unsigned int efx_tx_cb_page_count(struct efx_tx_queue *tx_queue)
|
2012-05-18 00:40:54 +07:00
|
|
|
{
|
2016-11-17 17:51:54 +07:00
|
|
|
return DIV_ROUND_UP(tx_queue->ptr_mask + 1, PAGE_SIZE >> EFX_TX_CB_ORDER);
|
2012-05-18 00:40:54 +07:00
|
|
|
}
|
|
|
|
|
2008-04-27 18:55:59 +07:00
|
|
|
int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
|
|
|
|
{
|
|
|
|
struct efx_nic *efx = tx_queue->efx;
|
2010-09-10 13:42:22 +07:00
|
|
|
unsigned int entries;
|
2012-05-18 02:52:20 +07:00
|
|
|
int rc;
|
2008-04-27 18:55:59 +07:00
|
|
|
|
2010-09-10 13:42:22 +07:00
|
|
|
/* Create the smallest power-of-two aligned ring */
|
|
|
|
entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
|
2016-12-02 22:51:33 +07:00
|
|
|
EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
|
2010-09-10 13:42:22 +07:00
|
|
|
tx_queue->ptr_mask = entries - 1;
|
|
|
|
|
|
|
|
netif_dbg(efx, probe, efx->net_dev,
|
|
|
|
"creating TX queue %d size %#x mask %#x\n",
|
|
|
|
tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);
|
2008-04-27 18:55:59 +07:00
|
|
|
|
|
|
|
/* Allocate software ring */
|
2011-12-02 19:36:13 +07:00
|
|
|
tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
|
2010-09-10 13:42:22 +07:00
|
|
|
GFP_KERNEL);
|
2008-09-01 18:44:59 +07:00
|
|
|
if (!tx_queue->buffer)
|
|
|
|
return -ENOMEM;
|
2008-04-27 18:55:59 +07:00
|
|
|
|
2016-11-17 17:51:54 +07:00
|
|
|
tx_queue->cb_page = kcalloc(efx_tx_cb_page_count(tx_queue),
|
|
|
|
sizeof(tx_queue->cb_page[0]), GFP_KERNEL);
|
|
|
|
if (!tx_queue->cb_page) {
|
|
|
|
rc = -ENOMEM;
|
|
|
|
goto fail1;
|
2012-05-18 00:40:54 +07:00
|
|
|
}
|
|
|
|
|
2008-04-27 18:55:59 +07:00
|
|
|
/* Allocate hardware ring */
|
2009-11-29 10:43:56 +07:00
|
|
|
rc = efx_nic_probe_tx(tx_queue);
|
2008-04-27 18:55:59 +07:00
|
|
|
if (rc)
|
2012-05-18 00:40:54 +07:00
|
|
|
goto fail2;
|
2008-04-27 18:55:59 +07:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
2012-05-18 00:40:54 +07:00
|
|
|
fail2:
|
2016-11-17 17:51:54 +07:00
|
|
|
kfree(tx_queue->cb_page);
|
|
|
|
tx_queue->cb_page = NULL;
|
2012-05-18 00:40:54 +07:00
|
|
|
fail1:
|
2008-04-27 18:55:59 +07:00
|
|
|
kfree(tx_queue->buffer);
|
|
|
|
tx_queue->buffer = NULL;
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
2008-09-01 18:48:46 +07:00
|
|
|
void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
|
2008-04-27 18:55:59 +07:00
|
|
|
{
|
2016-11-17 17:51:54 +07:00
|
|
|
struct efx_nic *efx = tx_queue->efx;
|
|
|
|
|
|
|
|
netif_dbg(efx, drv, efx->net_dev,
|
2010-06-23 18:30:07 +07:00
|
|
|
"initialising TX queue %d\n", tx_queue->queue);
|
2008-04-27 18:55:59 +07:00
|
|
|
|
|
|
|
tx_queue->insert_count = 0;
|
|
|
|
tx_queue->write_count = 0;
|
2017-01-14 04:20:14 +07:00
|
|
|
tx_queue->packet_write_count = 0;
|
sfc: Use TX push whenever adding descriptors to an empty queue
Whenever we add DMA descriptors to a TX ring and update the ring
pointer, the TX DMA engine must first read the new DMA descriptors and
then start reading packet data. However, all released Solarflare 10G
controllers have a 'TX push' feature that allows us to reduce latency
by writing the first new DMA descriptor along with the pointer update.
This is only useful when the queue is empty. The hardware should
ignore the pushed descriptor if the queue is not empty, but this check
is buggy, so we must do it in software.
In order to tell whether a TX queue is empty, we need to compare the
previous transmission count (write_count) and completion count
(read_count). However, if we do that every time we update the ring
pointer then read_count may ping-pong between the caches of two CPUs
running the transmission and completion paths for the queue.
Therefore, we split the check for an empty queue between the
completion path and the transmission path:
- Add an empty_read_count field representing a point at which the
completion path saw the TX queue as empty.
- Add an old_write_count field for use on the completion path.
- On the completion path, whenever read_count reaches or passes
old_write_count the TX queue may be empty. We then read
write_count, set empty_read_count if read_count == write_count,
and update old_write_count.
- On the transmission path, we read empty_read_count. If it's set, we
compare it with the value of write_count before the current set of
descriptors was added. If they match, the queue really is empty and
we can use TX push.
Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
2010-11-16 06:53:11 +07:00
|
|
|
tx_queue->old_write_count = 0;
|
2008-04-27 18:55:59 +07:00
|
|
|
tx_queue->read_count = 0;
|
|
|
|
tx_queue->old_read_count = 0;
|
sfc: Use TX push whenever adding descriptors to an empty queue
Whenever we add DMA descriptors to a TX ring and update the ring
pointer, the TX DMA engine must first read the new DMA descriptors and
then start reading packet data. However, all released Solarflare 10G
controllers have a 'TX push' feature that allows us to reduce latency
by writing the first new DMA descriptor along with the pointer update.
This is only useful when the queue is empty. The hardware should
ignore the pushed descriptor if the queue is not empty, but this check
is buggy, so we must do it in software.
In order to tell whether a TX queue is empty, we need to compare the
previous transmission count (write_count) and completion count
(read_count). However, if we do that every time we update the ring
pointer then read_count may ping-pong between the caches of two CPUs
running the transmission and completion paths for the queue.
Therefore, we split the check for an empty queue between the
completion path and the transmission path:
- Add an empty_read_count field representing a point at which the
completion path saw the TX queue as empty.
- Add an old_write_count field for use on the completion path.
- On the completion path, whenever read_count reaches or passes
old_write_count the TX queue may be empty. We then read
write_count, set empty_read_count if read_count == write_count,
and update old_write_count.
- On the transmission path, we read empty_read_count. If it's set, we
compare it with the value of write_count before the current set of
descriptors was added. If they match, the queue really is empty and
we can use TX push.
Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
2010-11-16 06:53:11 +07:00
|
|
|
tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
|
2015-11-02 19:51:31 +07:00
|
|
|
tx_queue->xmit_more_available = false;
|
2018-01-26 00:26:06 +07:00
|
|
|
tx_queue->timestamping = (efx_ptp_use_mac_tx_timestamps(efx) &&
|
|
|
|
tx_queue->channel == efx_ptp_channel(efx));
|
2018-01-26 00:24:43 +07:00
|
|
|
tx_queue->completed_desc_ptr = tx_queue->ptr_mask;
|
|
|
|
tx_queue->completed_timestamp_major = 0;
|
|
|
|
tx_queue->completed_timestamp_minor = 0;
|
2008-04-27 18:55:59 +07:00
|
|
|
|
2016-11-17 17:51:54 +07:00
|
|
|
/* Set up default function pointers. These may get replaced by
|
|
|
|
* efx_nic_init_tx() based off NIC/queue capabilities.
|
|
|
|
*/
|
2016-11-17 17:52:36 +07:00
|
|
|
tx_queue->handle_tso = efx_enqueue_skb_tso;
|
2016-11-17 17:51:54 +07:00
|
|
|
|
2008-04-27 18:55:59 +07:00
|
|
|
/* Set up TX descriptor ring */
|
2009-11-29 10:43:56 +07:00
|
|
|
efx_nic_init_tx(tx_queue);
|
2011-01-11 04:18:20 +07:00
|
|
|
|
|
|
|
tx_queue->initialised = true;
|
2008-04-27 18:55:59 +07:00
|
|
|
}
|
|
|
|
|
2013-05-27 22:52:54 +07:00
|
|
|
void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
|
2008-04-27 18:55:59 +07:00
|
|
|
{
|
|
|
|
struct efx_tx_buffer *buffer;
|
|
|
|
|
2013-05-27 22:52:54 +07:00
|
|
|
netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
|
|
|
|
"shutting down TX queue %d\n", tx_queue->queue);
|
|
|
|
|
2008-04-27 18:55:59 +07:00
|
|
|
if (!tx_queue->buffer)
|
|
|
|
return;
|
|
|
|
|
|
|
|
/* Free any buffers left in the ring */
|
|
|
|
while (tx_queue->read_count != tx_queue->write_count) {
|
2011-11-28 23:33:43 +07:00
|
|
|
unsigned int pkts_compl = 0, bytes_compl = 0;
|
2010-09-10 13:42:22 +07:00
|
|
|
buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
|
2011-11-28 23:33:43 +07:00
|
|
|
efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
|
2008-04-27 18:55:59 +07:00
|
|
|
|
|
|
|
++tx_queue->read_count;
|
|
|
|
}
|
2015-11-02 19:51:31 +07:00
|
|
|
tx_queue->xmit_more_available = false;
|
2011-11-28 23:33:43 +07:00
|
|
|
netdev_tx_reset_queue(tx_queue->core_txq);
|
2008-04-27 18:55:59 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
|
|
|
|
{
|
2012-05-18 00:40:54 +07:00
|
|
|
int i;
|
|
|
|
|
2011-01-11 04:18:20 +07:00
|
|
|
if (!tx_queue->buffer)
|
|
|
|
return;
|
|
|
|
|
2010-06-23 18:30:07 +07:00
|
|
|
netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
|
|
|
|
"destroying TX queue %d\n", tx_queue->queue);
|
2009-11-29 10:43:56 +07:00
|
|
|
efx_nic_remove_tx(tx_queue);
|
2008-04-27 18:55:59 +07:00
|
|
|
|
2016-11-17 17:51:54 +07:00
|
|
|
if (tx_queue->cb_page) {
|
|
|
|
for (i = 0; i < efx_tx_cb_page_count(tx_queue); i++)
|
2012-05-18 00:40:54 +07:00
|
|
|
efx_nic_free_buffer(tx_queue->efx,
|
2016-11-17 17:51:54 +07:00
|
|
|
&tx_queue->cb_page[i]);
|
|
|
|
kfree(tx_queue->cb_page);
|
|
|
|
tx_queue->cb_page = NULL;
|
2012-05-18 00:40:54 +07:00
|
|
|
}
|
|
|
|
|
2008-04-27 18:55:59 +07:00
|
|
|
kfree(tx_queue->buffer);
|
|
|
|
tx_queue->buffer = NULL;
|
|
|
|
}
|