/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2011 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/socket.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/prefetch.h>
#include <linux/moduleparam.h>
#include <net/ip.h>
#include <net/checksum.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"

/* Number of RX descriptors pushed at once. */
#define EFX_RX_BATCH 8

/* Maximum length for an RX descriptor sharing a page */
#define EFX_RX_HALF_PAGE ((PAGE_SIZE >> 1) - sizeof(struct efx_rx_page_state) \
                          - EFX_PAGE_IP_ALIGN)
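
/* Note on page layout (summarising efx_init_rx_buffers() below): each
 * receive page begins with a struct efx_rx_page_state, and the remainder
 * of the page holds one or two IP-aligned receive buffers.  A page is only
 * split between two buffers when the DMA length fits in EFX_RX_HALF_PAGE.
 */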

/* Size of buffer allocated for skb header area. */
#define EFX_SKB_HEADERS 64u

/* This is the percentage fill level below which new RX descriptors
 * will be added to the RX descriptor ring.
 */
static unsigned int rx_refill_threshold;

/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */
#define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \
                                      EFX_RX_USR_BUF_SIZE)

/*
 * RX maximum head room required.
 *
 * This must be at least 1 to prevent overflow, plus one packet-worth
 * to allow pipelined receives.
 */
#define EFX_RXD_HEAD_ROOM (1 + EFX_RX_MAX_FRAGS)

static inline u8 *efx_rx_buf_va(struct efx_rx_buffer *buf)
{
        return page_address(buf->page) + buf->page_offset;
}

static inline u32 efx_rx_buf_hash(const u8 *eh)
{
        /* The ethernet header is always directly after any hash. */
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || NET_IP_ALIGN % 4 == 0
        return __le32_to_cpup((const __le32 *)(eh - 4));
#else
        const u8 *data = eh - 4;
        return (u32)data[0]       |
               (u32)data[1] << 8  |
               (u32)data[2] << 16 |
               (u32)data[3] << 24;
#endif
}

static inline struct efx_rx_buffer *
efx_rx_buf_next(struct efx_rx_queue *rx_queue, struct efx_rx_buffer *rx_buf)
{
        if (unlikely(rx_buf == efx_rx_buffer(rx_queue, rx_queue->ptr_mask)))
                return efx_rx_buffer(rx_queue, 0);
        else
                return rx_buf + 1;
}

/**
 * efx_init_rx_buffers - create EFX_RX_BATCH page-based RX buffers
 *
 * @rx_queue: Efx RX queue
 *
 * This allocates memory for EFX_RX_BATCH receive buffers, maps them for DMA,
 * and populates struct efx_rx_buffers for each one. Return a negative error
 * code or 0 on success. If a single page can be split between two buffers,
 * then the page will either be inserted fully, or not at all.
 */
static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue)
{
        struct efx_nic *efx = rx_queue->efx;
        struct efx_rx_buffer *rx_buf;
        struct page *page;
        unsigned int page_offset;
        struct efx_rx_page_state *state;
        dma_addr_t dma_addr;
        unsigned index, count;

        /* We can split a page between two buffers */
        BUILD_BUG_ON(EFX_RX_BATCH & 1);

        for (count = 0; count < EFX_RX_BATCH; ++count) {
                page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
                                   efx->rx_buffer_order);
                if (unlikely(page == NULL))
                        return -ENOMEM;
                dma_addr = dma_map_page(&efx->pci_dev->dev, page, 0,
                                        PAGE_SIZE << efx->rx_buffer_order,
                                        DMA_FROM_DEVICE);
                if (unlikely(dma_mapping_error(&efx->pci_dev->dev, dma_addr))) {
                        __free_pages(page, efx->rx_buffer_order);
                        return -EIO;
                }
                state = page_address(page);
                state->refcnt = 0;
                state->dma_addr = dma_addr;

                dma_addr += sizeof(struct efx_rx_page_state);
                page_offset = sizeof(struct efx_rx_page_state);

        split:
                index = rx_queue->added_count & rx_queue->ptr_mask;
                rx_buf = efx_rx_buffer(rx_queue, index);
                rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
                rx_buf->page = page;
                rx_buf->page_offset = page_offset + EFX_PAGE_IP_ALIGN;
                rx_buf->len = efx->rx_dma_len;
                rx_buf->flags = 0;
                ++rx_queue->added_count;
                ++state->refcnt;

                if ((~count & 1) && (efx->rx_dma_len <= EFX_RX_HALF_PAGE)) {
                        /* Use the second half of the page */
                        get_page(page);
                        dma_addr += (PAGE_SIZE >> 1);
                        page_offset += (PAGE_SIZE >> 1);
                        ++count;
                        goto split;
                }
        }

        return 0;
}
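
/* A page may be shared by two RX buffers, and struct efx_rx_page_state at
 * the start of the page holds the DMA mapping for the whole page.
 * state->refcnt counts the buffers still using that mapping: the unmap
 * helper below only calls dma_unmap_page() when the last such buffer is
 * released, and otherwise just syncs the portion that was actually used.
 */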

static void efx_unmap_rx_buffer(struct efx_nic *efx,
                                struct efx_rx_buffer *rx_buf,
                                unsigned int used_len)
{
        if (rx_buf->page) {
                struct efx_rx_page_state *state;

                state = page_address(rx_buf->page);
                if (--state->refcnt == 0) {
                        dma_unmap_page(&efx->pci_dev->dev,
                                       state->dma_addr,
                                       PAGE_SIZE << efx->rx_buffer_order,
                                       DMA_FROM_DEVICE);
                } else if (used_len) {
                        dma_sync_single_for_cpu(&efx->pci_dev->dev,
                                                rx_buf->dma_addr, used_len,
                                                DMA_FROM_DEVICE);
                }
        }
}

static void efx_free_rx_buffer(struct efx_nic *efx,
                               struct efx_rx_buffer *rx_buf)
{
        if (rx_buf->page) {
                __free_pages(rx_buf->page, efx->rx_buffer_order);
                rx_buf->page = NULL;
        }
}

static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
                               struct efx_rx_buffer *rx_buf)
{
        efx_unmap_rx_buffer(rx_queue->efx, rx_buf, 0);
        efx_free_rx_buffer(rx_queue->efx, rx_buf);
}

/* Attempt to resurrect the other receive buffer that used to share this page,
 * which had previously been passed up to the kernel and freed. */
static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
                                    struct efx_rx_buffer *rx_buf)
{
        struct efx_rx_page_state *state = page_address(rx_buf->page);
        struct efx_rx_buffer *new_buf;
        unsigned fill_level, index;

        /* +1 because efx_rx_packet() incremented removed_count. +1 because
         * we'd like to insert an additional descriptor whilst leaving
         * EFX_RXD_HEAD_ROOM for the non-recycle path */
        fill_level = (rx_queue->added_count - rx_queue->removed_count + 2);
        if (unlikely(fill_level > rx_queue->max_fill)) {
                /* We could place "state" on a list, and drain the list in
                 * efx_fast_push_rx_descriptors(). For now, this will do. */
                return;
        }

        ++state->refcnt;
        get_page(rx_buf->page);

        index = rx_queue->added_count & rx_queue->ptr_mask;
        new_buf = efx_rx_buffer(rx_queue, index);
        new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1);
        new_buf->page = rx_buf->page;
        new_buf->len = rx_buf->len;
        ++rx_queue->added_count;
}
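
/* Recycling: when a packet is discarded, its buffers are pushed straight
 * back onto the RX ring.  If the other buffer sharing the page has already
 * been freed by the stack (page_count() == 1), efx_resurrect_rx_buffer()
 * above re-adds that half as well, flipping the half-page bit of the DMA
 * address to point at the other half of the page.
 */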

/* Recycle buffers directly back into the rx_queue. There is always
 * room to add these buffers, because we've just popped them.
 */
static void efx_recycle_rx_buffers(struct efx_channel *channel,
                                   struct efx_rx_buffer *rx_buf,
                                   unsigned int n_frags)
{
        struct efx_nic *efx = channel->efx;
        struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
        struct efx_rx_buffer *new_buf;
        unsigned index;

        do {
                rx_buf->flags = 0;

                if (efx->rx_dma_len <= EFX_RX_HALF_PAGE &&
                    page_count(rx_buf->page) == 1)
                        efx_resurrect_rx_buffer(rx_queue, rx_buf);

                index = rx_queue->added_count & rx_queue->ptr_mask;
                new_buf = efx_rx_buffer(rx_queue, index);

                memcpy(new_buf, rx_buf, sizeof(*new_buf));
                rx_buf->page = NULL;
                ++rx_queue->added_count;

                rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
        } while (--n_frags);
}

/**
 * efx_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue: RX descriptor queue
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->max_fill. If there is insufficient atomic
 * memory to do so, a slow fill will be scheduled.
 *
 * The caller must provide serialisation (none is used here). In practice,
 * this means this function must run from the NAPI handler, or be called
 * when NAPI is disabled.
 */
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
{
        unsigned fill_level;
        int space, rc = 0;

        /* Calculate current fill level, and exit if we don't need to fill */
        fill_level = (rx_queue->added_count - rx_queue->removed_count);
        EFX_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries);
        if (fill_level >= rx_queue->fast_fill_trigger)
                goto out;

        /* Record minimum fill level */
        if (unlikely(fill_level < rx_queue->min_fill)) {
                if (fill_level)
                        rx_queue->min_fill = fill_level;
        }

        space = rx_queue->max_fill - fill_level;
        EFX_BUG_ON_PARANOID(space < EFX_RX_BATCH);

        netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
                   "RX queue %d fast-filling descriptor ring from"
                   " level %d to level %d\n",
                   efx_rx_queue_index(rx_queue), fill_level,
                   rx_queue->max_fill);

        do {
                rc = efx_init_rx_buffers(rx_queue);
                if (unlikely(rc)) {
                        /* Ensure that we don't leave the rx queue empty */
                        if (rx_queue->added_count == rx_queue->removed_count)
                                efx_schedule_slow_fill(rx_queue);
                        goto out;
                }
        } while ((space -= EFX_RX_BATCH) >= EFX_RX_BATCH);

        netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
                   "RX queue %d fast-filled descriptor ring "
                   "to level %d\n", efx_rx_queue_index(rx_queue),
                   rx_queue->added_count - rx_queue->removed_count);

 out:
        if (rx_queue->notified_count != rx_queue->added_count)
                efx_nic_notify_rx_desc(rx_queue);
}
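
/* Slow fill path: if the atomic refill above fails while the ring is empty,
 * efx_schedule_slow_fill() arms the rx_queue->slow_fill timer, whose
 * callback below posts an event so that NAPI runs and retries the fill.
 */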

void efx_rx_slow_fill(unsigned long context)
{
        struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context;

        /* Post an event to cause NAPI to run and refill the queue */
        efx_nic_generate_fill_event(rx_queue);
        ++rx_queue->slow_fill_count;
}

static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
                                     struct efx_rx_buffer *rx_buf,
                                     int len)
{
        struct efx_nic *efx = rx_queue->efx;
        unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;

        if (likely(len <= max_len))
                return;

        /* The packet must be discarded, but this is only a fatal error
         * if the caller indicated it was
         */
        rx_buf->flags |= EFX_RX_PKT_DISCARD;

        if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
                if (net_ratelimit())
                        netif_err(efx, rx_err, efx->net_dev,
                                  " RX queue %d seriously overlength "
                                  "RX event (0x%x > 0x%x+0x%x). Leaking\n",
                                  efx_rx_queue_index(rx_queue), len, max_len,
                                  efx->type->rx_buffer_padding);
                efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
        } else {
                if (net_ratelimit())
                        netif_err(efx, rx_err, efx->net_dev,
                                  " RX queue %d overlength RX event "
                                  "(0x%x > 0x%x)\n",
                                  efx_rx_queue_index(rx_queue), len, max_len);
        }

        efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
}

/* Pass a received packet up through GRO. GRO can handle pages
 * regardless of checksum state and skbs with a good checksum.
 */
static void
efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
                  unsigned int n_frags, u8 *eh)
{
        struct napi_struct *napi = &channel->napi_str;
        gro_result_t gro_result;
        struct efx_nic *efx = channel->efx;
        struct sk_buff *skb;

        skb = napi_get_frags(napi);
        if (unlikely(!skb)) {
                while (n_frags--) {
                        put_page(rx_buf->page);
                        rx_buf->page = NULL;
                        rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
                }
                return;
        }

        if (efx->net_dev->features & NETIF_F_RXHASH)
                skb->rxhash = efx_rx_buf_hash(eh);
        skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
                          CHECKSUM_UNNECESSARY : CHECKSUM_NONE);

        for (;;) {
                skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
                                   rx_buf->page, rx_buf->page_offset,
                                   rx_buf->len);
                rx_buf->page = NULL;
                skb->len += rx_buf->len;
                if (skb_shinfo(skb)->nr_frags == n_frags)
                        break;

                rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
        }

        skb->data_len = skb->len;
        skb->truesize += n_frags * efx->rx_buffer_truesize;

        skb_record_rx_queue(skb, channel->rx_queue.core_index);

        gro_result = napi_gro_frags(napi);
        if (gro_result != GRO_DROP)
                channel->irq_mod_score += 2;
}
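
/* For the non-GRO path below, only the first hdr_len bytes (at most
 * EFX_SKB_HEADERS) are copied into the skb's linear area; any remaining
 * data stays in the page fragments attached to the skb.
 */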

/* Allocate and construct an SKB around page fragments */
static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel,
                                     struct efx_rx_buffer *rx_buf,
                                     unsigned int n_frags,
                                     u8 *eh, int hdr_len)
{
        struct efx_nic *efx = channel->efx;
        struct sk_buff *skb;

        /* Allocate an SKB to store the headers */
        skb = netdev_alloc_skb(efx->net_dev, hdr_len + EFX_PAGE_SKB_ALIGN);
        if (unlikely(skb == NULL))
                return NULL;

        EFX_BUG_ON_PARANOID(rx_buf->len < hdr_len);

        skb_reserve(skb, EFX_PAGE_SKB_ALIGN);
        memcpy(__skb_put(skb, hdr_len), eh, hdr_len);

        /* Append the remaining page(s) onto the frag list */
        if (rx_buf->len > hdr_len) {
                rx_buf->page_offset += hdr_len;
                rx_buf->len -= hdr_len;

                for (;;) {
                        skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
                                           rx_buf->page, rx_buf->page_offset,
                                           rx_buf->len);
                        rx_buf->page = NULL;
                        skb->len += rx_buf->len;
                        skb->data_len += rx_buf->len;
                        if (skb_shinfo(skb)->nr_frags == n_frags)
                                break;

                        rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
                }
        } else {
                __free_pages(rx_buf->page, efx->rx_buffer_order);
                rx_buf->page = NULL;
                n_frags = 0;
        }

        skb->truesize += n_frags * efx->rx_buffer_truesize;

        /* Move past the ethernet header */
        skb->protocol = eth_type_trans(skb, efx->net_dev);

        return skb;
}

void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
                   unsigned int n_frags, unsigned int len, u16 flags)
{
        struct efx_nic *efx = rx_queue->efx;
        struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
        struct efx_rx_buffer *rx_buf;

        rx_buf = efx_rx_buffer(rx_queue, index);
        rx_buf->flags |= flags;

        /* Validate the number of fragments and completed length */
        if (n_frags == 1) {
                efx_rx_packet__check_len(rx_queue, rx_buf, len);
        } else if (unlikely(n_frags > EFX_RX_MAX_FRAGS) ||
                   unlikely(len <= (n_frags - 1) * EFX_RX_USR_BUF_SIZE) ||
                   unlikely(len > n_frags * EFX_RX_USR_BUF_SIZE) ||
                   unlikely(!efx->rx_scatter)) {
                /* If this isn't an explicit discard request, either
                 * the hardware or the driver is broken.
                 */
                WARN_ON(!(len == 0 && rx_buf->flags & EFX_RX_PKT_DISCARD));
                rx_buf->flags |= EFX_RX_PKT_DISCARD;
        }

        netif_vdbg(efx, rx_status, efx->net_dev,
                   "RX queue %d received ids %x-%x len %d %s%s\n",
                   efx_rx_queue_index(rx_queue), index,
                   (index + n_frags - 1) & rx_queue->ptr_mask, len,
                   (rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
                   (rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : "");

        /* Discard packet, if instructed to do so. Process the
         * previous receive first.
         */
        if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
                efx_rx_flush_packet(channel);
                efx_recycle_rx_buffers(channel, rx_buf, n_frags);
                return;
        }

        if (n_frags == 1)
                rx_buf->len = len;

        /* Release and/or sync DMA mapping - assumes all RX buffers
         * consumed in-order per RX queue
         */
        efx_unmap_rx_buffer(efx, rx_buf, rx_buf->len);

        /* Prefetch nice and early so data will (hopefully) be in cache by
         * the time we look at it.
         */
        prefetch(efx_rx_buf_va(rx_buf));

        rx_buf->page_offset += efx->type->rx_buffer_hash_size;
        rx_buf->len -= efx->type->rx_buffer_hash_size;

        if (n_frags > 1) {
                /* Release/sync DMA mapping for additional fragments.
                 * Fix length for last fragment.
                 */
                unsigned int tail_frags = n_frags - 1;

                for (;;) {
                        rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
                        if (--tail_frags == 0)
                                break;
                        efx_unmap_rx_buffer(efx, rx_buf, EFX_RX_USR_BUF_SIZE);
                }
                rx_buf->len = len - (n_frags - 1) * EFX_RX_USR_BUF_SIZE;
                efx_unmap_rx_buffer(efx, rx_buf, rx_buf->len);
        }

        /* Pipeline receives so that we give time for packet headers to be
         * prefetched into cache.
         */
        efx_rx_flush_packet(channel);
        channel->rx_pkt_n_frags = n_frags;
        channel->rx_pkt_index = index;
}
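
/* Receive processing is split in two: efx_rx_packet() above runs at
 * completion time, validates the completion and records the packet
 * (rx_pkt_index/rx_pkt_n_frags) after prefetching its headers.  The buffered
 * packet is later handed to __efx_rx_packet() via efx_rx_flush_packet(),
 * giving the prefetch issued above time to complete.
 */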

static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
                           struct efx_rx_buffer *rx_buf,
                           unsigned int n_frags)
{
        struct sk_buff *skb;
        u16 hdr_len = min_t(u16, rx_buf->len, EFX_SKB_HEADERS);

        skb = efx_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len);
        if (unlikely(skb == NULL)) {
                efx_free_rx_buffer(channel->efx, rx_buf);
                return;
        }
        skb_record_rx_queue(skb, channel->rx_queue.core_index);

        /* Set the SKB flags */
        skb_checksum_none_assert(skb);

        if (channel->type->receive_skb)
                if (channel->type->receive_skb(channel, skb))
                        return;

        /* Pass the packet up */
        netif_receive_skb(skb);
}

/* Handle a received packet. Second half: Touches packet payload. */
void __efx_rx_packet(struct efx_channel *channel)
{
        struct efx_nic *efx = channel->efx;
        struct efx_rx_buffer *rx_buf =
                efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
        u8 *eh = efx_rx_buf_va(rx_buf);

        /* If we're in loopback test, then pass the packet directly to the
         * loopback layer, and free the rx_buf here
         */
        if (unlikely(efx->loopback_selftest)) {
                efx_loopback_rx_packet(efx, eh, rx_buf->len);
                efx_free_rx_buffer(efx, rx_buf);
                goto out;
        }

        if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
                rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;

        if (!channel->type->receive_skb)
                efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh);
        else
                efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
out:
        channel->rx_pkt_n_frags = 0;
}

int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
{
        struct efx_nic *efx = rx_queue->efx;
        unsigned int entries;
        int rc;

        /* Create the smallest power-of-two aligned ring */
        entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
        EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
        rx_queue->ptr_mask = entries - 1;

        netif_dbg(efx, probe, efx->net_dev,
                  "creating RX queue %d size %#x mask %#x\n",
                  efx_rx_queue_index(rx_queue), efx->rxq_entries,
                  rx_queue->ptr_mask);

        /* Allocate RX buffers */
        rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
                                   GFP_KERNEL);
        if (!rx_queue->buffer)
                return -ENOMEM;

        rc = efx_nic_probe_rx(rx_queue);
        if (rc) {
                kfree(rx_queue->buffer);
                rx_queue->buffer = NULL;
        }
        return rc;
}

void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
{
        struct efx_nic *efx = rx_queue->efx;
        unsigned int max_fill, trigger, max_trigger;

        netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
                  "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));

        /* Initialise ptr fields */
        rx_queue->added_count = 0;
        rx_queue->notified_count = 0;
        rx_queue->removed_count = 0;
        rx_queue->min_fill = -1U;

        /* Initialise limit fields */
        max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
        max_trigger = max_fill - EFX_RX_BATCH;
        if (rx_refill_threshold != 0) {
                trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
                if (trigger > max_trigger)
                        trigger = max_trigger;
        } else {
                trigger = max_trigger;
        }

        rx_queue->max_fill = max_fill;
        rx_queue->fast_fill_trigger = trigger;

        /* Set up RX descriptor ring */
        rx_queue->enabled = true;
        efx_nic_init_rx(rx_queue);
}
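
/* Illustrative example of the limits computed above (numbers are only for
 * illustration): with rxq_entries of 512 and rx_refill_threshold set to 90,
 * max_fill = 512 - EFX_RXD_HEAD_ROOM and the fast-fill trigger is 90% of
 * max_fill, capped at max_fill - EFX_RX_BATCH.  With the default threshold
 * of 0 the trigger is simply max_fill - EFX_RX_BATCH.
 */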

void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
{
        int i;
        struct efx_rx_buffer *rx_buf;

        netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
                  "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));

        /* A flush failure might have left rx_queue->enabled */
        rx_queue->enabled = false;

        del_timer_sync(&rx_queue->slow_fill);
        efx_nic_fini_rx(rx_queue);

        /* Release RX buffers NB start at index 0 not current HW ptr */
        if (rx_queue->buffer) {
                for (i = 0; i <= rx_queue->ptr_mask; i++) {
                        rx_buf = efx_rx_buffer(rx_queue, i);
                        efx_fini_rx_buffer(rx_queue, rx_buf);
                }
        }
}

void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
{
        netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
                  "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));

        efx_nic_remove_rx(rx_queue);

        kfree(rx_queue->buffer);
        rx_queue->buffer = NULL;
}

module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
                 "RX descriptor ring refill threshold (%)");
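
/* Example usage (assuming the driver is loaded as the "sfc" module):
 *     modprobe sfc rx_refill_threshold=90
 * sets the refill trigger to 90% of the maximum fill level; the default of 0
 * keeps the maximum trigger level.
 */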