2005-04-17 05:20:36 +07:00
|
|
|
/*
|
|
|
|
* Common framework for low-level network console, dump, and debugger code
|
|
|
|
*
|
|
|
|
* Sep 8 2003 Matt Mackall <mpm@selenic.com>
|
|
|
|
*
|
|
|
|
* based on the netconsole code from:
|
|
|
|
*
|
|
|
|
* Copyright (C) 2001 Ingo Molnar <mingo@redhat.com>
|
|
|
|
* Copyright (C) 2002 Red Hat, Inc.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/netdevice.h>
|
|
|
|
#include <linux/etherdevice.h>
|
|
|
|
#include <linux/string.h>
|
2005-12-27 11:43:12 +07:00
|
|
|
#include <linux/if_arp.h>
|
2005-04-17 05:20:36 +07:00
|
|
|
#include <linux/inetdevice.h>
|
|
|
|
#include <linux/inet.h>
|
|
|
|
#include <linux/interrupt.h>
|
|
|
|
#include <linux/netpoll.h>
|
|
|
|
#include <linux/sched.h>
|
|
|
|
#include <linux/delay.h>
|
|
|
|
#include <linux/rcupdate.h>
|
|
|
|
#include <linux/workqueue.h>
|
|
|
|
#include <net/tcp.h>
|
|
|
|
#include <net/udp.h>
|
|
|
|
#include <asm/unaligned.h>
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We maintain a small pool of fully-sized skbs, to make sure the
|
|
|
|
* message gets out even in extreme OOM situations.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#define MAX_UDP_CHUNK 1460
|
|
|
|
#define MAX_SKBS 32
|
|
|
|
#define MAX_QUEUE_DEPTH (MAX_SKBS / 2)
|
|
|
|
|
2006-11-15 01:43:58 +07:00
|
|
|
static struct sk_buff_head skb_pool;
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
static atomic_t trapped;
|
|
|
|
|
2006-10-27 05:46:54 +07:00
|
|
|
#define USEC_PER_POLL 50
|
2005-04-17 05:20:36 +07:00
|
|
|
#define NETPOLL_RX_ENABLED 1
|
|
|
|
#define NETPOLL_RX_DROP 2
|
|
|
|
|
|
|
|
#define MAX_SKB_SIZE \
|
|
|
|
(MAX_UDP_CHUNK + sizeof(struct udphdr) + \
|
|
|
|
sizeof(struct iphdr) + sizeof(struct ethhdr))
|
|
|
|
|
|
|
|
static void zap_completion_queue(void);
|
2006-06-26 14:04:27 +07:00
|
|
|
static void arp_reply(struct sk_buff *skb);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2006-11-22 21:57:56 +07:00
|
|
|
/*
 * Work-queue handler that drains the netpoll TX backlog (npinfo->txq).
 *
 * Packets that could not be sent synchronously by netpoll_send_skb() are
 * queued on txq; this worker retries them.  If the device is stopped or
 * busy, the skb is put back at the head of the queue (to preserve packet
 * order) and the work is rescheduled for a later tick.
 */
static void queue_process(struct work_struct *work)
{
	struct netpoll_info *npinfo =
		container_of(work, struct netpoll_info, tx_work.work);
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = skb_dequeue(&npinfo->txq))) {
		struct net_device *dev = skb->dev;

		/* Device has gone away or is down: drop the packet. */
		if (!netif_device_present(dev) || !netif_running(dev)) {
			__kfree_skb(skb);
			continue;
		}

		/* IRQs must be off while holding the TX lock here;
		 * netif_tx_lock() alone does not disable interrupts. */
		local_irq_save(flags);
		netif_tx_lock(dev);
		if ((netif_queue_stopped(dev) ||
		     netif_subqueue_stopped(dev, skb->queue_mapping)) ||
		     dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) {
			/* Transmit failed: requeue at the head so ordering
			 * is kept, and retry after ~1/10 s. */
			skb_queue_head(&npinfo->txq, skb);
			netif_tx_unlock(dev);
			local_irq_restore(flags);

			schedule_delayed_work(&npinfo->tx_work, HZ/10);
			return;
		}
		netif_tx_unlock(dev);
		local_irq_restore(flags);
	}
}
|
|
|
|
|
2006-11-15 12:40:42 +07:00
|
|
|
/*
 * Verify the UDP checksum of a received skb.
 *
 * Returns 0 when the checksum is valid (or absent/already verified by
 * hardware), non-zero otherwise.
 *
 * @skb:   received packet
 * @uh:    pointer to its UDP header
 * @ulen:  UDP length (header + payload)
 * @saddr: IPv4 source address (network byte order)
 * @daddr: IPv4 destination address (network byte order)
 */
static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
			    unsigned short ulen, __be32 saddr, __be32 daddr)
{
	__wsum psum;

	/* check == 0 means the sender did not compute a checksum;
	 * skb_csum_unnecessary() means hardware already validated it. */
	if (uh->check == 0 || skb_csum_unnecessary(skb))
		return 0;

	/* Pseudo-header sum over addresses, length and protocol. */
	psum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);

	/* Fast path: hardware gave us a full packet sum; fold it with
	 * the pseudo-header and accept if the result is zero. */
	if (skb->ip_summed == CHECKSUM_COMPLETE &&
	    !csum_fold(csum_add(psum, skb->csum)))
		return 0;

	/* Slow path: stash the pseudo-header sum and let the generic
	 * helper walk the packet data. */
	skb->csum = psum;

	return __skb_checksum_complete(skb);
}
|
|
|
|
|
|
|
|
/*
 * Check whether delayed processing was scheduled for our NIC. If so,
 * we attempt to grab the poll lock and use ->poll() to pump the card.
 * If this fails, either we've recursed in ->poll() or it's already
 * running on another CPU.
 *
 * Note: we don't mask interrupts with this lock because we're using
 * trylock here and interrupts are already disabled in the softirq
 * case. Further, we test the poll_owner to avoid recursion on UP
 * systems where the lock doesn't exist.
 *
 * In cases where there is bi-directional communications, reading only
 * one message at a time can lead to packets being dropped by the
 * network adapter, forcing superfluous retries and possibly timeouts.
 * Thus, we set our budget to greater than 1.
 */
static void poll_napi(struct netpoll *np)
{
	struct netpoll_info *npinfo = np->dev->npinfo;
	struct napi_struct *napi;
	int budget = 16;

	list_for_each_entry(napi, &np->dev->napi_list, dev_list) {
		if (test_bit(NAPI_STATE_SCHED, &napi->state) &&
		    napi->poll_owner != smp_processor_id() &&
		    spin_trylock(&napi->poll_lock)) {
			/* Mark RX as trapped so incoming packets are
			 * routed to netpoll rather than the normal stack
			 * while we pump the driver's poll routine. */
			npinfo->rx_flags |= NETPOLL_RX_DROP;
			atomic_inc(&trapped);

			napi->poll(napi, budget);

			atomic_dec(&trapped);
			npinfo->rx_flags &= ~NETPOLL_RX_DROP;
			spin_unlock(&napi->poll_lock);
		}
	}
}
|
|
|
|
|
2006-06-26 14:04:27 +07:00
|
|
|
static void service_arp_queue(struct netpoll_info *npi)
|
|
|
|
{
|
|
|
|
struct sk_buff *skb;
|
|
|
|
|
|
|
|
if (unlikely(!npi))
|
|
|
|
return;
|
|
|
|
|
|
|
|
skb = skb_dequeue(&npi->arp_tx);
|
|
|
|
|
|
|
|
while (skb != NULL) {
|
|
|
|
arp_reply(skb);
|
|
|
|
skb = skb_dequeue(&npi->arp_tx);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
/*
 * Main netpoll entry point: poll the device for pending RX/TX work.
 *
 * Invokes the driver's ->poll_controller() hook, pumps any scheduled
 * NAPI contexts, answers queued ARP requests and reaps the per-CPU
 * completion queue so transmitted skbs are actually freed.
 */
void netpoll_poll(struct netpoll *np)
{
	/* Nothing to do if the device is gone, down, or has no
	 * netpoll support (->poll_controller missing). */
	if (!np->dev || !netif_running(np->dev) || !np->dev->poll_controller)
		return;

	/* Process pending work on NIC */
	np->dev->poll_controller(np->dev);
	if (!list_empty(&np->dev->napi_list))
		poll_napi(np);

	service_arp_queue(np->dev->npinfo);

	zap_completion_queue();
}
|
|
|
|
|
|
|
|
/*
 * Top up the emergency skb pool to MAX_SKBS entries.
 *
 * The pool guarantees that netpoll can still emit messages under
 * memory pressure; allocation failures here are silently tolerated
 * (the pool simply stays short).
 */
static void refill_skbs(void)
{
	struct sk_buff *skb;
	unsigned long flags;

	/* Hold the pool lock across the whole refill; __skb_queue_tail
	 * is the unlocked variant, so the lock protects it. */
	spin_lock_irqsave(&skb_pool.lock, flags);
	while (skb_pool.qlen < MAX_SKBS) {
		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
		if (!skb)
			break;

		__skb_queue_tail(&skb_pool, skb);
	}
	spin_unlock_irqrestore(&skb_pool.lock, flags);
}
|
|
|
|
|
|
|
|
/*
 * Reap this CPU's softnet completion queue.
 *
 * Netpoll may run with the softirq that normally frees transmitted
 * skbs blocked, so we detach the completion list ourselves and free
 * each entry.  Skbs with a destructor are returned through
 * dev_kfree_skb_any() so the destructor runs; plain skbs are freed
 * directly.
 */
static void zap_completion_queue(void)
{
	unsigned long flags;
	struct softnet_data *sd = &get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		/* Atomically detach the whole list; IRQs append to it. */
		local_irq_save(flags);
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_restore(flags);

		while (clist != NULL) {
			struct sk_buff *skb = clist;
			clist = clist->next;
			if (skb->destructor)
				dev_kfree_skb_any(skb); /* put this one back */
			else
				__kfree_skb(skb);
		}
	}

	put_cpu_var(softnet_data);
}
|
|
|
|
|
2006-11-15 01:43:58 +07:00
|
|
|
/*
 * Get an skb of at least @len bytes with @reserve bytes of headroom.
 *
 * Tries a fresh GFP_ATOMIC allocation first, then falls back to the
 * pre-allocated emergency pool.  If both fail, polls the device (to
 * trigger TX completions that may free memory) and retries up to 10
 * times before giving up and returning NULL.
 */
static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
	int count = 0;
	struct sk_buff *skb;

	/* Free completed TX skbs and replenish the pool up front. */
	zap_completion_queue();
	refill_skbs();
repeat:

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		skb = skb_dequeue(&skb_pool);

	if (!skb) {
		if (++count < 10) {
			netpoll_poll(np);
			goto repeat;
		}
		return NULL;
	}

	/* Hand the caller a fresh reference with headroom reserved. */
	atomic_set(&skb->users, 1);
	skb_reserve(skb, reserve);
	return skb;
}
|
|
|
|
|
[NET]: Make NAPI polling independent of struct net_device objects.
Several devices have multiple independant RX queues per net
device, and some have a single interrupt doorbell for several
queues.
In either case, it's easier to support layouts like that if the
structure representing the poll is independant from the net
device itself.
The signature of the ->poll() call back goes from:
int foo_poll(struct net_device *dev, int *budget)
to
int foo_poll(struct napi_struct *napi, int budget)
The caller is returned the number of RX packets processed (or
the number of "NAPI credits" consumed if you want to get
abstract). The callee no longer messes around bumping
dev->quota, *budget, etc. because that is all handled in the
caller upon return.
The napi_struct is to be embedded in the device driver private data
structures.
Furthermore, it is the driver's responsibility to disable all NAPI
instances in it's ->stop() device close handler. Since the
napi_struct is privatized into the driver's private data structures,
only the driver knows how to get at all of the napi_struct instances
it may have per-device.
With lots of help and suggestions from Rusty Russell, Roland Dreier,
Michael Chan, Jeff Garzik, and Jamal Hadi Salim.
Bug fixes from Thomas Graf, Roland Dreier, Peter Zijlstra,
Joseph Fannin, Scott Wood, Hans J. Koch, and Michael Chan.
[ Ported to current tree and all drivers converted. Integrated
Stephen's follow-on kerneldoc additions, and restored poll_list
handling to the old style to fix mutual exclusion issues. -DaveM ]
Signed-off-by: Stephen Hemminger <shemminger@linux-foundation.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
2007-10-04 06:41:36 +07:00
|
|
|
/*
 * Return 1 if any of @dev's NAPI contexts is currently being polled
 * by this CPU (i.e. we would recurse if we transmitted synchronously),
 * 0 otherwise.
 */
static int netpoll_owner_active(struct net_device *dev)
{
	struct napi_struct *napi;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner == smp_processor_id())
			return 1;
	}
	return 0;
}
|
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
/*
 * Transmit @skb via netpoll, consuming it in all cases.
 *
 * If the TX queue is empty and we are not recursing from this device's
 * poll routine, try to send synchronously, busy-polling the device for
 * up to one clock tick.  Anything that still could not be sent is
 * queued on npinfo->txq for queue_process() to retry later.
 */
static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
	int status = NETDEV_TX_BUSY;
	unsigned long tries;
	struct net_device *dev = np->dev;
	struct netpoll_info *npinfo = np->dev->npinfo;

	/* No netpoll state or device unusable: drop the packet. */
	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
		__kfree_skb(skb);
		return;
	}

	/* don't get messages out of order, and no recursion */
	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
		unsigned long flags;

		local_irq_save(flags);
		/* try until next clock tick */
		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
		     tries > 0; --tries) {
			if (netif_tx_trylock(dev)) {
				if (!netif_queue_stopped(dev) &&
				    !netif_subqueue_stopped(dev, skb->queue_mapping))
					status = dev->hard_start_xmit(skb, dev);
				netif_tx_unlock(dev);

				if (status == NETDEV_TX_OK)
					break;

			}

			/* tickle device maybe there is some cleanup */
			netpoll_poll(np);

			udelay(USEC_PER_POLL);
		}
		local_irq_restore(flags);
	}

	/* Fall back to deferred transmission via the work queue. */
	if (status != NETDEV_TX_OK) {
		skb_queue_tail(&npinfo->txq, skb);
		schedule_delayed_work(&npinfo->tx_work, 0);
	}
}
|
|
|
|
|
|
|
|
/*
 * Build a complete Ethernet/IPv4/UDP frame around @msg (of @len bytes)
 * using the addresses and ports configured in @np, and hand it to
 * netpoll_send_skb() for transmission.
 *
 * The headers are constructed back-to-front: payload is copied first,
 * then UDP, IP and Ethernet headers are pushed in front of it.
 */
void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
	int total_len, eth_len, ip_len, udp_len;
	struct sk_buff *skb;
	struct udphdr *udph;
	struct iphdr *iph;
	struct ethhdr *eth;

	udp_len = len + sizeof(*udph);
	ip_len = eth_len = udp_len + sizeof(*iph);
	total_len = eth_len + ETH_HLEN + NET_IP_ALIGN;

	/* Reserve all header space as headroom; only payload is "data". */
	skb = find_skb(np, total_len, total_len - len);
	if (!skb)
		return;

	skb_copy_to_linear_data(skb, msg, len);
	skb->len += len;

	/* UDP header */
	skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);
	udph->source = htons(np->local_port);
	udph->dest = htons(np->remote_port);
	udph->len = htons(udp_len);
	udph->check = 0;
	udph->check = csum_tcpudp_magic(htonl(np->local_ip),
					htonl(np->remote_ip),
					udp_len, IPPROTO_UDP,
					csum_partial((unsigned char *)udph, udp_len, 0));
	/* A computed checksum of 0 must be transmitted as all-ones. */
	if (udph->check == 0)
		udph->check = CSUM_MANGLED_0;

	/* IP header */
	skb_push(skb, sizeof(*iph));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);

	/* iph->version = 4; iph->ihl = 5; */
	put_unaligned(0x45, (unsigned char *)iph);
	iph->tos      = 0;
	put_unaligned(htons(ip_len), &(iph->tot_len));
	iph->id       = 0;
	iph->frag_off = 0;
	iph->ttl      = 64;
	iph->protocol = IPPROTO_UDP;
	iph->check    = 0;
	/* put_unaligned: the IP header may not be 4-byte aligned here. */
	put_unaligned(htonl(np->local_ip), &(iph->saddr));
	put_unaligned(htonl(np->remote_ip), &(iph->daddr));
	iph->check    = ip_fast_csum((unsigned char *)iph, iph->ihl);

	/* Ethernet header */
	eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb->protocol = eth->h_proto = htons(ETH_P_IP);
	memcpy(eth->h_source, np->local_mac, 6);
	memcpy(eth->h_dest, np->remote_mac, 6);

	skb->dev = np->dev;

	netpoll_send_skb(np, skb);
}
|
|
|
|
|
|
|
|
/*
 * Answer an ARP request that arrived while netpoll had RX trapped.
 *
 * Validates the request (Ethernet/IEEE802 hardware type, IPv4 protocol,
 * ARPOP_REQUEST, target IP matching our netpoll-configured local IP),
 * then builds an ARPOP_REPLY and sends it back through netpoll itself.
 * The incoming @skb is only read, never consumed here.
 */
static void arp_reply(struct sk_buff *skb)
{
	struct netpoll_info *npinfo = skb->dev->npinfo;
	struct arphdr *arp;
	unsigned char *arp_ptr;
	int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
	__be32 sip, tip;
	unsigned char *sha;
	struct sk_buff *send_skb;
	struct netpoll *np = NULL;

	/* Only reply if a netpoll RX client is attached to this device. */
	if (npinfo->rx_np && npinfo->rx_np->dev == skb->dev)
		np = npinfo->rx_np;
	if (!np)
		return;

	/* No arp on this interface */
	if (skb->dev->flags & IFF_NOARP)
		return;

	/* Make sure the full ARP payload (header + two hw addrs +
	 * two IPv4 addrs) is in the linear area. */
	if (!pskb_may_pull(skb, (sizeof(struct arphdr) +
				 (2 * skb->dev->addr_len) +
				 (2 * sizeof(u32)))))
		return;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	arp = arp_hdr(skb);

	if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
	     arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
	    arp->ar_pro != htons(ETH_P_IP) ||
	    arp->ar_op != htons(ARPOP_REQUEST))
		return;

	/* Walk the variable-length address fields after the header:
	 * sender hw addr, sender IP, target hw addr, target IP. */
	arp_ptr = (unsigned char *)(arp+1);
	/* save the location of the src hw addr */
	sha = arp_ptr;
	arp_ptr += skb->dev->addr_len;
	memcpy(&sip, arp_ptr, 4);
	arp_ptr += 4;
	/* if we actually cared about dst hw addr, it would get copied here */
	arp_ptr += skb->dev->addr_len;
	memcpy(&tip, arp_ptr, 4);

	/* Should we ignore arp? */
	if (tip != htonl(np->local_ip) || LOOPBACK(tip) || MULTICAST(tip))
		return;

	size = sizeof(struct arphdr) + 2 * (skb->dev->addr_len + 4);
	send_skb = find_skb(np, size + LL_RESERVED_SPACE(np->dev),
			    LL_RESERVED_SPACE(np->dev));

	if (!send_skb)
		return;

	skb_reset_network_header(send_skb);
	arp = (struct arphdr *) skb_put(send_skb, size);
	send_skb->dev = skb->dev;
	send_skb->protocol = htons(ETH_P_ARP);

	/* Fill the device header for the ARP frame */

	if (np->dev->hard_header &&
	    np->dev->hard_header(send_skb, skb->dev, ptype,
				 sha, np->local_mac,
				 send_skb->len) < 0) {
		kfree_skb(send_skb);
		return;
	}

	/*
	 * Fill out the arp protocol part.
	 *
	 * we only support ethernet device type,
	 * which (according to RFC 1390) should always equal 1 (Ethernet).
	 */

	arp->ar_hrd = htons(np->dev->type);
	arp->ar_pro = htons(ETH_P_IP);
	arp->ar_hln = np->dev->addr_len;
	arp->ar_pln = 4;
	arp->ar_op = htons(type);

	/* Payload: our hw addr + our IP (the request's target),
	 * then the requester's hw addr + IP. */
	arp_ptr=(unsigned char *)(arp + 1);
	memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len);
	arp_ptr += np->dev->addr_len;
	memcpy(arp_ptr, &tip, 4);
	arp_ptr += 4;
	memcpy(arp_ptr, sha, np->dev->addr_len);
	arp_ptr += np->dev->addr_len;
	memcpy(arp_ptr, &sip, 4);

	netpoll_send_skb(np, send_skb);
}
|
|
|
|
|
|
|
|
int __netpoll_rx(struct sk_buff *skb)
|
|
|
|
{
|
|
|
|
int proto, len, ulen;
|
|
|
|
struct iphdr *iph;
|
|
|
|
struct udphdr *uh;
|
2006-06-26 14:04:27 +07:00
|
|
|
struct netpoll_info *npi = skb->dev->npinfo;
|
|
|
|
struct netpoll *np = npi->rx_np;
|
|
|
|
|
2005-06-23 12:05:59 +07:00
|
|
|
if (!np)
|
2005-04-17 05:20:36 +07:00
|
|
|
goto out;
|
|
|
|
if (skb->dev->type != ARPHRD_ETHER)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
/* check if netpoll clients need ARP */
|
2007-03-26 10:13:04 +07:00
|
|
|
if (skb->protocol == htons(ETH_P_ARP) &&
|
2005-04-17 05:20:36 +07:00
|
|
|
atomic_read(&trapped)) {
|
2006-06-26 14:04:27 +07:00
|
|
|
skb_queue_tail(&npi->arp_tx, skb);
|
2005-04-17 05:20:36 +07:00
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
proto = ntohs(eth_hdr(skb)->h_proto);
|
|
|
|
if (proto != ETH_P_IP)
|
|
|
|
goto out;
|
|
|
|
if (skb->pkt_type == PACKET_OTHERHOST)
|
|
|
|
goto out;
|
|
|
|
if (skb_shared(skb))
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
iph = (struct iphdr *)skb->data;
|
|
|
|
if (!pskb_may_pull(skb, sizeof(struct iphdr)))
|
|
|
|
goto out;
|
|
|
|
if (iph->ihl < 5 || iph->version != 4)
|
|
|
|
goto out;
|
|
|
|
if (!pskb_may_pull(skb, iph->ihl*4))
|
|
|
|
goto out;
|
|
|
|
if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
len = ntohs(iph->tot_len);
|
|
|
|
if (skb->len < len || len < iph->ihl*4)
|
|
|
|
goto out;
|
|
|
|
|
2007-04-18 02:40:20 +07:00
|
|
|
/*
|
|
|
|
* Our transport medium may have padded the buffer out.
|
|
|
|
* Now We trim to the true length of the frame.
|
|
|
|
*/
|
|
|
|
if (pskb_trim_rcsum(skb, len))
|
|
|
|
goto out;
|
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
if (iph->protocol != IPPROTO_UDP)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
len -= iph->ihl*4;
|
|
|
|
uh = (struct udphdr *)(((char *)iph) + iph->ihl*4);
|
|
|
|
ulen = ntohs(uh->len);
|
|
|
|
|
|
|
|
if (ulen != len)
|
|
|
|
goto out;
|
2005-11-11 04:01:24 +07:00
|
|
|
if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr))
|
2005-04-17 05:20:36 +07:00
|
|
|
goto out;
|
|
|
|
if (np->local_ip && np->local_ip != ntohl(iph->daddr))
|
|
|
|
goto out;
|
|
|
|
if (np->remote_ip && np->remote_ip != ntohl(iph->saddr))
|
|
|
|
goto out;
|
|
|
|
if (np->local_port && np->local_port != ntohs(uh->dest))
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
np->rx_hook(np, ntohs(uh->source),
|
|
|
|
(char *)(uh+1),
|
|
|
|
ulen - sizeof(struct udphdr));
|
|
|
|
|
|
|
|
kfree_skb(skb);
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
out:
|
|
|
|
if (atomic_read(&trapped)) {
|
|
|
|
kfree_skb(skb);
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
int netpoll_parse_options(struct netpoll *np, char *opt)
|
|
|
|
{
|
|
|
|
char *cur=opt, *delim;
|
|
|
|
|
2006-11-15 11:40:49 +07:00
|
|
|
if (*cur != '@') {
|
2005-04-17 05:20:36 +07:00
|
|
|
if ((delim = strchr(cur, '@')) == NULL)
|
|
|
|
goto parse_failed;
|
2006-11-15 11:40:49 +07:00
|
|
|
*delim = 0;
|
|
|
|
np->local_port = simple_strtol(cur, NULL, 10);
|
|
|
|
cur = delim;
|
2005-04-17 05:20:36 +07:00
|
|
|
}
|
|
|
|
cur++;
|
|
|
|
printk(KERN_INFO "%s: local port %d\n", np->name, np->local_port);
|
|
|
|
|
2006-11-15 11:40:49 +07:00
|
|
|
if (*cur != '/') {
|
2005-04-17 05:20:36 +07:00
|
|
|
if ((delim = strchr(cur, '/')) == NULL)
|
|
|
|
goto parse_failed;
|
2006-11-15 11:40:49 +07:00
|
|
|
*delim = 0;
|
|
|
|
np->local_ip = ntohl(in_aton(cur));
|
|
|
|
cur = delim;
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
printk(KERN_INFO "%s: local IP %d.%d.%d.%d\n",
|
|
|
|
np->name, HIPQUAD(np->local_ip));
|
|
|
|
}
|
|
|
|
cur++;
|
|
|
|
|
2006-11-15 11:40:49 +07:00
|
|
|
if (*cur != ',') {
|
2005-04-17 05:20:36 +07:00
|
|
|
/* parse out dev name */
|
|
|
|
if ((delim = strchr(cur, ',')) == NULL)
|
|
|
|
goto parse_failed;
|
2006-11-15 11:40:49 +07:00
|
|
|
*delim = 0;
|
2005-04-17 05:20:36 +07:00
|
|
|
strlcpy(np->dev_name, cur, sizeof(np->dev_name));
|
2006-11-15 11:40:49 +07:00
|
|
|
cur = delim;
|
2005-04-17 05:20:36 +07:00
|
|
|
}
|
|
|
|
cur++;
|
|
|
|
|
|
|
|
printk(KERN_INFO "%s: interface %s\n", np->name, np->dev_name);
|
|
|
|
|
2006-11-15 11:40:49 +07:00
|
|
|
if (*cur != '@') {
|
2005-04-17 05:20:36 +07:00
|
|
|
/* dst port */
|
|
|
|
if ((delim = strchr(cur, '@')) == NULL)
|
|
|
|
goto parse_failed;
|
2006-11-15 11:40:49 +07:00
|
|
|
*delim = 0;
|
|
|
|
np->remote_port = simple_strtol(cur, NULL, 10);
|
|
|
|
cur = delim;
|
2005-04-17 05:20:36 +07:00
|
|
|
}
|
|
|
|
cur++;
|
|
|
|
printk(KERN_INFO "%s: remote port %d\n", np->name, np->remote_port);
|
|
|
|
|
|
|
|
/* dst ip */
|
|
|
|
if ((delim = strchr(cur, '/')) == NULL)
|
|
|
|
goto parse_failed;
|
2006-11-15 11:40:49 +07:00
|
|
|
*delim = 0;
|
|
|
|
np->remote_ip = ntohl(in_aton(cur));
|
|
|
|
cur = delim + 1;
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
printk(KERN_INFO "%s: remote IP %d.%d.%d.%d\n",
|
2006-11-15 11:40:49 +07:00
|
|
|
np->name, HIPQUAD(np->remote_ip));
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2006-11-15 11:40:49 +07:00
|
|
|
if (*cur != 0) {
|
2005-04-17 05:20:36 +07:00
|
|
|
/* MAC address */
|
|
|
|
if ((delim = strchr(cur, ':')) == NULL)
|
|
|
|
goto parse_failed;
|
2006-11-15 11:40:49 +07:00
|
|
|
*delim = 0;
|
|
|
|
np->remote_mac[0] = simple_strtol(cur, NULL, 16);
|
|
|
|
cur = delim + 1;
|
2005-04-17 05:20:36 +07:00
|
|
|
if ((delim = strchr(cur, ':')) == NULL)
|
|
|
|
goto parse_failed;
|
2006-11-15 11:40:49 +07:00
|
|
|
*delim = 0;
|
|
|
|
np->remote_mac[1] = simple_strtol(cur, NULL, 16);
|
|
|
|
cur = delim + 1;
|
2005-04-17 05:20:36 +07:00
|
|
|
if ((delim = strchr(cur, ':')) == NULL)
|
|
|
|
goto parse_failed;
|
2006-11-15 11:40:49 +07:00
|
|
|
*delim = 0;
|
|
|
|
np->remote_mac[2] = simple_strtol(cur, NULL, 16);
|
|
|
|
cur = delim + 1;
|
2005-04-17 05:20:36 +07:00
|
|
|
if ((delim = strchr(cur, ':')) == NULL)
|
|
|
|
goto parse_failed;
|
2006-11-15 11:40:49 +07:00
|
|
|
*delim = 0;
|
|
|
|
np->remote_mac[3] = simple_strtol(cur, NULL, 16);
|
|
|
|
cur = delim + 1;
|
2005-04-17 05:20:36 +07:00
|
|
|
if ((delim = strchr(cur, ':')) == NULL)
|
|
|
|
goto parse_failed;
|
2006-11-15 11:40:49 +07:00
|
|
|
*delim = 0;
|
|
|
|
np->remote_mac[4] = simple_strtol(cur, NULL, 16);
|
|
|
|
cur = delim + 1;
|
|
|
|
np->remote_mac[5] = simple_strtol(cur, NULL, 16);
|
2005-04-17 05:20:36 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
printk(KERN_INFO "%s: remote ethernet address "
|
|
|
|
"%02x:%02x:%02x:%02x:%02x:%02x\n",
|
|
|
|
np->name,
|
|
|
|
np->remote_mac[0],
|
|
|
|
np->remote_mac[1],
|
|
|
|
np->remote_mac[2],
|
|
|
|
np->remote_mac[3],
|
|
|
|
np->remote_mac[4],
|
|
|
|
np->remote_mac[5]);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
parse_failed:
|
|
|
|
printk(KERN_INFO "%s: couldn't parse config at %s!\n",
|
|
|
|
np->name, cur);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * netpoll_setup - attach a netpoll instance to a network device
 * @np: netpoll instance; dev_name/local_ip/local_mac etc. must be
 *      filled in (e.g. by netpoll_parse_options) before the call
 *
 * Looks up the device, allocates or reuses the per-device
 * netpoll_info, forces the device up if needed, resolves defaulted
 * local MAC/IP from the device, registers the rx hook, and finally
 * publishes npinfo on the device.
 *
 * Returns 0 on success or a negative errno.  On failure the device
 * reference taken here is dropped and np->dev is reset to NULL.
 */
int netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = NULL;
	struct in_device *in_dev;
	struct netpoll_info *npinfo;
	unsigned long flags;
	int err;

	/* take a reference on the named device for the lifetime of np */
	if (np->dev_name)
		ndev = dev_get_by_name(np->dev_name);
	if (!ndev) {
		printk(KERN_ERR "%s: %s doesn't exist, aborting.\n",
		       np->name, np->dev_name);
		return -ENODEV;
	}

	np->dev = ndev;
	if (!ndev->npinfo) {
		/* first netpoll client on this device: build the shared
		 * per-device state (refcounted, freed by the last
		 * netpoll_cleanup) */
		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
		if (!npinfo) {
			err = -ENOMEM;
			goto release;
		}

		npinfo->rx_flags = 0;
		npinfo->rx_np = NULL;

		spin_lock_init(&npinfo->rx_lock);
		skb_queue_head_init(&npinfo->arp_tx);
		skb_queue_head_init(&npinfo->txq);
		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);

		atomic_set(&npinfo->refcnt, 1);
	} else {
		/* device already has netpoll state; just share it */
		npinfo = ndev->npinfo;
		atomic_inc(&npinfo->refcnt);
	}

	/* netpoll needs the driver's polled-mode entry point */
	if (!ndev->poll_controller) {
		printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
		       np->name, np->dev_name);
		err = -ENOTSUPP;
		goto release;
	}

	if (!netif_running(ndev)) {
		unsigned long atmost, atleast;

		printk(KERN_INFO "%s: device %s not up yet, forcing it\n",
		       np->name, np->dev_name);

		/* dev_open must be called under RTNL */
		rtnl_lock();
		err = dev_open(ndev);
		rtnl_unlock();

		if (err) {
			printk(KERN_ERR "%s: failed to open %s\n",
			       np->name, ndev->name);
			goto release;
		}

		/* wait up to 4s for carrier; give up with a warning
		 * rather than failing the setup */
		atleast = jiffies + HZ/10;
		atmost = jiffies + 4*HZ;
		while (!netif_carrier_ok(ndev)) {
			if (time_after(jiffies, atmost)) {
				printk(KERN_NOTICE
				       "%s: timeout waiting for carrier\n",
				       np->name);
				break;
			}
			cond_resched();
		}

		/* If carrier appears to come up instantly, we don't
		 * trust it and pause so that we don't pump all our
		 * queued console messages into the bitbucket.
		 */

		if (time_before(jiffies, atleast)) {
			printk(KERN_NOTICE "%s: carrier detect appears"
			       " untrustworthy, waiting 4 seconds\n",
			       np->name);
			msleep(4000);
		}
	}

	/* default the local MAC/IP from the device when unset */
	if (is_zero_ether_addr(np->local_mac) && ndev->dev_addr)
		memcpy(np->local_mac, ndev->dev_addr, 6);

	if (!np->local_ip) {
		rcu_read_lock();
		in_dev = __in_dev_get_rcu(ndev);

		if (!in_dev || !in_dev->ifa_list) {
			rcu_read_unlock();
			printk(KERN_ERR "%s: no IP address for %s, aborting\n",
			       np->name, np->dev_name);
			err = -EDESTADDRREQ;
			goto release;
		}

		/* use the device's first configured IPv4 address */
		np->local_ip = ntohl(in_dev->ifa_list->ifa_local);
		rcu_read_unlock();
		printk(KERN_INFO "%s: local IP %d.%d.%d.%d\n",
		       np->name, HIPQUAD(np->local_ip));
	}

	if (np->rx_hook) {
		/* enable rx delivery to this instance under rx_lock */
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		npinfo->rx_flags |= NETPOLL_RX_ENABLED;
		npinfo->rx_np = np;
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}

	/* fill up the skb queue */
	refill_skbs();

	/* last thing to do is link it to the net device structure */
	ndev->npinfo = npinfo;

	/* avoid racing with NAPI reading npinfo */
	synchronize_rcu();

	return 0;

 release:
	/* only free npinfo if it was never published on the device;
	 * a shared one stays owned by its remaining users */
	if (!ndev->npinfo)
		kfree(npinfo);
	np->dev = NULL;
	dev_put(ndev);
	return err;
}
|
|
|
|
|
2006-11-15 11:40:49 +07:00
|
|
|
/*
 * Boot-time initialization: prepare the shared emergency skb pool
 * head so netpoll can send messages even under extreme OOM.
 * (The pool itself is filled later by refill_skbs().)
 */
static int __init netpoll_init(void)
{
	skb_queue_head_init(&skb_pool);
	return 0;
}
core_initcall(netpoll_init);
|
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
void netpoll_cleanup(struct netpoll *np)
|
|
|
|
{
|
2005-06-23 12:05:59 +07:00
|
|
|
struct netpoll_info *npinfo;
|
|
|
|
unsigned long flags;
|
|
|
|
|
2005-06-23 12:05:31 +07:00
|
|
|
if (np->dev) {
|
2005-06-23 12:05:59 +07:00
|
|
|
npinfo = np->dev->npinfo;
|
2006-10-27 05:46:50 +07:00
|
|
|
if (npinfo) {
|
|
|
|
if (npinfo->rx_np == np) {
|
|
|
|
spin_lock_irqsave(&npinfo->rx_lock, flags);
|
|
|
|
npinfo->rx_np = NULL;
|
|
|
|
npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
|
|
|
|
spin_unlock_irqrestore(&npinfo->rx_lock, flags);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (atomic_dec_and_test(&npinfo->refcnt)) {
|
|
|
|
skb_queue_purge(&npinfo->arp_tx);
|
2007-02-09 21:24:36 +07:00
|
|
|
skb_queue_purge(&npinfo->txq);
|
2007-07-06 07:42:44 +07:00
|
|
|
cancel_rearming_delayed_work(&npinfo->tx_work);
|
2006-10-27 05:46:50 +07:00
|
|
|
|
2007-06-29 12:11:47 +07:00
|
|
|
/* clean after last, unfinished work */
|
|
|
|
if (!skb_queue_empty(&npinfo->txq)) {
|
|
|
|
struct sk_buff *skb;
|
|
|
|
skb = __skb_dequeue(&npinfo->txq);
|
|
|
|
kfree_skb(skb);
|
|
|
|
}
|
2006-10-27 05:46:50 +07:00
|
|
|
kfree(npinfo);
|
2007-07-10 05:22:23 +07:00
|
|
|
np->dev->npinfo = NULL;
|
2006-10-27 05:46:50 +07:00
|
|
|
}
|
2005-06-23 12:05:59 +07:00
|
|
|
}
|
2006-10-27 05:46:50 +07:00
|
|
|
|
2005-06-23 12:05:31 +07:00
|
|
|
dev_put(np->dev);
|
|
|
|
}
|
2005-06-23 12:05:59 +07:00
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
np->dev = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * netpoll_trap - report whether netpoll is currently trapping packets
 *
 * Returns the current (nested) trap count; nonzero means incoming
 * packets are being consumed by netpoll rather than the normal stack.
 */
int netpoll_trap(void)
{
	return atomic_read(&trapped);
}
|
|
|
|
|
|
|
|
void netpoll_set_trap(int trap)
|
|
|
|
{
|
|
|
|
if (trap)
|
|
|
|
atomic_inc(&trapped);
|
|
|
|
else
|
|
|
|
atomic_dec(&trapped);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Public netpoll API, used by netconsole and other in-kernel clients */
EXPORT_SYMBOL(netpoll_set_trap);
EXPORT_SYMBOL(netpoll_trap);
EXPORT_SYMBOL(netpoll_parse_options);
EXPORT_SYMBOL(netpoll_setup);
EXPORT_SYMBOL(netpoll_cleanup);
EXPORT_SYMBOL(netpoll_send_udp);
EXPORT_SYMBOL(netpoll_poll);
|