Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-11-29 23:36:45 +07:00
d565b0a1a9
This patch adds the top-level GRO (Generic Receive Offload) infrastructure. This is pretty similar to LRO except that this is protocol-independent. Instead of holding packets in an lro_mgr structure, they're now held in napi_struct.

For drivers that intend to use this, they can set the NETIF_F_GRO bit and call napi_gro_receive instead of netif_receive_skb or just call netif_rx. The latter will call napi_receive_skb automatically. When napi_gro_receive is used, the driver must either call napi_complete/napi_rx_complete, or call napi_gro_flush in softirq context if the driver uses the primitives __napi_complete/__napi_rx_complete.

Protocols will set the gro_receive and gro_complete function pointers in order to participate in this scheme. In addition to the packet, gro_receive will get a list of currently held packets. Each packet in the list has a same_flow field which is non-zero if it is a potential match for the new packet. For each packet that may match, they also have a flush field which is non-zero if the held packet must not be merged with the new packet.

Once gro_receive has determined that the new skb matches a held packet, the held packet may be processed immediately if the new skb cannot be merged with it. In this case gro_receive should return the pointer to the existing skb in gro_list. Otherwise the new skb should be merged into the existing packet and NULL should be returned, unless the new skb makes it impossible for any further merges to be made (e.g., FIN packet) where the merged skb should be returned.

Whenever the skb is merged into an existing entry, the gro_receive function should set NAPI_GRO_CB(skb)->same_flow. Note that if an skb merely matches an existing entry but can't be merged with it, then this shouldn't be set.

If gro_receive finds it pointless to hold the new skb for future merging, it should set NAPI_GRO_CB(skb)->flush.

Held packets will be flushed by napi_gro_flush which is called by napi_complete and napi_rx_complete.

Currently held packets are stored in a singly linked list just like LRO. The list is limited to a maximum of 8 entries. In future, this may be expanded to use a hash table to allow more flows to be held for merging.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
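For the driver side described above, a minimal sketch of the intended usage is a NAPI poll routine like the one below. This is illustrative only: struct my_priv, my_hw_next_rx_skb() and my_driver_setup() are hypothetical stand-ins for a real driver's receive-ring handling; only the NETIF_F_GRO bit, napi_gro_receive() in place of netif_receive_skb(), and napi_complete() (which flushes packets still held for merging) come from the patch itself.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct my_priv {
	struct napi_struct napi;
	/* ... hypothetical receive-ring state ... */
};

/* Hypothetical helper: pull the next completed receive buffer off the ring. */
static struct sk_buff *my_hw_next_rx_skb(struct napi_struct *napi);

static int my_driver_poll(struct napi_struct *napi, int budget)
{
	struct sk_buff *skb;
	int work_done = 0;

	while (work_done < budget && (skb = my_hw_next_rx_skb(napi)) != NULL) {
		napi_gro_receive(napi, skb);	/* instead of netif_receive_skb(skb) */
		work_done++;
	}

	if (work_done < budget)
		napi_complete(napi);		/* also flushes any held GRO packets */

	return work_done;
}

static void my_driver_setup(struct net_device *dev, struct my_priv *priv)
{
	dev->features |= NETIF_F_GRO;		/* opt in to GRO */
	netif_napi_add(dev, &priv->napi, my_driver_poll, 64);
}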
119 lines · 2.6 KiB · C
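On the protocol side, the same_flow/flush contract spelled out in the commit message can be summarized with a schematic hook. Everything prefixed my_ is a hypothetical placeholder for real header comparison and coalescing (the in-tree inet/TCP hooks do this work themselves), and the hook shape used here, taking the head of gro_list and returning the list position of a packet that should be flushed now, is assumed from the description above rather than quoted from the patch.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical helpers standing in for real header comparison and merging. */
static bool my_flow_match(struct sk_buff *held, struct sk_buff *skb);
static int  my_merge(struct sk_buff *held, struct sk_buff *skb);	/* 0 on success */
static bool my_blocks_further_merges(struct sk_buff *skb);		/* e.g. a FIN */

static struct sk_buff **my_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;	/* non-NULL: flush the packet at *pp now */
	struct sk_buff *p;

	for (; (p = *head) != NULL; head = &p->next) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;			/* already ruled out as a match */

		if (!my_flow_match(p, skb)) {
			NAPI_GRO_CB(p)->same_flow = 0;	/* not actually the same flow */
			continue;
		}

		if (NAPI_GRO_CB(p)->flush || my_merge(p, skb) != 0) {
			/* matches, but must not / cannot be merged:
			 * process the held packet immediately */
			pp = head;
		} else {
			NAPI_GRO_CB(skb)->same_flow = 1;	/* merged into the held packet */
			if (my_blocks_further_merges(skb))
				pp = head;		/* e.g. FIN: no further merges possible */
		}
		break;
	}

	if (!NAPI_GRO_CB(skb)->same_flow && my_blocks_further_merges(skb))
		NAPI_GRO_CB(skb)->flush = 1;	/* pointless to hold this skb for merging */

	return pp;
}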
/*
 * Common code for low-level network console, dump, and debugger code
 *
 * Derived from netconsole, kgdb-over-ethernet, and netdump patches
 */

#ifndef _LINUX_NETPOLL_H
#define _LINUX_NETPOLL_H

#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/rcupdate.h>
#include <linux/list.h>

struct netpoll {
	struct net_device *dev;
	char dev_name[IFNAMSIZ];	/* interface to bind to, e.g. "eth0" */
	const char *name;		/* client name, e.g. "netconsole" */
	void (*rx_hook)(struct netpoll *, int, char *, int);
					/* called for UDP packets received on local_port */

	u32 local_ip, remote_ip;
	u16 local_port, remote_port;
	u8 remote_mac[ETH_ALEN];
};

struct netpoll_info {
	atomic_t refcnt;
	int rx_flags;
	spinlock_t rx_lock;
	struct netpoll *rx_np;		/* netpoll that registered an rx_hook */
	struct sk_buff_head arp_tx;	/* list of arp requests to reply to */
	struct sk_buff_head txq;
	struct delayed_work tx_work;
};

void netpoll_poll(struct netpoll *np);
void netpoll_send_udp(struct netpoll *np, const char *msg, int len);
void netpoll_print_options(struct netpoll *np);
int netpoll_parse_options(struct netpoll *np, char *opt);
int netpoll_setup(struct netpoll *np);
int netpoll_trap(void);
void netpoll_set_trap(int trap);
void netpoll_cleanup(struct netpoll *np);
int __netpoll_rx(struct sk_buff *skb);

#ifdef CONFIG_NETPOLL
/* Returns non-zero when netpoll consumed the skb; the normal receive path
 * should not process it further. */
static inline int netpoll_rx(struct sk_buff *skb)
{
	struct netpoll_info *npinfo = skb->dev->npinfo;
	unsigned long flags;
	int ret = 0;

	if (!npinfo || (!npinfo->rx_np && !npinfo->rx_flags))
		return 0;

	spin_lock_irqsave(&npinfo->rx_lock, flags);
	/* check rx_flags again with the lock held */
	if (npinfo->rx_flags && __netpoll_rx(skb))
		ret = 1;
	spin_unlock_irqrestore(&npinfo->rx_lock, flags);

	return ret;
}

static inline int netpoll_receive_skb(struct sk_buff *skb)
{
	if (!list_empty(&skb->dev->napi_list))
		return netpoll_rx(skb);
	return 0;
}

static inline void *netpoll_poll_lock(struct napi_struct *napi)
{
	struct net_device *dev = napi->dev;

	rcu_read_lock(); /* deal with race on ->npinfo */
	if (dev && dev->npinfo) {
		spin_lock(&napi->poll_lock);
		napi->poll_owner = smp_processor_id();
		return napi;
	}
	return NULL;
}

static inline void netpoll_poll_unlock(void *have)
{
	struct napi_struct *napi = have;

	if (napi) {
		napi->poll_owner = -1;
		spin_unlock(&napi->poll_lock);
	}
	rcu_read_unlock();
}

#else
static inline int netpoll_rx(struct sk_buff *skb)
{
	return 0;
}
static inline int netpoll_receive_skb(struct sk_buff *skb)
{
	return 0;
}
static inline void *netpoll_poll_lock(struct napi_struct *napi)
{
	return NULL;
}
static inline void netpoll_poll_unlock(void *have)
{
}
static inline void netpoll_netdev_init(struct net_device *dev)
{
}
#endif

#endif
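For orientation, here is a hedged sketch of how a netconsole-style client might drive the interface declared above: fill in a struct netpoll, let netpoll_parse_options() digest a configuration string, bring the target up with netpoll_setup(), then emit messages with netpoll_send_udp(). The module, its name, and the addresses are made-up examples, and the option string follows the syntax assumed from netconsole (drivers/net/netconsole.c), the main in-tree user of this header.

#include <linux/module.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/netpoll.h>

/* Hypothetical configuration: local port@IP/device, remote port@IP/MAC. */
static char my_config[] = "6665@10.0.0.1/eth0,6666@10.0.0.2/00:11:22:33:44:55";

static struct netpoll my_np = {
	.name = "mylogger",
};

static int __init my_logger_init(void)
{
	static const char msg[] = "netpoll example up\n";
	int err;

	err = netpoll_parse_options(&my_np, my_config);	/* fills ports, IPs, dev_name, MAC */
	if (err)
		return err;

	err = netpoll_setup(&my_np);	/* binds to the device and completes the target */
	if (err)
		return err;

	netpoll_send_udp(&my_np, msg, strlen(msg));
	return 0;
}

static void __exit my_logger_exit(void)
{
	netpoll_cleanup(&my_np);
}

module_init(my_logger_init);
module_exit(my_logger_exit);
MODULE_LICENSE("GPL");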