2007-10-15 16:24:19 +07:00
|
|
|
#ifndef __NET_FRAG_H__
|
|
|
|
#define __NET_FRAG_H__
|
|
|
|
|
2008-01-22 21:02:14 +07:00
|
|
|
/* Per-network-namespace fragment reassembly state, shared by the
 * IPv4, IPv6 and netfilter reassembly engines via struct inet_frags.
 */
struct netns_frags {
	int			nqueues;	/* number of frag queues currently in this netns */
	struct list_head	lru_list;	/* queues ordered by age, for eviction */

	/* It's important for performance to keep lru_list and mem on
	 * separate cachelines
	 */
	atomic_t		mem ____cacheline_aligned_in_smp;	/* memory accounted to frag queues */
	/* sysctls */
	int			timeout;	/* reassembly timeout (jiffies) */
	int			high_thresh;	/* start evicting when mem exceeds this */
	int			low_thresh;	/* evict down to this level */
};
|
|
|
|
|
2007-10-15 16:24:19 +07:00
|
|
|
/* One in-progress datagram being reassembled from fragments.
 * Embedded at the start of the protocol-specific queue structures
 * (e.g. IPv4's struct ipq), which carry the actual lookup key.
 */
struct inet_frag_queue {
	spinlock_t		lock;		/* protects the queue state below */
	struct timer_list	timer;		/* when will this queue expire? */
	struct list_head	lru_list;	/* lru list member */
	struct hlist_node	list;		/* hash bucket linkage in inet_frags.hash */
	atomic_t		refcnt;		/* reference count; freed via inet_frag_put() */
	struct sk_buff		*fragments;	/* list of received fragments */
	struct sk_buff		*fragments_tail; /* tail of the list, for O(1) append */
	ktime_t			stamp;		/* timestamp of the last fragment received */
	int			len;		/* total length of orig datagram */
	int			meat;		/* bytes received so far; complete when meat == len */
	__u8			last_in;	/* first/last segment arrived? */

#define INET_FRAG_COMPLETE	4		/* reassembly finished or queue killed */
#define INET_FRAG_FIRST_IN	2		/* first fragment (offset 0) received */
#define INET_FRAG_LAST_IN	1		/* last fragment (no more-fragments) received */

	u16			max_size;	/* largest fragment size seen — TODO confirm against users */

	struct netns_frags	*net;		/* owning namespace's frag state */
};
|
|
|
|
|
2007-10-15 16:31:52 +07:00
|
|
|
#define INETFRAGS_HASHSZ		64	/* buckets in the per-protocol hash table */

/* Per-protocol (IPv4 / IPv6 / netfilter) reassembly engine: hash table
 * of queues plus the protocol-specific callbacks used by the generic
 * inet_frag_* code.
 */
struct inet_frags {
	struct hlist_head	hash[INETFRAGS_HASHSZ];
	/* This rwlock is a global lock (separate per IPv4, IPv6 and
	 * netfilter). Important to keep this on a separate cacheline.
	 */
	rwlock_t		lock ____cacheline_aligned_in_smp;

	int			secret_interval;	/* how often to rehash (anti-DoS), seconds via sysctl */
	struct timer_list	secret_timer;		/* periodically reseeds rnd and rehashes */
	u32			rnd;			/* hash secret, mixed in by hashfn */
	int			qsize;			/* size of the protocol's queue struct to allocate */

	/* Protocol callbacks. hashfn/match identify a queue; constructor
	 * initializes the protocol part from the lookup key (arg).
	 */
	unsigned int		(*hashfn)(struct inet_frag_queue *);
	bool			(*match)(struct inet_frag_queue *q, void *arg);
	void			(*constructor)(struct inet_frag_queue *q,
					       void *arg);
	void			(*destructor)(struct inet_frag_queue *);	/* free protocol-private state */
	void			(*skb_free)(struct sk_buff *);			/* per-skb cleanup on destroy */
	void			(*frag_expire)(unsigned long data);		/* timer handler: queue timed out */
};
|
|
|
|
|
|
|
|
/* Register/unregister a protocol's reassembly engine (hash, secret timer). */
void inet_frags_init(struct inet_frags *);
void inet_frags_fini(struct inet_frags *);

/* Per-namespace setup/teardown of struct netns_frags. */
void inet_frags_init_net(struct netns_frags *nf);
void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f);

/* Remove a queue from the hash and LRU and stop its timer. */
void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
/* Free a dead queue; *work, if non-NULL, is decremented by the memory
 * released (used by the evictor) — TODO confirm against implementation.
 */
void inet_frag_destroy(struct inet_frag_queue *q,
		       struct inet_frags *f, int *work);
/* Shrink namespace frag memory; returns number of queues evicted. */
int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force);
/* Look up (or create) the queue matching key; called with f->lock read-held
 * and drops it before returning, per the __releases annotation.
 */
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
		struct inet_frags *f, void *key, unsigned int hash)
	__releases(&f->lock);
|
2007-10-15 16:37:18 +07:00
|
|
|
|
2007-10-15 16:41:56 +07:00
|
|
|
static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f)
|
|
|
|
{
|
|
|
|
if (atomic_dec_and_test(&q->refcnt))
|
|
|
|
inet_frag_destroy(q, f, NULL);
|
|
|
|
}
|
|
|
|
|
2007-10-15 16:24:19 +07:00
|
|
|
#endif
|