2005-04-17 05:20:36 +07:00
|
|
|
#ifndef __LINUX_NETLINK_H
|
|
|
|
#define __LINUX_NETLINK_H
|
|
|
|
|
|
|
|
|
|
|
|
#include <linux/capability.h>
|
|
|
|
#include <linux/skbuff.h>
|
2012-09-21 16:35:38 +07:00
|
|
|
#include <linux/export.h>
|
2012-09-07 01:20:01 +07:00
|
|
|
#include <net/scm.h>
|
2012-10-13 16:46:48 +07:00
|
|
|
#include <uapi/linux/netlink.h>
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2010-09-22 12:54:54 +07:00
|
|
|
struct net;
|
|
|
|
|
2007-04-26 09:08:35 +07:00
|
|
|
static inline struct nlmsghdr *nlmsg_hdr(const struct sk_buff *skb)
|
|
|
|
{
|
|
|
|
return (struct nlmsghdr *)skb->data;
|
|
|
|
}
|
|
|
|
|
2013-04-17 13:47:02 +07:00
|
|
|
/* Per-skb flags kept in netlink_skb_parms.flags (skb->cb). */
enum netlink_skb_flags {
	NETLINK_SKB_MMAPED	= 0x1,	/* Packet data is mmaped */
	NETLINK_SKB_TX		= 0x2,	/* Packet was sent by userspace */
	NETLINK_SKB_DELIVERED	= 0x4,	/* Packet was delivered */
	NETLINK_SKB_DST		= 0x8,	/* Dst set in sendto or sendmsg */
};
|
|
|
|
|
2009-11-05 00:50:58 +07:00
|
|
|
/*
 * Netlink control-block state, overlaid on skb->cb via NETLINK_CB()
 * for skbs travelling over netlink sockets.
 */
struct netlink_skb_parms {
	struct scm_creds	creds;		/* Skb credentials	*/
	__u32			portid;		/* sending socket's port id */
	__u32			dst_group;	/* destination multicast group */
	__u32			flags;		/* enum netlink_skb_flags */
	struct sock		*sk;		/* originating socket */
};
|
|
|
|
|
|
|
|
/* Access the netlink control block (and its credentials) stored in skb->cb. */
#define NETLINK_CB(skb)		(*(struct netlink_skb_parms*)&((skb)->cb))
#define NETLINK_CREDS(skb)	(&NETLINK_CB((skb)).creds)
|
|
|
|
|
|
|
|
|
2009-09-12 10:03:15 +07:00
|
|
|
/* Grab/release the global netlink socket table lock. */
extern void netlink_table_grab(void);
extern void netlink_table_ungrab(void);
|
|
|
|
|
2012-09-08 09:53:53 +07:00
|
|
|
/* netlink_kernel_cfg.flags: allow non-root users to receive/send. */
#define NL_CFG_F_NONROOT_RECV	(1 << 0)
#define NL_CFG_F_NONROOT_SEND	(1 << 1)
|
|
|
|
|
2012-06-29 13:15:21 +07:00
|
|
|
/* optional Netlink kernel configuration parameters */
struct netlink_kernel_cfg {
	unsigned int	groups;		/* number of multicast groups */
	unsigned int	flags;		/* NL_CFG_F_* flags */
	void		(*input)(struct sk_buff *skb);	/* message receive callback */
	struct mutex	*cb_mutex;	/* optional callback mutex */
	int		(*bind)(struct net *net, int group);	/* multicast group bind hook */
	void		(*unbind)(struct net *net, int group);	/* multicast group unbind hook */
	bool		(*compare)(struct net *net, struct sock *sk);	/* socket/netns compare hook */
};
|
|
|
|
|
2012-09-08 09:53:54 +07:00
|
|
|
/*
 * Create an in-kernel netlink socket for protocol @unit in namespace @net,
 * owned by @module and configured by @cfg (may be NULL per netlink_kernel_create).
 */
extern struct sock *__netlink_kernel_create(struct net *net, int unit,
					    struct module *module,
					    struct netlink_kernel_cfg *cfg);
|
|
|
|
static inline struct sock *
|
|
|
|
netlink_kernel_create(struct net *net, int unit, struct netlink_kernel_cfg *cfg)
|
|
|
|
{
|
|
|
|
return __netlink_kernel_create(net, unit, THIS_MODULE, cfg);
|
|
|
|
}
|
|
|
|
|
2008-01-29 05:41:19 +07:00
|
|
|
/* Release a kernel netlink socket created by netlink_kernel_create(). */
extern void netlink_kernel_release(struct sock *sk);
/* Resize the number of multicast groups on @sk (__ variant: caller holds the table lock). */
extern int __netlink_change_ngroups(struct sock *sk, unsigned int groups);
extern int netlink_change_ngroups(struct sock *sk, unsigned int groups);
/* Kick all current subscribers out of multicast @group (caller holds the table lock). */
extern void __netlink_clear_multicast_users(struct sock *sk, unsigned int group);
/* Send a netlink ACK/error reply for @nlh back to the sender of @in_skb. */
extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err);
/* Non-zero if anyone is subscribed to multicast @group on @sk. */
extern int netlink_has_listeners(struct sock *sk, unsigned int group);
|
2013-04-17 13:47:04 +07:00
|
|
|
/* Allocate an skb suitable for sending from @ssk to @dst_portid. */
extern struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size,
					 u32 dst_portid, gfp_t gfp_mask);
/* Deliver @skb to the single socket bound to @portid. */
extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 portid, int nonblock);
/* Deliver @skb to every member of multicast @group (skipping @portid). */
extern int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 portid,
			     __u32 group, gfp_t allocation);
/* Like netlink_broadcast(), but @filter can veto delivery per destination socket. */
extern int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb,
				      __u32 portid, __u32 group, gfp_t allocation,
				      int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
				      void *filter_data);
/* Report error @code to @portid / multicast @group listeners. */
extern int netlink_set_err(struct sock *ssk, __u32 portid, __u32 group, int code);
|
2005-04-17 05:20:36 +07:00
|
|
|
/* Register/unregister for netlink socket event notifications (struct netlink_notify). */
extern int netlink_register_notifier(struct notifier_block *nb);
extern int netlink_unregister_notifier(struct notifier_block *nb);
|
|
|
|
|
|
|
|
/* finegrained unicast helpers: */
/* Resolve the netlink socket behind an open file. */
struct sock *netlink_getsockbyfilp(struct file *filp);
/* Attach @skb to @sk, possibly blocking up to *@timeo; @ssk is the sender. */
int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
		      long *timeo, struct sock *ssk);
/* Undo netlink_attachskb(). */
void netlink_detachskb(struct sock *sk, struct sk_buff *skb);
/* Queue a previously attached skb for delivery to @sk. */
int netlink_sendskb(struct sock *sk, struct sk_buff *skb);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2013-06-28 08:04:23 +07:00
|
|
|
static inline struct sk_buff *
|
|
|
|
netlink_skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
|
|
|
|
{
|
|
|
|
struct sk_buff *nskb;
|
|
|
|
|
|
|
|
nskb = skb_clone(skb, gfp_mask);
|
|
|
|
if (!nskb)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
/* This is a large skb, set destructor callback to release head */
|
|
|
|
if (is_vmalloc_addr(skb->head))
|
|
|
|
nskb->destructor = skb->destructor;
|
|
|
|
|
|
|
|
return nskb;
|
|
|
|
}
|
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
/*
 * skb should fit one page. This choice is good for headerless malloc.
 * But we should limit to 8K so that userspace does not have to
 * use enormous buffer sizes on recvmsg() calls just to avoid
 * MSG_TRUNC when PAGE_SIZE is very large.
 */
#if PAGE_SIZE < 8192UL
#define NLMSG_GOODSIZE	SKB_WITH_OVERHEAD(PAGE_SIZE)
#else
#define NLMSG_GOODSIZE	SKB_WITH_OVERHEAD(8192UL)
#endif

/* Default message payload budget: goodsize minus the netlink header. */
#define NLMSG_DEFAULT_SIZE (NLMSG_GOODSIZE - NLMSG_HDRLEN)
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
|
2009-11-05 00:50:58 +07:00
|
|
|
/*
 * Per-dump state handed to a dump callback across the multiple
 * recvmsg() rounds of a netlink dump.
 */
struct netlink_callback {
	struct sk_buff		*skb;	/* skb being filled in the current round */
	const struct nlmsghdr	*nlh;	/* request message that started the dump */
	int			(*dump)(struct sk_buff * skb,
					struct netlink_callback *cb);
	int			(*done)(struct netlink_callback *cb);	/* dump-finished hook */
	void			*data;	/* opaque cookie for the dump callback */
	/* the module that dump function belong to */
	struct module		*module;
	u16			family;
	u16			min_dump_alloc;	/* minimum skb allocation per round */
	/*
	 * Generation counters used to flag dumps that were interrupted by
	 * concurrent changes (NLM_F_DUMP_INTR): dumpit callbacks set seq and
	 * call nl_dump_check_consistent() per message.
	 */
	unsigned int		prev_seq, seq;
	long			args[6];	/* scratch space for the dump callback */
};
|
|
|
|
|
2009-11-05 00:50:58 +07:00
|
|
|
/* Event payload delivered through the netlink notifier chain. */
struct netlink_notify {
	struct net *net;	/* namespace of the affected socket */
	int portid;		/* port id of the affected socket */
	int protocol;		/* netlink protocol (NETLINK_*) */
};
|
|
|
|
|
2012-01-31 03:22:06 +07:00
|
|
|
/* Reserve and fill in a netlink message header at the tail of @skb. */
struct nlmsghdr *
__nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2012-02-24 21:30:15 +07:00
|
|
|
/* Parameters describing a dump, passed to netlink_dump_start(). */
struct netlink_dump_control {
	int (*dump)(struct sk_buff *skb, struct netlink_callback *);	/* per-round fill callback */
	int (*done)(struct netlink_callback *);	/* optional completion callback */
	void *data;		/* opaque cookie for the callbacks */
	struct module *module;	/* owner; defaults to THIS_MODULE in netlink_dump_start() */
	u16 min_dump_alloc;	/* minimum skb allocation per round */
};
|
|
|
|
|
2012-10-05 03:15:48 +07:00
|
|
|
/* Begin a netlink dump described by @control; @control->module must be set. */
extern int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
				const struct nlmsghdr *nlh,
				struct netlink_dump_control *control);
|
|
|
|
static inline int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
|
|
|
|
const struct nlmsghdr *nlh,
|
|
|
|
struct netlink_dump_control *control)
|
|
|
|
{
|
|
|
|
if (!control->module)
|
|
|
|
control->module = THIS_MODULE;
|
|
|
|
|
|
|
|
return __netlink_dump_start(ssk, skb, nlh, control);
|
|
|
|
}
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2013-06-22 00:38:07 +07:00
|
|
|
/* Registration record for a netlink tap device; see netlink_add_tap(). */
struct netlink_tap {
	struct net_device *dev;		/* device the tap is attached to */
	struct module *module;		/* owner module */
	struct list_head list;		/* linkage in the tap list */
};

/* Register/unregister a netlink tap. */
extern int netlink_add_tap(struct netlink_tap *nt);
extern int netlink_remove_tap(struct netlink_tap *nt);
|
|
|
|
|
2014-04-24 04:28:03 +07:00
|
|
|
/*
 * Capability checks for netlink messages: test capability @cap against the
 * credentials carried with the skb (NETLINK_CB), in user namespace @ns,
 * the socket's namespace, or the network namespace respectively.
 */
bool __netlink_ns_capable(const struct netlink_skb_parms *nsp,
			  struct user_namespace *ns, int cap);
bool netlink_ns_capable(const struct sk_buff *skb,
			struct user_namespace *ns, int cap);
bool netlink_capable(const struct sk_buff *skb, int cap);
bool netlink_net_capable(const struct sk_buff *skb, int cap);
|
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
#endif /* __LINUX_NETLINK_H */
|