mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
0642840b8b
The way people generally use netlink_dump is that they fill in the skb as much as possible, breaking when nla_put returns an error. Then, they get called again and start filling out the next skb, and again, and so forth. The mechanism at work here is the ability for the iterative dumping function to detect when the skb is filled up and not fill it past the brim, waiting for a fresh skb for the rest of the data.

However, if the attributes are small and nicely packed, it is possible that a dump callback function successfully fills in attributes until the skb is of size 4080 (libmnl's default page-sized receive buffer size). The dump function completes, satisfied, and then, if it happens to be that this is actually the last skb, and no further ones are to be sent, then netlink_dump will add on the NLMSG_DONE part:

    nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI);

It is very important that netlink_dump does this, of course. However, in this example, that call to nlmsg_put_answer will fail, because the previous filling by the dump function did not leave it enough room. And how could it possibly have done so? All of the nla_put variety of functions simply check to see if the skb has enough tailroom, independent of the context it is in.

In order to keep the important assumptions of all netlink dump users, it is therefore important to give them an skb that has this end part of the tail already reserved, so that the call to nlmsg_put_answer does not fail. Otherwise, library authors are forced to find some bizarre sized receive buffer that has a large modulo relative to the common sizes of messages received, which is ugly and buggy.

This patch thus saves the NLMSG_DONE for an additional message, for the case that things are dangerously close to the brim. This requires keeping track of the errno from ->dump() across calls.

Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
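Below is a condensed, illustrative sketch of the approach the commit message describes, not the verbatim netlink_dump() code from net/netlink/af_netlink.c: the return value of ->dump() is kept in nlk->dump_done_errno across calls, and NLMSG_DONE is only emitted once the current skb is guaranteed to have tailroom for it; otherwise the skb is sent as-is and the done message is deferred to a later, possibly otherwise empty, skb. The helper name netlink_dump_finish_sketch() and its calling convention are invented for illustration; only the two struct netlink_sock fields it touches, cb_running and dump_done_errno, both visible in the header below, come from the actual change.

#include <net/netlink.h>
#include "af_netlink.h"

/* Illustrative only: the real logic lives inside netlink_dump();
 * this helper just shows the control flow described above.
 */
static bool netlink_dump_finish_sketch(struct netlink_sock *nlk,
                                       struct sk_buff *skb)
{
        struct netlink_callback *cb = &nlk->cb;
        struct nlmsghdr *nlh;

        /* Run ->dump() only while the previous call reported pending data. */
        if (nlk->dump_done_errno > 0)
                nlk->dump_done_errno = cb->dump(skb, cb);

        /* Either more data is pending, or this skb has no tailroom left for
         * an NLMSG_DONE message: the caller sends the skb as it is, leaves
         * cb_running set, and NLMSG_DONE goes out in a later skb instead of
         * failing nlmsg_put_answer() here.
         */
        if (nlk->dump_done_errno > 0 ||
            skb_tailroom(skb) < nlmsg_total_size(sizeof(nlk->dump_done_errno)))
                return false;

        /* Enough tailroom is guaranteed at this point, so this cannot fail. */
        nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE,
                               sizeof(nlk->dump_done_errno), NLM_F_MULTI);
        memcpy(nlmsg_data(nlh), &nlk->dump_done_errno,
               sizeof(nlk->dump_done_errno));

        nlk->cb_running = false;        /* dump finished; caller sends final skb */
        return true;
}

From user space nothing changes: a typical libmnl-style dump loop keeps calling mnl_socket_recvfrom() and mnl_cb_run() on a fixed page-sized buffer until NLMSG_DONE arrives, and deferring the done message like this is what keeps that loop terminating even when the last data skb was filled to the brim.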
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _AF_NETLINK_H
#define _AF_NETLINK_H

#include <linux/rhashtable.h>
#include <linux/atomic.h>
#include <linux/workqueue.h>
#include <net/sock.h>

/* flags */
#define NETLINK_F_KERNEL_SOCKET 0x1
#define NETLINK_F_RECV_PKTINFO 0x2
#define NETLINK_F_BROADCAST_SEND_ERROR 0x4
#define NETLINK_F_RECV_NO_ENOBUFS 0x8
#define NETLINK_F_LISTEN_ALL_NSID 0x10
#define NETLINK_F_CAP_ACK 0x20
#define NETLINK_F_EXT_ACK 0x40

#define NLGRPSZ(x) (ALIGN(x, sizeof(unsigned long) * 8) / 8)
#define NLGRPLONGS(x) (NLGRPSZ(x)/sizeof(unsigned long))

struct netlink_sock {
        /* struct sock has to be the first member of netlink_sock */
        struct sock sk;
        u32 portid;
        u32 dst_portid;
        u32 dst_group;
        u32 flags;
        u32 subscriptions;
        u32 ngroups;
        unsigned long *groups;
        unsigned long state;
        size_t max_recvmsg_len;
        wait_queue_head_t wait;
        bool bound;
        bool cb_running;
        int dump_done_errno;
        struct netlink_callback cb;
        struct mutex *cb_mutex;
        struct mutex cb_def_mutex;
        void (*netlink_rcv)(struct sk_buff *skb);
        int (*netlink_bind)(struct net *net, int group);
        void (*netlink_unbind)(struct net *net, int group);
        struct module *module;

        struct rhash_head node;
        struct rcu_head rcu;
        struct work_struct work;
};

static inline struct netlink_sock *nlk_sk(struct sock *sk)
{
        return container_of(sk, struct netlink_sock, sk);
}

struct netlink_table {
        struct rhashtable hash;
        struct hlist_head mc_list;
        struct listeners __rcu *listeners;
        unsigned int flags;
        unsigned int groups;
        struct mutex *cb_mutex;
        struct module *module;
        int (*bind)(struct net *net, int group);
        void (*unbind)(struct net *net, int group);
        bool (*compare)(struct net *net, struct sock *sock);
        int registered;
};

extern struct netlink_table *nl_table;
extern rwlock_t nl_table_lock;

#endif