commit acdcecc612
UDP reuseport groups can hold a mix of unconnected and connected sockets.
Ensure that a connected socket receives all traffic addressed to its 4-tuple.
Fast reuseport returns on the first reuseport match, on the assumption
that all matches are equal. Only if connections are present does the
lookup fall back to the previous behavior of scoring all sockets.
Record if connections are present and if so (1) treat such connected
sockets as an independent match from the group, (2) only return
2-tuple matches from reuseport and (3) do not return on the first
2-tuple reuseport match to allow for a higher scoring match later.
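To make rules (1)-(3) concrete, here is a self-contained userspace toy model. It is not kernel code: struct toy_sock, toy_group_pick() and the score values are invented for the illustration, and the real selection uses reuseport_select_sock(), declared in the header below, from the UDP lookup path.

/* Toy model of selection rules (1)-(3); not kernel code. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_sock {
	uint32_t laddr, lport;	/* 2-tuple: local address/port */
	bool connected;		/* connected to a fixed peer? */
	uint32_t raddr, rport;	/* 4-tuple peer, if connected */
};

struct toy_group {
	struct toy_sock *socks;
	int num_socks;
	bool has_conns;		/* mirrors sock_reuseport.has_conns */
};

/* Score a socket against a packet: 4-tuple match beats 2-tuple match. */
static int toy_score(const struct toy_sock *sk, uint32_t daddr, uint32_t dport,
		     uint32_t saddr, uint32_t sport)
{
	if (sk->laddr != daddr || sk->lport != dport)
		return -1;		/* not even a 2-tuple match */
	if (!sk->connected)
		return 1;		/* 2-tuple match */
	if (sk->raddr == saddr && sk->rport == sport)
		return 2;		/* full 4-tuple match */
	return -1;			/* connected to another peer */
}

/* Rule (2): the hash-based group pick only returns unconnected sockets. */
static struct toy_sock *toy_group_pick(struct toy_group *grp, uint32_t hash)
{
	int i = hash % grp->num_socks, start = i;

	while (grp->socks[i].connected) {
		i = (i + 1) % grp->num_socks;
		if (i == start)
			return NULL;	/* every socket is connected */
	}
	return &grp->socks[i];
}

static struct toy_sock *toy_lookup(struct toy_group *grp, uint32_t hash,
				   uint32_t daddr, uint32_t dport,
				   uint32_t saddr, uint32_t sport)
{
	struct toy_sock *result = NULL;
	int best = 0;

	for (int k = 0; k < grp->num_socks; k++) {
		struct toy_sock *sk = &grp->socks[k];
		int score = toy_score(sk, daddr, dport, saddr, sport);

		if (score > best) {
			if (!sk->connected) {
				struct toy_sock *pick = toy_group_pick(grp, hash);

				/* Rule (3): only short-circuit on the group
				 * pick when no connections are present;
				 * otherwise keep scoring so a 4-tuple match,
				 * rule (1), can still win.
				 */
				if (pick && !grp->has_conns)
					return pick;
			}
			best = score;
			result = sk;
		}
	}
	return result;
}

int main(void)
{
	struct toy_sock socks[] = {
		{ .laddr = 1, .lport = 53 },	/* unconnected */
		{ .laddr = 1, .lport = 53, .connected = true,
		  .raddr = 9, .rport = 4000 },	/* connected to 9:4000 */
	};
	struct toy_group grp = { socks, 2, true };

	/* Traffic from the connected peer reaches the connection ... */
	printf("%d\n", toy_lookup(&grp, 7, 1, 53, 9, 4000)->connected);
	/* ... other traffic still lands on an unconnected socket. */
	printf("%d\n", toy_lookup(&grp, 7, 1, 53, 8, 4001)->connected);
	return 0;
}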
The new field has_conns is set without locks. No other field in the
bitfield is modified at runtime and has_conns is only ever set
unconditionally, so a racing read-modify-write cannot miss a change.
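The helper that carries this flag, reuseport_has_conns(), is added to the header shown below. The commit's actual call sites live in the UDP and datagram connect/lookup paths and are not part of this header; the fragment below only illustrates the intended set/read pattern, and the toy_* names are invented.

#include <net/sock_reuseport.h>

/* Connect path (illustrative): mark the group as holding connections.
 * Every writer stores the same value and no other field in the bitfield
 * changes at runtime, so a racing read-modify-write cannot lose state.
 */
static void toy_mark_connected(struct sock *sk)
{
	reuseport_has_conns(sk, true);
}

/* Receive lookup (illustrative): only trust the first reuseport match
 * when the group holds no connected sockets.
 */
static bool toy_first_match_is_final(struct sock *sk)
{
	return !reuseport_has_conns(sk, false);
}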
Fixes: e32ea7e747 ("soreuseport: fast reuseport UDP socket selection")
Link: http://lkml.kernel.org/r/CA+FuTSfRP09aJNYRt04SS6qj22ViiOEWaWmLAwX0psk8-PGNxw@mail.gmail.com
Signed-off-by: Willem de Bruijn <willemb@google.com>
Acked-by: Paolo Abeni <pabeni@redhat.com>
Acked-by: Craig Gallek <kraig@google.com>
Signed-off-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
61 lines | 1.6 KiB | C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _SOCK_REUSEPORT_H
#define _SOCK_REUSEPORT_H

#include <linux/filter.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <net/sock.h>

extern spinlock_t reuseport_lock;
struct sock_reuseport {
	struct rcu_head		rcu;

	u16			max_socks;	/* length of socks */
	u16			num_socks;	/* elements in socks */
	/* The last synq overflow event timestamp of this
	 * reuse->socks[] group.
	 */
	unsigned int		synq_overflow_ts;
	/* ID stays the same even after the size of socks[] grows. */
	unsigned int		reuseport_id;
	unsigned int		bind_inany:1;
	unsigned int		has_conns:1;
	struct bpf_prog __rcu	*prog;		/* optional BPF sock selector */
	struct sock		*socks[0];	/* array of sock pointers */
};
extern int reuseport_alloc(struct sock *sk, bool bind_inany);
extern int reuseport_add_sock(struct sock *sk, struct sock *sk2,
			      bool bind_inany);
extern void reuseport_detach_sock(struct sock *sk);
extern struct sock *reuseport_select_sock(struct sock *sk,
					  u32 hash,
					  struct sk_buff *skb,
					  int hdr_len);
extern int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog);
extern int reuseport_detach_prog(struct sock *sk);
/* Under the RCU read lock, report whether the socket's reuseport group
 * holds connected sockets; when @set is true, also mark it as such.
 */
static inline bool reuseport_has_conns(struct sock *sk, bool set)
{
	struct sock_reuseport *reuse;
	bool ret = false;

	rcu_read_lock();
	reuse = rcu_dereference(sk->sk_reuseport_cb);
	if (reuse) {
		if (set)
			reuse->has_conns = 1;
		ret = reuse->has_conns;
	}
	rcu_read_unlock();

	return ret;
}
int reuseport_get_id(struct sock_reuseport *reuse);

#endif  /* _SOCK_REUSEPORT_H */
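For orientation, a rough sketch of how the declarations above fit together for a protocol that supports SO_REUSEPORT. The toy_* wrappers are invented for illustration; the real call sites are in the inet/UDP hashing and lookup code, so treat this as an assumption-laden outline rather than the actual kernel call paths.

#include <linux/udp.h>
#include <net/sock_reuseport.h>

/* bind(): the first socket with SO_REUSEPORT allocates the group,
 * later sockets join it via an existing member.
 */
static int toy_reuseport_join(struct sock *sk, struct sock *existing,
			      bool bind_inany)
{
	if (!existing)
		return reuseport_alloc(sk, bind_inany);
	return reuseport_add_sock(sk, existing, bind_inany);
}

/* unhash()/close(): leave the group again. */
static void toy_reuseport_leave(struct sock *sk)
{
	reuseport_detach_sock(sk);
}

/* Receive path: balance across the group by flow hash. With this change,
 * callers only treat the result as final when reuseport_has_conns(sk, false)
 * reports that the group holds no connections.
 */
static struct sock *toy_reuseport_pick(struct sock *sk, u32 hash,
				       struct sk_buff *skb)
{
	return reuseport_select_sock(sk, hash, skb, sizeof(struct udphdr));
}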