mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-11 17:16:41 +07:00
42a2d923cc
Pull networking updates from David Miller:

 1) The addition of nftables. No longer will we need protocol aware firewall filtering modules, it can all live in userspace. At the core of nftables is a, for lack of a better term, virtual machine that executes byte codes to inspect packets or metadata (arriving interface index, etc.) and make verdict decisions. Besides support for loading packet contents and comparing them, the interpreter supports lookups in various data structures as fundamental operations. For example sets are supported, and therefore one could create a set of whitelist IP address entries which have ACCEPT verdicts attached to them, and use the appropriate byte codes to do such lookups. Since the interpreted code is composed in userspace, userspace can do things like optimize it before giving it to the kernel. Another major improvement is the capability of atomically updating portions of the ruleset. In the existing netfilter implementation, one has to update the entire rule set in order to make a change and this is very expensive. Userspace tools exist to create nftables rules using existing netfilter rule sets, but both kernel implementations will need to co-exist for quite some time as we transition from the old to the new stuff. Kudos to Patrick McHardy, Pablo Neira Ayuso, and others who have worked so hard on this.

 2) Daniel Borkmann and Hannes Frederic Sowa made several improvements to our pseudo-random number generator, used for things like UDP port randomization and netfilter, amongst other things. In particular the taus88 generator is upgraded to taus113, and test cases are added.

 3) Support 64-bit rates in HTB and TBF schedulers, from Eric Dumazet and Yang Yingliang.

 4) Add support for new 577xx tigon3 chips to tg3 driver, from Nithin Sujir.

 5) Fix two fatal flaws in TCP dynamic right sizing, from Eric Dumazet, Neal Cardwell, and Yuchung Cheng.

 6) Allow IP_TOS and IP_TTL to be specified in sendmsg() ancillary control message data, much like other socket option attributes. From Francesco Fusco.

 7) Allow applications to specify a cap on the rate computed automatically by the kernel for pacing flows, via a new SO_MAX_PACING_RATE socket option. From Eric Dumazet. (A usage sketch follows the commit list below.)

 8) Make the initial autotuned send buffer sizing in TCP more closely reflect actual needs, from Eric Dumazet.

 9) Currently early socket demux only happens for TCP sockets, but we can do it for connected UDP sockets too. Implementation from Shawn Bohrer.

10) Refactor inet socket demux with the goal of improving hash demux performance for listening sockets. The main goals are being able to use RCU lookups even on request sockets, and eliminating the listening lock contention. From Eric Dumazet.

11) The bonding layer has many demuxes in its fast path, and an RCU conversion was started back in 3.11; several changes here extend the RCU usage to even more locations. From Ding Tianhong and Wang Yufen, based upon suggestions by Nikolay Aleksandrov and Veaceslav Falico.

12) Allow stackability of segmentation offloads to, in particular, allow segmentation offloading over tunnels. From Eric Dumazet.

13) Significantly improve the handling of secret keys we input into the various hash functions in the inet hashtables, TCP fast open, as well as syncookies. From Hannes Frederic Sowa. The key fundamental operation is "net_get_random_once()", which uses static keys. Hannes even extended this to ipv4/ipv6 fragmentation handling and our generic flow dissector.

14) The generic driver layer now takes care to set the driver data to NULL on device removal, so it's no longer necessary for drivers to explicitly set it to NULL. Many drivers have been cleaned up in this way, from Jingoo Han.

15) Add a BPF based packet scheduler classifier, from Daniel Borkmann.

16) Improve CRC32 interfaces and generic SKB checksum iterators so that SCTP's checksumming can more cleanly be handled. Also from Daniel Borkmann.

17) Add a new PMTU discovery mode, IP_PMTUDISC_INTERFACE, which forces using the interface MTU value. This helps avoid PMTU attacks, particularly on DNS servers. From Hannes Frederic Sowa. (Also shown in the sketch below.)

18) Use generic XPS for transmit queue steering rather than internal (re-)implementation in virtio-net. From Jason Wang.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1622 commits)
  random32: add test cases for taus113 implementation
  random32: upgrade taus88 generator to taus113 from errata paper
  random32: move rnd_state to linux/random.h
  random32: add prandom_reseed_late() and call when nonblocking pool becomes initialized
  random32: add periodic reseeding
  random32: fix off-by-one in seeding requirement
  PHY: Add RTL8201CP phy_driver to realtek
  xtsonic: add missing platform_set_drvdata() in xtsonic_probe()
  macmace: add missing platform_set_drvdata() in mace_probe()
  ethernet/arc/arc_emac: add missing platform_set_drvdata() in arc_emac_probe()
  ipv6: protect for_each_sk_fl_rcu in mem_check with rcu_read_lock_bh
  vlan: Implement vlan_dev_get_egress_qos_mask as an inline.
  ixgbe: add warning when max_vfs is out of range.
  igb: Update link modes display in ethtool
  netfilter: push reasm skb through instead of original frag skbs
  ip6_output: fragment outgoing reassembled skb properly
  MAINTAINERS: mv643xx_eth: take over maintainership from Lennart
  net_sched: tbf: support of 64bit rates
  ixgbe: deleting dfwd stations out of order can cause null ptr deref
  ixgbe: fix build err, num_rx_queues is only available with CONFIG_RPS
  ...
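Two of the changes above are visible to applications as plain socket options. The sketch below is an editor's illustration, not code from the pull request: it caps a socket's pacing rate with SO_MAX_PACING_RATE (item 7) and pins path-MTU discovery to the interface MTU with IP_PMTUDISC_INTERFACE (item 17). The fallback #defines are only for userspace headers that predate Linux 3.13 and assume the asm-generic socket option numbering; the function name tune_socket() is made up, and error handling is minimal.

	#include <stdio.h>
	#include <sys/socket.h>
	#include <netinet/in.h>

	/* Fallback values from asm-generic/socket.h and uapi/linux/in.h
	 * on most architectures; only needed with pre-3.13 headers. */
	#ifndef SO_MAX_PACING_RATE
	#define SO_MAX_PACING_RATE	47
	#endif
	#ifndef IP_PMTUDISC_INTERFACE
	#define IP_PMTUDISC_INTERFACE	4
	#endif

	static int tune_socket(int fd)
	{
		unsigned int rate = 1 * 1000 * 1000;	/* pacing cap, in bytes per second */
		int pmtu = IP_PMTUDISC_INTERFACE;	/* ignore PMTU hints, stick to the interface MTU */

		if (setsockopt(fd, SOL_SOCKET, SO_MAX_PACING_RATE, &rate, sizeof(rate)) < 0) {
			perror("SO_MAX_PACING_RATE");
			return -1;
		}
		if (setsockopt(fd, IPPROTO_IP, IP_MTU_DISCOVER, &pmtu, sizeof(pmtu)) < 0) {
			perror("IP_MTU_DISCOVER");
			return -1;
		}
		return 0;
	}

The pacing cap only matters where the kernel actually paces the flow (TCP with a pacing-aware qdisc at the time of this merge), and the PMTU mode is mainly interesting for UDP servers such as DNS, per item 17.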
196 lines · 5.5 KiB · C
#ifndef _LINUX_JUMP_LABEL_H
#define _LINUX_JUMP_LABEL_H

/*
 * Jump label support
 *
 * Copyright (C) 2009-2012 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011-2012 Peter Zijlstra <pzijlstr@redhat.com>
 *
 * Jump labels provide an interface to generate dynamic branches using
 * self-modifying code. Assuming toolchain and architecture support, the
 * result of a "if (static_key_false(&key))" statement is an unconditional
 * branch (which defaults to false - and the true block is placed out of line).
 *
 * However at runtime we can change the branch target using
 * static_key_slow_{inc,dec}(). These act as a 'reference' count on the key
 * object, and for as long as there are references all branches referring to
 * that particular key will point to the (out of line) true block.
 *
 * Since this relies on modifying code, the static_key_slow_{inc,dec}()
 * functions must be considered absolute slow paths (machine wide
 * synchronization etc.). OTOH, since the affected branches are unconditional,
 * their runtime overhead will be absolutely minimal, esp. in the default (off)
 * case where the total effect is a single NOP of appropriate size. The on
 * case will patch in a jump to the out-of-line block.
 *
 * When the control is directly exposed to userspace it is prudent to delay
 * the decrement to avoid high frequency code modifications which can (and do)
 * cause significant performance degradation. Struct static_key_deferred and
 * static_key_slow_dec_deferred() provide for this.
 *
 * Lacking toolchain and/or architecture support, it falls back to a simple
 * conditional branch.
 *
 * struct static_key my_key = STATIC_KEY_INIT_TRUE;
 *
 *   if (static_key_true(&my_key)) {
 *   }
 *
 * will result in the true case being in-line and the key starting out with a
 * single reference. Mixing static_key_true() and static_key_false() on the
 * same key is not allowed.
 *
 * Not initializing the key (static data is initialized to 0s anyway) is the
 * same as using STATIC_KEY_INIT_FALSE.
 */
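
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * a default-false key toggled at runtime. The identifiers my_feature_key,
 * my_fast_path(), do_slow_path() and my_feature_enable() are hypothetical.
 *
 *   static struct static_key my_feature_key = STATIC_KEY_INIT_FALSE;
 *
 *   void my_fast_path(void)
 *   {
 *           if (static_key_false(&my_feature_key))
 *                   do_slow_path();
 *   }
 *
 *   void my_feature_enable(void)
 *   {
 *           static_key_slow_inc(&my_feature_key);
 *   }
 *
 * static_key_slow_inc() patches the default NOP into a jump to the
 * out-of-line block and takes a reference; the branch flips back to the
 * default once static_key_slow_dec() drops the reference count to zero.
 */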

#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/bug.h>

extern bool static_key_initialized;

#define STATIC_KEY_CHECK_USE() WARN(!static_key_initialized,		      \
				    "%s used before call to jump_label_init", \
				    __func__)

#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)

struct static_key {
	atomic_t enabled;
	/* Set lsb bit to 1 if branch is default true, 0 otherwise */
	struct jump_entry *entries;
#ifdef CONFIG_MODULES
	struct static_key_mod *next;
#endif
};

# include <asm/jump_label.h>
# define HAVE_JUMP_LABEL
#endif	/* CC_HAVE_ASM_GOTO && CONFIG_JUMP_LABEL */

enum jump_label_type {
	JUMP_LABEL_DISABLE = 0,
	JUMP_LABEL_ENABLE,
};

struct module;

#include <linux/atomic.h>
#ifdef HAVE_JUMP_LABEL

#define JUMP_LABEL_TRUE_BRANCH 1UL

static inline struct jump_entry *jump_label_get_entries(struct static_key *key)
{
	return (struct jump_entry *)((unsigned long)key->entries
						& ~JUMP_LABEL_TRUE_BRANCH);
}

static inline bool jump_label_get_branch_default(struct static_key *key)
{
	if ((unsigned long)key->entries & JUMP_LABEL_TRUE_BRANCH)
		return true;
	return false;
}

static __always_inline bool static_key_false(struct static_key *key)
{
	return arch_static_branch(key);
}

static __always_inline bool static_key_true(struct static_key *key)
{
	return !static_key_false(key);
}

extern struct jump_entry __start___jump_table[];
extern struct jump_entry __stop___jump_table[];

extern void jump_label_init(void);
extern void jump_label_lock(void);
extern void jump_label_unlock(void);
extern void arch_jump_label_transform(struct jump_entry *entry,
				      enum jump_label_type type);
extern void arch_jump_label_transform_static(struct jump_entry *entry,
					      enum jump_label_type type);
extern int jump_label_text_reserved(void *start, void *end);
extern void static_key_slow_inc(struct static_key *key);
extern void static_key_slow_dec(struct static_key *key);
extern void jump_label_apply_nops(struct module *mod);

#define STATIC_KEY_INIT_TRUE ((struct static_key) \
	{ .enabled = ATOMIC_INIT(1), .entries = (void *)1 })
#define STATIC_KEY_INIT_FALSE ((struct static_key) \
	{ .enabled = ATOMIC_INIT(0), .entries = (void *)0 })

#else  /* !HAVE_JUMP_LABEL */

struct static_key {
	atomic_t enabled;
};

static __always_inline void jump_label_init(void)
{
	static_key_initialized = true;
}

static __always_inline bool static_key_false(struct static_key *key)
{
	if (unlikely(atomic_read(&key->enabled) > 0))
		return true;
	return false;
}

static __always_inline bool static_key_true(struct static_key *key)
{
	if (likely(atomic_read(&key->enabled) > 0))
		return true;
	return false;
}

static inline void static_key_slow_inc(struct static_key *key)
{
	STATIC_KEY_CHECK_USE();
	atomic_inc(&key->enabled);
}

static inline void static_key_slow_dec(struct static_key *key)
{
	STATIC_KEY_CHECK_USE();
	atomic_dec(&key->enabled);
}

static inline int jump_label_text_reserved(void *start, void *end)
{
	return 0;
}

static inline void jump_label_lock(void) {}
static inline void jump_label_unlock(void) {}

static inline int jump_label_apply_nops(struct module *mod)
{
	return 0;
}

#define STATIC_KEY_INIT_TRUE ((struct static_key) \
		{ .enabled = ATOMIC_INIT(1) })
#define STATIC_KEY_INIT_FALSE ((struct static_key) \
		{ .enabled = ATOMIC_INIT(0) })

#endif	/* HAVE_JUMP_LABEL */

#define STATIC_KEY_INIT STATIC_KEY_INIT_FALSE
#define jump_label_enabled static_key_enabled

static inline bool static_key_enabled(struct static_key *key)
{
	return (atomic_read(&key->enabled) > 0);
}

#endif	/* _LINUX_JUMP_LABEL_H */