/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Dynamic queue limits (dql) - Definitions
 *
 * Copyright (c) 2011, Tom Herbert <therbert@google.com>
 *
 * This header file contains the definitions for dynamic queue limits (dql).
 * dql would be used in conjunction with a producer/consumer type queue
 * (possibly a HW queue).  Such a queue would have these general properties:
 *
 *   1) Objects are queued up to some limit specified as number of objects.
 *   2) Periodically a completion process executes which retires consumed
 *      objects.
 *   3) Starvation occurs when limit has been reached, all queued data has
 *      actually been consumed, but completion processing has not yet run
 *      so queuing new data is blocked.
 *   4) Minimizing the amount of queued data is desirable.
 *
 * The goal of dql is to calculate the limit as the minimum number of objects
 * needed to prevent starvation.
 *
 * The primary functions of dql are:
 *    dql_queued - called when objects are enqueued to record number of objects
 *    dql_avail - returns how many objects are available to be queued based
 *                on the object limit and how many objects are already enqueued
 *    dql_completed - called at completion time to indicate how many objects
 *                    were retired from the queue
 *
 * The dql implementation does not implement any locking for the dql data
 * structures, the higher layer should provide this.  dql_queued should
 * be serialized to prevent concurrent execution of the function; this
 * is also true for dql_completed.  However, dql_queued and dql_completed can
 * be executed concurrently (i.e. they can be protected by different locks).
 */
#ifndef _LINUX_DQL_H
#define _LINUX_DQL_H

#ifdef __KERNEL__

struct dql {
	/* Fields accessed in enqueue path (dql_queued) */
	unsigned int	num_queued;		/* Total ever queued */
	unsigned int	adj_limit;		/* limit + num_completed */
	unsigned int	last_obj_cnt;		/* Count at last queuing */

	/* Fields accessed only by completion path (dql_completed) */

	unsigned int	limit ____cacheline_aligned_in_smp; /* Current limit */
	unsigned int	num_completed;		/* Total ever completed */

	unsigned int	prev_ovlimit;		/* Previous over limit */
	unsigned int	prev_num_queued;	/* Previous queue total */
	unsigned int	prev_last_obj_cnt;	/* Previous queuing cnt */

	unsigned int	lowest_slack;		/* Lowest slack found */
	unsigned long	slack_start_time;	/* Time slacks seen */

	/* Configuration */
	unsigned int	max_limit;		/* Max limit */
	unsigned int	min_limit;		/* Minimum limit */
	unsigned int	slack_hold_time;	/* Time to measure slack */
};
/* Set some static maximums */
#define DQL_MAX_OBJECT (UINT_MAX / 16)
#define DQL_MAX_LIMIT ((UINT_MAX / 2) - DQL_MAX_OBJECT)

/*
 * Record number of objects queued. Assumes that caller has already checked
 * availability in the queue with dql_avail.
 */
static inline void dql_queued(struct dql *dql, unsigned int count)
{
	BUG_ON(count > DQL_MAX_OBJECT);

	dql->last_obj_cnt = count;

	/* We want to force a write first, so that the cpu does not attempt
	 * to pull the cache line containing last_obj_cnt, num_queued and
	 * adj_limit into the Shared state, but instead issues a Request
	 * For Ownership directly.  This is only a hint, so a plain
	 * barrier() is sufficient.
	 */
	barrier();

	dql->num_queued += count;
}

/* Returns how many objects can be queued, < 0 indicates over limit. */
static inline int dql_avail(const struct dql *dql)
{
	return READ_ONCE(dql->adj_limit) - READ_ONCE(dql->num_queued);
}
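/*
 * Worked example with illustrative numbers: if limit = 100, num_completed = 40
 * and num_queued = 120, then adj_limit = 100 + 40 = 140 and dql_avail()
 * returns 140 - 120 = 20.  In other words 120 - 40 = 80 objects are still in
 * flight, so 20 more may be queued before the 100-object limit is reached.
 * Folding limit and num_completed into adj_limit ahead of time is what lets
 * the enqueue path get by with the single subtraction above.
 */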
/* Record number of completed objects and recalculate the limit. */
void dql_completed(struct dql *dql, unsigned int count);

/* Reset dql state */
void dql_reset(struct dql *dql);

/* Initialize dql state */
void dql_init(struct dql *dql, unsigned int hold_time);
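/*
 * Note (usage detail, not stated in this header): hold_time populates
 * slack_hold_time above, i.e. the window over which queue slack is measured;
 * it is given in jiffies, and existing users such as the networking byte
 * queue limits pass HZ (about one second).
 */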
#endif /* __KERNEL__ */

#endif /* _LINUX_DQL_H */