Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net:
  netfilter: xt_connbytes: handle negation correctly
  net: relax rcvbuf limits
  rps: fix insufficient bounds checking in store_rps_dev_flow_table_cnt()
  net: introduce DST_NOPEER dst flag
  mqprio: Avoid panic if no options are provided
  bridge: provide a mtu() method for fake_dst_ops
commit 155d4551bd
@@ -53,6 +53,7 @@ struct dst_entry {
 #define DST_NOHASH		0x0008
 #define DST_NOCACHE		0x0010
 #define DST_NOCOUNT		0x0020
+#define DST_NOPEER		0x0040
 
 	short			error;
 	short			obsolete;
@@ -637,12 +637,14 @@ static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
 
 /*
  * Take into account size of receive queue and backlog queue
+ * Do not take into account this skb truesize,
+ * to allow even a single big packet to come.
  */
 static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb)
 {
 	unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);
 
-	return qsize + skb->truesize > sk->sk_rcvbuf;
+	return qsize > sk->sk_rcvbuf;
 }
 
 /* The per-socket spinlock must be held here. */
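
Note on the sk_rcvqueues_full() change: by ignoring the incoming skb's truesize, a socket whose rcvbuf is smaller than one packet's truesize can still accept a packet when its queues are empty, instead of dropping everything forever. A minimal standalone sketch of the two predicates (hypothetical values; the struct fields are replaced by plain parameters):

#include <stdbool.h>
#include <stdio.h>

static bool old_check(unsigned qsize, unsigned truesize, int rcvbuf)
{
	return qsize + truesize > (unsigned)rcvbuf;	/* counts this skb */
}

static bool new_check(unsigned qsize, unsigned truesize, int rcvbuf)
{
	(void)truesize;					/* ignored on purpose */
	return qsize > (unsigned)rcvbuf;		/* queues already full? */
}

int main(void)
{
	/* rcvbuf of 4096 bytes, one incoming skb with truesize 16384 */
	printf("old: %d\n", old_check(0, 16384, 4096));	/* 1: always dropped */
	printf("new: %d\n", new_check(0, 16384, 4096));	/* 0: first skb admitted */
	return 0;
}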
@@ -114,12 +114,18 @@ static struct neighbour *fake_neigh_lookup(const struct dst_entry *dst, const vo
 	return NULL;
 }
 
+static unsigned int fake_mtu(const struct dst_entry *dst)
+{
+	return dst->dev->mtu;
+}
+
 static struct dst_ops fake_dst_ops = {
 	.family =		AF_INET,
 	.protocol =		cpu_to_be16(ETH_P_IP),
 	.update_pmtu =		fake_update_pmtu,
 	.cow_metrics =		fake_cow_metrics,
 	.neigh_lookup =		fake_neigh_lookup,
+	.mtu =			fake_mtu,
 };
 
 /*
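
Note on fake_mtu(): dst_mtu() in this tree dispatches unconditionally through dst->ops->mtu(), so a dst_ops that leaves .mtu NULL oopses the first time the bridge's fake dst is asked for its MTU. A self-contained sketch of that dispatch, with stand-in struct definitions rather than the real kernel types:

#include <stdio.h>

struct net_device { unsigned int mtu; };			/* stand-in */
struct dst_entry;
struct dst_ops { unsigned int (*mtu)(const struct dst_entry *); };
struct dst_entry { struct net_device *dev; const struct dst_ops *ops; };

static unsigned int fake_mtu(const struct dst_entry *dst)
{
	return dst->dev->mtu;		/* delegate to the underlying device */
}

static unsigned int dst_mtu(const struct dst_entry *dst)
{
	return dst->ops->mtu(dst);	/* oopses if ->mtu were left NULL */
}

int main(void)
{
	struct net_device dev = { .mtu = 1500 };
	struct dst_ops ops = { .mtu = fake_mtu };
	struct dst_entry dst = { .dev = &dev, .ops = &ops };
	printf("mtu=%u\n", dst_mtu(&dst));	/* 1500 */
	return 0;
}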
@@ -141,7 +147,7 @@ void br_netfilter_rtable_init(struct net_bridge *br)
 	rt->dst.dev = br->dev;
 	rt->dst.path = &rt->dst;
 	dst_init_metrics(&rt->dst, br_dst_default_metrics, true);
-	rt->dst.flags	= DST_NOXFRM;
+	rt->dst.flags	= DST_NOXFRM | DST_NOPEER;
 	rt->dst.ops = &fake_dst_ops;
 }
 
@@ -665,11 +665,14 @@ static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
 	if (count) {
 		int i;
 
-		if (count > 1<<30) {
+		if (count > INT_MAX)
+			return -EINVAL;
+		count = roundup_pow_of_two(count);
+		if (count > (ULONG_MAX - sizeof(struct rps_dev_flow_table))
+				/ sizeof(struct rps_dev_flow)) {
 			/* Enforce a limit to prevent overflow */
 			return -EINVAL;
 		}
-		count = roundup_pow_of_two(count);
 		table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(count));
 		if (!table)
 			return -ENOMEM;
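
Note on the new bound: RPS_DEV_FLOW_TABLE_SIZE(count) is roughly sizeof(struct rps_dev_flow_table) + count * sizeof(struct rps_dev_flow), computed in unsigned long arithmetic. On a 32-bit platform a count of 1<<30 passed the old "> 1<<30" test, yet the multiplication wrapped past ULONG_MAX and vmalloc() was handed a tiny size. The new code bounds count, after rounding, by the largest value whose table size still fits. A sketch of that limit with stand-in struct layouts (the real field sets differ):

#include <limits.h>
#include <stdio.h>

struct flow_entry { unsigned short cpu; unsigned int hash; };	/* stand-in */
struct flow_table { unsigned int mask; };			/* stand-in */

int main(void)
{
	/* Any count above this makes header + count * entry wrap ULONG_MAX;
	 * on 32-bit kernels the limit is far below the old 1<<30 cutoff. */
	unsigned long max_count =
		(ULONG_MAX - sizeof(struct flow_table)) / sizeof(struct flow_entry);
	printf("largest safe count: %lu\n", max_count);
	return 0;
}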
@@ -288,11 +288,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 	unsigned long flags;
 	struct sk_buff_head *list = &sk->sk_receive_queue;
 
-	/* Cast sk->rcvbuf to unsigned... It's pointless, but reduces
-	   number of warnings when compiling with -W --ANK
-	 */
-	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
-	    (unsigned)sk->sk_rcvbuf) {
+	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
 		atomic_inc(&sk->sk_drops);
 		trace_sock_rcvqueue_full(sk, skb);
 		return -ENOMEM;
@@ -1367,7 +1367,7 @@ void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
 {
 	struct rtable *rt = (struct rtable *) dst;
 
-	if (rt) {
+	if (rt && !(rt->dst.flags & DST_NOPEER)) {
 		if (rt->peer == NULL)
 			rt_bind_peer(rt, rt->rt_dst, 1);
 
@@ -1378,7 +1378,7 @@ void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
 			iph->id = htons(inet_getid(rt->peer, more));
 			return;
 		}
-	} else
+	} else if (!rt)
 		printk(KERN_DEBUG "rt_bind_peer(0) @%p\n",
 		       __builtin_return_address(0));
 
@@ -603,7 +603,7 @@ void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
 	static atomic_t ipv6_fragmentation_id;
 	int old, new;
 
-	if (rt) {
+	if (rt && !(rt->dst.flags & DST_NOPEER)) {
 		struct inet_peer *peer;
 
 		if (!rt->rt6i_peer)
@@ -87,10 +87,10 @@ connbytes_mt(const struct sk_buff *skb, struct xt_action_param *par)
 		break;
 	}
 
-	if (sinfo->count.to)
+	if (sinfo->count.to >= sinfo->count.from)
 		return what <= sinfo->count.to && what >= sinfo->count.from;
-	else
-		return what >= sinfo->count.from;
+	else /* inverted */
+		return what < sinfo->count.to || what > sinfo->count.from;
 }
 
 static int connbytes_mt_check(const struct xt_mtchk_param *par)
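
Note on the negation fix: xt_connbytes_info has no invert flag, so (as the fix implies) userspace encodes "! --connbytes A:B" by swapping the bounds, leaving from > to; the old test "what <= to && what >= from" is then always false. The new code detects the swapped range and matches outside it. A standalone sketch of the corrected predicate:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool connbytes_match(uint64_t what, uint64_t from, uint64_t to)
{
	if (to >= from)				/* normal range */
		return what <= to && what >= from;
	else					/* inverted: swapped bounds */
		return what < to || what > from;
}

int main(void)
{
	/* --connbytes 10:100    -> from=10, to=100 */
	printf("%d\n", connbytes_match(50, 10, 100));	/* 1: in range */
	/* ! --connbytes 10:100  -> bounds swapped: from=100, to=10 */
	printf("%d\n", connbytes_match(50, 100, 10));	/* 0: inside, rejected */
	printf("%d\n", connbytes_match(5, 100, 10));	/* 1: below range */
	printf("%d\n", connbytes_match(200, 100, 10));	/* 1: above range */
	return 0;
}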
@@ -1630,8 +1630,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
 	if (snaplen > res)
 		snaplen = res;
 
-	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
-	    (unsigned)sk->sk_rcvbuf)
+	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
 		goto drop_n_acct;
 
 	if (skb_shared(skb)) {
@@ -1762,8 +1761,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
 	if (po->tp_version <= TPACKET_V2) {
 		if (macoff + snaplen > po->rx_ring.frame_size) {
 			if (po->copy_thresh &&
-			    atomic_read(&sk->sk_rmem_alloc) + skb->truesize
-			    < (unsigned)sk->sk_rcvbuf) {
+			    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
 				if (skb_shared(skb)) {
 					copy_skb = skb_clone(skb, GFP_ATOMIC);
 				} else {
@@ -107,7 +107,7 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
 	if (!netif_is_multiqueue(dev))
 		return -EOPNOTSUPP;
 
-	if (nla_len(opt) < sizeof(*qopt))
+	if (!opt || nla_len(opt) < sizeof(*qopt))
 		return -EINVAL;
 
 	qopt = nla_data(opt);
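
Note on the "!opt" guard: "tc qdisc add ... mqprio" with no options hands mqprio_init() a NULL attribute, and nla_len() dereferences its argument, so the length check itself was the panic. A stand-in sketch (struct nlattr reduced to its header, helper names hypothetical) of why the NULL test must come first:

#include <stddef.h>

struct nlattr { unsigned short nla_len, nla_type; };	/* stand-in header */

static int nla_len_sketch(const struct nlattr *nla)
{
	/* mirrors kernel nla_len(): payload length; dereferences nla */
	return nla->nla_len - (int)sizeof(struct nlattr);
}

static int mqprio_init_sketch(const struct nlattr *opt, size_t needed)
{
	/* || short-circuits: nla_len_sketch() never runs when opt is NULL */
	if (!opt || nla_len_sketch(opt) < (int)needed)
		return -1;				/* -EINVAL */
	return 0;
}

int main(void)
{
	/* no options: rejected cleanly instead of crashing on NULL */
	return mqprio_init_sketch(NULL, 16) == -1 ? 0 : 1;
}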