Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:
 "Things seem to be settling down as far as networking is concerned,
  let's hope this trend continues...

  1) Add iov_iter_revert() and use it to fix the behavior of
     skb_copy_datagram_msg() et al., from Al Viro.

  2) Fix the protocol used in the synthetic SKB we cons up for the
     purposes of doing a simulated route lookup for RTM_GETROUTE
     requests. From Florian Larysch.

  3) Don't add noop_qdisc to the per-device qdisc hashes, from Cong
     Wang.

  4) Don't call netdev_change_features with the team lock held, from
     Xin Long.

  5) Revert TCP F-RTO extension to catch more spurious timeouts because
     it interacts very badly with some middle-boxes. From Yuchung
     Cheng.

  6) Fix the loss of error values in l2tp {s,g}etsockopt calls, from
     Guillaume Nault.

  7) ctnetlink uses bit positions where it should be using bit masks,
     fix from Liping Zhang.

  8) Missing RCU locking in netfilter helper code, from Gao Feng.

  9) Avoid double frees and use-after-frees in tcp_disconnect(), from
     Eric Dumazet.

 10) Don't do a changelink before we register the netdevice in
     bridging, from Ido Schimmel.

 11) Lock the ipv6 device address list properly, from Rabin Vincent"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (29 commits)
  netfilter: ipt_CLUSTERIP: Fix wrong conntrack netns refcnt usage
  netfilter: nft_hash: do not dump the auto generated seed
  drivers: net: usb: qmi_wwan: add QMI_QUIRK_SET_DTR for Telit PID 0x1201
  ipv6: Fix idev->addr_list corruption
  net: xdp: don't export dev_change_xdp_fd()
  bridge: netlink: register netdevice before executing changelink
  bridge: implement missing ndo_uninit()
  bpf: reference may_access_skb() from __bpf_prog_run()
  tcp: clear saved_syn in tcp_disconnect()
  netfilter: nf_ct_expect: use proper RCU list traversal/update APIs
  netfilter: ctnetlink: skip dumping expect when nfct_help(ct) is NULL
  netfilter: make it safer during the inet6_dev->addr_list traversal
  netfilter: ctnetlink: make it safer when checking the ct helper name
  netfilter: helper: Add the rcu lock when call __nf_conntrack_helper_find
  netfilter: ctnetlink: using bit to represent the ct event
  netfilter: xt_TCPMSS: add more sanity tests on tcph->doff
  net: tcp: Increase TCP_MIB_OUTRSTS even though fail to alloc skb
  l2tp: don't mask errors in pppol2tp_getsockopt()
  l2tp: don't mask errors in pppol2tp_setsockopt()
  tcp: restrict F-RTO to work-around broken middle-boxes
  ...
commit 7e703eccf0
@@ -557,7 +557,7 @@ static int ifi_canfd_poll(struct napi_struct *napi, int quota)
     int work_done = 0;

     u32 stcmd = readl(priv->base + IFI_CANFD_STCMD);
-    u32 rxstcmd = readl(priv->base + IFI_CANFD_STCMD);
+    u32 rxstcmd = readl(priv->base + IFI_CANFD_RXSTCMD);
     u32 errctr = readl(priv->base + IFI_CANFD_ERROR_CTR);

     /* Handle bus state changes */
@@ -826,8 +826,7 @@ static int rcar_can_probe(struct platform_device *pdev)

     devm_can_led_init(ndev);

-    dev_info(&pdev->dev, "device registered (regs @ %p, IRQ%d)\n",
-             priv->regs, ndev->irq);
+    dev_info(&pdev->dev, "device registered (IRQ%d)\n", ndev->irq);

     return 0;
 fail_candev:
@@ -990,7 +990,7 @@ static void team_port_disable(struct team *team,
 #define TEAM_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
                            NETIF_F_RXCSUM | NETIF_F_ALL_TSO)

-static void ___team_compute_features(struct team *team)
+static void __team_compute_features(struct team *team)
 {
     struct team_port *port;
     u32 vlan_features = TEAM_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL;
@@ -1023,16 +1023,10 @@ static void ___team_compute_features(struct team *team)
         team->dev->priv_flags |= IFF_XMIT_DST_RELEASE;
 }

-static void __team_compute_features(struct team *team)
-{
-    ___team_compute_features(team);
-    netdev_change_features(team->dev);
-}
-
 static void team_compute_features(struct team *team)
 {
     mutex_lock(&team->lock);
-    ___team_compute_features(team);
+    __team_compute_features(team);
     mutex_unlock(&team->lock);
+    netdev_change_features(team->dev);
 }
@@ -1641,6 +1635,7 @@ static void team_uninit(struct net_device *dev)
     team_notify_peers_fini(team);
     team_queue_override_fini(team);
     mutex_unlock(&team->lock);
+    netdev_change_features(dev);
 }

 static void team_destructor(struct net_device *dev)
@@ -1928,6 +1923,10 @@ static int team_add_slave(struct net_device *dev, struct net_device *port_dev)
     mutex_lock(&team->lock);
     err = team_port_add(team, port_dev);
     mutex_unlock(&team->lock);
+
+    if (!err)
+        netdev_change_features(dev);
+
     return err;
 }

@@ -1939,6 +1938,10 @@ static int team_del_slave(struct net_device *dev, struct net_device *port_dev)
     mutex_lock(&team->lock);
     err = team_port_del(team, port_dev);
     mutex_unlock(&team->lock);
+
+    if (!err)
+        netdev_change_features(dev);
+
     return err;
 }

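The hunks above move netdev_change_features() out from under team->lock. A minimal sketch of that ordering, using a hypothetical foo_priv driver rather than anything from this diff: recompute the feature set while holding the driver's private mutex, and only notify the core once the mutex has been dropped.

#include <linux/mutex.h>
#include <linux/netdevice.h>

struct foo_priv {                      /* hypothetical driver private data */
    struct mutex lock;
    struct net_device *dev;
};

static void foo_refresh_features(struct foo_priv *priv)
{
    mutex_lock(&priv->lock);
    /* ... recompute priv->dev->features / vlan_features here ... */
    mutex_unlock(&priv->lock);

    /* Notify the stack only after the private lock is released. */
    netdev_change_features(priv->dev);
}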
@@ -908,7 +908,7 @@ static const struct usb_device_id products[] = {
     {QMI_FIXED_INTF(0x2357, 0x9000, 4)},    /* TP-LINK MA260 */
     {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */
     {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)},    /* Telit LE920 */
-    {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)},    /* Telit LE920 */
+    {QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */
     {QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)},    /* XS Stick W100-2 from 4G Systems */
     {QMI_FIXED_INTF(0x0b3c, 0xc000, 4)},    /* Olivetti Olicard 100 */
     {QMI_FIXED_INTF(0x0b3c, 0xc001, 4)},    /* Olivetti Olicard 120 */
@@ -1929,7 +1929,7 @@ static int __usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
                " value=0x%04x index=0x%04x size=%d\n",
                cmd, reqtype, value, index, size);

-    if (data) {
+    if (size) {
         buf = kmalloc(size, GFP_KERNEL);
         if (!buf)
             goto out;
@@ -1938,8 +1938,13 @@ static int __usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
     err = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
                           cmd, reqtype, value, index, buf, size,
                           USB_CTRL_GET_TIMEOUT);
-    if (err > 0 && err <= size)
-        memcpy(data, buf, err);
+    if (err > 0 && err <= size) {
+        if (data)
+            memcpy(data, buf, err);
+        else
+            netdev_dbg(dev->net,
+                       "Huh? Data requested but thrown away.\n");
+    }
     kfree(buf);
 out:
     return err;
@@ -1960,7 +1965,13 @@ static int __usbnet_write_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
         buf = kmemdup(data, size, GFP_KERNEL);
         if (!buf)
             goto out;
-    }
+    } else {
+        if (size) {
+            WARN_ON_ONCE(1);
+            err = -EINVAL;
+            goto out;
+        }
+    }

     err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
                           cmd, reqtype, value, index, buf, size,
@@ -39,7 +39,10 @@ struct iov_iter {
     };
     union {
         unsigned long nr_segs;
-        int idx;
+        struct {
+            int idx;
+            int start_idx;
+        };
     };
 };

@@ -81,6 +84,7 @@ unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to);
 size_t iov_iter_copy_from_user_atomic(struct page *page,
         struct iov_iter *i, unsigned long offset, size_t bytes);
 void iov_iter_advance(struct iov_iter *i, size_t bytes);
+void iov_iter_revert(struct iov_iter *i, size_t bytes);
 int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
 size_t iov_iter_single_seg_count(const struct iov_iter *i);
 size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
@@ -1162,12 +1162,12 @@ static unsigned int __bpf_prog_run(void *ctx, const struct bpf_insn *insn)
     LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + imm32)) */
         off = IMM;
 load_word:
-        /* BPF_LD + BPD_ABS and BPF_LD + BPF_IND insns are
-         * only appearing in the programs where ctx ==
-         * skb. All programs keep 'ctx' in regs[BPF_REG_CTX]
-         * == BPF_R6, bpf_convert_filter() saves it in BPF_R6,
-         * internal BPF verifier will check that BPF_R6 ==
-         * ctx.
+        /* BPF_LD + BPD_ABS and BPF_LD + BPF_IND insns are only
+         * appearing in the programs where ctx == skb
+         * (see may_access_skb() in the verifier). All programs
+         * keep 'ctx' in regs[BPF_REG_CTX] == BPF_R6,
+         * bpf_convert_filter() saves it in BPF_R6, internal BPF
+         * verifier will check that BPF_R6 == ctx.
          *
          * BPF_ABS and BPF_IND are wrappers of function calls,
          * so they scratch BPF_R1-BPF_R5 registers, preserve
@@ -786,6 +786,68 @@ void iov_iter_advance(struct iov_iter *i, size_t size)
 }
 EXPORT_SYMBOL(iov_iter_advance);

+void iov_iter_revert(struct iov_iter *i, size_t unroll)
+{
+    if (!unroll)
+        return;
+    i->count += unroll;
+    if (unlikely(i->type & ITER_PIPE)) {
+        struct pipe_inode_info *pipe = i->pipe;
+        int idx = i->idx;
+        size_t off = i->iov_offset;
+        while (1) {
+            size_t n = off - pipe->bufs[idx].offset;
+            if (unroll < n) {
+                off -= (n - unroll);
+                break;
+            }
+            unroll -= n;
+            if (!unroll && idx == i->start_idx) {
+                off = 0;
+                break;
+            }
+            if (!idx--)
+                idx = pipe->buffers - 1;
+            off = pipe->bufs[idx].offset + pipe->bufs[idx].len;
+        }
+        i->iov_offset = off;
+        i->idx = idx;
+        pipe_truncate(i);
+        return;
+    }
+    if (unroll <= i->iov_offset) {
+        i->iov_offset -= unroll;
+        return;
+    }
+    unroll -= i->iov_offset;
+    if (i->type & ITER_BVEC) {
+        const struct bio_vec *bvec = i->bvec;
+        while (1) {
+            size_t n = (--bvec)->bv_len;
+            i->nr_segs++;
+            if (unroll <= n) {
+                i->bvec = bvec;
+                i->iov_offset = n - unroll;
+                return;
+            }
+            unroll -= n;
+        }
+    } else { /* same logics for iovec and kvec */
+        const struct iovec *iov = i->iov;
+        while (1) {
+            size_t n = (--iov)->iov_len;
+            i->nr_segs++;
+            if (unroll <= n) {
+                i->iov = iov;
+                i->iov_offset = n - unroll;
+                return;
+            }
+            unroll -= n;
+        }
+    }
+}
+EXPORT_SYMBOL(iov_iter_revert);
+
 /*
  * Return the count of just the current iov_iter segment.
  */
@@ -839,6 +901,7 @@ void iov_iter_pipe(struct iov_iter *i, int direction,
     i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
     i->iov_offset = 0;
     i->count = count;
+    i->start_idx = i->idx;
 }
 EXPORT_SYMBOL(iov_iter_pipe);

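For context on how the new iov_iter_revert() is meant to be used (the datagram hunks further below rely on it), here is a hedged sketch; copy_or_rewind() is a hypothetical helper, not a kernel function. On a short copy it rewinds the iterator by exactly the bytes that were consumed, so the caller's iterator is left where it started.

#include <linux/errno.h>
#include <linux/uio.h>

static int copy_or_rewind(struct iov_iter *to, const void *buf, size_t len)
{
    size_t done = copy_to_iter(buf, len, to);  /* may copy fewer than len bytes */

    if (done != len) {
        iov_iter_revert(to, done);             /* undo the partial advance */
        return -EFAULT;
    }
    return 0;
}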
@@ -119,6 +119,15 @@ static int br_dev_init(struct net_device *dev)
     return err;
 }

+static void br_dev_uninit(struct net_device *dev)
+{
+    struct net_bridge *br = netdev_priv(dev);
+
+    br_multicast_uninit_stats(br);
+    br_vlan_flush(br);
+    free_percpu(br->stats);
+}
+
 static int br_dev_open(struct net_device *dev)
 {
     struct net_bridge *br = netdev_priv(dev);
@@ -332,6 +341,7 @@ static const struct net_device_ops br_netdev_ops = {
     .ndo_open            = br_dev_open,
     .ndo_stop            = br_dev_stop,
     .ndo_init            = br_dev_init,
+    .ndo_uninit          = br_dev_uninit,
     .ndo_start_xmit      = br_dev_xmit,
     .ndo_get_stats64     = br_get_stats64,
     .ndo_set_mac_address = br_set_mac_address,
@@ -356,14 +366,6 @@ static const struct net_device_ops br_netdev_ops = {
     .ndo_features_check  = passthru_features_check,
 };

-static void br_dev_free(struct net_device *dev)
-{
-    struct net_bridge *br = netdev_priv(dev);
-
-    free_percpu(br->stats);
-    free_netdev(dev);
-}
-
 static struct device_type br_type = {
     .name = "bridge",
 };
@@ -376,7 +378,7 @@ void br_dev_setup(struct net_device *dev)
     ether_setup(dev);

     dev->netdev_ops = &br_netdev_ops;
-    dev->destructor = br_dev_free;
+    dev->destructor = free_netdev;
     dev->ethtool_ops = &br_ethtool_ops;
     SET_NETDEV_DEVTYPE(dev, &br_type);
     dev->priv_flags = IFF_EBRIDGE | IFF_NO_QUEUE;
@@ -311,7 +311,6 @@ void br_dev_delete(struct net_device *dev, struct list_head *head)

     br_fdb_delete_by_port(br, NULL, 0, 1);

-    br_vlan_flush(br);
     br_multicast_dev_del(br);
     cancel_delayed_work_sync(&br->gc_work);

@@ -2031,8 +2031,6 @@ void br_multicast_dev_del(struct net_bridge *br)

 out:
     spin_unlock_bh(&br->multicast_lock);
-
-    free_percpu(br->mcast_stats);
 }

 int br_multicast_set_router(struct net_bridge *br, unsigned long val)
@@ -2531,6 +2529,11 @@ int br_multicast_init_stats(struct net_bridge *br)
     return 0;
 }

+void br_multicast_uninit_stats(struct net_bridge *br)
+{
+    free_percpu(br->mcast_stats);
+}
+
 static void mcast_stats_add_dir(u64 *dst, u64 *src)
 {
     dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX];
@@ -1165,11 +1165,14 @@ static int br_dev_newlink(struct net *src_net, struct net_device *dev,
         spin_unlock_bh(&br->lock);
     }

-    err = br_changelink(dev, tb, data);
+    err = register_netdevice(dev);
     if (err)
         return err;

-    return register_netdevice(dev);
+    err = br_changelink(dev, tb, data);
+    if (err)
+        unregister_netdevice(dev);
+    return err;
 }

 static size_t br_get_size(const struct net_device *brdev)
@@ -620,6 +620,7 @@ void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port,
 void br_multicast_count(struct net_bridge *br, const struct net_bridge_port *p,
                         const struct sk_buff *skb, u8 type, u8 dir);
 int br_multicast_init_stats(struct net_bridge *br);
+void br_multicast_uninit_stats(struct net_bridge *br);
 void br_multicast_get_stats(const struct net_bridge *br,
                             const struct net_bridge_port *p,
                             struct br_mcast_stats *dest);
@@ -760,6 +761,10 @@ static inline int br_multicast_init_stats(struct net_bridge *br)
     return 0;
 }

+static inline void br_multicast_uninit_stats(struct net_bridge *br)
+{
+}
+
 static inline int br_multicast_igmp_type(const struct sk_buff *skb)
 {
     return 0;
@@ -398,7 +398,7 @@ int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
                            struct iov_iter *to, int len)
 {
     int start = skb_headlen(skb);
-    int i, copy = start - offset;
+    int i, copy = start - offset, start_off = offset, n;
     struct sk_buff *frag_iter;

     trace_skb_copy_datagram_iovec(skb, len);
@@ -407,11 +407,12 @@ int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
     if (copy > 0) {
         if (copy > len)
             copy = len;
-        if (copy_to_iter(skb->data + offset, copy, to) != copy)
+        n = copy_to_iter(skb->data + offset, copy, to);
+        offset += n;
+        if (n != copy)
             goto short_copy;
         if ((len -= copy) == 0)
             return 0;
-        offset += copy;
     }

     /* Copy paged appendix. Hmm... why does this look so complicated? */
@@ -425,13 +426,14 @@ int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
         if ((copy = end - offset) > 0) {
             if (copy > len)
                 copy = len;
-            if (copy_page_to_iter(skb_frag_page(frag),
+            n = copy_page_to_iter(skb_frag_page(frag),
                                   frag->page_offset + offset -
-                                  start, copy, to) != copy)
+                                  start, copy, to);
+            offset += n;
+            if (n != copy)
                 goto short_copy;
             if (!(len -= copy))
                 return 0;
-            offset += copy;
         }
         start = end;
     }
@@ -463,6 +465,7 @@ int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
      */

 fault:
+    iov_iter_revert(to, offset - start_off);
     return -EFAULT;

 short_copy:
@@ -613,7 +616,7 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
                                       __wsum *csump)
 {
     int start = skb_headlen(skb);
-    int i, copy = start - offset;
+    int i, copy = start - offset, start_off = offset;
     struct sk_buff *frag_iter;
     int pos = 0;
     int n;
@@ -623,11 +626,11 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
         if (copy > len)
             copy = len;
         n = csum_and_copy_to_iter(skb->data + offset, copy, csump, to);
+        offset += n;
         if (n != copy)
             goto fault;
         if ((len -= copy) == 0)
             return 0;
-        offset += copy;
         pos = copy;
     }

@@ -649,12 +652,12 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
                                       offset - start, copy,
                                       &csum2, to);
             kunmap(page);
+            offset += n;
             if (n != copy)
                 goto fault;
             *csump = csum_block_add(*csump, csum2, pos);
             if (!(len -= copy))
                 return 0;
-            offset += copy;
             pos += copy;
         }
         start = end;
@@ -687,6 +690,7 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
     return 0;

 fault:
+    iov_iter_revert(to, offset - start_off);
     return -EFAULT;
 }

@@ -771,6 +775,7 @@ int skb_copy_and_csum_datagram_msg(struct sk_buff *skb,
     }
     return 0;
 csum_error:
+    iov_iter_revert(&msg->msg_iter, chunk);
     return -EINVAL;
 fault:
     return -EFAULT;
@@ -6757,7 +6757,6 @@ int dev_change_xdp_fd(struct net_device *dev, int fd, u32 flags)

     return err;
 }
-EXPORT_SYMBOL(dev_change_xdp_fd);

 /**
  *  dev_new_index - allocate an ifindex
@@ -461,7 +461,7 @@ static void clusterip_tg_destroy(const struct xt_tgdtor_param *par)

     clusterip_config_put(cipinfo->config);

-    nf_ct_netns_get(par->net, par->family);
+    nf_ct_netns_put(par->net, par->family);
 }

 #ifdef CONFIG_COMPAT
@@ -2620,7 +2620,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
     skb_reset_network_header(skb);

     /* Bugfix: need to give ip_route_input enough of an IP header to not gag. */
-    ip_hdr(skb)->protocol = IPPROTO_ICMP;
+    ip_hdr(skb)->protocol = IPPROTO_UDP;
     skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));

     src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0;
@@ -2322,6 +2322,7 @@ int tcp_disconnect(struct sock *sk, int flags)
     tcp_init_send_head(sk);
     memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
     __sk_dst_reset(sk);
+    tcp_saved_syn_free(tp);

     /* Clean up fastopen related fields */
     tcp_free_fastopen_req(tp);
@@ -1935,6 +1935,7 @@ void tcp_enter_loss(struct sock *sk)
     struct tcp_sock *tp = tcp_sk(sk);
     struct net *net = sock_net(sk);
     struct sk_buff *skb;
+    bool new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery;
     bool is_reneg;      /* is receiver reneging on SACKs? */
     bool mark_lost;

@@ -1994,15 +1995,18 @@ void tcp_enter_loss(struct sock *sk)
     tp->high_seq = tp->snd_nxt;
     tcp_ecn_queue_cwr(tp);

-    /* F-RTO RFC5682 sec 3.1 step 1 mandates to disable F-RTO
-     * if a previous recovery is underway, otherwise it may incorrectly
-     * call a timeout spurious if some previously retransmitted packets
-     * are s/acked (sec 3.2). We do not apply that retriction since
-     * retransmitted skbs are permanently tagged with TCPCB_EVER_RETRANS
-     * so FLAG_ORIG_SACK_ACKED is always correct. But we do disable F-RTO
-     * on PTMU discovery to avoid sending new data.
+    /* F-RTO RFC5682 sec 3.1 step 1: retransmit SND.UNA if no previous
+     * loss recovery is underway except recurring timeout(s) on
+     * the same SND.UNA (sec 3.2). Disable F-RTO on path MTU probing
+     *
+     * In theory F-RTO can be used repeatedly during loss recovery.
+     * In practice this interacts badly with broken middle-boxes that
+     * falsely raise the receive window, which results in repeated
+     * timeouts and stop-and-go behavior.
      */
-    tp->frto = sysctl_tcp_frto && !inet_csk(sk)->icsk_mtup.probe_size;
+    tp->frto = sysctl_tcp_frto &&
+               (new_recovery || icsk->icsk_retransmits) &&
+               !inet_csk(sk)->icsk_mtup.probe_size;
 }

 /* If ACK arrived pointing to a remembered SACK, it means that our
@@ -2999,6 +2999,8 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority)
 {
     struct sk_buff *skb;

+    TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS);
+
     /* NOTE: No TCP options attached and we never retransmit this. */
     skb = alloc_skb(MAX_TCP_HEADER, priority);
     if (!skb) {
@@ -3014,8 +3016,6 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority)
     /* Send it off. */
     if (tcp_transmit_skb(sk, skb, 0, priority))
         NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
-
-    TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS);
 }

 /* Send a crossed SYN-ACK during socket establishment.
@@ -3626,14 +3626,19 @@ static int addrconf_ifdown(struct net_device *dev, int how)
     INIT_LIST_HEAD(&del_list);
     list_for_each_entry_safe(ifa, tmp, &idev->addr_list, if_list) {
         struct rt6_info *rt = NULL;
+        bool keep;

         addrconf_del_dad_work(ifa);

+        keep = keep_addr && (ifa->flags & IFA_F_PERMANENT) &&
+            !addr_is_local(&ifa->addr);
+        if (!keep)
+            list_move(&ifa->if_list, &del_list);
+
         write_unlock_bh(&idev->lock);
         spin_lock_bh(&ifa->lock);

-        if (keep_addr && (ifa->flags & IFA_F_PERMANENT) &&
-            !addr_is_local(&ifa->addr)) {
+        if (keep) {
             /* set state to skip the notifier below */
             state = INET6_IFADDR_STATE_DEAD;
             ifa->state = 0;
@@ -3645,8 +3650,6 @@ static int addrconf_ifdown(struct net_device *dev, int how)
         } else {
             state = ifa->state;
             ifa->state = INET6_IFADDR_STATE_DEAD;
-
-            list_move(&ifa->if_list, &del_list);
         }

         spin_unlock_bh(&ifa->lock);
@@ -1383,8 +1383,6 @@ static int pppol2tp_setsockopt(struct socket *sock, int level, int optname,
     } else
         err = pppol2tp_session_setsockopt(sk, session, optname, val);

-    err = 0;
-
 end_put_sess:
     sock_put(sk);
 end:
@@ -1507,8 +1505,13 @@ static int pppol2tp_getsockopt(struct socket *sock, int level, int optname,

         err = pppol2tp_tunnel_getsockopt(sk, tunnel, optname, &val);
         sock_put(ps->tunnel_sock);
-    } else
+        if (err)
+            goto end_put_sess;
+    } else {
         err = pppol2tp_session_getsockopt(sk, session, optname, &val);
+        if (err)
+            goto end_put_sess;
+    }

     err = -EFAULT;
     if (put_user(len, optlen))
@@ -57,7 +57,7 @@ void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp,
     hlist_del_rcu(&exp->hnode);
     net->ct.expect_count--;

-    hlist_del(&exp->lnode);
+    hlist_del_rcu(&exp->lnode);
     master_help->expecting[exp->class]--;

     nf_ct_expect_event_report(IPEXP_DESTROY, exp, portid, report);
@@ -363,7 +363,7 @@ static void nf_ct_expect_insert(struct nf_conntrack_expect *exp)
     /* two references : one for hash insert, one for the timer */
     atomic_add(2, &exp->use);

-    hlist_add_head(&exp->lnode, &master_help->expectations);
+    hlist_add_head_rcu(&exp->lnode, &master_help->expectations);
     master_help->expecting[exp->class]++;

     hlist_add_head_rcu(&exp->hnode, &nf_ct_expect_hash[h]);
@@ -158,16 +158,25 @@ nf_conntrack_helper_try_module_get(const char *name, u16 l3num, u8 protonum)
 {
     struct nf_conntrack_helper *h;

+    rcu_read_lock();
+
     h = __nf_conntrack_helper_find(name, l3num, protonum);
 #ifdef CONFIG_MODULES
     if (h == NULL) {
-        if (request_module("nfct-helper-%s", name) == 0)
+        rcu_read_unlock();
+        if (request_module("nfct-helper-%s", name) == 0) {
+            rcu_read_lock();
             h = __nf_conntrack_helper_find(name, l3num, protonum);
+        } else {
+            return h;
+        }
     }
 #endif
     if (h != NULL && !try_module_get(h->me))
         h = NULL;

+    rcu_read_unlock();
+
     return h;
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_helper_try_module_get);
@@ -311,38 +320,36 @@ void nf_ct_helper_expectfn_unregister(struct nf_ct_helper_expectfn *n)
 }
 EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_unregister);

+/* Caller should hold the rcu lock */
 struct nf_ct_helper_expectfn *
 nf_ct_helper_expectfn_find_by_name(const char *name)
 {
     struct nf_ct_helper_expectfn *cur;
     bool found = false;

-    rcu_read_lock();
     list_for_each_entry_rcu(cur, &nf_ct_helper_expectfn_list, head) {
         if (!strcmp(cur->name, name)) {
             found = true;
             break;
         }
     }
-    rcu_read_unlock();
     return found ? cur : NULL;
 }
 EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_find_by_name);

+/* Caller should hold the rcu lock */
 struct nf_ct_helper_expectfn *
 nf_ct_helper_expectfn_find_by_symbol(const void *symbol)
 {
     struct nf_ct_helper_expectfn *cur;
     bool found = false;

-    rcu_read_lock();
     list_for_each_entry_rcu(cur, &nf_ct_helper_expectfn_list, head) {
         if (cur->expectfn == symbol) {
             found = true;
             break;
         }
     }
-    rcu_read_unlock();
     return found ? cur : NULL;
 }
 EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_find_by_symbol);
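The helper-lookup changes above shift the RCU read-side locking to the callers ("Caller should hold the rcu lock"). A small illustrative caller, not taken from this diff:

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <net/netfilter/nf_conntrack_helper.h>

static void example_expectfn_lookup(const char *name)
{
    struct nf_ct_helper_expectfn *expectfn;

    rcu_read_lock();
    expectfn = nf_ct_helper_expectfn_find_by_name(name);
    if (expectfn)
        pr_debug("found expectfn %s\n", expectfn->name);
    rcu_read_unlock();
}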
@@ -1488,11 +1488,16 @@ static int ctnetlink_change_helper(struct nf_conn *ct,
          * treat the second attempt as a no-op instead of returning
          * an error.
          */
-        if (help && help->helper &&
-            !strcmp(help->helper->name, helpname))
-            return 0;
-        else
-            return -EBUSY;
+        err = -EBUSY;
+        if (help) {
+            rcu_read_lock();
+            helper = rcu_dereference(help->helper);
+            if (helper && !strcmp(helper->name, helpname))
+                err = 0;
+            rcu_read_unlock();
+        }
+
+        return err;
     }

     if (!strcmp(helpname, "")) {
@@ -1929,9 +1934,9 @@ static int ctnetlink_new_conntrack(struct net *net, struct sock *ctnl,

             err = 0;
             if (test_bit(IPS_EXPECTED_BIT, &ct->status))
-                events = IPCT_RELATED;
+                events = 1 << IPCT_RELATED;
             else
-                events = IPCT_NEW;
+                events = 1 << IPCT_NEW;

             if (cda[CTA_LABELS] &&
                 ctnetlink_attach_labels(ct, cda) == 0)
@@ -2675,8 +2680,8 @@ ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
     last = (struct nf_conntrack_expect *)cb->args[1];
     for (; cb->args[0] < nf_ct_expect_hsize; cb->args[0]++) {
 restart:
-        hlist_for_each_entry(exp, &nf_ct_expect_hash[cb->args[0]],
-                             hnode) {
+        hlist_for_each_entry_rcu(exp, &nf_ct_expect_hash[cb->args[0]],
+                                 hnode) {
             if (l3proto && exp->tuple.src.l3num != l3proto)
                 continue;

@@ -2727,7 +2732,7 @@ ctnetlink_exp_ct_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
     rcu_read_lock();
     last = (struct nf_conntrack_expect *)cb->args[1];
 restart:
-    hlist_for_each_entry(exp, &help->expectations, lnode) {
+    hlist_for_each_entry_rcu(exp, &help->expectations, lnode) {
         if (l3proto && exp->tuple.src.l3num != l3proto)
             continue;
         if (cb->args[1]) {
@@ -2789,6 +2794,12 @@ static int ctnetlink_dump_exp_ct(struct net *net, struct sock *ctnl,
         return -ENOENT;

     ct = nf_ct_tuplehash_to_ctrack(h);
+    /* No expectation linked to this connection tracking. */
+    if (!nfct_help(ct)) {
+        nf_ct_put(ct);
+        return 0;
+    }
+
     c.data = ct;

     err = netlink_dump_start(ctnl, skb, nlh, &c);
@@ -3133,23 +3144,27 @@ ctnetlink_create_expect(struct net *net,
         return -ENOENT;
     ct = nf_ct_tuplehash_to_ctrack(h);

+    rcu_read_lock();
     if (cda[CTA_EXPECT_HELP_NAME]) {
         const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]);

         helper = __nf_conntrack_helper_find(helpname, u3,
                                             nf_ct_protonum(ct));
         if (helper == NULL) {
+            rcu_read_unlock();
 #ifdef CONFIG_MODULES
             if (request_module("nfct-helper-%s", helpname) < 0) {
                 err = -EOPNOTSUPP;
                 goto err_ct;
             }
+            rcu_read_lock();
             helper = __nf_conntrack_helper_find(helpname, u3,
                                                 nf_ct_protonum(ct));
             if (helper) {
                 err = -EAGAIN;
-                goto err_ct;
+                goto err_rcu;
             }
+            rcu_read_unlock();
 #endif
             err = -EOPNOTSUPP;
             goto err_ct;
@@ -3159,11 +3174,13 @@ ctnetlink_create_expect(struct net *net,
     exp = ctnetlink_alloc_expect(cda, ct, helper, &tuple, &mask);
     if (IS_ERR(exp)) {
         err = PTR_ERR(exp);
-        goto err_ct;
+        goto err_rcu;
     }

     err = nf_ct_expect_related_report(exp, portid, report);
     nf_ct_expect_put(exp);
+err_rcu:
+    rcu_read_unlock();
 err_ct:
     nf_ct_put(ct);
     return err;
@@ -101,11 +101,13 @@ nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range *range,
         rcu_read_lock();
         idev = __in6_dev_get(skb->dev);
         if (idev != NULL) {
+            read_lock_bh(&idev->lock);
             list_for_each_entry(ifa, &idev->addr_list, if_list) {
                 newdst = ifa->addr;
                 addr = true;
                 break;
             }
+            read_unlock_bh(&idev->lock);
         }
         rcu_read_unlock();

@@ -21,6 +21,7 @@ struct nft_hash {
     enum nft_registers sreg:8;
     enum nft_registers dreg:8;
     u8 len;
+    bool autogen_seed:1;
     u32 modulus;
     u32 seed;
     u32 offset;
@@ -82,10 +83,12 @@ static int nft_hash_init(const struct nft_ctx *ctx,
     if (priv->offset + priv->modulus - 1 < priv->offset)
         return -EOVERFLOW;

-    if (tb[NFTA_HASH_SEED])
+    if (tb[NFTA_HASH_SEED]) {
         priv->seed = ntohl(nla_get_be32(tb[NFTA_HASH_SEED]));
-    else
+    } else {
+        priv->autogen_seed = true;
         get_random_bytes(&priv->seed, sizeof(priv->seed));
+    }

     return nft_validate_register_load(priv->sreg, len) &&
            nft_validate_register_store(ctx, priv->dreg, NULL,
@@ -105,7 +108,8 @@ static int nft_hash_dump(struct sk_buff *skb,
         goto nla_put_failure;
     if (nla_put_be32(skb, NFTA_HASH_MODULUS, htonl(priv->modulus)))
         goto nla_put_failure;
-    if (nla_put_be32(skb, NFTA_HASH_SEED, htonl(priv->seed)))
+    if (!priv->autogen_seed &&
+        nla_put_be32(skb, NFTA_HASH_SEED, htonl(priv->seed)))
         goto nla_put_failure;
     if (priv->offset != 0)
         if (nla_put_be32(skb, NFTA_HASH_OFFSET, htonl(priv->offset)))
@@ -104,7 +104,7 @@ tcpmss_mangle_packet(struct sk_buff *skb,
     tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
     tcp_hdrlen = tcph->doff * 4;

-    if (len < tcp_hdrlen)
+    if (len < tcp_hdrlen || tcp_hdrlen < sizeof(struct tcphdr))
         return -1;

     if (info->mss == XT_TCPMSS_CLAMP_PMTU) {
@@ -152,6 +152,10 @@ tcpmss_mangle_packet(struct sk_buff *skb,
     if (len > tcp_hdrlen)
         return 0;

+    /* tcph->doff has 4 bits, do not wrap it to 0 */
+    if (tcp_hdrlen >= 15 * 4)
+        return 0;
+
     /*
      * MSS Option not found ?! add it..
      */
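A short worked example of why the new tcp_hdrlen >= 15 * 4 test above is needed (illustrative helper, not from this diff): doff is a 4-bit count of 32-bit words, so 60 bytes is the largest header it can describe, and adding the 4-byte MSS option to a 60-byte header would wrap doff back to 0.

/* For a 60-byte header: ((60 + 4) / 4) & 0xf == 16 & 0xf == 0, i.e. a corrupt header. */
static unsigned int doff_after_adding_mss(unsigned int tcp_hdrlen)
{
    return ((tcp_hdrlen + 4) / 4) & 0xf;
}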
@@ -393,7 +393,8 @@ tproxy_laddr6(struct sk_buff *skb, const struct in6_addr *user_laddr,

     rcu_read_lock();
     indev = __in6_dev_get(skb->dev);
-    if (indev)
+    if (indev) {
+        read_lock_bh(&indev->lock);
         list_for_each_entry(ifa, &indev->addr_list, if_list) {
             if (ifa->flags & (IFA_F_TENTATIVE | IFA_F_DEPRECATED))
                 continue;
@@ -401,6 +402,8 @@ tproxy_laddr6(struct sk_buff *skb, const struct in6_addr *user_laddr,
             laddr = &ifa->addr;
             break;
         }
+        read_unlock_bh(&indev->lock);
+    }
     rcu_read_unlock();

     return laddr ? laddr : daddr;
@@ -794,7 +794,7 @@ static void attach_default_qdiscs(struct net_device *dev)
         }
     }
 #ifdef CONFIG_NET_SCHED
-    if (dev->qdisc)
+    if (dev->qdisc != &noop_qdisc)
         qdisc_hash_add(dev->qdisc);
 #endif
 }
@@ -7034,6 +7034,9 @@ int sctp_inet_listen(struct socket *sock, int backlog)
     if (sock->state != SS_UNCONNECTED)
         goto out;

+    if (!sctp_sstate(sk, LISTENING) && !sctp_sstate(sk, CLOSED))
+        goto out;
+
     /* If backlog is zero, disable listening. */
     if (!backlog) {
         if (sctp_sstate(sk, CLOSED))