Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Don't leak ipvs->sysctl_tbl, from Tommi Rentala.

 2) Fix neighbour table entry leak in rocker driver, from Ying Xue.

 3) Do not emit bonding notifications for unregistered interfaces, from Nicolas Dichtel.

 4) Set ipv6 flow label properly when in TIME_WAIT state, from Florent Fourcot.

 5) Fix regression in ipv6 multicast filter test, from Henning Rogge.

 6) do_replace() in various footables netfilter modules is missing a check for 0 counters in the data structure provided by the user. Fix from Dave Jones, and found with trinity.

 7) Fix RCU bug in packet scheduler classifier module unloads, from Daniel Borkmann.

 8) Avoid deadlock in tcp_get_info() by using u64_sync. From Eric Dumazet.

 9) Input packet processing can race with inetdev_destroy() teardown, fix potential OOPS in ip_error() by explicitly testing whether the inetdev is still attached. From Eric W Biederman.

10) MLDv2 parser in bridge multicast code breaks too early while parsing. Fix from Thadeu Lima de Souza Cascardo.

11) Asking for settings on a non-zero PHYID doesn't work because we do not import the command structure from the user and use the PHYID provided there. Fix from Arun Parameswaran.

12) Fix UDP checksums with IPv6 RAW sockets, from Vlad Yasevich.

13) Missing NF_TABLES depends for TPROXY etc. can cause build failures, fix from Florian Westphal.

14) Fix netfilter conntrack to handle RFC5961 challenge ACKs properly, from Jesper Dangaard Brouer.

15) If the netlink autobind retry fails, we have to reset the socket's portid back to zero. From Herbert Xu.

16) VXLAN netns exit code unregisters using the wrong device, from John W Linville.

17) Add some USB device IDs to the ath3k and btusb bluetooth drivers, from Dmitry Tunin and Wen-chien Jesse Sung.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (44 commits)
  bridge: fix lockdep splat
  net: core: 'ethtool' issue with querying phy settings
  bridge: fix parsing of MLDv2 reports
  ARM: zynq: DT: Use the zynq binding with macb
  net: macb: Disable half duplex gigabit on Zynq
  net: macb: Document zynq gem dt binding
  ipv4: fill in table id when replacing a route
  cdc_ncm: Fix tx_bytes statistics
  ipv4: Avoid crashing in ip_error
  tcp: fix a potential deadlock in tcp_get_info()
  net: sched: fix call_rcu() race on classifier module unloads
  net: phy: Make sure phy_start() always re-enables the phy interrupts
  ipv6: fix ECMP route replacement
  ipv6: do not delete previously existing ECMP routes if add fails
  Revert "netfilter: bridge: query conntrack about skb dnat"
  netfilter: ensure number of counters is >0 in do_replace()
  netfilter: nfnetlink_{log,queue}: Register pernet in first place
  tcp: don't over-send F-RTO probes
  tcp: only undo on partial ACKs in CA_Loss
  net/ipv6/udp: Fix ipv6 multicast socket filter regression
  ...
commit 0b6280c620
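Item 6 above is the recurring "num_counters" check that shows up in the ebtables, arptables, iptables and ip6tables hunks below. A minimal userspace-style sketch of the same idea — reject a zero count as well as a count big enough to overflow the allocation size before anything is allocated on behalf of the user; the helper name and struct are made up for illustration:

#include <errno.h>
#include <limits.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct counter { uint64_t pcnt, bcnt; };

/* Hypothetical helper mirroring the shape of the do_replace() checks. */
static int alloc_counters(unsigned int num_counters, struct counter **out)
{
	if (num_counters >= INT_MAX / sizeof(struct counter))
		return -ENOMEM;	/* multiplication would overflow */
	if (num_counters == 0)
		return -EINVAL;	/* nothing to copy back later */

	*out = calloc(num_counters, sizeof(**out));
	return *out ? 0 : -ENOMEM;
}

int main(void)
{
	struct counter *c = NULL;
	printf("0 counters  -> %d\n", alloc_counters(0, &c));
	printf("16 counters -> %d\n", alloc_counters(16, &c));
	free(c);
	return 0;
}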
@@ -3,7 +3,8 @@
Required properties:
- compatible: Should be "cdns,[<chip>-]{emac}"
  Use "cdns,at91rm9200-emac" Atmel at91rm9200 SoC.
  or the generic form: "cdns,emac".
  Use "cdns,zynq-gem" Xilinx Zynq-7xxx SoC.
  Or the generic form: "cdns,emac".
- reg: Address and length of the register set for the device
- interrupts: Should contain macb interrupt
- phy-mode: see ethernet.txt file in the same directory.
@@ -193,7 +193,7 @@ spi1: spi@e0007000 {
};

gem0: ethernet@e000b000 {
-	compatible = "cdns,gem";
+	compatible = "cdns,zynq-gem";
	reg = <0xe000b000 0x1000>;
	status = "disabled";
	interrupts = <0 22 4>;
@@ -204,7 +204,7 @@ gem0: ethernet@e000b000 {
};

gem1: ethernet@e000c000 {
-	compatible = "cdns,gem";
+	compatible = "cdns,zynq-gem";
	reg = <0xe000c000 0x1000>;
	status = "disabled";
	interrupts = <0 45 4>;
@@ -88,6 +88,7 @@ static const struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x04CA, 0x3007) },
{ USB_DEVICE(0x04CA, 0x3008) },
{ USB_DEVICE(0x04CA, 0x300b) },
{ USB_DEVICE(0x04CA, 0x300f) },
{ USB_DEVICE(0x04CA, 0x3010) },
{ USB_DEVICE(0x0930, 0x0219) },
{ USB_DEVICE(0x0930, 0x0220) },
@@ -104,6 +105,7 @@ static const struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x0cf3, 0xe003) },
{ USB_DEVICE(0x0CF3, 0xE004) },
{ USB_DEVICE(0x0CF3, 0xE005) },
{ USB_DEVICE(0x0CF3, 0xE006) },
{ USB_DEVICE(0x13d3, 0x3362) },
{ USB_DEVICE(0x13d3, 0x3375) },
{ USB_DEVICE(0x13d3, 0x3393) },
@@ -143,6 +145,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
{ USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
@@ -158,6 +161,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
{ USB_DEVICE(0x0CF3, 0x817a), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0xe006), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
@@ -186,6 +186,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
@@ -202,6 +203,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0xe006), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
@@ -218,6 +220,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 },

/* QCA ROME chipset */
{ USB_DEVICE(0x0cf3, 0xe007), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x0cf3, 0xe300), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x0cf3, 0xe360), .driver_info = BTUSB_QCA_ROME },
@@ -624,7 +624,7 @@ int __bond_opt_set(struct bonding *bond,
out:
	if (ret)
		bond_opt_error_interpret(bond, opt, ret, val);
-	else
+	else if (bond->dev->reg_state == NETREG_REGISTERED)
		call_netdevice_notifiers(NETDEV_CHANGEINFODATA, bond->dev);

	return ret;
@@ -350,6 +350,9 @@ static int macb_mii_probe(struct net_device *dev)
	else
		phydev->supported &= PHY_BASIC_FEATURES;

+	if (bp->caps & MACB_CAPS_NO_GIGABIT_HALF)
+		phydev->supported &= ~SUPPORTED_1000baseT_Half;

	phydev->advertising = phydev->supported;

	bp->link = 0;
@@ -1037,6 +1040,12 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
	 * add that if/when we get our hands on a full-blown MII PHY.
	 */

+	/* There is a hardware issue under heavy load where DMA can
+	 * stop, this causes endless "used buffer descriptor read"
+	 * interrupts but it can be cleared by re-enabling RX. See
+	 * the at91 manual, section 41.3.1 or the Zynq manual
+	 * section 16.7.4 for details.
+	 */
	if (status & MACB_BIT(RXUBR)) {
		ctrl = macb_readl(bp, NCR);
		macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
@@ -2693,6 +2702,14 @@ static const struct macb_config emac_config = {
	.init = at91ether_init,
};

+static const struct macb_config zynq_config = {
+	.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE |
+		MACB_CAPS_NO_GIGABIT_HALF,
+	.dma_burst_length = 16,
+	.clk_init = macb_clk_init,
+	.init = macb_init,
+};

static const struct of_device_id macb_dt_ids[] = {
	{ .compatible = "cdns,at32ap7000-macb" },
	{ .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config },
@@ -2703,6 +2720,7 @@ static const struct of_device_id macb_dt_ids[] = {
	{ .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
	{ .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
	{ .compatible = "cdns,emac", .data = &emac_config },
+	{ .compatible = "cdns,zynq-gem", .data = &zynq_config },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, macb_dt_ids);
@@ -393,6 +393,7 @@
#define MACB_CAPS_ISR_CLEAR_ON_WRITE 0x00000001
#define MACB_CAPS_USRIO_HAS_CLKEN 0x00000002
#define MACB_CAPS_USRIO_DEFAULT_IS_MII 0x00000004
+#define MACB_CAPS_NO_GIGABIT_HALF 0x00000008
#define MACB_CAPS_FIFO_MODE 0x10000000
#define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000
#define MACB_CAPS_SG_DISABLED 0x40000000
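The new MACB_CAPS_NO_GIGABIT_HALF flag works by masking a feature bit out of what the attached PHY is allowed to advertise. A small standalone sketch of that capability-mask idea, assuming simplified stand-ins for the driver's structures (the bit values mirror ethtool's SUPPORTED_* defines but are declared locally here):

#include <stdint.h>
#include <stdio.h>

#define MACB_CAPS_NO_GIGABIT_HALF  0x00000008u
#define SUPPORTED_1000baseT_Half   (1u << 8)
#define SUPPORTED_1000baseT_Full   (1u << 9)

struct fake_phy { uint32_t supported, advertising; };

static void probe(struct fake_phy *phy, uint32_t caps)
{
	/* Same shape as the macb_mii_probe() change: strip half-duplex
	 * gigabit when the MAC (e.g. Zynq GEM) cannot do it. */
	if (caps & MACB_CAPS_NO_GIGABIT_HALF)
		phy->supported &= ~SUPPORTED_1000baseT_Half;
	phy->advertising = phy->supported;
}

int main(void)
{
	struct fake_phy phy = {
		SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full, 0
	};
	probe(&phy, MACB_CAPS_NO_GIGABIT_HALF);
	printf("advertising = 0x%x\n", phy.advertising); /* full duplex only */
	return 0;
}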
@@ -3187,7 +3187,7 @@ int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
	int cqn = vhcr->in_modifier;
	struct mlx4_cq_context *cqc = inbox->buf;
	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
-	struct res_cq *cq;
+	struct res_cq *cq = NULL;
	struct res_mtt *mtt;

	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
@@ -3223,7 +3223,7 @@ int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
{
	int err;
	int cqn = vhcr->in_modifier;
-	struct res_cq *cq;
+	struct res_cq *cq = NULL;

	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
	if (err)
@@ -3362,7 +3362,7 @@ int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
	int err;
	int srqn = vhcr->in_modifier;
	struct res_mtt *mtt;
-	struct res_srq *srq;
+	struct res_srq *srq = NULL;
	struct mlx4_srq_context *srqc = inbox->buf;
	int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;

@@ -3406,7 +3406,7 @@ int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
{
	int err;
	int srqn = vhcr->in_modifier;
-	struct res_srq *srq;
+	struct res_srq *srq = NULL;

	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
	if (err)
@@ -2921,10 +2921,11 @@ static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port,
	struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr);
	int err = 0;

-	if (!n)
+	if (!n) {
		n = neigh_create(&arp_tbl, &ip_addr, dev);
-	if (!n)
-		return -ENOMEM;
+		if (IS_ERR(n))
+			return IS_ERR(n);
+	}

	/* If the neigh is already resolved, then go ahead and
	 * install the entry, otherwise start the ARP process to
@@ -2936,6 +2937,7 @@ static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port,
	else
		neigh_event_send(n, NULL);

+	neigh_release(n);
	return err;
}
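Part of the rocker fix is that neigh_create() reports failure through an error-encoded pointer rather than NULL, so a NULL test never fires. A hedged, self-contained sketch of that ERR_PTR()/IS_ERR() convention, re-implementing the helpers locally for illustration only (the real kernel macros live in linux/err.h):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095
static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

/* A constructor in the neigh_create() style: it never returns NULL,
 * failures come back as encoded error pointers. */
static void *create_entry(int fail)
{
	if (fail)
		return ERR_PTR(-ENOMEM);
	return malloc(16);
}

int main(void)
{
	void *n = create_entry(1);
	if (IS_ERR(n)) {	/* a NULL check here would miss the error */
		printf("create failed: %ld\n", PTR_ERR(n));
		return 1;
	}
	free(n);
	return 0;
}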
@@ -742,6 +742,9 @@ EXPORT_SYMBOL(phy_stop);
 */
void phy_start(struct phy_device *phydev)
{
+	bool do_resume = false;
+	int err = 0;

	mutex_lock(&phydev->lock);

	switch (phydev->state) {
@@ -752,11 +755,22 @@ void phy_start(struct phy_device *phydev)
		phydev->state = PHY_UP;
		break;
	case PHY_HALTED:
		/* make sure interrupts are re-enabled for the PHY */
		err = phy_enable_interrupts(phydev);
		if (err < 0)
			break;

		phydev->state = PHY_RESUMING;
		do_resume = true;
		break;
	default:
		break;
	}
	mutex_unlock(&phydev->lock);

	/* if phy was suspended, bring the physical link up again */
	if (do_resume)
		phy_resume(phydev);
}
EXPORT_SYMBOL(phy_start);
@@ -769,7 +783,7 @@ void phy_state_machine(struct work_struct *work)
	struct delayed_work *dwork = to_delayed_work(work);
	struct phy_device *phydev =
			container_of(dwork, struct phy_device, state_queue);
-	bool needs_aneg = false, do_suspend = false, do_resume = false;
+	bool needs_aneg = false, do_suspend = false;
	int err = 0;

	mutex_lock(&phydev->lock);
@@ -888,14 +902,6 @@ void phy_state_machine(struct work_struct *work)
		}
		break;
	case PHY_RESUMING:
-		err = phy_clear_interrupt(phydev);
-		if (err)
-			break;
-
-		err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);
-		if (err)
-			break;
-
		if (AUTONEG_ENABLE == phydev->autoneg) {
			err = phy_aneg_done(phydev);
			if (err < 0)
@@ -933,7 +939,6 @@ void phy_state_machine(struct work_struct *work)
			}
			phydev->adjust_link(phydev->attached_dev);
		}
-		do_resume = true;
		break;
	}

@@ -943,8 +948,6 @@ void phy_state_machine(struct work_struct *work)
		err = phy_start_aneg(phydev);
	else if (do_suspend)
		phy_suspend(phydev);
-	else if (do_resume)
-		phy_resume(phydev);

	if (err < 0)
		phy_error(phydev);
@@ -1053,13 +1056,14 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
{
	/* According to 802.3az,the EEE is supported only in full duplex-mode.
	 * Also EEE feature is active when core is operating with MII, GMII
-	 * or RGMII. Internal PHYs are also allowed to proceed and should
-	 * return an error if they do not support EEE.
+	 * or RGMII (all kinds). Internal PHYs are also allowed to proceed and
+	 * should return an error if they do not support EEE.
	 */
	if ((phydev->duplex == DUPLEX_FULL) &&
	    ((phydev->interface == PHY_INTERFACE_MODE_MII) ||
	    (phydev->interface == PHY_INTERFACE_MODE_GMII) ||
-	    (phydev->interface == PHY_INTERFACE_MODE_RGMII) ||
+	    (phydev->interface >= PHY_INTERFACE_MODE_RGMII &&
	     phydev->interface <= PHY_INTERFACE_MODE_RGMII_TXID) ||
	     phy_is_internal(phydev))) {
		int eee_lp, eee_cap, eee_adv;
		u32 lp, cap, adv;
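The phy_init_eee() change covers every RGMII variant with one range compare rather than listing each mode. A hedged sketch of that predicate with a locally defined enum; the only assumption carried over from the kernel is that the RGMII variants are declared contiguously:

#include <stdbool.h>
#include <stdio.h>

enum phy_interface {
	MODE_MII,
	MODE_GMII,
	MODE_RGMII,
	MODE_RGMII_ID,
	MODE_RGMII_RXID,
	MODE_RGMII_TXID,
	MODE_SGMII,
};

static bool interface_is_rgmii(enum phy_interface mode)
{
	/* One range check instead of enumerating every RGMII flavour. */
	return mode >= MODE_RGMII && mode <= MODE_RGMII_TXID;
}

int main(void)
{
	printf("RGMII_RXID -> %d\n", interface_is_rgmii(MODE_RGMII_RXID)); /* 1 */
	printf("SGMII      -> %d\n", interface_is_rgmii(MODE_SGMII));      /* 0 */
	return 0;
}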
@@ -1182,7 +1182,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
	 * payload data instead.
	 */
	usbnet_set_skb_tx_stats(skb_out, n,
-			ctx->tx_curr_frame_payload - skb_out->len);
+			(long)ctx->tx_curr_frame_payload - skb_out->len);

	return skb_out;
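The cdc_ncm cast matters because the frame payload can be smaller than the padded NTB actually sent, and subtracting the two as unsigned values wraps to a huge number. A self-contained illustration with made-up byte counts (casting both operands here to keep the example portable):

#include <stdio.h>

int main(void)
{
	unsigned int payload  = 1400;	/* bytes handed down by the stack */
	unsigned int wire_len = 1514;	/* padded frame actually transmitted */

	/* Unsigned arithmetic wraps: this is the bogus value the tx_bytes
	 * statistics saw before the fix. */
	unsigned int wrong = payload - wire_len;

	/* Widening to a signed type keeps the difference negative. */
	long right = (long)payload - (long)wire_len;

	printf("unsigned: %u\nsigned:   %ld\n", wrong, right);
	return 0;
}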
@@ -2961,7 +2961,7 @@ static void __net_exit vxlan_exit_net(struct net *net)
	 * to the list by the previous loop.
	 */
	if (!net_eq(dev_net(vxlan->dev), net))
-		unregister_netdevice_queue(dev, &list);
+		unregister_netdevice_queue(vxlan->dev, &list);
	}

	unregister_netdevice_many(&list);
@@ -17,6 +17,7 @@
#ifndef _LINUX_RHASHTABLE_H
#define _LINUX_RHASHTABLE_H

+#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/jhash.h>
@@ -100,6 +101,7 @@ struct rhashtable;
 * @key_len: Length of key
 * @key_offset: Offset of key in struct to be hashed
 * @head_offset: Offset of rhash_head in struct to be hashed
+ * @insecure_max_entries: Maximum number of entries (may be exceeded)
 * @max_size: Maximum size while expanding
 * @min_size: Minimum size while shrinking
 * @nulls_base: Base value to generate nulls marker
@@ -115,6 +117,7 @@ struct rhashtable_params {
	size_t key_len;
	size_t key_offset;
	size_t head_offset;
+	unsigned int insecure_max_entries;
	unsigned int max_size;
	unsigned int min_size;
	u32 nulls_base;
@@ -286,6 +289,18 @@ static inline bool rht_grow_above_100(const struct rhashtable *ht,
		(!ht->p.max_size || tbl->size < ht->p.max_size);
}

+/**
+ * rht_grow_above_max - returns true if table is above maximum
+ * @ht: hash table
+ * @tbl: current table
+ */
+static inline bool rht_grow_above_max(const struct rhashtable *ht,
+				      const struct bucket_table *tbl)
+{
+	return ht->p.insecure_max_entries &&
+	       atomic_read(&ht->nelems) >= ht->p.insecure_max_entries;
+}

/* The bucket lock is selected based on the hash and protects mutations
 * on a group of hash buckets.
 *
@@ -589,6 +604,10 @@ static inline int __rhashtable_insert_fast(
		goto out;
	}

+	err = -E2BIG;
+	if (unlikely(rht_grow_above_max(ht, tbl)))
+		goto out;

	if (unlikely(rht_grow_above_100(ht, tbl))) {
slow_path:
		spin_unlock_bh(lock);
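The rhashtable change gates inserts on a hard ceiling before anything else happens. A toy, counter-based sketch of that "check before insert" shape — not the rhashtable API, just the same control flow with a made-up container:

#include <errno.h>
#include <stdio.h>

struct bounded_set {
	unsigned int nelems;
	unsigned int max_entries;	/* analogous to insecure_max_entries */
};

static int bounded_insert(struct bounded_set *s)
{
	/* Same shape as the rht_grow_above_max() gate on the insert fast
	 * path: fail with -E2BIG instead of growing without limit. */
	if (s->max_entries && s->nelems >= s->max_entries)
		return -E2BIG;
	s->nelems++;
	return 0;
}

int main(void)
{
	struct bounded_set s = { .nelems = 0, .max_entries = 2 };
	for (int i = 0; i < 3; i++)
		printf("insert %d -> %d\n", i, bounded_insert(&s));
	return 0;	/* third insert fails with -E2BIG */
}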
@@ -176,6 +176,7 @@ struct nf_bridge_info {
	struct net_device *physindev;
	struct net_device *physoutdev;
	char neigh_header[8];
+	__be32 ipv4_daddr;
};
#endif
@@ -158,6 +158,8 @@ struct tcp_sock {
	 * sum(delta(snd_una)), or how many bytes
	 * were acked.
	 */
+	struct u64_stats_sync syncp; /* protects 64bit vars (cf tcp_get_info()) */

	u32 snd_una;	/* First byte we want an ack for */
	u32 snd_sml;	/* Last byte of the most recently transmitted small packet */
	u32 rcv_tstamp;	/* timestamp of last received ACK (for keepalives) */
@@ -129,9 +129,10 @@ struct inet_connection_sock {

		u32 probe_timestamp;
	} icsk_mtup;
-	u32 icsk_ca_priv[16];
	u32 icsk_user_timeout;
-#define ICSK_CA_PRIV_SIZE (16 * sizeof(u32))
+
+	u64 icsk_ca_priv[64 / sizeof(u64)];
+#define ICSK_CA_PRIV_SIZE (8 * sizeof(u64))
};

#define ICSK_TIME_RETRANS 1 /* Retransmit timer */
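The icsk_ca_priv change keeps the same 64-byte scratch area but stores it as a u64 array, so anything a congestion-control module embeds there is naturally 8-byte aligned. A hedged, standalone check of that layout property (plain C types standing in for the kernel's):

#include <stdint.h>
#include <stdio.h>

#define ICSK_CA_PRIV_SIZE (8 * sizeof(uint64_t))

struct ca_scratch_old { uint32_t priv[16]; };
struct ca_scratch_new { uint64_t priv[64 / sizeof(uint64_t)]; };

_Static_assert(sizeof(struct ca_scratch_old) == sizeof(struct ca_scratch_new),
	       "scratch area keeps its 64-byte size");
_Static_assert(_Alignof(struct ca_scratch_new) == _Alignof(uint64_t),
	       "new scratch area inherits 64-bit alignment");

int main(void)
{
	printf("size=%zu align=%zu\n", sizeof(struct ca_scratch_new),
	       _Alignof(struct ca_scratch_new));
	return 0;
}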
@@ -42,6 +42,9 @@ enum tcp_conntrack {
/* The field td_maxack has been set */
#define IP_CT_TCP_FLAG_MAXACK_SET 0x20

+/* Marks possibility for expected RFC5961 challenge ACK */
+#define IP_CT_EXP_CHALLENGE_ACK 0x40

struct nf_ct_tcp_flags {
	__u8 flags;
	__u8 mask;
@@ -337,7 +337,7 @@ struct rtnexthop {
#define RTNH_F_DEAD		1	/* Nexthop is dead (used by multipath) */
#define RTNH_F_PERVASIVE	2	/* Do recursive gateway lookup */
#define RTNH_F_ONLINK		4	/* Gateway is forced on link */
-#define RTNH_F_EXTERNAL		8	/* Route installed externally */
+#define RTNH_F_OFFLOAD		8	/* offloaded route */

/* Macros to handle hexthops */
@@ -14,6 +14,7 @@
 * published by the Free Software Foundation.
 */

+#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
@@ -446,6 +447,10 @@ int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
	if (key && rhashtable_lookup_fast(ht, key, ht->p))
		goto exit;

+	err = -E2BIG;
+	if (unlikely(rht_grow_above_max(ht, tbl)))
+		goto exit;

	err = -EAGAIN;
	if (rhashtable_check_elasticity(ht, tbl, hash) ||
	    rht_grow_above_100(ht, tbl))
@@ -738,6 +743,12 @@ int rhashtable_init(struct rhashtable *ht,
	if (params->max_size)
		ht->p.max_size = rounddown_pow_of_two(params->max_size);

+	if (params->insecure_max_entries)
+		ht->p.insecure_max_entries =
+			rounddown_pow_of_two(params->insecure_max_entries);
+	else
+		ht->p.insecure_max_entries = ht->p.max_size * 2;

	ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE);

	/* The maximum (not average) chain length grows with the
@@ -443,7 +443,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
	case NETDEV_UP:
		/* Put all VLANs for this dev in the up state too. */
		vlan_group_for_each_dev(grp, i, vlandev) {
-			flgs = vlandev->flags;
+			flgs = dev_get_flags(vlandev);
			if (flgs & IFF_UP)
				continue;
@@ -2854,9 +2854,11 @@ static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
		 * state. If we were running both LE and BR/EDR inquiry
		 * simultaneously, and BR/EDR inquiry is already
		 * finished, stop discovery, otherwise BR/EDR inquiry
-		 * will stop discovery when finished.
+		 * will stop discovery when finished. If we will resolve
+		 * remote device name, do not change discovery state.
		 */
-		if (!test_bit(HCI_INQUIRY, &hdev->flags))
+		if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
+		    hdev->discovery.state != DISCOVERY_RESOLVING)
			hci_discovery_set_state(hdev,
						DISCOVERY_STOPPED);
	} else {
@@ -1072,7 +1072,7 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,

		err = br_ip6_multicast_add_group(br, port, &grec->grec_mca,
						 vid);
-		if (!err)
+		if (err)
			break;
	}
@@ -37,10 +37,6 @@
#include <net/route.h>
#include <net/netfilter/br_netfilter.h>

-#if IS_ENABLED(CONFIG_NF_CONNTRACK)
-#include <net/netfilter/nf_conntrack.h>
-#endif

#include <asm/uaccess.h>
#include "br_private.h"
#ifdef CONFIG_SYSCTL
@@ -350,24 +346,15 @@ static int br_nf_pre_routing_finish_bridge(struct sock *sk, struct sk_buff *skb)
	return 0;
}

-static bool dnat_took_place(const struct sk_buff *skb)
+static bool daddr_was_changed(const struct sk_buff *skb,
+			      const struct nf_bridge_info *nf_bridge)
{
-#if IS_ENABLED(CONFIG_NF_CONNTRACK)
-	enum ip_conntrack_info ctinfo;
-	struct nf_conn *ct;
-
-	ct = nf_ct_get(skb, &ctinfo);
-	if (!ct || nf_ct_is_untracked(ct))
-		return false;
-
-	return test_bit(IPS_DST_NAT_BIT, &ct->status);
-#else
-	return false;
-#endif
+	return ip_hdr(skb)->daddr != nf_bridge->ipv4_daddr;
}

/* This requires some explaining. If DNAT has taken place,
 * we will need to fix up the destination Ethernet address.
 * This is also true when SNAT takes place (for the reply direction).
 *
 * There are two cases to consider:
 * 1. The packet was DNAT'ed to a device in the same bridge
@@ -421,7 +408,7 @@ static int br_nf_pre_routing_finish(struct sock *sk, struct sk_buff *skb)
		nf_bridge->pkt_otherhost = false;
	}
	nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;
-	if (dnat_took_place(skb)) {
+	if (daddr_was_changed(skb, nf_bridge)) {
		if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) {
			struct in_device *in_dev = __in_dev_get_rcu(dev);

@@ -632,6 +619,7 @@ static unsigned int br_nf_pre_routing(const struct nf_hook_ops *ops,
				      struct sk_buff *skb,
				      const struct nf_hook_state *state)
{
+	struct nf_bridge_info *nf_bridge;
	struct net_bridge_port *p;
	struct net_bridge *br;
	__u32 len = nf_bridge_encap_header_len(skb);
@@ -669,6 +657,9 @@ static unsigned int br_nf_pre_routing(const struct nf_hook_ops *ops,
	if (!setup_pre_routing(skb))
		return NF_DROP;

+	nf_bridge = nf_bridge_info_get(skb);
+	nf_bridge->ipv4_daddr = ip_hdr(skb)->daddr;

	skb->protocol = htons(ETH_P_IP);

	NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, state->sk, skb,
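The revert replaces "ask conntrack whether DNAT happened" with "remember the original destination before the IPv4 PRE_ROUTING hook and compare afterwards". A hedged toy model of that snapshot-and-compare idea, with stand-in structs and a fabricated DNAT step:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct pkt { uint32_t daddr; };
struct bridge_info { uint32_t ipv4_daddr; };	/* like nf_bridge_info::ipv4_daddr */

/* Stand-in for whatever the IPv4 PRE_ROUTING hook (e.g. a DNAT rule) does. */
static void run_prerouting(struct pkt *p, bool dnat)
{
	if (dnat)
		p->daddr = 0x0a000002;	/* rewritten destination */
}

static bool daddr_was_changed(const struct pkt *p, const struct bridge_info *nb)
{
	return p->daddr != nb->ipv4_daddr;
}

int main(void)
{
	struct pkt p = { .daddr = 0xc0a80001 };
	struct bridge_info nb = { .ipv4_daddr = p.daddr };	/* snapshot first */

	run_prerouting(&p, true);
	printf("needs reroute: %d\n", daddr_was_changed(&p, &nb));	/* 1 */
	return 0;
}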
@@ -97,7 +97,9 @@ static void br_forward_delay_timer_expired(unsigned long arg)
			netif_carrier_on(br->dev);
	}
	br_log_state(p);
+	rcu_read_lock();
	br_ifinfo_notify(RTM_NEWLINK, p);
+	rcu_read_unlock();
	spin_unlock(&br->lock);
}
@@ -1117,6 +1117,8 @@ static int do_replace(struct net *net, const void __user *user,
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
		return -ENOMEM;
+	if (tmp.num_counters == 0)
+		return -EINVAL;

	tmp.name[sizeof(tmp.name) - 1] = 0;

@@ -2159,6 +2161,8 @@ static int compat_copy_ebt_replace_from_user(struct ebt_replace *repl,
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
		return -ENOMEM;
+	if (tmp.num_counters == 0)
+		return -EINVAL;

	memcpy(repl, &tmp, offsetof(struct ebt_replace, hook_entry));
@@ -359,7 +359,15 @@ static int ethtool_get_settings(struct net_device *dev, void __user *useraddr)
	int err;
	struct ethtool_cmd cmd;

-	err = __ethtool_get_settings(dev, &cmd);
+	if (!dev->ethtool_ops->get_settings)
+		return -EOPNOTSUPP;
+
+	if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
+		return -EFAULT;
+
+	cmd.cmd = ETHTOOL_GSET;
+
+	err = dev->ethtool_ops->get_settings(dev, &cmd);
	if (err < 0)
		return err;
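This fix matters because the kernel now reads the caller's struct ethtool_cmd, so fields the user fills in (notably phy_address on multi-PHY setups) actually reach the driver. A minimal, hedged userspace sketch of issuing ETHTOOL_GSET over SIOCETHTOOL; the interface name "eth0" is a placeholder and error handling is kept to the essentials:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <unistd.h>

int main(void)
{
	struct ethtool_cmd ecmd;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0) { perror("socket"); return 1; }

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* placeholder interface */

	memset(&ecmd, 0, sizeof(ecmd));
	ecmd.cmd = ETHTOOL_GSET;
	/* With the fix, a non-default ecmd.phy_address set here is seen by
	 * the driver instead of being replaced by a zeroed structure. */
	ifr.ifr_data = (void *)&ecmd;

	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("speed %u Mb/s, phy %u\n",
		       ethtool_cmd_speed(&ecmd), ecmd.phy_address);
	else
		perror("SIOCETHTOOL");

	close(fd);
	return 0;
}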
@@ -2416,6 +2416,9 @@ void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change,
{
	struct sk_buff *skb;

+	if (dev->reg_state != NETREG_REGISTERED)
+		return;

	skb = rtmsg_ifinfo_build_skb(type, dev, change, flags);
	if (skb)
		rtmsg_ifinfo_send(skb, dev, flags);
@@ -1164,6 +1164,7 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
		state = fa->fa_state;
		new_fa->fa_state = state & ~FA_S_ACCESSED;
		new_fa->fa_slen = fa->fa_slen;
+		new_fa->tb_id = tb->tb_id;

		err = netdev_switch_fib_ipv4_add(key, plen, fi,
						 new_fa->fa_tos,
@@ -1764,7 +1765,7 @@ void fib_table_flush_external(struct fib_table *tb)
			/* record local slen */
			slen = fa->fa_slen;

-			if (!fi || !(fi->fib_flags & RTNH_F_EXTERNAL))
+			if (!fi || !(fi->fib_flags & RTNH_F_OFFLOAD))
				continue;

			netdev_switch_fib_ipv4_del(n->key,
@@ -1075,6 +1075,9 @@ static int do_replace(struct net *net, const void __user *user,
	/* overflow check */
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;
+	if (tmp.num_counters == 0)
+		return -EINVAL;

	tmp.name[sizeof(tmp.name)-1] = 0;

	newinfo = xt_alloc_table_info(tmp.size);
@@ -1499,6 +1502,9 @@ static int compat_do_replace(struct net *net, void __user *user,
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;
+	if (tmp.num_counters == 0)
+		return -EINVAL;

	tmp.name[sizeof(tmp.name)-1] = 0;

	newinfo = xt_alloc_table_info(tmp.size);
@@ -1262,6 +1262,9 @@ do_replace(struct net *net, const void __user *user, unsigned int len)
	/* overflow check */
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;
+	if (tmp.num_counters == 0)
+		return -EINVAL;

	tmp.name[sizeof(tmp.name)-1] = 0;

	newinfo = xt_alloc_table_info(tmp.size);
@@ -1809,6 +1812,9 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;
+	if (tmp.num_counters == 0)
+		return -EINVAL;

	tmp.name[sizeof(tmp.name)-1] = 0;

	newinfo = xt_alloc_table_info(tmp.size);
@@ -902,6 +902,10 @@ static int ip_error(struct sk_buff *skb)
	bool send;
	int code;

+	/* IP on this device is disabled. */
+	if (!in_dev)
+		goto out;

	net = dev_net(rt->dst.dev);
	if (!IN_DEV_FORWARD(in_dev)) {
		switch (rt->dst.error) {
@@ -402,6 +402,7 @@ void tcp_init_sock(struct sock *sk)
	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache = TCP_MSS_DEFAULT;
+	u64_stats_init(&tp->syncp);

	tp->reordering = sysctl_tcp_reordering;
	tcp_enable_early_retrans(tp);
@@ -2598,6 +2599,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	u32 now = tcp_time_stamp;
+	unsigned int start;
	u32 rate;

	memset(info, 0, sizeof(*info));
@@ -2665,10 +2667,11 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
	rate = READ_ONCE(sk->sk_max_pacing_rate);
	info->tcpi_max_pacing_rate = rate != ~0U ? rate : ~0ULL;

-	spin_lock_bh(&sk->sk_lock.slock);
-	info->tcpi_bytes_acked = tp->bytes_acked;
-	info->tcpi_bytes_received = tp->bytes_received;
-	spin_unlock_bh(&sk->sk_lock.slock);
+	do {
+		start = u64_stats_fetch_begin_irq(&tp->syncp);
+		info->tcpi_bytes_acked = tp->bytes_acked;
+		info->tcpi_bytes_received = tp->bytes_received;
+	} while (u64_stats_fetch_retry_irq(&tp->syncp, start));
}
EXPORT_SYMBOL_GPL(tcp_get_info);
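The deadlock fix replaces a spinlock in the read path with a sequence-counter retry loop: writers just bump a counter around their update, and a reader repeats its snapshot if the counter changed underneath it. A deliberately simplified, single-writer sketch of that protocol — not the kernel's u64_stats_sync, and the real primitive handles the memory-ordering details far more carefully; the data fields are C11 atomics here only to keep the example well defined:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct stats {
	atomic_uint seq;			/* even: stable, odd: update in progress */
	atomic_uint_fast64_t bytes_acked;
	atomic_uint_fast64_t bytes_received;
};

static void stats_update(struct stats *s, uint64_t acked, uint64_t rcvd)
{
	atomic_fetch_add(&s->seq, 1);	/* begin: sequence becomes odd */
	atomic_store_explicit(&s->bytes_acked, acked, memory_order_relaxed);
	atomic_store_explicit(&s->bytes_received, rcvd, memory_order_relaxed);
	atomic_fetch_add(&s->seq, 1);	/* end: sequence becomes even again */
}

static void stats_read(struct stats *s, uint64_t *acked, uint64_t *rcvd)
{
	unsigned int start;
	do {
		start = atomic_load(&s->seq);
		*acked = atomic_load_explicit(&s->bytes_acked, memory_order_relaxed);
		*rcvd  = atomic_load_explicit(&s->bytes_received, memory_order_relaxed);
		atomic_thread_fence(memory_order_acquire);
	} while ((start & 1) || start != atomic_load(&s->seq));	/* retry on race */
}

int main(void)
{
	struct stats s = { 0 };
	uint64_t a, r;
	stats_update(&s, 1000, 2000);
	stats_read(&s, &a, &r);
	printf("acked=%llu received=%llu\n",
	       (unsigned long long)a, (unsigned long long)r);
	return 0;
}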
@@ -206,6 +206,10 @@ static bool tcp_fastopen_create_child(struct sock *sk,
		skb_set_owner_r(skb2, child);
		__skb_queue_tail(&child->sk_receive_queue, skb2);
		tp->syn_data_acked = 1;

+		/* u64_stats_update_begin(&tp->syncp) not needed here,
+		 * as we certainly are not changing upper 32bit value (0)
+		 */
		tp->bytes_received = end_seq - TCP_SKB_CB(skb)->seq - 1;
	} else {
		end_seq = TCP_SKB_CB(skb)->seq + 1;
@@ -2698,16 +2698,21 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack)
	struct tcp_sock *tp = tcp_sk(sk);
	bool recovered = !before(tp->snd_una, tp->high_seq);

+	if ((flag & FLAG_SND_UNA_ADVANCED) &&
+	    tcp_try_undo_loss(sk, false))
+		return;

	if (tp->frto) { /* F-RTO RFC5682 sec 3.1 (sack enhanced version). */
		/* Step 3.b. A timeout is spurious if not all data are
		 * lost, i.e., never-retransmitted data are (s)acked.
		 */
-		if (tcp_try_undo_loss(sk, flag & FLAG_ORIG_SACK_ACKED))
+		if ((flag & FLAG_ORIG_SACK_ACKED) &&
+		    tcp_try_undo_loss(sk, true))
			return;

-		if (after(tp->snd_nxt, tp->high_seq) &&
-		    (flag & FLAG_DATA_SACKED || is_dupack)) {
-			tp->frto = 0; /* Loss was real: 2nd part of step 3.a */
+		if (after(tp->snd_nxt, tp->high_seq)) {
+			if (flag & FLAG_DATA_SACKED || is_dupack)
+				tp->frto = 0; /* Step 3.a. loss was real */
		} else if (flag & FLAG_SND_UNA_ADVANCED && !recovered) {
			tp->high_seq = tp->snd_nxt;
			__tcp_push_pending_frames(sk, tcp_current_mss(sk),
@@ -2732,8 +2737,6 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack)
		else if (flag & FLAG_SND_UNA_ADVANCED)
			tcp_reset_reno_sack(tp);
	}
-	if (tcp_try_undo_loss(sk, false))
-		return;
	tcp_xmit_retransmit_queue(sk);
}

@@ -3283,7 +3286,9 @@ static void tcp_snd_una_update(struct tcp_sock *tp, u32 ack)
{
	u32 delta = ack - tp->snd_una;

+	u64_stats_update_begin(&tp->syncp);
	tp->bytes_acked += delta;
+	u64_stats_update_end(&tp->syncp);
	tp->snd_una = ack;
}

@@ -3292,7 +3297,9 @@ static void tcp_rcv_nxt_update(struct tcp_sock *tp, u32 seq)
{
	u32 delta = seq - tp->rcv_nxt;

+	u64_stats_update_begin(&tp->syncp);
	tp->bytes_received += delta;
+	u64_stats_update_end(&tp->syncp);
	tp->rcv_nxt = seq;
}
@@ -300,7 +300,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
		tw->tw_v6_daddr = sk->sk_v6_daddr;
		tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
		tw->tw_tclass = np->tclass;
-		tw->tw_flowlabel = np->flow_label >> 12;
+		tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK);
		tw->tw_ipv6only = sk->sk_ipv6only;
	}
#endif
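The flow-label field is kept in network byte order, so the 20-bit label has to be masked in big-endian form and then converted, which is exactly what the be32_to_cpu(... & IPV6_FLOWLABEL_MASK) expression does. A small host-side illustration with a made-up label value; ntohl/htonl stand in for the kernel's be32 helpers:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* Low 20 bits, expressed in network byte order so it can be applied
 * directly to the on-the-wire field (mirrors IPV6_FLOWLABEL_MASK). */
#define FLOWLABEL_MASK htonl(0x000FFFFFu)

int main(void)
{
	/* Example field as a socket would store it: some traffic-class bits
	 * plus the flow label 0x12345, in network byte order. */
	uint32_t flow_label_field = htonl((0x3u << 20) | 0x12345u);

	uint32_t label = ntohl(flow_label_field & FLOWLABEL_MASK);

	printf("flow label = 0x%05x\n", label);	/* 0x12345 */
	return 0;
}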
@@ -693,6 +693,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
{
	struct rt6_info *iter = NULL;
	struct rt6_info **ins;
+	struct rt6_info **fallback_ins = NULL;
	int replace = (info->nlh &&
		       (info->nlh->nlmsg_flags & NLM_F_REPLACE));
	int add = (!info->nlh ||
@@ -716,8 +717,13 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
			    (info->nlh->nlmsg_flags & NLM_F_EXCL))
				return -EEXIST;
			if (replace) {
-				found++;
-				break;
+				if (rt_can_ecmp == rt6_qualify_for_ecmp(iter)) {
+					found++;
+					break;
+				}
+				if (rt_can_ecmp)
+					fallback_ins = fallback_ins ?: ins;
+				goto next_iter;
			}

			if (iter->dst.dev == rt->dst.dev &&
@@ -753,9 +759,17 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
		if (iter->rt6i_metric > rt->rt6i_metric)
			break;

+next_iter:
		ins = &iter->dst.rt6_next;
	}

+	if (fallback_ins && !found) {
+		/* No ECMP-able route found, replace first non-ECMP one */
+		ins = fallback_ins;
+		iter = *ins;
+		found++;
+	}

	/* Reset round-robin state, if necessary */
	if (ins == &fn->leaf)
		fn->rr_ptr = NULL;
@@ -815,6 +829,8 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
		}

	} else {
+		int nsiblings;

		if (!found) {
			if (add)
				goto add;
@@ -835,8 +851,27 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
			info->nl_net->ipv6.rt6_stats->fib_route_nodes++;
			fn->fn_flags |= RTN_RTINFO;
		}
+		nsiblings = iter->rt6i_nsiblings;
		fib6_purge_rt(iter, fn, info->nl_net);
		rt6_release(iter);

+		if (nsiblings) {
+			/* Replacing an ECMP route, remove all siblings */
+			ins = &rt->dst.rt6_next;
+			iter = *ins;
+			while (iter) {
+				if (rt6_qualify_for_ecmp(iter)) {
+					*ins = iter->dst.rt6_next;
+					fib6_purge_rt(iter, fn, info->nl_net);
+					rt6_release(iter);
+					nsiblings--;
+				} else {
+					ins = &iter->dst.rt6_next;
+				}
+				iter = *ins;
+			}
+			WARN_ON(nsiblings != 0);
+		}
	}

	return 0;
@@ -1300,8 +1300,10 @@ static int __ip6_append_data(struct sock *sk,

	/* If this is the first and only packet and device
	 * supports checksum offloading, let's use it.
+	 * Use transhdrlen, same as IPv4, because partial
+	 * sums only work when transhdrlen is set.
	 */
-	if (!skb && sk->sk_protocol == IPPROTO_UDP &&
+	if (transhdrlen && sk->sk_protocol == IPPROTO_UDP &&
	    length + fragheaderlen < mtu &&
	    rt->dst.dev->features & NETIF_F_V6_CSUM &&
	    !exthdrlen)
@@ -1275,6 +1275,9 @@ do_replace(struct net *net, const void __user *user, unsigned int len)
	/* overflow check */
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;
+	if (tmp.num_counters == 0)
+		return -EINVAL;

	tmp.name[sizeof(tmp.name)-1] = 0;

	newinfo = xt_alloc_table_info(tmp.size);
@@ -1822,6 +1825,9 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;
+	if (tmp.num_counters == 0)
+		return -EINVAL;

	tmp.name[sizeof(tmp.name)-1] = 0;

	newinfo = xt_alloc_table_info(tmp.size);
@@ -2504,9 +2504,9 @@ static int ip6_route_multipath(struct fib6_config *cfg, int add)
	int attrlen;
	int err = 0, last_err = 0;

+	remaining = cfg->fc_mp_len;
beginning:
	rtnh = (struct rtnexthop *)cfg->fc_mp;
-	remaining = cfg->fc_mp_len;

	/* Parse a Multipath Entry */
	while (rtnh_ok(rtnh, remaining)) {
@@ -2536,15 +2536,19 @@ static int ip6_route_multipath(struct fib6_config *cfg, int add)
			 * next hops that have been already added.
			 */
			add = 0;
+			remaining = cfg->fc_mp_len - remaining;
			goto beginning;
		}
	}
	/* Because each route is added like a single route we remove
-	 * this flag after the first nexthop (if there is a collision,
-	 * we have already fail to add the first nexthop:
-	 * fib6_add_rt2node() has reject it).
+	 * these flags after the first nexthop: if there is a collision,
+	 * we have already failed to add the first nexthop:
+	 * fib6_add_rt2node() has rejected it; when replacing, old
+	 * nexthops have been replaced by first new, the rest should
+	 * be added to it.
	 */
-	cfg->fc_nlinfo.nlh->nlmsg_flags &= ~NLM_F_EXCL;
+	cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
+					     NLM_F_REPLACE);
	rtnh = rtnh_next(rtnh, &remaining);
	}
@@ -914,7 +914,7 @@ static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
-			tw->tw_tclass, (tw->tw_flowlabel << 12));
+			tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));

	inet_twsk_put(tw);
}
@@ -731,7 +731,9 @@ static bool __udp_v6_is_mcast_sock(struct net *net, struct sock *sk,
	    (inet->inet_dport && inet->inet_dport != rmt_port) ||
	    (!ipv6_addr_any(&sk->sk_v6_daddr) &&
		    !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) ||
-	    (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
+	    (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif) ||
+	    (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) &&
+		    !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr)))
		return false;
	if (!inet6_mc_check(sk, loc_addr, rmt_addr))
		return false;
@@ -98,8 +98,7 @@ static u8 *ieee80211_wep_add_iv(struct ieee80211_local *local,

	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);

-	if (WARN_ON(skb_tailroom(skb) < IEEE80211_WEP_ICV_LEN ||
-		    skb_headroom(skb) < IEEE80211_WEP_IV_LEN))
+	if (WARN_ON(skb_headroom(skb) < IEEE80211_WEP_IV_LEN))
		return NULL;

	hdrlen = ieee80211_hdrlen(hdr->frame_control);
@@ -167,6 +166,9 @@ int ieee80211_wep_encrypt(struct ieee80211_local *local,
	size_t len;
	u8 rc4key[3 + WLAN_KEY_LEN_WEP104];

+	if (WARN_ON(skb_tailroom(skb) < IEEE80211_WEP_ICV_LEN))
+		return -1;

	iv = ieee80211_wep_add_iv(local, skb, keylen, keyidx);
	if (!iv)
		return -1;
@@ -863,6 +863,7 @@ config NETFILTER_XT_TARGET_TPROXY
	depends on NETFILTER_XTABLES
	depends on NETFILTER_ADVANCED
	depends on (IPV6 || IPV6=n)
+	depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
	depends on IP_NF_MANGLE
	select NF_DEFRAG_IPV4
	select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES
@@ -1356,6 +1357,7 @@ config NETFILTER_XT_MATCH_SOCKET
	depends on NETFILTER_ADVANCED
	depends on !NF_CONNTRACK || NF_CONNTRACK
	depends on (IPV6 || IPV6=n)
+	depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
	select NF_DEFRAG_IPV4
	select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES
	help
@@ -3823,6 +3823,9 @@ static void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net)
	cancel_work_sync(&ipvs->defense_work.work);
	unregister_net_sysctl_table(ipvs->sysctl_hdr);
	ip_vs_stop_estimator(net, &ipvs->tot_stats);

+	if (!net_eq(net, &init_net))
+		kfree(ipvs->sysctl_tbl);
}

#else
@@ -202,7 +202,7 @@ static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
 *	sES -> sES	:-)
 *	sFW -> sCW	Normal close request answered by ACK.
 *	sCW -> sCW
-*	sLA -> sTW	Last ACK detected.
+*	sLA -> sTW	Last ACK detected (RFC5961 challenged)
 *	sTW -> sTW	Retransmitted last ACK. Remain in the same state.
 *	sCL -> sCL
 */
@@ -261,7 +261,7 @@ static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
 *	sES -> sES	:-)
 *	sFW -> sCW	Normal close request answered by ACK.
 *	sCW -> sCW
-*	sLA -> sTW	Last ACK detected.
+*	sLA -> sTW	Last ACK detected (RFC5961 challenged)
 *	sTW -> sTW	Retransmitted last ACK.
 *	sCL -> sCL
 */
@@ -906,6 +906,7 @@ static int tcp_packet(struct nf_conn *ct,
				1 : ct->proto.tcp.last_win;
			ct->proto.tcp.seen[ct->proto.tcp.last_dir].td_scale =
				ct->proto.tcp.last_wscale;
+			ct->proto.tcp.last_flags &= ~IP_CT_EXP_CHALLENGE_ACK;
			ct->proto.tcp.seen[ct->proto.tcp.last_dir].flags =
				ct->proto.tcp.last_flags;
			memset(&ct->proto.tcp.seen[dir], 0,
@@ -923,7 +924,9 @@ static int tcp_packet(struct nf_conn *ct,
		 * may be in sync but we are not. In that case, we annotate
		 * the TCP options and let the packet go through. If it is a
		 * valid SYN packet, the server will reply with a SYN/ACK, and
-		 * then we'll get in sync. Otherwise, the server ignores it. */
+		 * then we'll get in sync. Otherwise, the server potentially
+		 * responds with a challenge ACK if implementing RFC5961.
+		 */
		if (index == TCP_SYN_SET && dir == IP_CT_DIR_ORIGINAL) {
			struct ip_ct_tcp_state seen = {};

@@ -939,6 +942,13 @@ static int tcp_packet(struct nf_conn *ct,
				ct->proto.tcp.last_flags |=
					IP_CT_TCP_FLAG_SACK_PERM;
			}
+			/* Mark the potential for RFC5961 challenge ACK,
+			 * this pose a special problem for LAST_ACK state
+			 * as ACK is intrepretated as ACKing last FIN.
+			 */
+			if (old_state == TCP_CONNTRACK_LAST_ACK)
+				ct->proto.tcp.last_flags |=
+					IP_CT_EXP_CHALLENGE_ACK;
		}
		spin_unlock_bh(&ct->lock);
		if (LOG_INVALID(net, IPPROTO_TCP))
@@ -970,6 +980,25 @@ static int tcp_packet(struct nf_conn *ct,
			nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
				      "nf_ct_tcp: invalid state ");
		return -NF_ACCEPT;
+	case TCP_CONNTRACK_TIME_WAIT:
+		/* RFC5961 compliance cause stack to send "challenge-ACK"
+		 * e.g. in response to spurious SYNs.  Conntrack MUST
+		 * not believe this ACK is acking last FIN.
+		 */
+		if (old_state == TCP_CONNTRACK_LAST_ACK &&
+		    index == TCP_ACK_SET &&
+		    ct->proto.tcp.last_dir != dir &&
+		    ct->proto.tcp.last_index == TCP_SYN_SET &&
+		    (ct->proto.tcp.last_flags & IP_CT_EXP_CHALLENGE_ACK)) {
+			/* Detected RFC5961 challenge ACK */
+			ct->proto.tcp.last_flags &= ~IP_CT_EXP_CHALLENGE_ACK;
+			spin_unlock_bh(&ct->lock);
+			if (LOG_INVALID(net, IPPROTO_TCP))
+				nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
+					      "nf_ct_tcp: challenge-ACK ignored ");
+			return NF_ACCEPT; /* Don't change state */
+		}
+		break;
	case TCP_CONNTRACK_CLOSE:
		if (index == TCP_RST_SET
		    && (ct->proto.tcp.seen[!dir].flags & IP_CT_TCP_FLAG_MAXACK_SET)
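The comments above carry the whole reasoning: a spurious SYN seen while the tracked connection sits in LAST_ACK arms a flag, and the peer's RFC 5961 challenge ACK must then be ignored rather than read as acking the final FIN. A toy, heavily simplified model of that rule — none of the kernel's data structures, just the same decision logic:

#include <stdbool.h>
#include <stdio.h>

enum state { LAST_ACK, TIME_WAIT };
enum pkt { PKT_SYN, PKT_ACK };

struct track {
	enum state st;
	bool expect_challenge_ack;	/* like IP_CT_EXP_CHALLENGE_ACK */
	int last_dir;
};

static void observe(struct track *t, enum pkt p, int dir)
{
	if (p == PKT_SYN && t->st == LAST_ACK) {
		t->expect_challenge_ack = true;	/* the peer may challenge this */
		t->last_dir = dir;
		return;
	}
	if (p == PKT_ACK && t->expect_challenge_ack && dir != t->last_dir) {
		t->expect_challenge_ack = false;
		printf("challenge ACK ignored, staying in LAST_ACK\n");
		return;
	}
	if (p == PKT_ACK && t->st == LAST_ACK)
		t->st = TIME_WAIT;	/* the genuine ACK of the final FIN */
}

int main(void)
{
	struct track t = { .st = LAST_ACK };
	observe(&t, PKT_SYN, 0);	/* spurious SYN from the client */
	observe(&t, PKT_ACK, 1);	/* peer's RFC 5961 challenge ACK */
	observe(&t, PKT_ACK, 0);	/* real ACK of the FIN */
	printf("state = %s\n", t.st == TIME_WAIT ? "TIME_WAIT" : "LAST_ACK");
	return 0;
}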
@@ -4472,9 +4472,9 @@ EXPORT_SYMBOL_GPL(nft_data_init);
 */
void nft_data_uninit(const struct nft_data *data, enum nft_data_types type)
{
-	switch (type) {
-	case NFT_DATA_VALUE:
+	if (type < NFT_DATA_VERDICT)
+		return;
+	switch (type) {
	case NFT_DATA_VERDICT:
		return nft_verdict_uninit(data);
	default:
@@ -1073,7 +1073,13 @@ static struct pernet_operations nfnl_log_net_ops = {

static int __init nfnetlink_log_init(void)
{
-	int status = -ENOMEM;
+	int status;
+
+	status = register_pernet_subsys(&nfnl_log_net_ops);
+	if (status < 0) {
+		pr_err("failed to register pernet ops\n");
+		goto out;
+	}

	netlink_register_notifier(&nfulnl_rtnl_notifier);
	status = nfnetlink_subsys_register(&nfulnl_subsys);
@@ -1088,28 +1094,23 @@ static int __init nfnetlink_log_init(void)
		goto cleanup_subsys;
	}

-	status = register_pernet_subsys(&nfnl_log_net_ops);
-	if (status < 0) {
-		pr_err("failed to register pernet ops\n");
-		goto cleanup_logger;
-	}
	return status;

-cleanup_logger:
-	nf_log_unregister(&nfulnl_logger);
cleanup_subsys:
	nfnetlink_subsys_unregister(&nfulnl_subsys);
cleanup_netlink_notifier:
	netlink_unregister_notifier(&nfulnl_rtnl_notifier);
+	unregister_pernet_subsys(&nfnl_log_net_ops);
+out:
	return status;
}

static void __exit nfnetlink_log_fini(void)
{
+	unregister_pernet_subsys(&nfnl_log_net_ops);
	nf_log_unregister(&nfulnl_logger);
	nfnetlink_subsys_unregister(&nfulnl_subsys);
	netlink_unregister_notifier(&nfulnl_rtnl_notifier);
-	unregister_pernet_subsys(&nfnl_log_net_ops);
}

MODULE_DESCRIPTION("netfilter userspace logging");
@@ -1317,7 +1317,13 @@ static struct pernet_operations nfnl_queue_net_ops = {

static int __init nfnetlink_queue_init(void)
{
-	int status = -ENOMEM;
+	int status;
+
+	status = register_pernet_subsys(&nfnl_queue_net_ops);
+	if (status < 0) {
+		pr_err("nf_queue: failed to register pernet ops\n");
+		goto out;
+	}

	netlink_register_notifier(&nfqnl_rtnl_notifier);
	status = nfnetlink_subsys_register(&nfqnl_subsys);
@@ -1326,19 +1332,13 @@ static int __init nfnetlink_queue_init(void)
		goto cleanup_netlink_notifier;
	}

-	status = register_pernet_subsys(&nfnl_queue_net_ops);
-	if (status < 0) {
-		pr_err("nf_queue: failed to register pernet ops\n");
-		goto cleanup_subsys;
-	}
	register_netdevice_notifier(&nfqnl_dev_notifier);
	nf_register_queue_handler(&nfqh);
	return status;

-cleanup_subsys:
-	nfnetlink_subsys_unregister(&nfqnl_subsys);
cleanup_netlink_notifier:
	netlink_unregister_notifier(&nfqnl_rtnl_notifier);
+out:
	return status;
}

@@ -1346,9 +1346,9 @@ static void __exit nfnetlink_queue_fini(void)
{
	nf_unregister_queue_handler();
	unregister_netdevice_notifier(&nfqnl_dev_notifier);
-	unregister_pernet_subsys(&nfnl_queue_net_ops);
	nfnetlink_subsys_unregister(&nfqnl_subsys);
	netlink_unregister_notifier(&nfqnl_rtnl_notifier);
+	unregister_pernet_subsys(&nfnl_queue_net_ops);

	rcu_barrier(); /* Wait for completion of call_rcu()'s */
}
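Both nfnetlink changes reorder module init so that whatever is registered first is torn down last, and every later failure unwinds everything registered before it. A generic sketch of that register/unwind pattern with made-up subsystem hooks (the function names are illustrative, not a real API):

#include <stdio.h>

static int  register_pernet(void)   { puts("pernet up");   return 0; }
static void unregister_pernet(void) { puts("pernet down"); }
static int  register_subsys(void)   { puts("subsys up");   return 0; }
static void unregister_subsys(void) { puts("subsys down"); }
static int  register_logger(void)   { puts("logger up");   return -1; /* simulate failure */ }

static int module_init_sketch(void)
{
	int status;

	/* Register the dependency-free, per-netns state first ... */
	status = register_pernet();
	if (status < 0)
		goto out;
	status = register_subsys();
	if (status < 0)
		goto cleanup_pernet;
	status = register_logger();
	if (status < 0)
		goto cleanup_subsys;
	return 0;

	/* ... and unwind in exactly the reverse order on failure. */
cleanup_subsys:
	unregister_subsys();
cleanup_pernet:
	unregister_pernet();
out:
	return status;
}

int main(void)
{
	printf("init -> %d\n", module_init_sketch());
	return 0;
}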
@@ -89,7 +89,7 @@ static inline int netlink_is_kernel(struct sock *sk)
	return nlk_sk(sk)->flags & NETLINK_KERNEL_SOCKET;
}

-struct netlink_table *nl_table;
+struct netlink_table *nl_table __read_mostly;
EXPORT_SYMBOL_GPL(nl_table);

static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);
@@ -1081,6 +1081,7 @@ static int netlink_insert(struct sock *sk, u32 portid)
	if (err) {
		if (err == -EEXIST)
			err = -EADDRINUSE;
+		nlk_sk(sk)->portid = 0;
		sock_put(sk);
	}
@@ -81,6 +81,11 @@ int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
	struct tcf_proto_ops *t;
	int rc = -ENOENT;

+	/* Wait for outstanding call_rcu()s, if any, from a
+	 * tcf_proto_ops's destroy() handler.
+	 */
+	rcu_barrier();

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head) {
		if (t == ops) {
@@ -338,7 +338,7 @@ int netdev_switch_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi,
					     fi, tos, type, nlflags,
					     tb_id);
		if (!err)
-			fi->fib_flags |= RTNH_F_EXTERNAL;
+			fi->fib_flags |= RTNH_F_OFFLOAD;
	}

	return err;
@@ -364,7 +364,7 @@ int netdev_switch_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi,
	const struct swdev_ops *ops;
	int err = 0;

-	if (!(fi->fib_flags & RTNH_F_EXTERNAL))
+	if (!(fi->fib_flags & RTNH_F_OFFLOAD))
		return 0;

	dev = netdev_switch_get_dev_by_nhs(fi);
@@ -376,7 +376,7 @@ int netdev_switch_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi,
		err = ops->swdev_fib_ipv4_del(dev, htonl(dst), dst_len,
					      fi, tos, type, tb_id);
		if (!err)
-			fi->fib_flags &= ~RTNH_F_EXTERNAL;
+			fi->fib_flags &= ~RTNH_F_OFFLOAD;
	}

	return err;