Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Always increment IPV4 ID field in encapsulated GSO packets, even when DF is set. Regression fix from Pravin B Shelar.

 2) Fix per-net subsystem initialization in netfilter conntrack, otherwise we may access dynamically allocated memory before it is actually allocated. From Gao Feng.

 3) Fix DMA buffer lengths in iwl3945 driver, from Stanislaw Gruszka.

 4) Fix race between submission of sync vs async commands in mwifiex driver, from Amitkumar Karwar.

 5) Add missing cancel of command timer in mwifiex driver, from Bing Zhao.

 6) Missing SKB free in rtlwifi USB driver, from Jussi Kivilinna.

 7) Thermal layer tries to use a genetlink multicast string that is longer than the 16 character limit. Fix it and add a BUG check to prevent this kind of thing from happening in the future. From Masatake YAMATO.

 8) Fix many bugs in the handling of the teardown of L2TP connections, UDP encapsulation instances, and sockets. From Tom Parkin.

 9) Missing socket release in IRDA, from Kees Cook.

10) Fix fec driver modular build, from Fabio Estevam.

11) Erroneous use of kfree() instead of free_netdev() in lantiq_etop, from Wei Yongjun.

12) Fix bugs in handling of queue numbers and steering rules in mlx4 driver, from Moshe Lazer, Hadar Hen Zion, and Or Gerlitz.

13) Some FOO_DIAG_MAX constants were defined off by one, fix from Andrey Vagin.

14) TCP segmentation deferral is unintentionally done too strongly, breaking ACK clocking. Fix from Eric Dumazet.

15) net_enable_timestamp() can legitimately be invoked from software interrupts, and in a way that is safe, so remove the WARN_ON(). Also from Eric Dumazet.

16) Fix use after free in VLANs, from Cong Wang.

17) Fix TCP slow start retransmit storms after SACK reneging, from Yuchung Cheng.

18) Unix socket release should mark a socket dead before NULL'ing out sock->sk, otherwise we can race. Fix from Paul Moore.

19) IPV6 addrconf code can try to free static memory, from Hong Zhiguo.

20) Fix register mis-programming, NULL pointer derefs, and wrong PHC clock frequency in IGB driver. From Lior Levy, Alex Williamson, Jiri Benc, and Jeff Kirsher.

21) skb->ip_summed logic in pch_gbe driver is reversed, breaking packet forwarding. Fix from Veaceslav Falico.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (65 commits)
  ipv4: Fix ip-header identification for gso packets.
  bonding: remove already created master sysfs link on failure
  af_unix: dont send SCM_CREDENTIAL when dest socket is NULL
  pch_gbe: fix ip_summed checksum reporting on rx
  igb: fix PHC stopping on max freq
  igb: make sensor info static
  igb: SR-IOV init reordering
  igb: Fix null pointer dereference
  igb: fix i350 anti spoofing config
  ixgbevf: don't release the soft entries
  ipv6: fix bad free of addrconf_init_net
  unix: fix a race condition in unix_release()
  tcp: undo spurious timeout after SACK reneging
  bnx2x: fix assignment of signed expression to unsigned variable
  bridge: fix crash when set mac address of br interface
  8021q: fix a potential use-after-free
  net: remove a WARN_ON() in net_enable_timestamp()
  tcp: preserve ACK clocking in TSO
  net: fix *_DIAG_MAX constants
  net/mlx4_core: Disallow releasing VF QPs which have steering rules
  ...
This commit is contained in: commit b175293ccc
@@ -15,6 +15,13 @@ amemthresh - INTEGER
 	enabled and the variable is automatically set to 2, otherwise
 	the strategy is disabled and the variable is set to 1.
 
+backup_only - BOOLEAN
+	0 - disabled (default)
+	not 0 - enabled
+
+	If set, disable the director function while the server is
+	in backup mode to avoid packet loops for DR/TUN methods.
+
 conntrack - BOOLEAN
 	0 - disabled (default)
 	not 0 - enabled
@@ -73,9 +73,11 @@ static struct usb_device_id ath3k_table[] = {
 	{ USB_DEVICE(0x03F0, 0x311D) },
 
 	/* Atheros AR3012 with sflash firmware*/
+	{ USB_DEVICE(0x0CF3, 0x0036) },
 	{ USB_DEVICE(0x0CF3, 0x3004) },
 	{ USB_DEVICE(0x0CF3, 0x3008) },
 	{ USB_DEVICE(0x0CF3, 0x311D) },
+	{ USB_DEVICE(0x0CF3, 0x817a) },
 	{ USB_DEVICE(0x13d3, 0x3375) },
 	{ USB_DEVICE(0x04CA, 0x3004) },
 	{ USB_DEVICE(0x04CA, 0x3005) },
@@ -107,9 +109,11 @@ MODULE_DEVICE_TABLE(usb, ath3k_table);
 static struct usb_device_id ath3k_blist_tbl[] = {
 
 	/* Atheros AR3012 with sflash firmware*/
+	{ USB_DEVICE(0x0CF3, 0x0036), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 },
+	{ USB_DEVICE(0x0CF3, 0x817a), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
@@ -131,9 +131,11 @@ static struct usb_device_id blacklist_table[] = {
 	{ USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE },
 
 	/* Atheros 3012 with sflash firmware */
+	{ USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
+	{ USB_DEVICE(0x0cf3, 0x817a), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
@@ -183,6 +183,11 @@ int bond_create_slave_symlinks(struct net_device *master,
 	sprintf(linkname, "slave_%s", slave->name);
 	ret = sysfs_create_link(&(master->dev.kobj), &(slave->dev.kobj),
 				linkname);
+
+	/* free the master link created earlier in case of error */
+	if (ret)
+		sysfs_remove_link(&(slave->dev.kobj), "master");
+
 	return ret;
 
 }
@@ -2139,12 +2139,12 @@ static u8 bnx2x_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap)
 			break;
 		default:
 			BNX2X_ERR("Non valid capability ID\n");
-			rval = -EINVAL;
+			rval = 1;
 			break;
 		}
 	} else {
 		DP(BNX2X_MSG_DCB, "DCB disabled\n");
-		rval = -EINVAL;
+		rval = 1;
 	}
 
 	DP(BNX2X_MSG_DCB, "capid %d:%x\n", capid, *cap);
@@ -2170,12 +2170,12 @@ static int bnx2x_dcbnl_get_numtcs(struct net_device *netdev, int tcid, u8 *num)
 			break;
 		default:
 			BNX2X_ERR("Non valid TC-ID\n");
-			rval = -EINVAL;
+			rval = 1;
 			break;
 		}
 	} else {
 		DP(BNX2X_MSG_DCB, "DCB disabled\n");
-		rval = -EINVAL;
+		rval = 1;
 	}
 
 	return rval;
@@ -2390,12 +2390,12 @@ static u8 bnx2x_dcbnl_get_featcfg(struct net_device *netdev, int featid,
 			break;
 		default:
 			BNX2X_ERR("Non valid featrue-ID\n");
-			rval = -EINVAL;
+			rval = 1;
 			break;
 		}
 	} else {
 		DP(BNX2X_MSG_DCB, "DCB disabled\n");
-		rval = -EINVAL;
+		rval = 1;
 	}
 
 	return rval;
@@ -2431,12 +2431,12 @@ static u8 bnx2x_dcbnl_set_featcfg(struct net_device *netdev, int featid,
 			break;
 		default:
 			BNX2X_ERR("Non valid featrue-ID\n");
-			rval = -EINVAL;
+			rval = 1;
 			break;
 		}
 	} else {
 		DP(BNX2X_MSG_DCB, "dcbnl call not valid\n");
-		rval = -EINVAL;
+		rval = 1;
 	}
 
 	return rval;
@@ -1332,7 +1332,7 @@ static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
 static void fec_enet_free_buffers(struct net_device *ndev)
 {
 	struct fec_enet_private *fep = netdev_priv(ndev);
-	int i;
+	unsigned int i;
 	struct sk_buff *skb;
 	struct bufdesc	*bdp;
 
@@ -1356,7 +1356,7 @@ static void fec_enet_free_buffers(struct net_device *ndev)
 static int fec_enet_alloc_buffers(struct net_device *ndev)
 {
 	struct fec_enet_private *fep = netdev_priv(ndev);
-	int i;
+	unsigned int i;
 	struct sk_buff *skb;
 	struct bufdesc	*bdp;
 
@@ -1598,7 +1598,7 @@ static int fec_enet_init(struct net_device *ndev)
 	struct fec_enet_private *fep = netdev_priv(ndev);
 	struct bufdesc *cbd_base;
 	struct bufdesc *bdp;
-	int i;
+	unsigned int i;
 
 	/* Allocate memory for buffer descriptors. */
 	cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma,
@@ -128,6 +128,7 @@ void fec_ptp_start_cyclecounter(struct net_device *ndev)
 
 	spin_unlock_irqrestore(&fep->tmreg_lock, flags);
 }
+EXPORT_SYMBOL(fec_ptp_start_cyclecounter);
 
 /**
  * fec_ptp_adjfreq - adjust ptp cycle frequency
@@ -318,6 +319,7 @@ int fec_ptp_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
 	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
 	    -EFAULT : 0;
 }
+EXPORT_SYMBOL(fec_ptp_ioctl);
 
 /**
  * fec_time_keep - call timecounter_read every second to avoid timer overrun
@@ -383,3 +385,4 @@ void fec_ptp_init(struct net_device *ndev, struct platform_device *pdev)
 		pr_info("registered PHC device on %s\n", ndev->name);
 	}
 }
+EXPORT_SYMBOL(fec_ptp_init);
@@ -1818,27 +1818,32 @@ static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw)
  **/
 void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf)
 {
-	u32 dtxswc;
+	u32 reg_val, reg_offset;
 
 	switch (hw->mac.type) {
 	case e1000_82576:
+		reg_offset = E1000_DTXSWC;
+		break;
 	case e1000_i350:
-		dtxswc = rd32(E1000_DTXSWC);
-		if (enable) {
-			dtxswc |= (E1000_DTXSWC_MAC_SPOOF_MASK |
-				   E1000_DTXSWC_VLAN_SPOOF_MASK);
-			/* The PF can spoof - it has to in order to
-			 * support emulation mode NICs */
-			dtxswc ^= (1 << pf | 1 << (pf + MAX_NUM_VFS));
-		} else {
-			dtxswc &= ~(E1000_DTXSWC_MAC_SPOOF_MASK |
-				    E1000_DTXSWC_VLAN_SPOOF_MASK);
-		}
-		wr32(E1000_DTXSWC, dtxswc);
+		reg_offset = E1000_TXSWC;
 		break;
 	default:
-		break;
+		return;
 	}
+
+	reg_val = rd32(reg_offset);
+	if (enable) {
+		reg_val |= (E1000_DTXSWC_MAC_SPOOF_MASK |
+			    E1000_DTXSWC_VLAN_SPOOF_MASK);
+		/* The PF can spoof - it has to in order to
+		 * support emulation mode NICs
+		 */
+		reg_val ^= (1 << pf | 1 << (pf + MAX_NUM_VFS));
+	} else {
+		reg_val &= ~(E1000_DTXSWC_MAC_SPOOF_MASK |
+			     E1000_DTXSWC_VLAN_SPOOF_MASK);
+	}
+	wr32(reg_offset, reg_val);
 }
 
 /**
@@ -39,7 +39,7 @@
 #include <linux/pci.h>
 
 #ifdef CONFIG_IGB_HWMON
-struct i2c_board_info i350_sensor_info = {
+static struct i2c_board_info i350_sensor_info = {
 	I2C_BOARD_INFO("i350bb", (0Xf8 >> 1)),
 };
 
@@ -2542,8 +2542,8 @@ static void igb_probe_vfs(struct igb_adapter *adapter)
 	if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
 		return;
 
-	igb_enable_sriov(pdev, max_vfs);
 	pci_sriov_set_totalvfs(pdev, 7);
+	igb_enable_sriov(pdev, max_vfs);
 
 #endif /* CONFIG_PCI_IOV */
 }
@@ -2652,7 +2652,7 @@ static int igb_sw_init(struct igb_adapter *adapter)
 	if (max_vfs > 7) {
 		dev_warn(&pdev->dev,
 			 "Maximum of 7 VFs per PF, using max\n");
-		adapter->vfs_allocated_count = 7;
+		max_vfs = adapter->vfs_allocated_count = 7;
 	} else
 		adapter->vfs_allocated_count = max_vfs;
 	if (adapter->vfs_allocated_count)
@@ -740,7 +740,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
 	case e1000_82576:
 		snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
 		adapter->ptp_caps.owner = THIS_MODULE;
-		adapter->ptp_caps.max_adj = 1000000000;
+		adapter->ptp_caps.max_adj = 999999881;
 		adapter->ptp_caps.n_ext_ts = 0;
 		adapter->ptp_caps.pps = 0;
 		adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82576;
@@ -944,9 +944,17 @@ static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
 		free_irq(adapter->msix_entries[vector].vector,
 			 adapter->q_vector[vector]);
 	}
-	pci_disable_msix(adapter->pdev);
-	kfree(adapter->msix_entries);
-	adapter->msix_entries = NULL;
+	/* This failure is non-recoverable - it indicates the system is
+	 * out of MSIX vector resources and the VF driver cannot run
+	 * without them.  Set the number of msix vectors to zero
+	 * indicating that not enough can be allocated.  The error
+	 * will be returned to the user indicating device open failed.
+	 * Any further attempts to force the driver to open will also
+	 * fail.  The only way to recover is to unload the driver and
+	 * reload it again.  If the system has recovered some MSIX
+	 * vectors then it may succeed.
+	 */
+	adapter->num_msix_vectors = 0;
 	return err;
 }
 
@@ -2572,6 +2580,15 @@ static int ixgbevf_open(struct net_device *netdev)
 	struct ixgbe_hw *hw = &adapter->hw;
 	int err;
 
+	/* A previous failure to open the device because of a lack of
+	 * available MSIX vector resources may have reset the number
+	 * of msix vectors variable to zero.  The only way to recover
+	 * is to unload/reload the driver and hope that the system has
+	 * been able to recover some MSIX vector resources.
+	 */
+	if (!adapter->num_msix_vectors)
+		return -ENOMEM;
+
 	/* disallow open during test */
 	if (test_bit(__IXGBEVF_TESTING, &adapter->state))
 		return -EBUSY;
@@ -2628,7 +2645,6 @@ static int ixgbevf_open(struct net_device *netdev)
 
 err_req_irq:
 	ixgbevf_down(adapter);
-	ixgbevf_free_irq(adapter);
 err_setup_rx:
 	ixgbevf_free_all_rx_resources(adapter);
 err_setup_tx:
@@ -769,7 +769,7 @@ ltq_etop_probe(struct platform_device *pdev)
 	return 0;
 
 err_free:
-	kfree(dev);
+	free_netdev(dev);
 err_out:
 	return err;
 }
@@ -1637,6 +1637,17 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
 	/* Flush multicast filter */
 	mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);
 
+	/* Remove flow steering rules for the port*/
+	if (mdev->dev->caps.steering_mode ==
+	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
+		ASSERT_RTNL();
+		list_for_each_entry_safe(flow, tmp_flow,
+					 &priv->ethtool_list, list) {
+			mlx4_flow_detach(mdev->dev, flow->id);
+			list_del(&flow->list);
+		}
+	}
+
 	mlx4_en_destroy_drop_qp(priv);
 
 	/* Free TX Rings */
@@ -1657,17 +1668,6 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
 	if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN))
 		mdev->mac_removed[priv->port] = 1;
 
-	/* Remove flow steering rules for the port*/
-	if (mdev->dev->caps.steering_mode ==
-	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
-		ASSERT_RTNL();
-		list_for_each_entry_safe(flow, tmp_flow,
-					 &priv->ethtool_list, list) {
-			mlx4_flow_detach(mdev->dev, flow->id);
-			list_del(&flow->list);
-		}
-	}
-
 	/* Free RX Rings */
 	for (i = 0; i < priv->rx_ring_num; i++) {
 		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
@@ -771,7 +771,7 @@ int mlx4_MAP_EQ_wrapper(struct mlx4_dev *dev, int slave,
 	struct mlx4_slave_event_eq_info *event_eq =
 		priv->mfunc.master.slave_state[slave].event_eq;
 	u32 in_modifier = vhcr->in_modifier;
-	u32 eqn = in_modifier & 0x1FF;
+	u32 eqn = in_modifier & 0x3FF;
 	u64 in_param = vhcr->in_param;
 	int err = 0;
 	int i;
@@ -99,6 +99,7 @@ struct res_qp {
 	struct list_head	mcg_list;
 	spinlock_t		mcg_spl;
 	int			local_qpn;
+	atomic_t		ref_count;
 };
 
 enum res_mtt_states {
@@ -197,6 +198,7 @@ enum res_fs_rule_states {
 
 struct res_fs_rule {
 	struct res_common	com;
+	int			qpn;
 };
 
 static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
@@ -355,7 +357,7 @@ static int mpt_mask(struct mlx4_dev *dev)
 	return dev->caps.num_mpts - 1;
 }
 
-static void *find_res(struct mlx4_dev *dev, int res_id,
+static void *find_res(struct mlx4_dev *dev, u64 res_id,
 		      enum mlx4_resource type)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
@@ -447,6 +449,7 @@ static struct res_common *alloc_qp_tr(int id)
 	ret->local_qpn = id;
 	INIT_LIST_HEAD(&ret->mcg_list);
 	spin_lock_init(&ret->mcg_spl);
+	atomic_set(&ret->ref_count, 0);
 
 	return &ret->com;
 }
@@ -554,7 +557,7 @@ static struct res_common *alloc_xrcdn_tr(int id)
 	return &ret->com;
 }
 
-static struct res_common *alloc_fs_rule_tr(u64 id)
+static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
 {
 	struct res_fs_rule *ret;
 
@@ -564,7 +567,7 @@ static struct res_common *alloc_fs_rule_tr(u64 id)
 
 	ret->com.res_id = id;
 	ret->com.state = RES_FS_RULE_ALLOCATED;
-
+	ret->qpn = qpn;
 	return &ret->com;
 }
 
@@ -602,7 +605,7 @@ static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
 		ret = alloc_xrcdn_tr(id);
 		break;
 	case RES_FS_RULE:
-		ret = alloc_fs_rule_tr(id);
+		ret = alloc_fs_rule_tr(id, extra);
 		break;
 	default:
 		return NULL;
@@ -671,10 +674,14 @@ static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
 
 static int remove_qp_ok(struct res_qp *res)
 {
-	if (res->com.state == RES_QP_BUSY)
+	if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
+	    !list_empty(&res->mcg_list)) {
+		pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
+		       res->com.state, atomic_read(&res->ref_count));
 		return -EBUSY;
-	else if (res->com.state != RES_QP_RESERVED)
+	} else if (res->com.state != RES_QP_RESERVED) {
 		return -EPERM;
+	}
 
 	return 0;
 }
@@ -3124,6 +3131,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
 	struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
 	int err;
 	int qpn;
+	struct res_qp *rqp;
 	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
 	struct _rule_hw  *rule_header;
 	int header_id;
@@ -3134,7 +3142,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
 
 	ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
 	qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
-	err = get_res(dev, slave, qpn, RES_QP, NULL);
+	err = get_res(dev, slave, qpn, RES_QP, &rqp);
 	if (err) {
 		pr_err("Steering rule with qpn 0x%x rejected.\n", qpn);
 		return err;
@@ -3175,14 +3183,16 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
 	if (err)
 		goto err_put;
 
-	err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, 0);
+	err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
 	if (err) {
 		mlx4_err(dev, "Fail to add flow steering resources.\n ");
 		/* detach rule*/
 		mlx4_cmd(dev, vhcr->out_param, 0, 0,
 			 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
 			 MLX4_CMD_NATIVE);
+		goto err_put;
 	}
+	atomic_inc(&rqp->ref_count);
 err_put:
 	put_res(dev, slave, qpn, RES_QP);
 	return err;
@@ -3195,20 +3205,35 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
 			 struct mlx4_cmd_info *cmd)
 {
 	int err;
+	struct res_qp *rqp;
+	struct res_fs_rule *rrule;
 
 	if (dev->caps.steering_mode !=
 	    MLX4_STEERING_MODE_DEVICE_MANAGED)
 		return -EOPNOTSUPP;
 
+	err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
+	if (err)
+		return err;
+	/* Release the rule form busy state before removal */
+	put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
+	err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
+	if (err)
+		return err;
+
 	err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
 	if (err) {
 		mlx4_err(dev, "Fail to remove flow steering resources.\n ");
-		return err;
+		goto out;
 	}
 
 	err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
 		       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
 		       MLX4_CMD_NATIVE);
+	if (!err)
+		atomic_dec(&rqp->ref_count);
+out:
+	put_res(dev, slave, rrule->qpn, RES_QP);
 	return err;
 }
 
@@ -3806,6 +3831,7 @@ void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
 	mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
 	/*VLAN*/
 	rem_slave_macs(dev, slave);
+	rem_slave_fs_rule(dev, slave);
 	rem_slave_qps(dev, slave);
 	rem_slave_srqs(dev, slave);
 	rem_slave_cqs(dev, slave);
@@ -3814,6 +3840,5 @@ void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
 	rem_slave_mtts(dev, slave);
 	rem_slave_counters(dev, slave);
 	rem_slave_xrcdns(dev, slave);
-	rem_slave_fs_rule(dev, slave);
 	mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
 }
@@ -1472,7 +1472,8 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
 	}
 	platform_set_drvdata(pdev, ndev);
 
-	if (lpc_mii_init(pldat) != 0)
+	ret = lpc_mii_init(pldat);
+	if (ret)
 		goto err_out_unregister_netdev;
 
 	netdev_info(ndev, "LPC mac at 0x%08x irq %d\n",
@@ -1726,9 +1726,9 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
 
 			skb->protocol = eth_type_trans(skb, netdev);
 			if (tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK)
-				skb->ip_summed = CHECKSUM_NONE;
+				skb->ip_summed = CHECKSUM_UNNECESSARY;
 			else
-				skb->ip_summed = CHECKSUM_UNNECESSARY;
+				skb->ip_summed = CHECKSUM_NONE;
 
 			napi_gro_receive(&adapter->napi, skb);
 			(*work_done)++;
@@ -2220,6 +2220,7 @@ static void sh_eth_tsu_init(struct sh_eth_private *mdp)
 /* MDIO bus release function */
 static int sh_mdio_release(struct net_device *ndev)
 {
+	struct sh_eth_private *mdp = netdev_priv(ndev);
 	struct mii_bus *bus = dev_get_drvdata(&ndev->dev);
 
 	/* unregister mdio bus */
@@ -2234,6 +2235,9 @@ static int sh_mdio_release(struct net_device *ndev)
 	/* free bitbang info */
 	free_mdio_bitbang(bus);
 
+	/* free bitbang memory */
+	kfree(mdp->bitbang);
+
 	return 0;
 }
 
@@ -2262,6 +2266,7 @@ static int sh_mdio_init(struct net_device *ndev, int id,
 	bitbang->ctrl.ops = &bb_ops;
 
 	/* MII controller setting */
+	mdp->bitbang = bitbang;
 	mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
 	if (!mdp->mii_bus) {
 		ret = -ENOMEM;
@@ -2441,6 +2446,11 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
 		}
 		mdp->tsu_addr = ioremap(rtsu->start,
 					resource_size(rtsu));
+		if (mdp->tsu_addr == NULL) {
+			ret = -ENOMEM;
+			dev_err(&pdev->dev, "TSU ioremap failed.\n");
+			goto out_release;
+		}
 		mdp->port = devno % 2;
 		ndev->features = NETIF_F_HW_VLAN_FILTER;
 	}
@@ -705,6 +705,7 @@ struct sh_eth_private {
 	const u16 *reg_offset;
 	void __iomem *addr;
 	void __iomem *tsu_addr;
+	struct bb_info *bitbang;
 	u32 num_rx_ring;
 	u32 num_tx_ring;
 	dma_addr_t rx_desc_dma;
@@ -1364,7 +1364,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
 		struct platform_device *mdio;
 
 		parp = of_get_property(slave_node, "phy_id", &lenp);
-		if ((parp == NULL) && (lenp != (sizeof(void *) * 2))) {
+		if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) {
 			pr_err("Missing slave[%d] phy_id property\n", i);
 			ret = -EINVAL;
 			goto error_ret;
@@ -1023,6 +1023,7 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
 		  AR_PHY_AGC_CONTROL_FLTR_CAL   |
 		  AR_PHY_AGC_CONTROL_PKDET_CAL;
 
+	/* Use chip chainmask only for calibration */
 	ar9003_hw_set_chain_masks(ah, ah->caps.rx_chainmask, ah->caps.tx_chainmask);
 
 	if (rtt) {
@@ -1150,6 +1151,9 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
 			ar9003_hw_rtt_disable(ah);
 	}
 
+	/* Revert chainmask to runtime parameters */
+	ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask);
+
 	/* Initialize list pointers */
 	ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL;
 
@@ -28,9 +28,9 @@ void ath_tx_complete_poll_work(struct work_struct *work)
 	int i;
 	bool needreset = false;
 
-	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
-		if (ATH_TXQ_SETUP(sc, i)) {
-			txq = &sc->tx.txq[i];
+	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+		txq = sc->tx.txq_map[i];
+
 		ath_txq_lock(sc, txq);
 		if (txq->axq_depth) {
 			if (txq->axq_tx_inprogress) {
@@ -475,6 +475,7 @@ il3945_tx_skb(struct il_priv *il,
 	dma_addr_t txcmd_phys;
 	int txq_id = skb_get_queue_mapping(skb);
 	u16 len, idx, hdr_len;
+	u16 firstlen, secondlen;
 	u8 id;
 	u8 unicast;
 	u8 sta_id;
@@ -589,21 +590,22 @@ il3945_tx_skb(struct il_priv *il,
 	len =
 	    sizeof(struct il3945_tx_cmd) + sizeof(struct il_cmd_header) +
 	    hdr_len;
-	len = (len + 3) & ~3;
+	firstlen = (len + 3) & ~3;
 
 	/* Physical address of this Tx command's header (not MAC header!),
 	 * within command buffer array. */
 	txcmd_phys =
-	    pci_map_single(il->pci_dev, &out_cmd->hdr, len, PCI_DMA_TODEVICE);
+	    pci_map_single(il->pci_dev, &out_cmd->hdr, firstlen,
+			   PCI_DMA_TODEVICE);
 	if (unlikely(pci_dma_mapping_error(il->pci_dev, txcmd_phys)))
 		goto drop_unlock;
 
 	/* Set up TFD's 2nd entry to point directly to remainder of skb,
 	 * if any (802.11 null frames have no payload). */
-	len = skb->len - hdr_len;
-	if (len) {
+	secondlen = skb->len - hdr_len;
+	if (secondlen > 0) {
 		phys_addr =
-		    pci_map_single(il->pci_dev, skb->data + hdr_len, len,
+		    pci_map_single(il->pci_dev, skb->data + hdr_len, secondlen,
 				   PCI_DMA_TODEVICE);
 		if (unlikely(pci_dma_mapping_error(il->pci_dev, phys_addr)))
 			goto drop_unlock;
@@ -611,12 +613,12 @@ il3945_tx_skb(struct il_priv *il,
 
 	/* Add buffer containing Tx command and MAC(!) header to TFD's
 	 * first entry */
-	il->ops->txq_attach_buf_to_tfd(il, txq, txcmd_phys, len, 1, 0);
+	il->ops->txq_attach_buf_to_tfd(il, txq, txcmd_phys, firstlen, 1, 0);
 	dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
-	dma_unmap_len_set(out_meta, len, len);
-	if (len)
-		il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, len, 0,
-					       U32_PAD(len));
+	dma_unmap_len_set(out_meta, len, firstlen);
+	if (secondlen > 0)
+		il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, secondlen, 0,
+					       U32_PAD(secondlen));
 
 	if (!ieee80211_has_morefrags(hdr->frame_control)) {
 		txq->need_update = 1;
@@ -157,6 +157,20 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv,
 		return -1;
 	}
 
+	cmd_code = le16_to_cpu(host_cmd->command);
+	cmd_size = le16_to_cpu(host_cmd->size);
+
+	if (adapter->hw_status == MWIFIEX_HW_STATUS_RESET &&
+	    cmd_code != HostCmd_CMD_FUNC_SHUTDOWN &&
+	    cmd_code != HostCmd_CMD_FUNC_INIT) {
+		dev_err(adapter->dev,
+			"DNLD_CMD: FW in reset state, ignore cmd %#x\n",
+			cmd_code);
+		mwifiex_complete_cmd(adapter, cmd_node);
+		mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
+		return -1;
+	}
+
 	/* Set command sequence number */
 	adapter->seq_num++;
 	host_cmd->seq_num = cpu_to_le16(HostCmd_SET_SEQ_NO_BSS_INFO
@@ -168,9 +182,6 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv,
 	adapter->curr_cmd = cmd_node;
 	spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
 
-	cmd_code = le16_to_cpu(host_cmd->command);
-	cmd_size = le16_to_cpu(host_cmd->size);
-
 	/* Adjust skb length */
 	if (cmd_node->cmd_skb->len > cmd_size)
 		/*
@@ -484,8 +495,6 @@ int mwifiex_send_cmd_sync(struct mwifiex_private *priv, uint16_t cmd_no,
 
 	ret = mwifiex_send_cmd_async(priv, cmd_no, cmd_action, cmd_oid,
 				     data_buf);
-	if (!ret)
-		ret = mwifiex_wait_queue_complete(adapter);
 
 	return ret;
 }
@@ -588,9 +597,10 @@ int mwifiex_send_cmd_async(struct mwifiex_private *priv, uint16_t cmd_no,
 	if (cmd_no == HostCmd_CMD_802_11_SCAN) {
 		mwifiex_queue_scan_cmd(priv, cmd_node);
 	} else {
-		adapter->cmd_queued = cmd_node;
 		mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, true);
 		queue_work(adapter->workqueue, &adapter->main_work);
+		if (cmd_node->wait_q_enabled)
+			ret = mwifiex_wait_queue_complete(adapter, cmd_node);
 	}
 
 	return ret;
@@ -709,6 +709,14 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter)
 		return ret;
 	}
 
+	/* cancel current command */
+	if (adapter->curr_cmd) {
+		dev_warn(adapter->dev, "curr_cmd is still in processing\n");
+		del_timer(&adapter->cmd_timer);
+		mwifiex_insert_cmd_to_free_q(adapter, adapter->curr_cmd);
+		adapter->curr_cmd = NULL;
+	}
+
 	/* shut down mwifiex */
 	dev_dbg(adapter->dev, "info: shutdown mwifiex...\n");
 
@@ -723,7 +723,6 @@ struct mwifiex_adapter {
 	u16 cmd_wait_q_required;
 	struct mwifiex_wait_queue cmd_wait_q;
 	u8 scan_wait_q_woken;
-	struct cmd_ctrl_node *cmd_queued;
 	spinlock_t queue_lock;		/* lock for tx queues */
 	struct completion fw_load;
 	u8 country_code[IEEE80211_COUNTRY_STRING_LEN];
@@ -1018,7 +1017,8 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv,
 			struct mwifiex_multicast_list *mcast_list);
 int mwifiex_copy_mcast_addr(struct mwifiex_multicast_list *mlist,
 			    struct net_device *dev);
-int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter);
+int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter,
+				struct cmd_ctrl_node *cmd_queued);
 int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
 		      struct cfg80211_ssid *req_ssid);
 int mwifiex_cancel_hs(struct mwifiex_private *priv, int cmd_type);
@@ -1388,10 +1388,13 @@ int mwifiex_scan_networks(struct mwifiex_private *priv,
 			list_del(&cmd_node->list);
 			spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
 					       flags);
-			adapter->cmd_queued = cmd_node;
 			mwifiex_insert_cmd_to_pending_q(adapter, cmd_node,
 							true);
 			queue_work(adapter->workqueue, &adapter->main_work);
+
+			/* Perform internal scan synchronously */
+			if (!priv->scan_request)
+				mwifiex_wait_queue_complete(adapter, cmd_node);
 		} else {
 			spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
 					       flags);
@@ -1946,9 +1949,6 @@ int mwifiex_request_scan(struct mwifiex_private *priv,
 		/* Normal scan */
 		ret = mwifiex_scan_networks(priv, NULL);
 
-	if (!ret)
-		ret = mwifiex_wait_queue_complete(priv->adapter);
-
 	up(&priv->async_sem);
 
 	return ret;
@@ -54,16 +54,10 @@ int mwifiex_copy_mcast_addr(struct mwifiex_multicast_list *mlist,
  * This function waits on a cmd wait queue. It also cancels the pending
  * request after waking up, in case of errors.
  */
-int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter)
+int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter,
+				struct cmd_ctrl_node *cmd_queued)
 {
 	int status;
-	struct cmd_ctrl_node *cmd_queued;
-
-	if (!adapter->cmd_queued)
-		return 0;
-
-	cmd_queued = adapter->cmd_queued;
-	adapter->cmd_queued = NULL;
 
 	dev_dbg(adapter->dev, "cmd pending\n");
 	atomic_inc(&adapter->cmd_pending);
@@ -851,6 +851,7 @@ static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb,
 	if (unlikely(!_urb)) {
 		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
 			 "Can't allocate urb. Drop skb!\n");
+		kfree_skb(skb);
 		return;
 	}
 	_rtl_submit_tx_urb(hw, _urb);
@@ -44,7 +44,7 @@
 /* Adding event notification support elements */
 #define THERMAL_GENL_FAMILY_NAME		"thermal_event"
 #define THERMAL_GENL_VERSION			0x01
-#define THERMAL_GENL_MCAST_GROUP_NAME		"thermal_mc_group"
+#define THERMAL_GENL_MCAST_GROUP_NAME		"thermal_mc_grp"
 
 /* Default Thermal Governor */
 #if defined(CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE)
@@ -68,6 +68,7 @@ struct udp_sock {
 	 * For encapsulation sockets.
 	 */
 	int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
+	void (*encap_destroy)(struct sock *sk);
 };
 
 static inline struct udp_sock *udp_sk(const struct sock *sk)
@@ -9,6 +9,7 @@ struct flow_keys {
 		__be32 ports;
 		__be16 port16[2];
 	};
+	u16 thoff;
 	u8 ip_proto;
 };
 
@@ -976,6 +976,7 @@ struct netns_ipvs {
 	int			sysctl_sync_retries;
 	int			sysctl_nat_icmp_send;
 	int			sysctl_pmtu_disc;
+	int			sysctl_backup_only;
 
 	/* ip_vs_lblc */
 	int			sysctl_lblc_expiration;
@@ -1067,6 +1068,12 @@ static inline int sysctl_pmtu_disc(struct netns_ipvs *ipvs)
 	return ipvs->sysctl_pmtu_disc;
 }
 
+static inline int sysctl_backup_only(struct netns_ipvs *ipvs)
+{
+	return ipvs->sync_state & IP_VS_STATE_BACKUP &&
+	       ipvs->sysctl_backup_only;
+}
+
 #else
 
 static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs)
@@ -1114,6 +1121,11 @@ static inline int sysctl_pmtu_disc(struct netns_ipvs *ipvs)
 	return 1;
 }
 
+static inline int sysctl_backup_only(struct netns_ipvs *ipvs)
+{
+	return 0;
+}
+
 #endif
 
 /*
@@ -77,9 +77,6 @@ static inline void tunnel_ip_select_ident(struct sk_buff *skb,
 {
 	struct iphdr *iph = ip_hdr(skb);
 
-	if (iph->frag_off & htons(IP_DF))
-		iph->id = 0;
-	else {
-		/* Use inner packet iph-id if possible. */
-		if (skb->protocol == htons(ETH_P_IP) && old_iph->id)
-			iph->id = old_iph->id;
+	/* Use inner packet iph-id if possible. */
+	if (skb->protocol == htons(ETH_P_IP) && old_iph->id)
+		iph->id = old_iph->id;
@@ -87,5 +84,4 @@ static inline void tunnel_ip_select_ident(struct sk_buff *skb,
 		__ip_select_ident(iph, dst,
 				  (skb_shinfo(skb)->gso_segs ?: 1) - 1);
-	}
 }
 #endif
@@ -33,9 +33,11 @@ enum {
 	PACKET_DIAG_TX_RING,
 	PACKET_DIAG_FANOUT,
 
-	PACKET_DIAG_MAX,
+	__PACKET_DIAG_MAX,
 };
 
+#define PACKET_DIAG_MAX (__PACKET_DIAG_MAX - 1)
+
 struct packet_diag_info {
 	__u32	pdi_index;
 	__u32	pdi_version;
@@ -39,9 +39,11 @@ enum {
 	UNIX_DIAG_MEMINFO,
 	UNIX_DIAG_SHUTDOWN,
 
-	UNIX_DIAG_MAX,
+	__UNIX_DIAG_MAX,
 };
 
+#define UNIX_DIAG_MAX (__UNIX_DIAG_MAX - 1)
+
 struct unix_diag_vfs {
 	__u32	udiag_vfs_ino;
 	__u32	udiag_vfs_dev;
@@ -86,13 +86,6 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
 
 	grp = &vlan_info->grp;
 
-	/* Take it out of our own structures, but be sure to interlock with
-	 * HW accelerating devices or SW vlan input packet processing if
-	 * VLAN is not 0 (leave it there for 802.1p).
-	 */
-	if (vlan_id)
-		vlan_vid_del(real_dev, vlan_id);
-
 	grp->nr_vlan_devs--;
 
 	if (vlan->flags & VLAN_FLAG_MVRP)
@@ -114,6 +107,13 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
 		vlan_gvrp_uninit_applicant(real_dev);
 	}
 
+	/* Take it out of our own structures, but be sure to interlock with
+	 * HW accelerating devices or SW vlan input packet processing if
+	 * VLAN is not 0 (leave it there for 802.1p).
+	 */
+	if (vlan_id)
+		vlan_vid_del(real_dev, vlan_id);
+
 	/* Get rid of the vlan's reference to real_dev */
 	dev_put(real_dev);
 }
@@ -359,6 +359,7 @@ static void __sco_sock_close(struct sock *sk)
 			sco_chan_del(sk, ECONNRESET);
 		break;
 
+	case BT_CONNECT2:
 	case BT_CONNECT:
 	case BT_DISCONN:
 		sco_chan_del(sk, ECONNRESET);
@@ -423,7 +423,7 @@ static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
 			return 0;
 		br_warn(br, "adding interface %s with same address "
 		       "as a received packet\n",
-		       source->dev->name);
+		       source ? source->dev->name : br->dev->name);
 		fdb_delete(br, fdb);
 	}
 
@@ -1545,7 +1545,6 @@ void net_enable_timestamp(void)
 		return;
 	}
 #endif
-	WARN_ON(in_interrupt());
 	static_key_slow_inc(&netstamp_needed);
 }
 EXPORT_SYMBOL(net_enable_timestamp);
@@ -140,6 +140,8 @@ bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow)
 		flow->ports = *ports;
 	}
 
+	flow->thoff = (u16) nhoff;
+
 	return true;
 }
 EXPORT_SYMBOL(skb_flow_dissect);
@@ -1333,7 +1333,6 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
 			iph->frag_off |= htons(IP_MF);
 		offset += (skb->len - skb->mac_len - iph->ihl * 4);
 	} else {
-		if (!(iph->frag_off & htons(IP_DF)))
-			iph->id = htons(id++);
+		iph->id = htons(id++);
 	}
 	iph->tot_len = htons(skb->len - skb->mac_len);
@@ -1522,7 +1522,8 @@ static int __init ip_auto_config(void)
 	}
 	for (i++; i < CONF_NAMESERVERS_MAX; i++)
 		if (ic_nameservers[i] != NONE)
-			pr_cont(", nameserver%u=%pI4\n", i, &ic_nameservers[i]);
+			pr_cont(", nameserver%u=%pI4", i, &ic_nameservers[i]);
+	pr_cont("\n");
 #endif /* !SILENT */
 
 	return 0;
@@ -36,19 +36,6 @@ config NF_CONNTRACK_PROC_COMPAT
 
 	  If unsure, say Y.
 
-config IP_NF_QUEUE
-	tristate "IP Userspace queueing via NETLINK (OBSOLETE)"
-	depends on NETFILTER_ADVANCED
-	help
-	  Netfilter has the ability to queue packets to user space: the
-	  netlink device can be used to access them using this driver.
-
-	  This option enables the old IPv4-only "ip_queue" implementation
-	  which has been obsoleted by the new "nfnetlink_queue" code (see
-	  CONFIG_NETFILTER_NETLINK_QUEUE).
-
-	  To compile it as a module, choose M here.  If unsure, say N.
-
 config IP_NF_IPTABLES
 	tristate "IP tables support (required for filtering/masq/NAT)"
 	default m if NETFILTER_ADVANCED=n
@@ -2059,11 +2059,8 @@ void tcp_enter_loss(struct sock *sk, int how)
 	if (tcp_is_reno(tp))
 		tcp_reset_reno_sack(tp);
 
-	if (!how) {
-		/* Push undo marker, if it was plain RTO and nothing
-		 * was retransmitted. */
-		tp->undo_marker = tp->snd_una;
-	} else {
+	tp->undo_marker = tp->snd_una;
+	if (how) {
 		tp->sacked_out = 0;
 		tp->fackets_out = 0;
 	}
@@ -1809,7 +1809,10 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
 		goto send_now;
 	}
 
-	/* Ok, it looks like it is advisable to defer. */
-	tp->tso_deferred = 1 | (jiffies << 1);
+	/* Ok, it looks like it is advisable to defer.
+	 * Do not rearm the timer if already set to not break TCP ACK clocking.
+	 */
+	if (!tp->tso_deferred)
+		tp->tso_deferred = 1 | (jiffies << 1);
 
 	return true;
@@ -1762,9 +1762,16 @@ int udp_rcv(struct sk_buff *skb)
 
 void udp_destroy_sock(struct sock *sk)
 {
+	struct udp_sock *up = udp_sk(sk);
 	bool slow = lock_sock_fast(sk);
 	udp_flush_pending_frames(sk);
 	unlock_sock_fast(sk, slow);
+	if (static_key_false(&udp_encap_needed) && up->encap_type) {
+		void (*encap_destroy)(struct sock *sk);
+		encap_destroy = ACCESS_ONCE(up->encap_destroy);
+		if (encap_destroy)
+			encap_destroy(sk);
+	}
 }
 
 /*
@@ -4784,26 +4784,20 @@ static void addrconf_sysctl_unregister(struct inet6_dev *idev)
 
 static int __net_init addrconf_init_net(struct net *net)
 {
-	int err;
+	int err = -ENOMEM;
 	struct ipv6_devconf *all, *dflt;
 
-	err = -ENOMEM;
-	all = &ipv6_devconf;
-	dflt = &ipv6_devconf_dflt;
-
-	if (!net_eq(net, &init_net)) {
-		all = kmemdup(all, sizeof(ipv6_devconf), GFP_KERNEL);
-		if (all == NULL)
-			goto err_alloc_all;
+	all = kmemdup(&ipv6_devconf, sizeof(ipv6_devconf), GFP_KERNEL);
+	if (all == NULL)
+		goto err_alloc_all;
 
-		dflt = kmemdup(dflt, sizeof(ipv6_devconf_dflt), GFP_KERNEL);
-		if (dflt == NULL)
-			goto err_alloc_dflt;
-	} else {
-		/* these will be inherited by all namespaces */
-		dflt->autoconf = ipv6_defaults.autoconf;
-		dflt->disable_ipv6 = ipv6_defaults.disable_ipv6;
-	}
+	dflt = kmemdup(&ipv6_devconf_dflt, sizeof(ipv6_devconf_dflt), GFP_KERNEL);
+	if (dflt == NULL)
+		goto err_alloc_dflt;
+
+	/* these will be inherited by all namespaces */
+	dflt->autoconf = ipv6_defaults.autoconf;
+	dflt->disable_ipv6 = ipv6_defaults.disable_ipv6;
 
 	net->ipv6.devconf_all = all;
 	net->ipv6.devconf_dflt = dflt;
@@ -114,6 +114,7 @@ ip6t_dnpt_tg(struct sk_buff *skb, const struct xt_action_param *par)
 static struct xt_target ip6t_npt_target_reg[] __read_mostly = {
 	{
 		.name		= "SNPT",
+		.table		= "mangle",
 		.target		= ip6t_snpt_tg,
 		.targetsize	= sizeof(struct ip6t_npt_tginfo),
 		.checkentry	= ip6t_npt_checkentry,
@@ -124,6 +125,7 @@ static struct xt_target ip6t_npt_target_reg[] __read_mostly = {
 	},
 	{
 		.name		= "DNPT",
+		.table		= "mangle",
 		.target		= ip6t_dnpt_tg,
 		.targetsize	= sizeof(struct ip6t_npt_tginfo),
 		.checkentry	= ip6t_npt_checkentry,
@@ -1285,10 +1285,18 @@ int udpv6_sendmsg(struct kiocb *iocb, struct sock *sk,
 
 void udpv6_destroy_sock(struct sock *sk)
 {
+	struct udp_sock *up = udp_sk(sk);
 	lock_sock(sk);
 	udp_v6_flush_pending_frames(sk);
 	release_sock(sk);
 
+	if (static_key_false(&udpv6_encap_needed) && up->encap_type) {
+		void (*encap_destroy)(struct sock *sk);
+		encap_destroy = ACCESS_ONCE(up->encap_destroy);
+		if (encap_destroy)
+			encap_destroy(sk);
+	}
+
 	inet6_destroy_sock(sk);
 }
 
@@ -2583,8 +2583,10 @@ static int irda_getsockopt(struct socket *sock, int level, int optname,
 					  NULL, NULL, NULL);
 
 		/* Check if the we got some results */
-		if (!self->cachedaddr)
-			return -EAGAIN;		/* Didn't find any devices */
+		if (!self->cachedaddr) {
+			err = -EAGAIN;		/* Didn't find any devices */
+			goto out;
+		}
 		daddr = self->cachedaddr;
 		/* Cleanup */
 		self->cachedaddr = 0;
@ -114,7 +114,6 @@ struct l2tp_net {
|
||||
|
||||
static void l2tp_session_set_header_len(struct l2tp_session *session, int version);
|
||||
static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel);
|
||||
static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel);
|
||||
|
||||
static inline struct l2tp_net *l2tp_pernet(struct net *net)
|
||||
{
|
||||
@ -192,6 +191,7 @@ struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel)
|
||||
} else {
|
||||
/* Socket is owned by kernelspace */
|
||||
sk = tunnel->sock;
|
||||
sock_hold(sk);
|
||||
}
|
||||
|
||||
out:
|
||||
@ -210,6 +210,7 @@ void l2tp_tunnel_sock_put(struct sock *sk)
|
||||
}
|
||||
sock_put(sk);
|
||||
}
|
||||
sock_put(sk);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(l2tp_tunnel_sock_put);
|
||||
|
||||
@ -373,10 +374,8 @@ static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *sk
|
||||
struct sk_buff *skbp;
|
||||
struct sk_buff *tmp;
|
||||
u32 ns = L2TP_SKB_CB(skb)->ns;
|
||||
struct l2tp_stats *sstats;
|
||||
|
||||
spin_lock_bh(&session->reorder_q.lock);
|
||||
sstats = &session->stats;
|
||||
skb_queue_walk_safe(&session->reorder_q, skbp, tmp) {
|
||||
if (L2TP_SKB_CB(skbp)->ns > ns) {
|
||||
__skb_queue_before(&session->reorder_q, skbp, skb);
|
||||
@ -384,9 +383,7 @@ static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *sk
|
||||
"%s: pkt %hu, inserted before %hu, reorder_q len=%d\n",
|
||||
session->name, ns, L2TP_SKB_CB(skbp)->ns,
|
||||
skb_queue_len(&session->reorder_q));
|
||||
u64_stats_update_begin(&sstats->syncp);
|
||||
sstats->rx_oos_packets++;
|
||||
u64_stats_update_end(&sstats->syncp);
|
||||
atomic_long_inc(&session->stats.rx_oos_packets);
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
@ -403,23 +400,16 @@ static void l2tp_recv_dequeue_skb(struct l2tp_session *session, struct sk_buff *
|
||||
{
|
||||
struct l2tp_tunnel *tunnel = session->tunnel;
|
||||
int length = L2TP_SKB_CB(skb)->length;
|
||||
struct l2tp_stats *tstats, *sstats;
|
||||
|
||||
/* We're about to requeue the skb, so return resources
|
||||
* to its current owner (a socket receive buffer).
|
||||
*/
|
||||
skb_orphan(skb);
|
||||
|
||||
tstats = &tunnel->stats;
|
||||
u64_stats_update_begin(&tstats->syncp);
|
||||
sstats = &session->stats;
|
||||
u64_stats_update_begin(&sstats->syncp);
|
||||
tstats->rx_packets++;
|
||||
tstats->rx_bytes += length;
|
||||
sstats->rx_packets++;
|
||||
sstats->rx_bytes += length;
|
||||
u64_stats_update_end(&tstats->syncp);
|
||||
u64_stats_update_end(&sstats->syncp);
|
||||
atomic_long_inc(&tunnel->stats.rx_packets);
|
||||
atomic_long_add(length, &tunnel->stats.rx_bytes);
|
||||
atomic_long_inc(&session->stats.rx_packets);
|
||||
atomic_long_add(length, &session->stats.rx_bytes);
|
||||
|
||||
if (L2TP_SKB_CB(skb)->has_seq) {
|
||||
/* Bump our Nr */
|
||||
@ -450,7 +440,6 @@ static void l2tp_recv_dequeue(struct l2tp_session *session)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
struct sk_buff *tmp;
|
||||
struct l2tp_stats *sstats;
|
||||
|
||||
/* If the pkt at the head of the queue has the nr that we
|
||||
* expect to send up next, dequeue it and any other
|
||||
@ -458,13 +447,10 @@ static void l2tp_recv_dequeue(struct l2tp_session *session)
|
||||
*/
|
||||
start:
|
||||
spin_lock_bh(&session->reorder_q.lock);
|
||||
sstats = &session->stats;
|
||||
skb_queue_walk_safe(&session->reorder_q, skb, tmp) {
|
||||
if (time_after(jiffies, L2TP_SKB_CB(skb)->expires)) {
|
||||
u64_stats_update_begin(&sstats->syncp);
|
||||
sstats->rx_seq_discards++;
|
||||
sstats->rx_errors++;
|
||||
u64_stats_update_end(&sstats->syncp);
|
||||
atomic_long_inc(&session->stats.rx_seq_discards);
|
||||
atomic_long_inc(&session->stats.rx_errors);
|
||||
l2tp_dbg(session, L2TP_MSG_SEQ,
|
||||
"%s: oos pkt %u len %d discarded (too old), waiting for %u, reorder_q_len=%d\n",
|
||||
session->name, L2TP_SKB_CB(skb)->ns,
|
||||
@ -623,7 +609,6 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
|
||||
struct l2tp_tunnel *tunnel = session->tunnel;
|
||||
int offset;
|
||||
u32 ns, nr;
|
||||
struct l2tp_stats *sstats = &session->stats;
|
||||
|
||||
/* The ref count is increased since we now hold a pointer to
|
||||
* the session. Take care to decrement the refcnt when exiting
|
||||
@ -640,9 +625,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
|
||||
"%s: cookie mismatch (%u/%u). Discarding.\n",
|
||||
tunnel->name, tunnel->tunnel_id,
|
||||
session->session_id);
|
||||
u64_stats_update_begin(&sstats->syncp);
|
||||
sstats->rx_cookie_discards++;
|
||||
u64_stats_update_end(&sstats->syncp);
|
||||
atomic_long_inc(&session->stats.rx_cookie_discards);
|
||||
goto discard;
|
||||
}
|
||||
ptr += session->peer_cookie_len;
|
||||
@ -711,9 +694,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
|
||||
l2tp_warn(session, L2TP_MSG_SEQ,
|
||||
"%s: recv data has no seq numbers when required. Discarding.\n",
|
||||
session->name);
|
||||
u64_stats_update_begin(&sstats->syncp);
|
||||
sstats->rx_seq_discards++;
|
||||
u64_stats_update_end(&sstats->syncp);
|
||||
atomic_long_inc(&session->stats.rx_seq_discards);
|
||||
goto discard;
|
||||
}
|
||||
|
||||
@ -732,9 +713,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
|
||||
l2tp_warn(session, L2TP_MSG_SEQ,
|
||||
"%s: recv data has no seq numbers when required. Discarding.\n",
|
||||
session->name);
|
||||
u64_stats_update_begin(&sstats->syncp);
|
||||
sstats->rx_seq_discards++;
|
||||
u64_stats_update_end(&sstats->syncp);
|
||||
atomic_long_inc(&session->stats.rx_seq_discards);
|
||||
goto discard;
|
||||
}
|
||||
}
|
||||
@ -788,9 +767,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
|
||||
* packets
|
||||
*/
|
||||
if (L2TP_SKB_CB(skb)->ns != session->nr) {
|
||||
u64_stats_update_begin(&sstats->syncp);
|
||||
sstats->rx_seq_discards++;
|
||||
u64_stats_update_end(&sstats->syncp);
|
||||
atomic_long_inc(&session->stats.rx_seq_discards);
|
||||
l2tp_dbg(session, L2TP_MSG_SEQ,
|
||||
"%s: oos pkt %u len %d discarded, waiting for %u, reorder_q_len=%d\n",
|
||||
session->name, L2TP_SKB_CB(skb)->ns,
|
||||
@ -816,9 +793,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
|
||||
return;
|
||||
|
||||
discard:
|
||||
u64_stats_update_begin(&sstats->syncp);
|
||||
sstats->rx_errors++;
|
||||
u64_stats_update_end(&sstats->syncp);
|
||||
atomic_long_inc(&session->stats.rx_errors);
|
||||
kfree_skb(skb);
|
||||
|
||||
if (session->deref)
|
||||
@ -828,6 +803,23 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
|
||||
}
|
||||
EXPORT_SYMBOL(l2tp_recv_common);
|
||||
|
||||
/* Drop skbs from the session's reorder_q
|
||||
*/
|
||||
int l2tp_session_queue_purge(struct l2tp_session *session)
|
||||
{
|
||||
struct sk_buff *skb = NULL;
|
||||
BUG_ON(!session);
|
||||
BUG_ON(session->magic != L2TP_SESSION_MAGIC);
|
||||
while ((skb = skb_dequeue(&session->reorder_q))) {
|
||||
atomic_long_inc(&session->stats.rx_errors);
|
||||
kfree_skb(skb);
|
||||
if (session->deref)
|
||||
(*session->deref)(session);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(l2tp_session_queue_purge);
|
||||
|
||||
/* Internal UDP receive frame. Do the real work of receiving an L2TP data frame
|
||||
* here. The skb is not on a list when we get here.
|
||||
* Returns 0 if the packet was a data packet and was successfully passed on.
|
||||
@ -843,7 +835,6 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
|
||||
u32 tunnel_id, session_id;
|
||||
u16 version;
|
||||
int length;
|
||||
struct l2tp_stats *tstats;
|
||||
|
||||
if (tunnel->sock && l2tp_verify_udp_checksum(tunnel->sock, skb))
|
||||
goto discard_bad_csum;
|
||||
@ -932,10 +923,7 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
|
||||
discard_bad_csum:
|
||||
LIMIT_NETDEBUG("%s: UDP: bad checksum\n", tunnel->name);
|
||||
UDP_INC_STATS_USER(tunnel->l2tp_net, UDP_MIB_INERRORS, 0);
|
||||
tstats = &tunnel->stats;
|
||||
u64_stats_update_begin(&tstats->syncp);
|
||||
tstats->rx_errors++;
|
||||
u64_stats_update_end(&tstats->syncp);
|
||||
atomic_long_inc(&tunnel->stats.rx_errors);
|
||||
kfree_skb(skb);
|
||||
|
||||
return 0;
|
||||
@ -1062,7 +1050,6 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
|
||||
struct l2tp_tunnel *tunnel = session->tunnel;
|
||||
unsigned int len = skb->len;
|
||||
int error;
|
||||
struct l2tp_stats *tstats, *sstats;
|
||||
|
||||
/* Debug */
|
||||
if (session->send_seq)
|
||||
@ -1091,21 +1078,15 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
|
||||
error = ip_queue_xmit(skb, fl);
|
||||
|
||||
/* Update stats */
|
||||
tstats = &tunnel->stats;
|
||||
u64_stats_update_begin(&tstats->syncp);
|
||||
sstats = &session->stats;
|
||||
u64_stats_update_begin(&sstats->syncp);
|
||||
if (error >= 0) {
|
||||
tstats->tx_packets++;
|
||||
tstats->tx_bytes += len;
|
||||
sstats->tx_packets++;
|
||||
sstats->tx_bytes += len;
|
||||
atomic_long_inc(&tunnel->stats.tx_packets);
|
||||
atomic_long_add(len, &tunnel->stats.tx_bytes);
|
||||
atomic_long_inc(&session->stats.tx_packets);
|
||||
atomic_long_add(len, &session->stats.tx_bytes);
|
||||
} else {
|
||||
tstats->tx_errors++;
|
||||
sstats->tx_errors++;
|
||||
atomic_long_inc(&tunnel->stats.tx_errors);
|
||||
atomic_long_inc(&session->stats.tx_errors);
|
||||
}
|
||||
u64_stats_update_end(&tstats->syncp);
|
||||
u64_stats_update_end(&sstats->syncp);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -1282,6 +1263,7 @@ static void l2tp_tunnel_destruct(struct sock *sk)
|
||||
/* No longer an encapsulation socket. See net/ipv4/udp.c */
|
||||
(udp_sk(sk))->encap_type = 0;
|
||||
(udp_sk(sk))->encap_rcv = NULL;
|
||||
(udp_sk(sk))->encap_destroy = NULL;
|
||||
break;
|
||||
case L2TP_ENCAPTYPE_IP:
|
||||
break;
|
||||
@ -1311,7 +1293,7 @@ static void l2tp_tunnel_destruct(struct sock *sk)
|
||||
|
||||
/* When the tunnel is closed, all the attached sessions need to go too.
|
||||
*/
|
||||
static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
|
||||
void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
|
||||
{
|
||||
int hash;
|
||||
struct hlist_node *walk;
|
||||
@ -1334,25 +1316,13 @@ static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
|
||||
|
||||
hlist_del_init(&session->hlist);
|
||||
|
||||
/* Since we should hold the sock lock while
|
||||
* doing any unbinding, we need to release the
|
||||
* lock we're holding before taking that lock.
|
||||
* Hold a reference to the sock so it doesn't
|
||||
* disappear as we're jumping between locks.
|
||||
*/
|
||||
if (session->ref != NULL)
|
||||
(*session->ref)(session);
|
||||
|
||||
write_unlock_bh(&tunnel->hlist_lock);
|
||||
|
||||
if (tunnel->version != L2TP_HDR_VER_2) {
|
||||
struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
|
||||
|
||||
spin_lock_bh(&pn->l2tp_session_hlist_lock);
|
||||
hlist_del_init_rcu(&session->global_hlist);
|
||||
spin_unlock_bh(&pn->l2tp_session_hlist_lock);
|
||||
synchronize_rcu();
|
||||
}
|
||||
__l2tp_session_unhash(session);
|
||||
l2tp_session_queue_purge(session);
|
||||
|
||||
if (session->session_close != NULL)
|
||||
(*session->session_close)(session);
|
||||
@ -1360,6 +1330,8 @@ static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
|
||||
if (session->deref != NULL)
|
||||
(*session->deref)(session);
|
||||
|
||||
l2tp_session_dec_refcount(session);
|
||||
|
||||
write_lock_bh(&tunnel->hlist_lock);
|
||||
|
||||
/* Now restart from the beginning of this hash
|
||||
@ -1372,6 +1344,17 @@ static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
|
||||
}
|
||||
write_unlock_bh(&tunnel->hlist_lock);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(l2tp_tunnel_closeall);
|
||||
|
||||
/* Tunnel socket destroy hook for UDP encapsulation */
|
||||
static void l2tp_udp_encap_destroy(struct sock *sk)
|
||||
{
|
||||
struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);
|
||||
if (tunnel) {
|
||||
l2tp_tunnel_closeall(tunnel);
|
||||
sock_put(sk);
|
||||
}
|
||||
}
|
||||
|
||||
/* Really kill the tunnel.
|
||||
* Come here only when all sessions have been cleared from the tunnel.
|
||||
@@ -1397,19 +1380,21 @@ static void l2tp_tunnel_del_work(struct work_struct *work)
 		return;

 	sock = sk->sk_socket;
-	BUG_ON(!sock);

-	/* If the tunnel socket was created directly by the kernel, use the
-	 * sk_* API to release the socket now. Otherwise go through the
-	 * inet_* layer to shut the socket down, and let userspace close it.
+	/* If the tunnel socket was created by userspace, then go through the
+	 * inet layer to shut the socket down, and let userspace close it.
+	 * Otherwise, if we created the socket directly within the kernel, use
+	 * the sk API to release it here.
 	 * In either case the tunnel resources are freed in the socket
 	 * destructor when the tunnel socket goes away.
 	 */
-	if (sock->file == NULL) {
-		kernel_sock_shutdown(sock, SHUT_RDWR);
-		sk_release_kernel(sk);
+	if (tunnel->fd >= 0) {
+		if (sock)
+			inet_shutdown(sock, 2);
 	} else {
-		inet_shutdown(sock, 2);
+		if (sock)
+			kernel_sock_shutdown(sock, SHUT_RDWR);
+		sk_release_kernel(sk);
 	}

 	l2tp_tunnel_sock_put(sk);
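The rewritten comment encodes a simple ownership rule, and the new test follows it: tunnel->fd >= 0 means userspace holds a descriptor on the tunnel socket, so the worker may only shut the socket down and must leave the final close to that descriptor; a negative fd marks a socket the kernel opened itself and must therefore also release. The rule in isolation, as a sketch with an illustrative function name:

    #include <linux/net.h>
    #include <net/sock.h>
    #include <net/inet_common.h>

    static void my_release_tunnel_sock(struct socket *sock, struct sock *sk, int fd)
    {
        if (fd >= 0) {                  /* userspace-owned socket */
            if (sock)
                inet_shutdown(sock, 2);         /* fd close frees it later */
        } else {                        /* kernel-created socket */
            if (sock)
                kernel_sock_shutdown(sock, SHUT_RDWR);
            sk_release_kernel(sk);      /* no fd will ever close it */
        }
    }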
@@ -1668,6 +1653,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
 		/* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
 		udp_sk(sk)->encap_type = UDP_ENCAP_L2TPINUDP;
 		udp_sk(sk)->encap_rcv = l2tp_udp_encap_recv;
+		udp_sk(sk)->encap_destroy = l2tp_udp_encap_destroy;
 #if IS_ENABLED(CONFIG_IPV6)
 		if (sk->sk_family == PF_INET6)
 			udpv6_encap_enable();
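encap_destroy is the new hook of the three: UDP invokes it from its socket-destroy path, so the encapsulation learns that userspace closed the tunnel fd and can tear down its own state instead of leaking sessions. A sketch of the wiring, with illustrative handler names (the udp_sk() fields are the real ones):

    #include <linux/skbuff.h>
    #include <linux/udp.h>
    #include <net/udp.h>

    static int my_encap_recv(struct sock *sk, struct sk_buff *skb); /* rx datagrams */
    static void my_encap_destroy(struct sock *sk);                  /* socket teardown */

    static void my_wire_encap_socket(struct sock *sk)
    {
        udp_sk(sk)->encap_type = UDP_ENCAP_L2TPINUDP;
        udp_sk(sk)->encap_rcv = my_encap_recv;
        udp_sk(sk)->encap_destroy = my_encap_destroy;
    }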
@@ -1723,6 +1709,7 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
  */
 int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
 {
+	l2tp_tunnel_closeall(tunnel);
 	return (false == queue_work(l2tp_wq, &tunnel->del_work));
 }
 EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
@@ -1731,37 +1718,15 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
  */
 void l2tp_session_free(struct l2tp_session *session)
 {
-	struct l2tp_tunnel *tunnel;
+	struct l2tp_tunnel *tunnel = session->tunnel;

 	BUG_ON(atomic_read(&session->ref_count) != 0);

-	tunnel = session->tunnel;
-	if (tunnel != NULL) {
+	if (tunnel) {
 		BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);
-
-		/* Delete the session from the hash */
-		write_lock_bh(&tunnel->hlist_lock);
-		hlist_del_init(&session->hlist);
-		write_unlock_bh(&tunnel->hlist_lock);
-
-		/* Unlink from the global hash if not L2TPv2 */
-		if (tunnel->version != L2TP_HDR_VER_2) {
-			struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
-
-			spin_lock_bh(&pn->l2tp_session_hlist_lock);
-			hlist_del_init_rcu(&session->global_hlist);
-			spin_unlock_bh(&pn->l2tp_session_hlist_lock);
-			synchronize_rcu();
-		}
-
 		if (session->session_id != 0)
 			atomic_dec(&l2tp_session_count);
-
 		sock_put(tunnel->sock);
-
-		/* This will delete the tunnel context if this
-		 * is the last session on the tunnel.
-		 */
 		session->tunnel = NULL;
 		l2tp_tunnel_dec_refcount(tunnel);
 	}
@@ -1772,21 +1737,52 @@ void l2tp_session_free(struct l2tp_session *session)
 }
 EXPORT_SYMBOL_GPL(l2tp_session_free);

+/* Remove an l2tp session from l2tp_core's hash lists.
+ * Provides a tidyup interface for pseudowire code which can't just route all
+ * shutdown via. l2tp_session_delete and a pseudowire-specific session_close
+ * callback.
+ */
+void __l2tp_session_unhash(struct l2tp_session *session)
+{
+	struct l2tp_tunnel *tunnel = session->tunnel;
+
+	/* Remove the session from core hashes */
+	if (tunnel) {
+		/* Remove from the per-tunnel hash */
+		write_lock_bh(&tunnel->hlist_lock);
+		hlist_del_init(&session->hlist);
+		write_unlock_bh(&tunnel->hlist_lock);
+
+		/* For L2TPv3 we have a per-net hash: remove from there, too */
+		if (tunnel->version != L2TP_HDR_VER_2) {
+			struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
+			spin_lock_bh(&pn->l2tp_session_hlist_lock);
+			hlist_del_init_rcu(&session->global_hlist);
+			spin_unlock_bh(&pn->l2tp_session_hlist_lock);
+			synchronize_rcu();
+		}
+	}
+}
+EXPORT_SYMBOL_GPL(__l2tp_session_unhash);
+
 /* This function is used by the netlink SESSION_DELETE command and by
    pseudowire modules.
  */
 int l2tp_session_delete(struct l2tp_session *session)
 {
+	if (session->ref)
+		(*session->ref)(session);
+	__l2tp_session_unhash(session);
+	l2tp_session_queue_purge(session);
 	if (session->session_close != NULL)
 		(*session->session_close)(session);
-
+	if (session->deref)
+		(*session->deref)(session);
 	l2tp_session_dec_refcount(session);
-
 	return 0;
 }
 EXPORT_SYMBOL_GPL(l2tp_session_delete);


 /* We come here whenever a session's send_seq, cookie_len or
  * l2specific_len parameters are set.
  */
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -36,16 +36,15 @@ enum {
 struct sk_buff;

 struct l2tp_stats {
-	u64			tx_packets;
-	u64			tx_bytes;
-	u64			tx_errors;
-	u64			rx_packets;
-	u64			rx_bytes;
-	u64			rx_seq_discards;
-	u64			rx_oos_packets;
-	u64			rx_errors;
-	u64			rx_cookie_discards;
-	struct u64_stats_sync	syncp;
+	atomic_long_t		tx_packets;
+	atomic_long_t		tx_bytes;
+	atomic_long_t		tx_errors;
+	atomic_long_t		rx_packets;
+	atomic_long_t		rx_bytes;
+	atomic_long_t		rx_seq_discards;
+	atomic_long_t		rx_oos_packets;
+	atomic_long_t		rx_errors;
+	atomic_long_t		rx_cookie_discards;
 };

 struct l2tp_tunnel;
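This struct change is the heart of the stats fix. The old counters were plain u64 guarded by a single u64_stats_sync, but the tx and rx paths update the same counters from different contexts, and a shared writer-side seqcount can deadlock there; atomic read-modify-write needs no writer exclusion at all. The trade-off is that atomic_long_t is only 32 bits wide on 32-bit architectures. A minimal sketch of the resulting pattern (illustrative names, not the l2tp code):

    #include <linux/atomic.h>

    struct my_stats {
        atomic_long_t rx_packets;
        atomic_long_t rx_bytes;
    };

    /* update side: safe from process, BH or IRQ context alike */
    static void my_account_rx(struct my_stats *s, unsigned int len)
    {
        atomic_long_inc(&s->rx_packets);
        atomic_long_add(len, &s->rx_bytes);
    }

    /* read side: a plain atomic load, no fetch/retry loop */
    static long my_read_rx_packets(struct my_stats *s)
    {
        return atomic_long_read(&s->rx_packets);
    }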
@@ -240,11 +239,14 @@ extern struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id);
 extern struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth);

 extern int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp);
+extern void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel);
 extern int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel);
 extern struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg);
+extern void __l2tp_session_unhash(struct l2tp_session *session);
 extern int l2tp_session_delete(struct l2tp_session *session);
 extern void l2tp_session_free(struct l2tp_session *session);
 extern void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, unsigned char *ptr, unsigned char *optr, u16 hdrflags, int length, int (*payload_hook)(struct sk_buff *skb));
+extern int l2tp_session_queue_purge(struct l2tp_session *session);
 extern int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb);

 extern int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len);
--- a/net/l2tp/l2tp_debugfs.c
+++ b/net/l2tp/l2tp_debugfs.c
@@ -146,14 +146,14 @@ static void l2tp_dfs_seq_tunnel_show(struct seq_file *m, void *v)
 		   tunnel->sock ? atomic_read(&tunnel->sock->sk_refcnt) : 0,
 		   atomic_read(&tunnel->ref_count));

-	seq_printf(m, " %08x rx %llu/%llu/%llu rx %llu/%llu/%llu\n",
+	seq_printf(m, " %08x rx %ld/%ld/%ld rx %ld/%ld/%ld\n",
 		   tunnel->debug,
-		   (unsigned long long)tunnel->stats.tx_packets,
-		   (unsigned long long)tunnel->stats.tx_bytes,
-		   (unsigned long long)tunnel->stats.tx_errors,
-		   (unsigned long long)tunnel->stats.rx_packets,
-		   (unsigned long long)tunnel->stats.rx_bytes,
-		   (unsigned long long)tunnel->stats.rx_errors);
+		   atomic_long_read(&tunnel->stats.tx_packets),
+		   atomic_long_read(&tunnel->stats.tx_bytes),
+		   atomic_long_read(&tunnel->stats.tx_errors),
+		   atomic_long_read(&tunnel->stats.rx_packets),
+		   atomic_long_read(&tunnel->stats.rx_bytes),
+		   atomic_long_read(&tunnel->stats.rx_errors));

 	if (tunnel->show != NULL)
 		tunnel->show(m, tunnel);
@@ -203,14 +203,14 @@ static void l2tp_dfs_seq_session_show(struct seq_file *m, void *v)
 		seq_printf(m, "\n");
 	}

-	seq_printf(m, " %hu/%hu tx %llu/%llu/%llu rx %llu/%llu/%llu\n",
+	seq_printf(m, " %hu/%hu tx %ld/%ld/%ld rx %ld/%ld/%ld\n",
 		   session->nr, session->ns,
-		   (unsigned long long)session->stats.tx_packets,
-		   (unsigned long long)session->stats.tx_bytes,
-		   (unsigned long long)session->stats.tx_errors,
-		   (unsigned long long)session->stats.rx_packets,
-		   (unsigned long long)session->stats.rx_bytes,
-		   (unsigned long long)session->stats.rx_errors);
+		   atomic_long_read(&session->stats.tx_packets),
+		   atomic_long_read(&session->stats.tx_bytes),
+		   atomic_long_read(&session->stats.tx_errors),
+		   atomic_long_read(&session->stats.rx_packets),
+		   atomic_long_read(&session->stats.rx_bytes),
+		   atomic_long_read(&session->stats.rx_errors));

 	if (session->show != NULL)
 		session->show(m, session);
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -228,10 +228,16 @@ static void l2tp_ip_close(struct sock *sk, long timeout)
 static void l2tp_ip_destroy_sock(struct sock *sk)
 {
 	struct sk_buff *skb;
+	struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);

 	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
 		kfree_skb(skb);

+	if (tunnel) {
+		l2tp_tunnel_closeall(tunnel);
+		sock_put(sk);
+	}
+
 	sk_refcnt_debug_dec(sk);
 }
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -241,10 +241,17 @@ static void l2tp_ip6_close(struct sock *sk, long timeout)

 static void l2tp_ip6_destroy_sock(struct sock *sk)
 {
+	struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);
+
 	lock_sock(sk);
 	ip6_flush_pending_frames(sk);
 	release_sock(sk);

+	if (tunnel) {
+		l2tp_tunnel_closeall(tunnel);
+		sock_put(sk);
+	}
+
 	inet6_destroy_sock(sk);
 }
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -246,8 +246,6 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 portid, u32 seq, int fla
 #if IS_ENABLED(CONFIG_IPV6)
 	struct ipv6_pinfo *np = NULL;
 #endif
-	struct l2tp_stats stats;
-	unsigned int start;

 	hdr = genlmsg_put(skb, portid, seq, &l2tp_nl_family, flags,
 			  L2TP_CMD_TUNNEL_GET);
@@ -265,28 +263,22 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 portid, u32 seq, int fla
 	if (nest == NULL)
 		goto nla_put_failure;

-	do {
-		start = u64_stats_fetch_begin(&tunnel->stats.syncp);
-		stats.tx_packets = tunnel->stats.tx_packets;
-		stats.tx_bytes = tunnel->stats.tx_bytes;
-		stats.tx_errors = tunnel->stats.tx_errors;
-		stats.rx_packets = tunnel->stats.rx_packets;
-		stats.rx_bytes = tunnel->stats.rx_bytes;
-		stats.rx_errors = tunnel->stats.rx_errors;
-		stats.rx_seq_discards = tunnel->stats.rx_seq_discards;
-		stats.rx_oos_packets = tunnel->stats.rx_oos_packets;
-	} while (u64_stats_fetch_retry(&tunnel->stats.syncp, start));
-
-	if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, stats.tx_packets) ||
-	    nla_put_u64(skb, L2TP_ATTR_TX_BYTES, stats.tx_bytes) ||
-	    nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, stats.tx_errors) ||
-	    nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, stats.rx_packets) ||
-	    nla_put_u64(skb, L2TP_ATTR_RX_BYTES, stats.rx_bytes) ||
+	if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS,
+			atomic_long_read(&tunnel->stats.tx_packets)) ||
+	    nla_put_u64(skb, L2TP_ATTR_TX_BYTES,
+			atomic_long_read(&tunnel->stats.tx_bytes)) ||
+	    nla_put_u64(skb, L2TP_ATTR_TX_ERRORS,
+			atomic_long_read(&tunnel->stats.tx_errors)) ||
+	    nla_put_u64(skb, L2TP_ATTR_RX_PACKETS,
+			atomic_long_read(&tunnel->stats.rx_packets)) ||
+	    nla_put_u64(skb, L2TP_ATTR_RX_BYTES,
+			atomic_long_read(&tunnel->stats.rx_bytes)) ||
 	    nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS,
-			stats.rx_seq_discards) ||
+			atomic_long_read(&tunnel->stats.rx_seq_discards)) ||
 	    nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS,
-			stats.rx_oos_packets) ||
-	    nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, stats.rx_errors))
+			atomic_long_read(&tunnel->stats.rx_oos_packets)) ||
+	    nla_put_u64(skb, L2TP_ATTR_RX_ERRORS,
+			atomic_long_read(&tunnel->stats.rx_errors)))
 		goto nla_put_failure;
 	nla_nest_end(skb, nest);
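With the seqcount snapshot gone, the dump path reads each counter directly at put time; the long returned by atomic_long_read() widens implicitly to the u64 the attribute carries. The per-attribute pattern, reduced to a sketch with a caller-supplied attribute id:

    #include <linux/atomic.h>
    #include <net/netlink.h>

    static int my_put_counter(struct sk_buff *skb, int attrtype,
                              atomic_long_t *counter)
    {
        /* returns -EMSGSIZE when the skb runs out of tail room */
        return nla_put_u64(skb, attrtype, atomic_long_read(counter));
    }

Callers chain such puts with || and jump to nla_put_failure on the first failure, exactly as the hunks above do.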
@@ -612,8 +604,6 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int fl
 	struct nlattr *nest;
 	struct l2tp_tunnel *tunnel = session->tunnel;
 	struct sock *sk = NULL;
-	struct l2tp_stats stats;
-	unsigned int start;

 	sk = tunnel->sock;

@@ -656,28 +646,22 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int fl
 	if (nest == NULL)
 		goto nla_put_failure;

-	do {
-		start = u64_stats_fetch_begin(&session->stats.syncp);
-		stats.tx_packets = session->stats.tx_packets;
-		stats.tx_bytes = session->stats.tx_bytes;
-		stats.tx_errors = session->stats.tx_errors;
-		stats.rx_packets = session->stats.rx_packets;
-		stats.rx_bytes = session->stats.rx_bytes;
-		stats.rx_errors = session->stats.rx_errors;
-		stats.rx_seq_discards = session->stats.rx_seq_discards;
-		stats.rx_oos_packets = session->stats.rx_oos_packets;
-	} while (u64_stats_fetch_retry(&session->stats.syncp, start));
-
-	if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, stats.tx_packets) ||
-	    nla_put_u64(skb, L2TP_ATTR_TX_BYTES, stats.tx_bytes) ||
-	    nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, stats.tx_errors) ||
-	    nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, stats.rx_packets) ||
-	    nla_put_u64(skb, L2TP_ATTR_RX_BYTES, stats.rx_bytes) ||
+	if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS,
+			atomic_long_read(&session->stats.tx_packets)) ||
+	    nla_put_u64(skb, L2TP_ATTR_TX_BYTES,
+			atomic_long_read(&session->stats.tx_bytes)) ||
+	    nla_put_u64(skb, L2TP_ATTR_TX_ERRORS,
+			atomic_long_read(&session->stats.tx_errors)) ||
+	    nla_put_u64(skb, L2TP_ATTR_RX_PACKETS,
+			atomic_long_read(&session->stats.rx_packets)) ||
+	    nla_put_u64(skb, L2TP_ATTR_RX_BYTES,
+			atomic_long_read(&session->stats.rx_bytes)) ||
 	    nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS,
-			stats.rx_seq_discards) ||
+			atomic_long_read(&session->stats.rx_seq_discards)) ||
 	    nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS,
-			stats.rx_oos_packets) ||
-	    nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, stats.rx_errors))
+			atomic_long_read(&session->stats.rx_oos_packets)) ||
+	    nla_put_u64(skb, L2TP_ATTR_RX_ERRORS,
+			atomic_long_read(&session->stats.rx_errors)))
 		goto nla_put_failure;
 	nla_nest_end(skb, nest);
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -97,6 +97,7 @@
 #include <net/ip.h>
 #include <net/udp.h>
 #include <net/xfrm.h>
+#include <net/inet_common.h>

 #include <asm/byteorder.h>
 #include <linux/atomic.h>
@@ -259,7 +260,7 @@ static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int
 			  session->name);

 		/* Not bound. Nothing we can do, so discard. */
-		session->stats.rx_errors++;
+		atomic_long_inc(&session->stats.rx_errors);
 		kfree_skb(skb);
 	}

@@ -447,34 +448,16 @@ static void pppol2tp_session_close(struct l2tp_session *session)
 {
 	struct pppol2tp_session *ps = l2tp_session_priv(session);
 	struct sock *sk = ps->sock;
-	struct sk_buff *skb;
+	struct socket *sock = sk->sk_socket;

 	BUG_ON(session->magic != L2TP_SESSION_MAGIC);

-	if (session->session_id == 0)
-		goto out;
-
-	if (sk != NULL) {
-		lock_sock(sk);
-
-		if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
-			pppox_unbind_sock(sk);
-			sk->sk_state = PPPOX_DEAD;
-			sk->sk_state_change(sk);
-		}
-
-		/* Purge any queued data */
-		skb_queue_purge(&sk->sk_receive_queue);
-		skb_queue_purge(&sk->sk_write_queue);
-		while ((skb = skb_dequeue(&session->reorder_q))) {
-			kfree_skb(skb);
-			sock_put(sk);
-		}
-
-		release_sock(sk);
+	if (sock) {
+		inet_shutdown(sock, 2);
+		/* Don't let the session go away before our socket does */
+		l2tp_session_inc_refcount(session);
 	}
-
-out:
-	return;
 }
@@ -483,19 +466,12 @@ static void pppol2tp_session_close(struct l2tp_session *session)
  */
 static void pppol2tp_session_destruct(struct sock *sk)
 {
-	struct l2tp_session *session;
-
-	if (sk->sk_user_data != NULL) {
-		session = sk->sk_user_data;
-		if (session == NULL)
-			goto out;
-
+	struct l2tp_session *session = sk->sk_user_data;
+	if (session) {
 		sk->sk_user_data = NULL;
 		BUG_ON(session->magic != L2TP_SESSION_MAGIC);
 		l2tp_session_dec_refcount(session);
 	}
-
-out:
-	return;
 }
@@ -525,16 +501,13 @@ static int pppol2tp_release(struct socket *sock)
 	session = pppol2tp_sock_to_session(sk);

+	if (session != NULL) {
+		__l2tp_session_unhash(session);
+		l2tp_session_queue_purge(session);
+		sock_put(sk);
+	}
+
 	/* Purge any queued data */
 	skb_queue_purge(&sk->sk_receive_queue);
 	skb_queue_purge(&sk->sk_write_queue);
-	if (session != NULL) {
-		struct sk_buff *skb;
-		while ((skb = skb_dequeue(&session->reorder_q))) {
-			kfree_skb(skb);
-			sock_put(sk);
-		}
-		sock_put(sk);
-	}

 	release_sock(sk);
@@ -880,18 +853,6 @@ static int pppol2tp_session_create(struct net *net, u32 tunnel_id, u32 session_i
 	return error;
 }

-/* Called when deleting sessions via the netlink interface.
- */
-static int pppol2tp_session_delete(struct l2tp_session *session)
-{
-	struct pppol2tp_session *ps = l2tp_session_priv(session);
-
-	if (ps->sock == NULL)
-		l2tp_session_dec_refcount(session);
-
-	return 0;
-}
-
 #endif /* CONFIG_L2TP_V3 */

 /* getname() support.
@@ -1025,14 +986,14 @@ static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr,
 static void pppol2tp_copy_stats(struct pppol2tp_ioc_stats *dest,
 				struct l2tp_stats *stats)
 {
-	dest->tx_packets = stats->tx_packets;
-	dest->tx_bytes = stats->tx_bytes;
-	dest->tx_errors = stats->tx_errors;
-	dest->rx_packets = stats->rx_packets;
-	dest->rx_bytes = stats->rx_bytes;
-	dest->rx_seq_discards = stats->rx_seq_discards;
-	dest->rx_oos_packets = stats->rx_oos_packets;
-	dest->rx_errors = stats->rx_errors;
+	dest->tx_packets = atomic_long_read(&stats->tx_packets);
+	dest->tx_bytes = atomic_long_read(&stats->tx_bytes);
+	dest->tx_errors = atomic_long_read(&stats->tx_errors);
+	dest->rx_packets = atomic_long_read(&stats->rx_packets);
+	dest->rx_bytes = atomic_long_read(&stats->rx_bytes);
+	dest->rx_seq_discards = atomic_long_read(&stats->rx_seq_discards);
+	dest->rx_oos_packets = atomic_long_read(&stats->rx_oos_packets);
+	dest->rx_errors = atomic_long_read(&stats->rx_errors);
 }

 /* Session ioctl helper.
@@ -1666,14 +1627,14 @@ static void pppol2tp_seq_tunnel_show(struct seq_file *m, void *v)
 		   tunnel->name,
 		   (tunnel == tunnel->sock->sk_user_data) ? 'Y' : 'N',
 		   atomic_read(&tunnel->ref_count) - 1);
-	seq_printf(m, " %08x %llu/%llu/%llu %llu/%llu/%llu\n",
+	seq_printf(m, " %08x %ld/%ld/%ld %ld/%ld/%ld\n",
 		   tunnel->debug,
-		   (unsigned long long)tunnel->stats.tx_packets,
-		   (unsigned long long)tunnel->stats.tx_bytes,
-		   (unsigned long long)tunnel->stats.tx_errors,
-		   (unsigned long long)tunnel->stats.rx_packets,
-		   (unsigned long long)tunnel->stats.rx_bytes,
-		   (unsigned long long)tunnel->stats.rx_errors);
+		   atomic_long_read(&tunnel->stats.tx_packets),
+		   atomic_long_read(&tunnel->stats.tx_bytes),
+		   atomic_long_read(&tunnel->stats.tx_errors),
+		   atomic_long_read(&tunnel->stats.rx_packets),
+		   atomic_long_read(&tunnel->stats.rx_bytes),
+		   atomic_long_read(&tunnel->stats.rx_errors));
 }

 static void pppol2tp_seq_session_show(struct seq_file *m, void *v)
@@ -1708,14 +1669,14 @@ static void pppol2tp_seq_session_show(struct seq_file *m, void *v)
 		   session->lns_mode ? "LNS" : "LAC",
 		   session->debug,
 		   jiffies_to_msecs(session->reorder_timeout));
-	seq_printf(m, " %hu/%hu %llu/%llu/%llu %llu/%llu/%llu\n",
+	seq_printf(m, " %hu/%hu %ld/%ld/%ld %ld/%ld/%ld\n",
 		   session->nr, session->ns,
-		   (unsigned long long)session->stats.tx_packets,
-		   (unsigned long long)session->stats.tx_bytes,
-		   (unsigned long long)session->stats.tx_errors,
-		   (unsigned long long)session->stats.rx_packets,
-		   (unsigned long long)session->stats.rx_bytes,
-		   (unsigned long long)session->stats.rx_errors);
+		   atomic_long_read(&session->stats.tx_packets),
+		   atomic_long_read(&session->stats.tx_bytes),
+		   atomic_long_read(&session->stats.tx_errors),
+		   atomic_long_read(&session->stats.rx_packets),
+		   atomic_long_read(&session->stats.rx_bytes),
+		   atomic_long_read(&session->stats.rx_errors));

 	if (po)
 		seq_printf(m, "   interface %s\n", ppp_dev_name(&po->chan));
@@ -1839,7 +1800,7 @@ static const struct pppox_proto pppol2tp_proto = {

 static const struct l2tp_nl_cmd_ops pppol2tp_nl_cmd_ops = {
 	.session_create	= pppol2tp_session_create,
-	.session_delete	= pppol2tp_session_delete,
+	.session_delete	= l2tp_session_delete,
 };

 #endif /* CONFIG_L2TP_V3 */
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1394,10 +1394,8 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
 		skb_reset_network_header(skb);
 		IP_VS_DBG(12, "ICMP for IPIP %pI4->%pI4: mtu=%u\n",
 			  &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr, mtu);
-		rcu_read_lock();
 		ipv4_update_pmtu(skb, dev_net(skb->dev),
 				 mtu, 0, 0, 0, 0);
-		rcu_read_unlock();
 		/* Client uses PMTUD? */
 		if (!(cih->frag_off & htons(IP_DF)))
 			goto ignore_ipip;
@@ -1577,7 +1575,8 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
 	}
 	/* ipvs enabled in this netns ? */
 	net = skb_net(skb);
-	if (!net_ipvs(net)->enable)
+	ipvs = net_ipvs(net);
+	if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
 		return NF_ACCEPT;

 	ip_vs_fill_iph_skb(af, skb, &iph);
@@ -1654,7 +1653,6 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
 	}

 	IP_VS_DBG_PKT(11, af, pp, skb, 0, "Incoming packet");
-	ipvs = net_ipvs(net);
 	/* Check the server status */
 	if (cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) {
 		/* the destination server is not available */
@@ -1815,13 +1813,15 @@ ip_vs_forward_icmp(unsigned int hooknum, struct sk_buff *skb,
 {
 	int r;
 	struct net *net;
+	struct netns_ipvs *ipvs;

 	if (ip_hdr(skb)->protocol != IPPROTO_ICMP)
 		return NF_ACCEPT;

 	/* ipvs enabled in this netns ? */
 	net = skb_net(skb);
-	if (!net_ipvs(net)->enable)
+	ipvs = net_ipvs(net);
+	if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
 		return NF_ACCEPT;

 	return ip_vs_in_icmp(skb, &r, hooknum);
@@ -1835,6 +1835,7 @@ ip_vs_forward_icmp_v6(unsigned int hooknum, struct sk_buff *skb,
 {
 	int r;
 	struct net *net;
+	struct netns_ipvs *ipvs;
 	struct ip_vs_iphdr iphdr;

 	ip_vs_fill_iph_skb(AF_INET6, skb, &iphdr);
@@ -1843,7 +1844,8 @@ ip_vs_forward_icmp_v6(unsigned int hooknum, struct sk_buff *skb,

 	/* ipvs enabled in this netns ? */
 	net = skb_net(skb);
-	if (!net_ipvs(net)->enable)
+	ipvs = net_ipvs(net);
+	if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
 		return NF_ACCEPT;

 	return ip_vs_in_icmp_v6(skb, &r, hooknum, &iphdr);
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -1808,6 +1808,12 @@ static struct ctl_table vs_vars[] = {
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec,
 	},
+	{
+		.procname	= "backup_only",
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
 #ifdef CONFIG_IP_VS_DEBUG
 	{
 		.procname	= "debug_level",
@@ -3741,6 +3747,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct net *net)
 	tbl[idx++].data = &ipvs->sysctl_nat_icmp_send;
 	ipvs->sysctl_pmtu_disc = 1;
 	tbl[idx++].data = &ipvs->sysctl_pmtu_disc;
+	tbl[idx++].data = &ipvs->sysctl_backup_only;


 	ipvs->sysctl_hdr = register_net_sysctl(net, "net/ipv4/vs", tbl);
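Taken together, the ip_vs hunks add a per-netns knob, net.ipv4.vs.backup_only (enabled with e.g. `sysctl -w net.ipv4.vs.backup_only=1`), and make every input hook bail out before doing any connection work when the node acts as a pure sync backup. The shared gate, extracted as a sketch (the helper name is illustrative; sysctl_backup_only() is the accessor these hunks rely on):

    #include <net/ip_vs.h>

    static inline bool my_ipvs_bypass(struct netns_ipvs *ipvs)
    {
        /* pure backup, or ipvs disabled in this netns: leave the packet alone */
        return unlikely(sysctl_backup_only(ipvs) || !ipvs->enable);
    }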
--- a/net/netfilter/ipvs/ip_vs_proto_sctp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c
@@ -906,7 +906,7 @@ set_sctp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp,
 	sctp_chunkhdr_t _sctpch, *sch;
 	unsigned char chunk_type;
 	int event, next_state;
-	int ihl;
+	int ihl, cofs;

 #ifdef CONFIG_IP_VS_IPV6
 	ihl = cp->af == AF_INET ? ip_hdrlen(skb) : sizeof(struct ipv6hdr);
@@ -914,8 +914,8 @@ set_sctp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp,
 	ihl = ip_hdrlen(skb);
 #endif

-	sch = skb_header_pointer(skb, ihl + sizeof(sctp_sctphdr_t),
-				 sizeof(_sctpch), &_sctpch);
+	cofs = ihl + sizeof(sctp_sctphdr_t);
+	sch = skb_header_pointer(skb, cofs, sizeof(_sctpch), &_sctpch);
 	if (sch == NULL)
 		return;

@@ -933,10 +933,12 @@ set_sctp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp,
 	 */
 	if ((sch->type == SCTP_CID_COOKIE_ECHO) ||
 	    (sch->type == SCTP_CID_COOKIE_ACK)) {
-		sch = skb_header_pointer(skb, (ihl + sizeof(sctp_sctphdr_t) +
-				sch->length), sizeof(_sctpch), &_sctpch);
-		if (sch) {
-			if (sch->type == SCTP_CID_ABORT)
+		int clen = ntohs(sch->length);
+
+		if (clen >= sizeof(sctp_chunkhdr_t)) {
+			sch = skb_header_pointer(skb, cofs + ALIGN(clen, 4),
+						 sizeof(_sctpch), &_sctpch);
+			if (sch && sch->type == SCTP_CID_ABORT)
 				chunk_type = sch->type;
 		}
 	}
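Two wire-format rules make the old code wrong: an SCTP chunk's length field is big-endian, and chunks are padded to 4-byte boundaries, so adding the raw sch->length to the offset looked for the second chunk in the wrong place. A sketch of stepping to the next chunk under those rules (the helper and its bounds handling are assumptions, not the ipvs code):

    #include <linux/kernel.h>   /* ALIGN() */
    #include <linux/sctp.h>
    #include <asm/byteorder.h>

    static const sctp_chunkhdr_t *my_next_chunk(const sctp_chunkhdr_t *ch,
                                                const void *end)
    {
        unsigned int clen = ntohs(ch->length); /* wire value is big-endian */

        if (clen < sizeof(*ch))                /* malformed: avoid looping */
            return NULL;
        ch = (const void *)ch + ALIGN(clen, 4); /* chunks are 4-byte aligned */
        return ((const void *)(ch + 1) <= end) ? ch : NULL;
    }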
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -969,6 +969,10 @@ static int __init nf_conntrack_proto_dccp_init(void)
 {
 	int ret;

+	ret = register_pernet_subsys(&dccp_net_ops);
+	if (ret < 0)
+		goto out_pernet;
+
 	ret = nf_ct_l4proto_register(&dccp_proto4);
 	if (ret < 0)
 		goto out_dccp4;
@@ -977,16 +981,12 @@ static int __init nf_conntrack_proto_dccp_init(void)
 	if (ret < 0)
 		goto out_dccp6;

-	ret = register_pernet_subsys(&dccp_net_ops);
-	if (ret < 0)
-		goto out_pernet;
-
 	return 0;
-out_pernet:
-	nf_ct_l4proto_unregister(&dccp_proto6);
 out_dccp6:
 	nf_ct_l4proto_unregister(&dccp_proto4);
 out_dccp4:
+	unregister_pernet_subsys(&dccp_net_ops);
+out_pernet:
 	return ret;
 }
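The same bug, and the same cure, recur in the three files that follow: per-net state must be registered before nf_ct_l4proto_register() makes the protocol visible, because the tracker can run (and dereference that state) as soon as registration succeeds, and the error unwind must then run in exact reverse order. The shape, reduced to a sketch with illustrative my_* objects:

    #include <linux/module.h>
    #include <net/net_namespace.h>
    #include <net/netfilter/nf_conntrack_l4proto.h>

    extern struct pernet_operations my_net_ops;     /* assumed defined elsewhere */
    extern struct nf_conntrack_l4proto my_proto4;   /* assumed defined elsewhere */

    static int __init my_proto_init(void)
    {
        int ret;

        ret = register_pernet_subsys(&my_net_ops);  /* per-net state first */
        if (ret < 0)
            goto out_pernet;

        ret = nf_ct_l4proto_register(&my_proto4);   /* only then become visible */
        if (ret < 0)
            goto out_proto4;

        return 0;
    out_proto4:
        unregister_pernet_subsys(&my_net_ops);
    out_pernet:
        return ret;
    }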
--- a/net/netfilter/nf_conntrack_proto_gre.c
+++ b/net/netfilter/nf_conntrack_proto_gre.c
@@ -420,18 +420,18 @@ static int __init nf_ct_proto_gre_init(void)
 {
 	int ret;

-	ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_gre4);
-	if (ret < 0)
-		goto out_gre4;
-
 	ret = register_pernet_subsys(&proto_gre_net_ops);
 	if (ret < 0)
 		goto out_pernet;

+	ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_gre4);
+	if (ret < 0)
+		goto out_gre4;
+
 	return 0;
-out_pernet:
-	nf_ct_l4proto_unregister(&nf_conntrack_l4proto_gre4);
 out_gre4:
+	unregister_pernet_subsys(&proto_gre_net_ops);
+out_pernet:
 	return ret;
 }

--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -888,6 +888,10 @@ static int __init nf_conntrack_proto_sctp_init(void)
 {
 	int ret;

+	ret = register_pernet_subsys(&sctp_net_ops);
+	if (ret < 0)
+		goto out_pernet;
+
 	ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_sctp4);
 	if (ret < 0)
 		goto out_sctp4;
@@ -896,16 +900,12 @@ static int __init nf_conntrack_proto_sctp_init(void)
 	if (ret < 0)
 		goto out_sctp6;

-	ret = register_pernet_subsys(&sctp_net_ops);
-	if (ret < 0)
-		goto out_pernet;
-
 	return 0;
-out_pernet:
-	nf_ct_l4proto_unregister(&nf_conntrack_l4proto_sctp6);
 out_sctp6:
 	nf_ct_l4proto_unregister(&nf_conntrack_l4proto_sctp4);
 out_sctp4:
+	unregister_pernet_subsys(&sctp_net_ops);
+out_pernet:
 	return ret;
 }

--- a/net/netfilter/nf_conntrack_proto_udplite.c
+++ b/net/netfilter/nf_conntrack_proto_udplite.c
@@ -371,6 +371,10 @@ static int __init nf_conntrack_proto_udplite_init(void)
 {
 	int ret;

+	ret = register_pernet_subsys(&udplite_net_ops);
+	if (ret < 0)
+		goto out_pernet;
+
 	ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_udplite4);
 	if (ret < 0)
 		goto out_udplite4;
@@ -379,16 +383,12 @@ static int __init nf_conntrack_proto_udplite_init(void)
 	if (ret < 0)
 		goto out_udplite6;

-	ret = register_pernet_subsys(&udplite_net_ops);
-	if (ret < 0)
-		goto out_pernet;
-
 	return 0;
-out_pernet:
-	nf_ct_l4proto_unregister(&nf_conntrack_l4proto_udplite6);
 out_udplite6:
 	nf_ct_l4proto_unregister(&nf_conntrack_l4proto_udplite4);
 out_udplite4:
+	unregister_pernet_subsys(&udplite_net_ops);
+out_pernet:
 	return ret;
 }
--- a/net/netfilter/nfnetlink_queue_core.c
+++ b/net/netfilter/nfnetlink_queue_core.c
@@ -112,7 +112,7 @@ instance_create(u_int16_t queue_num, int portid)
 	inst->queue_num = queue_num;
 	inst->peer_portid = portid;
 	inst->queue_maxlen = NFQNL_QMAX_DEFAULT;
-	inst->copy_range = 0xfffff;
+	inst->copy_range = 0xffff;
 	inst->copy_mode = NFQNL_COPY_NONE;
 	spin_lock_init(&inst->lock);
 	INIT_LIST_HEAD(&inst->queue_list);
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -142,6 +142,7 @@ int genl_register_mc_group(struct genl_family *family,
 	int err = 0;

 	BUG_ON(grp->name[0] == '\0');
+	BUG_ON(memchr(grp->name, '\0', GENL_NAMSIZ) == NULL);

 	genl_lock();

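The added check turns a silently truncated multicast group name into an immediate BUG at registration time: the name must be NUL-terminated within GENL_NAMSIZ (16) bytes, since it is copied around as a fixed-size array. A compliant definition, with a made-up group name:

    #include <net/genetlink.h>

    static struct genl_multicast_group my_mc_group = {
        .name = "my_events",  /* 10 bytes including the NUL: fits GENL_NAMSIZ */
    };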
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -382,7 +382,7 @@ static void unix_sock_destructor(struct sock *sk)
 #endif
 }

-static int unix_release_sock(struct sock *sk, int embrion)
+static void unix_release_sock(struct sock *sk, int embrion)
 {
 	struct unix_sock *u = unix_sk(sk);
 	struct path path;
@@ -451,8 +451,6 @@ static int unix_release_sock(struct sock *sk, int embrion)

 	if (unix_tot_inflight)
 		unix_gc();		/* Garbage collect fds */
-
-	return 0;
 }

 static void init_peercred(struct sock *sk)
@@ -699,9 +697,10 @@ static int unix_release(struct socket *sock)
 	if (!sk)
 		return 0;

+	unix_release_sock(sk, 0);
 	sock->sk = NULL;

-	return unix_release_sock(sk, 0);
+	return 0;
 }

 static int unix_autobind(struct socket *sock)
@@ -1413,8 +1412,8 @@ static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
 	if (UNIXCB(skb).cred)
 		return;
 	if (test_bit(SOCK_PASSCRED, &sock->flags) ||
-	    !other->sk_socket ||
-	    test_bit(SOCK_PASSCRED, &other->sk_socket->flags)) {
+	    (other->sk_socket &&
+	     test_bit(SOCK_PASSCRED, &other->sk_socket->flags))) {
 		UNIXCB(skb).pid  = get_pid(task_tgid(current));
 		UNIXCB(skb).cred = get_current_cred();
 	}
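The maybe_add_creds() change is pure boolean regrouping, but it reverses the behaviour for a vanished peer: the old condition treated a missing destination socket (!other->sk_socket) as a reason to attach credentials, so SCM_CREDENTIALS work was done for a destination that was already gone; the new grouping consults the destination's SOCK_PASSCRED flag only when that socket still exists. The corrected predicate in isolation (the helper name is illustrative):

    #include <linux/net.h>
    #include <net/sock.h>

    static bool my_want_creds(struct socket *sock, struct sock *other)
    {
        return test_bit(SOCK_PASSCRED, &sock->flags) ||
               (other->sk_socket &&
                test_bit(SOCK_PASSCRED, &other->sk_socket->flags));
    }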