Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-11-24 09:40:58 +07:00)

commit 3a07bd6fea: Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Conflicts:
	drivers/net/ethernet/mellanox/mlx4/main.c
	net/packet/af_packet.c

Both conflicts were cases of simple overlapping changes.

Signed-off-by: David S. Miller <davem@davemloft.net>
@@ -237,7 +237,7 @@ config HISAX_MIC

 config HISAX_NETJET
 	bool "NETjet card"
-	depends on PCI && (BROKEN || !(PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV || (XTENSA && !CPU_LITTLE_ENDIAN)))
+	depends on PCI && (BROKEN || !(PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV || (XTENSA && !CPU_LITTLE_ENDIAN) || MICROBLAZE))
 	depends on VIRT_TO_BUS
 	help
 	  This enables HiSax support for the NetJet from Traverse

@@ -249,7 +249,7 @@ config HISAX_NETJET

 config HISAX_NETJET_U
 	bool "NETspider U card"
-	depends on PCI && (BROKEN || !(PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV || (XTENSA && !CPU_LITTLE_ENDIAN)))
+	depends on PCI && (BROKEN || !(PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV || (XTENSA && !CPU_LITTLE_ENDIAN) || MICROBLAZE))
	depends on VIRT_TO_BUS
 	help
 	  This enables HiSax support for the Netspider U interface ISDN card
@@ -440,6 +440,9 @@ unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx)
 		struct can_frame *cf = (struct can_frame *)skb->data;
 		u8 dlc = cf->can_dlc;

+		if (!(skb->tstamp.tv64))
+			__net_timestamp(skb);
+
 		netif_rx(priv->echo_skb[idx]);
 		priv->echo_skb[idx] = NULL;

@@ -575,6 +578,7 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
 	if (unlikely(!skb))
 		return NULL;

+	__net_timestamp(skb);
 	skb->protocol = htons(ETH_P_CAN);
 	skb->pkt_type = PACKET_BROADCAST;
 	skb->ip_summed = CHECKSUM_UNNECESSARY;

@@ -603,6 +607,7 @@ struct sk_buff *alloc_canfd_skb(struct net_device *dev,
 	if (unlikely(!skb))
 		return NULL;

+	__net_timestamp(skb);
 	skb->protocol = htons(ETH_P_CANFD);
 	skb->pkt_type = PACKET_BROADCAST;
 	skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -207,6 +207,7 @@ static void slc_bump(struct slcan *sl)
 	if (!skb)
 		return;

+	__net_timestamp(skb);
 	skb->dev = sl->dev;
 	skb->protocol = htons(ETH_P_CAN);
 	skb->pkt_type = PACKET_BROADCAST;
@@ -78,6 +78,9 @@ static void vcan_rx(struct sk_buff *skb, struct net_device *dev)
 	skb->dev = dev;
 	skb->ip_summed = CHECKSUM_UNNECESSARY;

+	if (!(skb->tstamp.tv64))
+		__net_timestamp(skb);
+
 	netif_rx_ni(skb);
 }

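Note on the CAN hunks above: frames that never pass through a physical receive path (echo skbs, slcan, vcan, and the can_send() loopback further down) used to reach userspace with a zero timestamp. Below is a minimal userspace sketch of where that shows up, reading the kernel receive timestamp on a SocketCAN socket via SIOCGSTAMP; the interface name "vcan0" and the omitted error handling are illustrative assumptions, not part of the patch.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <linux/can.h>
#include <linux/can/raw.h>
#include <linux/sockios.h>

int main(void)
{
	int s = socket(PF_CAN, SOCK_RAW, CAN_RAW);
	struct ifreq ifr;
	struct sockaddr_can addr = { .can_family = AF_CAN };
	struct can_frame frame;
	struct timeval tv;

	strcpy(ifr.ifr_name, "vcan0");   /* assumed test interface */
	ioctl(s, SIOCGIFINDEX, &ifr);
	addr.can_ifindex = ifr.ifr_ifindex;
	bind(s, (struct sockaddr *)&addr, sizeof(addr));

	read(s, &frame, sizeof(frame));  /* blocks for one frame */
	ioctl(s, SIOCGSTAMP, &tv);       /* was 0.000000 for locally
	                                    generated frames before the fix */
	printf("received at %ld.%06ld\n", (long)tv.tv_sec, (long)tv.tv_usec);
	return 0;
}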
@@ -487,6 +487,9 @@ static struct sk_buff *tx_skb_align_workaround(struct net_device *dev,
 {
 	struct sk_buff *new_skb;

+	if (skb_linearize(skb))
+		return NULL;
+
 	/* Alloc new skb */
 	new_skb = netdev_alloc_skb(dev, skb->len + 4);
 	if (!new_skb)

@@ -512,12 +515,27 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	cbd_t __iomem *bdp;
 	int curidx;
 	u16 sc;
-	int nr_frags = skb_shinfo(skb)->nr_frags;
+	int nr_frags;
 	skb_frag_t *frag;
 	int len;

 #ifdef CONFIG_FS_ENET_MPC5121_FEC
-	if (((unsigned long)skb->data) & 0x3) {
+	int is_aligned = 1;
+	int i;
+
+	if (!IS_ALIGNED((unsigned long)skb->data, 4)) {
+		is_aligned = 0;
+	} else {
+		nr_frags = skb_shinfo(skb)->nr_frags;
+		frag = skb_shinfo(skb)->frags;
+		for (i = 0; i < nr_frags; i++, frag++) {
+			if (!IS_ALIGNED(frag->page_offset, 4)) {
+				is_aligned = 0;
+				break;
+			}
+		}
+	}
+
+	if (!is_aligned) {
 		skb = tx_skb_align_workaround(dev, skb);
 		if (!skb) {
 			/*

@@ -529,6 +547,7 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		}
 	}
 #endif
+
 	spin_lock(&fep->tx_lock);

 	/*

@@ -536,6 +555,7 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	 */
 	bdp = fep->cur_tx;

+	nr_frags = skb_shinfo(skb)->nr_frags;
 	if (fep->tx_free <= nr_frags || (CBDR_SC(bdp) & BD_ENET_TX_READY)) {
 		netif_stop_queue(dev);
 		spin_unlock(&fep->tx_lock);
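Note on the fs_enet hunks: the old test only masked skb->data against 0x3, but on MPC5121 every fragment must also be 4-byte aligned before DMA, and the workaround path now linearizes the skb first. A standalone sketch of the alignment test itself; the macro mirrors the kernel's IS_ALIGNED() and the buffer is an illustrative assumption:

#include <stdint.h>
#include <stdio.h>

/* same idea as the kernel's IS_ALIGNED(): the low bits must be zero */
#define IS_ALIGNED(x, a) (((x) & ((uintptr_t)(a) - 1)) == 0)

int main(void)
{
	char buf[16];
	uintptr_t aligned = (uintptr_t)buf & ~(uintptr_t)3;
	uintptr_t misaligned = aligned + 1;

	printf("%d %d\n", (int)IS_ALIGNED(aligned, 4),
	       (int)IS_ALIGNED(misaligned, 4)); /* prints "1 0" */
	return 0;
}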
@@ -1013,6 +1013,12 @@ static void mvneta_defaults_set(struct mvneta_port *pp)
 		val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
 		val |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
 		mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val);
+	} else {
+		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
+		val &= ~(MVNETA_GMAC_INBAND_AN_ENABLE |
+			 MVNETA_GMAC_AN_SPEED_EN |
+			 MVNETA_GMAC_AN_DUPLEX_EN);
+		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
 	}

 	mvneta_set_ucast_table(pp, -1);
@@ -479,6 +479,14 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 		}
 	}

+	if (mlx4_is_master(dev) && (dev->caps.num_ports == 2) &&
+	    (port_type_array[0] == MLX4_PORT_TYPE_IB) &&
+	    (port_type_array[1] == MLX4_PORT_TYPE_ETH)) {
+		mlx4_warn(dev,
+			  "Granular QoS per VF not supported with IB/Eth configuration\n");
+		dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_QOS_VPP;
+	}
+
 	dev->caps.max_counters = dev_cap->max_counters;

 	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
@@ -5297,7 +5297,7 @@ static int __init rocker_module_init(void)
 	return 0;

 err_pci_register_driver:
-	unregister_netdevice_notifier(&rocker_netevent_nb);
+	unregister_netevent_notifier(&rocker_netevent_nb);
 	unregister_netdevice_notifier(&rocker_netdevice_nb);
 	return err;
 }
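The rocker one-liner is an error-unwind bug: the failure path undid a netevent notifier with the netdevice unregister helper. A generic, runnable sketch of the paired-teardown pattern; every name below is illustrative, not rocker's actual API:

#include <stdio.h>

static int register_netdevice_nb(void) { return 0; }
static int register_netevent_nb(void)  { return 0; }
static int register_driver(void)       { return -1; /* force failure */ }
static void unregister_netdevice_nb(void) { puts("undo netdevice nb"); }
static void unregister_netevent_nb(void)  { puts("undo netevent nb"); }

/* Each label undoes only what already succeeded, and each registration
 * is undone by its *matching* unregister call. */
static int module_init_sketch(void)
{
	int err;

	err = register_netdevice_nb();
	if (err)
		return err;
	err = register_netevent_nb();
	if (err)
		goto err_netevent;
	err = register_driver();
	if (err)
		goto err_driver;
	return 0;

err_driver:
	unregister_netevent_nb();  /* the fix: not the netdevice variant */
err_netevent:
	unregister_netdevice_nb();
	return err;
}

int main(void)
{
	return module_init_sketch() ? 1 : 0;
}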
@@ -158,6 +158,8 @@ struct dma_desc {
 			u32 buffer2_size:13;
 			u32 reserved4:3;
 		} etx;		/* -- enhanced -- */
+
+		u64 all_flags;
 	} des01;
 	unsigned int des2;
 	unsigned int des3;

@@ -240,6 +240,7 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
 static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
 				  int mode, int end)
 {
+	p->des01.all_flags = 0;
 	p->des01.erx.own = 1;
 	p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1;

@@ -254,7 +255,7 @@ static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,

 static void enh_desc_init_tx_desc(struct dma_desc *p, int mode, int end)
 {
-	p->des01.etx.own = 0;
+	p->des01.all_flags = 0;
 	if (mode == STMMAC_CHAIN_MODE)
 		ehn_desc_tx_set_on_chain(p, end);
 	else

@@ -123,6 +123,7 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
 static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode,
 			       int end)
 {
+	p->des01.all_flags = 0;
 	p->des01.rx.own = 1;
 	p->des01.rx.buffer1_size = BUF_SIZE_2KiB - 1;

@@ -137,7 +138,7 @@ static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode,

 static void ndesc_init_tx_desc(struct dma_desc *p, int mode, int end)
 {
-	p->des01.tx.own = 0;
+	p->des01.all_flags = 0;
 	if (mode == STMMAC_CHAIN_MODE)
 		ndesc_tx_set_on_chain(p, end);
 	else
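The stmmac descriptor hunks add a u64 all_flags member that overlays the des01 bitfield words, so the init paths can clear the whole descriptor in one store instead of poking individual fields. A self-contained sketch of the union-overlay idea; the field names below are simplified assumptions, not the driver's real layout:

#include <stdint.h>
#include <stdio.h>

struct dma_desc {
	union {
		struct {
			uint32_t own:1;
			uint32_t buffer1_size:13;
			/* ... more hardware bitfields ... */
		} rx;
		uint64_t all_flags;  /* overlays both 32-bit words */
	} des01;
};

int main(void)
{
	struct dma_desc d;

	d.des01.all_flags = 0;  /* whole-descriptor reset in one store,
	                           replacing per-field clears */
	d.des01.rx.own = 1;
	printf("own=%u\n", (unsigned)d.des01.rx.own);
	return 0;
}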
@@ -1195,7 +1195,7 @@ static int alloc_dma_desc_resources(struct stmmac_priv *priv)
 		goto err_tx_skbuff;

 	if (priv->extend_desc) {
-		priv->dma_erx = dma_alloc_coherent(priv->device, rxsize *
+		priv->dma_erx = dma_zalloc_coherent(priv->device, rxsize *
 						    sizeof(struct
 						    dma_extended_desc),
 						    &priv->dma_rx_phy,

@@ -1203,7 +1203,7 @@ static int alloc_dma_desc_resources(struct stmmac_priv *priv)
 		if (!priv->dma_erx)
 			goto err_dma;

-		priv->dma_etx = dma_alloc_coherent(priv->device, txsize *
+		priv->dma_etx = dma_zalloc_coherent(priv->device, txsize *
 						    sizeof(struct
 						    dma_extended_desc),
 						    &priv->dma_tx_phy,

@@ -1215,14 +1215,14 @@ static int alloc_dma_desc_resources(struct stmmac_priv *priv)
 			goto err_dma;
 		}
 	} else {
-		priv->dma_rx = dma_alloc_coherent(priv->device, rxsize *
+		priv->dma_rx = dma_zalloc_coherent(priv->device, rxsize *
 						   sizeof(struct dma_desc),
 						   &priv->dma_rx_phy,
 						   GFP_KERNEL);
 		if (!priv->dma_rx)
 			goto err_dma;

-		priv->dma_tx = dma_alloc_coherent(priv->device, txsize *
+		priv->dma_tx = dma_zalloc_coherent(priv->device, txsize *
 						   sizeof(struct dma_desc),
 						   &priv->dma_tx_phy,
 						   GFP_KERNEL);
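dma_zalloc_coherent() is the zeroing variant of dma_alloc_coherent(), so the descriptor rings start in a known-clean state instead of containing stale memory. A userspace analogy of the same alloc-to-zalloc move, assuming nothing beyond libc:

#include <stdlib.h>
#include <string.h>

struct desc { unsigned int des0, des1, des2, des3; };

int main(void)
{
	size_t n = 256;

	/* zeroed at allocation, like dma_zalloc_coherent() */
	struct desc *ring = calloc(n, sizeof(*ring));

	/* with plain malloc() the ring needs an explicit memset */
	struct desc *ring2 = malloc(n * sizeof(*ring2));
	if (ring2)
		memset(ring2, 0, n * sizeof(*ring2));

	free(ring);
	free(ring2);
	return 0;
}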
@@ -681,6 +681,9 @@ static int xen_register_watchers(struct xenbus_device *dev, struct xenvif *vif)
 	char *node;
 	unsigned maxlen = strlen(dev->nodename) + sizeof("/rate");

+	if (vif->credit_watch.node)
+		return -EADDRINUSE;
+
 	node = kmalloc(maxlen, GFP_KERNEL);
 	if (!node)
 		return -ENOMEM;

@@ -770,6 +773,7 @@ static void connect(struct backend_info *be)
 	}

 	xen_net_read_rate(dev, &credit_bytes, &credit_usec);
+	xen_unregister_watchers(be->vif);
 	xen_register_watchers(dev, be->vif);
 	read_xenbus_vif_flags(be);

@@ -31,6 +31,7 @@ struct netns_sctp {
 	struct list_head addr_waitq;
 	struct timer_list addr_wq_timer;
 	struct list_head auto_asconf_splist;
+	/* Lock that protects both addr_waitq and auto_asconf_splist */
 	spinlock_t addr_wq_lock;

 	/* Lock that protects the local_addr_list writers */

@@ -223,6 +223,10 @@ struct sctp_sock {
 	atomic_t pd_mode;
 	/* Receive to here while partial delivery is in effect. */
 	struct sk_buff_head pd_lobby;
+
+	/* These must be the last fields, as they will skipped on copies,
+	 * like on accept and peeloff operations
+	 */
 	struct list_head auto_asconf_list;
 	int do_auto_asconf;
 };
@@ -247,9 +247,7 @@ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 		if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN))
 			return -EPERM;

-		spin_lock_bh(&br->lock);
 		br_stp_set_bridge_priority(br, args[1]);
-		spin_unlock_bh(&br->lock);
 		return 0;

 	case BRCTL_SET_PORT_PRIORITY:

@@ -37,6 +37,8 @@

 static void br_multicast_start_querier(struct net_bridge *br,
 				       struct bridge_mcast_own_query *query);
+static void br_multicast_add_router(struct net_bridge *br,
+				    struct net_bridge_port *port);
 unsigned int br_mdb_rehash_seq;

 static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b)

@@ -936,6 +938,8 @@ void br_multicast_enable_port(struct net_bridge_port *port)
 #if IS_ENABLED(CONFIG_IPV6)
 	br_multicast_enable(&port->ip6_own_query);
 #endif
+	if (port->multicast_router == 2 && hlist_unhashed(&port->rlist))
+		br_multicast_add_router(br, port);

 out:
 	spin_unlock(&br->multicast_lock);

@@ -243,12 +243,13 @@ bool br_stp_recalculate_bridge_id(struct net_bridge *br)
 	return true;
 }

-/* called under bridge lock */
+/* Acquires and releases bridge lock */
 void br_stp_set_bridge_priority(struct net_bridge *br, u16 newprio)
 {
 	struct net_bridge_port *p;
 	int wasroot;

+	spin_lock_bh(&br->lock);
 	wasroot = br_is_root_bridge(br);

 	list_for_each_entry(p, &br->port_list, list) {

@@ -266,6 +267,7 @@ void br_stp_set_bridge_priority(struct net_bridge *br, u16 newprio)
 	br_port_state_selection(br);
 	if (br_is_root_bridge(br) && !wasroot)
 		br_become_root_bridge(br);
+	spin_unlock_bh(&br->lock);
 }

 /* called under bridge lock */
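Net effect of the bridge STP hunks: ownership of br->lock moved from the ioctl caller into br_stp_set_bridge_priority() itself, and the function's comment changed accordingly. A small pthread sketch of that refactor, purely illustrative:

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int priority;

/* Before: the comment read "called under bridge lock" and every caller
 * had to take the lock first. After: the function owns its locking, so
 * callers cannot forget it or deadlock by taking it twice. */
static void set_bridge_priority(int newprio)
{
	pthread_mutex_lock(&lock);
	priority = newprio;
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	set_bridge_priority(32768); /* illustrative default STP priority */
	return (priority == 32768) ? 0 : 1;
}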
@@ -310,8 +310,12 @@ int can_send(struct sk_buff *skb, int loop)
 			return err;
 		}

-	if (newskb)
+	if (newskb) {
+		if (!(newskb->tstamp.tv64))
+			__net_timestamp(newskb);
+
 		netif_rx_ni(newskb);
+	}

 	/* update statistics */
 	can_stats.tx_frames++;
@@ -958,6 +958,8 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
 	rc = 0;
 	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
 		goto out_unlock_bh;
+	if (neigh->dead)
+		goto out_dead;

 	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
 		if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +

@@ -1014,6 +1016,13 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
 		write_unlock(&neigh->lock);
 	local_bh_enable();
 	return rc;
+
+out_dead:
+	if (neigh->nud_state & NUD_STALE)
+		goto out_unlock_bh;
+	write_unlock_bh(&neigh->lock);
+	kfree_skb(skb);
+	return 1;
 }
 EXPORT_SYMBOL(__neigh_event_send);

@@ -1077,6 +1086,8 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
 	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
 	    (old & (NUD_NOARP | NUD_PERMANENT)))
 		goto out;
+	if (neigh->dead)
+		goto out;

 	if (!(new & NUD_VALID)) {
 		neigh_del_timer(neigh);

@@ -1228,6 +1239,8 @@ EXPORT_SYMBOL(neigh_update);
 */
 void __neigh_set_probe_once(struct neighbour *neigh)
 {
+	if (neigh->dead)
+		return;
 	neigh->updated = jiffies;
 	if (!(neigh->nud_state & NUD_FAILED))
 		return;
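The three neighbour hunks add the same guard: once an entry is marked dead (unlinked from the table), event and update paths bail out rather than re-arming its state machine. A toy sketch of the guard, with illustrative types:

#include <stdbool.h>
#include <stdio.h>

struct neigh_like {
	bool dead;
	int nud_state;
};

static int event_send(struct neigh_like *n)
{
	if (n->dead)       /* new check: refuse work on unlinked entries */
		return 1;  /* drop, as __neigh_event_send() now does */
	n->nud_state = 1;  /* would otherwise arm timers, queue probes */
	return 0;
}

int main(void)
{
	struct neigh_like n = { .dead = true };

	printf("dropped=%d\n", event_send(&n));
	return 0;
}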
@@ -228,6 +228,8 @@ int inet_listen(struct socket *sock, int backlog)
 				err = 0;
 			if (err)
 				goto out;
+
+			tcp_fastopen_init_key_once(true);
 		}
 		err = inet_csk_listen_start(sk, backlog);
 		if (err)
|
|||||||
kfree_skb(skb);
|
kfree_skb(skb);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* For some errors we have valid addr_offset even with zero payload and
|
||||||
|
* zero port. Also, addr_offset should be supported if port is set.
|
||||||
|
*/
|
||||||
|
static inline bool ipv4_datagram_support_addr(struct sock_exterr_skb *serr)
|
||||||
|
{
|
||||||
|
return serr->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
|
||||||
|
serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL || serr->port;
|
||||||
|
}
|
||||||
|
|
||||||
/* IPv4 supports cmsg on all imcp errors and some timestamps
|
/* IPv4 supports cmsg on all imcp errors and some timestamps
|
||||||
*
|
*
|
||||||
* Timestamp code paths do not initialize the fields expected by cmsg:
|
* Timestamp code paths do not initialize the fields expected by cmsg:
|
||||||
@ -498,7 +507,7 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
|
|||||||
|
|
||||||
serr = SKB_EXT_ERR(skb);
|
serr = SKB_EXT_ERR(skb);
|
||||||
|
|
||||||
if (sin && serr->port) {
|
if (sin && ipv4_datagram_support_addr(serr)) {
|
||||||
sin->sin_family = AF_INET;
|
sin->sin_family = AF_INET;
|
||||||
sin->sin_addr.s_addr = *(__be32 *)(skb_network_header(skb) +
|
sin->sin_addr.s_addr = *(__be32 *)(skb_network_header(skb) +
|
||||||
serr->addr_offset);
|
serr->addr_offset);
|
||||||
|
@@ -2573,10 +2573,13 @@ static int do_tcp_setsockopt(struct sock *sk, int level,

 	case TCP_FASTOPEN:
 		if (val >= 0 && ((1 << sk->sk_state) & (TCPF_CLOSE |
-		    TCPF_LISTEN)))
+		    TCPF_LISTEN))) {
+			tcp_fastopen_init_key_once(true);
+
 			err = fastopen_init_queue(sk, val);
-		else
+		} else {
 			err = -EINVAL;
+		}
 		break;
 	case TCP_TIMESTAMP:
 		if (!tp->repair)

@@ -78,8 +78,6 @@ static bool __tcp_fastopen_cookie_gen(const void *path,
 	struct tcp_fastopen_context *ctx;
 	bool ok = false;

-	tcp_fastopen_init_key_once(true);
-
 	rcu_read_lock();
 	ctx = rcu_dereference(tcp_fastopen_ctx);
 	if (ctx) {
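The TCP Fast Open hunks move tcp_fastopen_init_key_once() out of the per-cookie path (which could run in atomic context) and into the two setup points: listen() and setsockopt(TCP_FASTOPEN). For reference, a userspace sketch of the setsockopt path that now triggers key setup; the port, queue length, and missing error handling are illustrative assumptions:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <string.h>
#include <sys/socket.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	int qlen = 5;                /* illustrative TFO queue length */
	struct sockaddr_in addr;

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_port = htons(8080); /* illustrative port */

	/* after this change, the server key is initialized here (or in
	 * listen()) rather than lazily in the cookie generator */
	setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN, &qlen, sizeof(qlen));
	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
	listen(fd, 16);
	return 0;
}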
@@ -325,6 +325,16 @@ void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu)
 	kfree_skb(skb);
 }

+/* For some errors we have valid addr_offset even with zero payload and
+ * zero port. Also, addr_offset should be supported if port is set.
+ */
+static inline bool ipv6_datagram_support_addr(struct sock_exterr_skb *serr)
+{
+	return serr->ee.ee_origin == SO_EE_ORIGIN_ICMP6 ||
+	       serr->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
+	       serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL || serr->port;
+}
+
 /* IPv6 supports cmsg on all origins aside from SO_EE_ORIGIN_LOCAL.
 *
 * At one point, excluding local errors was a quick test to identify icmp/icmp6

@@ -389,7 +399,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)

 	serr = SKB_EXT_ERR(skb);

-	if (sin && serr->port) {
+	if (sin && ipv6_datagram_support_addr(serr)) {
 		const unsigned char *nh = skb_network_header(skb);
 		sin->sin6_family = AF_INET6;
 		sin->sin6_flowinfo = 0;
@@ -66,12 +66,15 @@ update_vlan_tailroom_need_count(struct ieee80211_sub_if_data *sdata, int delta)
 	if (sdata->vif.type != NL80211_IFTYPE_AP)
 		return;

-	mutex_lock(&sdata->local->mtx);
+	/* crypto_tx_tailroom_needed_cnt is protected by this */
+	assert_key_lock(sdata->local);

-	list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
+	rcu_read_lock();
+
+	list_for_each_entry_rcu(vlan, &sdata->u.ap.vlans, u.vlan.list)
 		vlan->crypto_tx_tailroom_needed_cnt += delta;

-	mutex_unlock(&sdata->local->mtx);
+	rcu_read_unlock();
 }

 static void increment_tailroom_need_count(struct ieee80211_sub_if_data *sdata)

@@ -95,6 +98,8 @@ static void increment_tailroom_need_count(struct ieee80211_sub_if_data *sdata)
 	 * http://mid.gmane.org/1308590980.4322.19.camel@jlt3.sipsolutions.net
 	 */

+	assert_key_lock(sdata->local);
+
 	update_vlan_tailroom_need_count(sdata, 1);

 	if (!sdata->crypto_tx_tailroom_needed_cnt++) {

@@ -109,6 +114,8 @@ static void increment_tailroom_need_count(struct ieee80211_sub_if_data *sdata)
 static void decrease_tailroom_need_count(struct ieee80211_sub_if_data *sdata,
 					 int delta)
 {
+	assert_key_lock(sdata->local);
+
 	WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt < delta);

 	update_vlan_tailroom_need_count(sdata, -delta);
@@ -1322,16 +1322,6 @@ static void packet_sock_destruct(struct sock *sk)
 	sk_refcnt_debug_dec(sk);
 }

-static int fanout_rr_next(struct packet_fanout *f, unsigned int num)
-{
-	int x = atomic_read(&f->rr_cur) + 1;
-
-	if (x >= num)
-		x = 0;
-
-	return x;
-}
-
 static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb)
 {
 	u32 rxhash;

@@ -1357,13 +1347,9 @@ static unsigned int fanout_demux_lb(struct packet_fanout *f,
 				    struct sk_buff *skb,
 				    unsigned int num)
 {
-	int cur, old;
+	unsigned int val = atomic_inc_return(&f->rr_cur);

-	cur = atomic_read(&f->rr_cur);
-	while ((old = atomic_cmpxchg(&f->rr_cur, cur,
-				     fanout_rr_next(f, num))) != cur)
-		cur = old;
-	return cur;
+	return val % num;
 }

 static unsigned int fanout_demux_cpu(struct packet_fanout *f,
|
|||||||
struct packet_type *pt, struct net_device *orig_dev)
|
struct packet_type *pt, struct net_device *orig_dev)
|
||||||
{
|
{
|
||||||
struct packet_fanout *f = pt->af_packet_priv;
|
struct packet_fanout *f = pt->af_packet_priv;
|
||||||
unsigned int num = f->num_members;
|
unsigned int num = READ_ONCE(f->num_members);
|
||||||
struct packet_sock *po;
|
struct packet_sock *po;
|
||||||
unsigned int idx;
|
unsigned int idx;
|
||||||
|
|
||||||
|
@@ -192,6 +192,7 @@ static void rose_kill_by_device(struct net_device *dev)

 		if (rose->device == dev) {
 			rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
+			if (rose->neighbour)
 				rose->neighbour->use--;
 			rose->device = NULL;
 		}
@@ -1528,8 +1528,10 @@ static void sctp_close(struct sock *sk, long timeout)

 	/* Supposedly, no process has access to the socket, but
 	 * the net layers still may.
+	 * Also, sctp_destroy_sock() needs to be called with addr_wq_lock
+	 * held and that should be grabbed before socket lock.
 	 */
-	local_bh_disable();
+	spin_lock_bh(&net->sctp.addr_wq_lock);
 	bh_lock_sock(sk);

 	/* Hold the sock, since sk_common_release() will put sock_put()

@@ -1539,7 +1541,7 @@ static void sctp_close(struct sock *sk, long timeout)
 	sk_common_release(sk);

 	bh_unlock_sock(sk);
-	local_bh_enable();
+	spin_unlock_bh(&net->sctp.addr_wq_lock);

 	sock_put(sk);

@@ -3580,6 +3582,7 @@ static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval,
 	if ((val && sp->do_auto_asconf) || (!val && !sp->do_auto_asconf))
 		return 0;

+	spin_lock_bh(&sock_net(sk)->sctp.addr_wq_lock);
 	if (val == 0 && sp->do_auto_asconf) {
 		list_del(&sp->auto_asconf_list);
 		sp->do_auto_asconf = 0;

@@ -3588,6 +3591,7 @@ static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval,
 			      &sock_net(sk)->sctp.auto_asconf_splist);
 		sp->do_auto_asconf = 1;
 	}
+	spin_unlock_bh(&sock_net(sk)->sctp.addr_wq_lock);
 	return 0;
 }

@@ -4121,18 +4125,28 @@ static int sctp_init_sock(struct sock *sk)
 	local_bh_disable();
 	percpu_counter_inc(&sctp_sockets_allocated);
 	sock_prot_inuse_add(net, sk->sk_prot, 1);

+	/* Nothing can fail after this block, otherwise
+	 * sctp_destroy_sock() will be called without addr_wq_lock held
+	 */
 	if (net->sctp.default_auto_asconf) {
+		spin_lock(&sock_net(sk)->sctp.addr_wq_lock);
 		list_add_tail(&sp->auto_asconf_list,
 			      &net->sctp.auto_asconf_splist);
 		sp->do_auto_asconf = 1;
-	} else
+		spin_unlock(&sock_net(sk)->sctp.addr_wq_lock);
+	} else {
 		sp->do_auto_asconf = 0;
+	}

 	local_bh_enable();

 	return 0;
 }

-/* Cleanup any SCTP per socket resources. */
+/* Cleanup any SCTP per socket resources. Must be called with
+ * sock_net(sk)->sctp.addr_wq_lock held if sp->do_auto_asconf is true
+ */
 static void sctp_destroy_sock(struct sock *sk)
 {
 	struct sctp_sock *sp;

@@ -7195,6 +7209,19 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk,
 		newinet->mc_list = NULL;
 }

+static inline void sctp_copy_descendant(struct sock *sk_to,
+					const struct sock *sk_from)
+{
+	int ancestor_size = sizeof(struct inet_sock) +
+			    sizeof(struct sctp_sock) -
+			    offsetof(struct sctp_sock, auto_asconf_list);
+
+	if (sk_from->sk_family == PF_INET6)
+		ancestor_size += sizeof(struct ipv6_pinfo);
+
+	__inet_sk_copy_descendant(sk_to, sk_from, ancestor_size);
+}
+
 /* Populate the fields of the newsk from the oldsk and migrate the assoc
 * and its messages to the newsk.
 */
@@ -7209,7 +7236,6 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
 	struct sk_buff *skb, *tmp;
 	struct sctp_ulpevent *event;
 	struct sctp_bind_hashbucket *head;
-	struct list_head tmplist;

 	/* Migrate socket buffer sizes and all the socket level options to the
 	 * new socket.

@@ -7217,12 +7243,7 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
 	newsk->sk_sndbuf = oldsk->sk_sndbuf;
 	newsk->sk_rcvbuf = oldsk->sk_rcvbuf;
 	/* Brute force copy old sctp opt. */
-	if (oldsp->do_auto_asconf) {
-		memcpy(&tmplist, &newsp->auto_asconf_list, sizeof(tmplist));
-		inet_sk_copy_descendant(newsk, oldsk);
-		memcpy(&newsp->auto_asconf_list, &tmplist, sizeof(tmplist));
-	} else
-		inet_sk_copy_descendant(newsk, oldsk);
+	sctp_copy_descendant(newsk, oldsk);

 	/* Restore the ep value that was overwritten with the above structure
 	 * copy.
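The sctp_sock_migrate() cleanup works because the auto_asconf fields were moved to the end of struct sctp_sock (see the structs.h hunk above), letting sctp_copy_descendant() copy everything up to that boundary with offsetof() and leave the per-socket list linkage untouched. A standalone sketch of that offsetof-bounded copy; the struct below is a simplified stand-in, not the kernel layout:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct sock_like {
	int a, b, c;             /* copied */
	void *auto_asconf_list;  /* skipped: must stay per-socket */
	int do_auto_asconf;      /* skipped */
};

static void copy_descendant(struct sock_like *to, const struct sock_like *from)
{
	/* copy only the leading portion, like the kernel's ancestor_size */
	memcpy(to, from, offsetof(struct sock_like, auto_asconf_list));
}

int main(void)
{
	struct sock_like oldsk = { 1, 2, 3, (void *)&oldsk, 1 };
	struct sock_like newsk = { 0 };

	copy_descendant(&newsk, &oldsk);
	printf("a=%d list=%p asconf=%d\n", newsk.a,
	       newsk.auto_asconf_list, newsk.do_auto_asconf);
	return 0;
}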