Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
This commit is contained in: commit 5a470b1a63
@@ -78,6 +78,7 @@
 
 enum hns_desc_type {
 	DESC_TYPE_SKB,
+	DESC_TYPE_FRAGLIST_SKB,
 	DESC_TYPE_PAGE,
 };
 
@@ -1106,6 +1106,10 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
 		if (unlikely(ret < 0))
 			return ret;
 
+		dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
+	} else if (type == DESC_TYPE_FRAGLIST_SKB) {
+		struct sk_buff *skb = (struct sk_buff *)priv;
+
 		dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
 	} else {
 		frag = (skb_frag_t *)priv;
@@ -1144,8 +1148,9 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
 		/* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
 		desc_cb->priv = priv;
 		desc_cb->dma = dma + HNS3_MAX_BD_SIZE * k;
-		desc_cb->type = (type == DESC_TYPE_SKB && !k) ?
-				DESC_TYPE_SKB : DESC_TYPE_PAGE;
+		desc_cb->type = ((type == DESC_TYPE_FRAGLIST_SKB ||
+				  type == DESC_TYPE_SKB) && !k) ?
+				type : DESC_TYPE_PAGE;
 
 		/* now, fill the descriptor */
 		desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k);
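The reworked ternary above keeps the first buffer descriptor (k == 0) of a linear buffer tagged with the caller's type, which can now be DESC_TYPE_SKB or DESC_TYPE_FRAGLIST_SKB, while continuation descriptors stay DESC_TYPE_PAGE; the cleanup paths later key their unmap calls off this tag. A minimal user-space model of just that selection logic (enum values here are illustrative, not the driver's):

    #include <assert.h>

    enum desc_type { DESC_TYPE_SKB, DESC_TYPE_FRAGLIST_SKB, DESC_TYPE_PAGE };

    /* Mirrors the patched ternary: only the first BD (k == 0) of a
     * linear buffer keeps the SKB/FRAGLIST_SKB tag; continuation BDs
     * are treated as pages. */
    static enum desc_type bd_type(enum desc_type type, int k)
    {
        return ((type == DESC_TYPE_FRAGLIST_SKB ||
                 type == DESC_TYPE_SKB) && !k) ? type : DESC_TYPE_PAGE;
    }

    int main(void)
    {
        assert(bd_type(DESC_TYPE_SKB, 0) == DESC_TYPE_SKB);
        assert(bd_type(DESC_TYPE_FRAGLIST_SKB, 0) == DESC_TYPE_FRAGLIST_SKB);
        assert(bd_type(DESC_TYPE_SKB, 1) == DESC_TYPE_PAGE); /* continuation BD */
        assert(bd_type(DESC_TYPE_PAGE, 0) == DESC_TYPE_PAGE);
        return 0;
    }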
@@ -1354,7 +1359,9 @@ static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
 		ring_ptr_move_bw(ring, next_to_use);
 
 		/* unmap the descriptor dma address */
-		if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB)
+		if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB ||
+		    ring->desc_cb[ring->next_to_use].type ==
+		    DESC_TYPE_FRAGLIST_SKB)
 			dma_unmap_single(dev,
 					 ring->desc_cb[ring->next_to_use].dma,
 					 ring->desc_cb[ring->next_to_use].length,
@@ -1447,7 +1454,8 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
 			goto out;
 
 		skb_walk_frags(skb, frag_skb) {
-			ret = hns3_fill_skb_to_desc(ring, frag_skb, DESC_TYPE_PAGE);
+			ret = hns3_fill_skb_to_desc(ring, frag_skb,
+						    DESC_TYPE_FRAGLIST_SKB);
 			if (unlikely(ret < 0))
 				goto fill_err;
 
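skb_walk_frags() iterates the frag_list of a fraglist GSO skb, where each fragment is itself a complete sk_buff whose linear data was mapped with dma_map_single(); tagging the fragments DESC_TYPE_FRAGLIST_SKB instead of DESC_TYPE_PAGE lets the unmap paths pick dma_unmap_single() for them. A toy model of that frag_list walk (names hypothetical, not the kernel's structures):

    #include <assert.h>
    #include <stddef.h>

    /* toy stand-in for sk_buff's frag_list chaining */
    struct buf { struct buf *next; int len; };
    struct head { struct buf *frags; };

    /* models skb_walk_frags(skb, iter): iterate the head's frag_list */
    #define walk_frags(head, iter) \
        for ((iter) = (head)->frags; (iter); (iter) = (iter)->next)

    int main(void)
    {
        struct buf b2 = { NULL, 200 }, b1 = { &b2, 100 };
        struct head h = { &b1 };
        struct buf *it;
        int total = 0;

        walk_frags(&h, it)
            total += it->len; /* each fragment is a full buffer */
        assert(total == 300);
        return 0;
    }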
@@ -2356,7 +2364,7 @@ static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb)
 static void hns3_unmap_buffer(struct hns3_enet_ring *ring,
 			      struct hns3_desc_cb *cb)
 {
-	if (cb->type == DESC_TYPE_SKB)
+	if (cb->type == DESC_TYPE_SKB || cb->type == DESC_TYPE_FRAGLIST_SKB)
 		dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
 				 ring_to_dma_dir(ring));
 	else if (cb->length)
@@ -6768,7 +6768,7 @@ static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
 	struct hclge_dev *hdev = vport->back;
 
 	if (enable) {
-		hclge_task_schedule(hdev, round_jiffies_relative(HZ));
+		hclge_task_schedule(hdev, 0);
 	} else {
 		/* Set the DOWN flag here to disable link updating */
 		set_bit(HCLGE_STATE_DOWN, &hdev->state);
@@ -8986,6 +8986,12 @@ static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
 	struct hclge_vport *vport = hclge_get_vport(handle);
 	struct hclge_dev *hdev = vport->back;
 
+	/* When nic is down, the service task is not running, doesn't update
+	 * the port information per second. Query the port information before
+	 * return the media type, ensure getting the correct media information.
+	 */
+	hclge_update_port_info(hdev);
+
 	if (media_type)
 		*media_type = hdev->hw.mac.media_type;
 
@@ -10674,7 +10680,7 @@ static int hclge_init(void)
 {
 	pr_info("%s is initializing\n", HCLGE_NAME);
 
-	hclge_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, HCLGE_NAME);
+	hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
 	if (!hclge_wq) {
 		pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
 		return -ENOMEM;
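This hunk (and the VF variant further down) drops WQ_MEM_RECLAIM, presumably because the hns3 service task is not on the memory-reclaim path and flushing a WQ_MEM_RECLAIM workqueue from non-reclaim work trips a workqueue warning; the hclge_set_timer_task() change above likewise schedules the first service-task run with zero delay instead of waiting up to a rounded second. A minimal module-style sketch of the resulting pattern (demo names, not the driver's):

    #include <linux/module.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *demo_wq;
    static struct delayed_work demo_task;

    static void demo_task_fn(struct work_struct *work)
    {
        /* periodic service work; re-arm one second out */
        queue_delayed_work(demo_wq, &demo_task, round_jiffies_relative(HZ));
    }

    static int __init demo_init(void)
    {
        /* no WQ_MEM_RECLAIM: this work never runs under memory reclaim */
        demo_wq = alloc_workqueue("%s", 0, 0, "demo");
        if (!demo_wq)
            return -ENOMEM;

        INIT_DELAYED_WORK(&demo_task, demo_task_fn);
        queue_delayed_work(demo_wq, &demo_task, 0); /* first run immediately */
        return 0;
    }

    static void __exit demo_exit(void)
    {
        cancel_delayed_work_sync(&demo_task);
        destroy_workqueue(demo_wq);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");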
@@ -2149,49 +2149,50 @@ static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en)
 	return ret;
 }
 
-static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
+static void hclgevf_rss_init_cfg(struct hclgevf_dev *hdev)
 {
 	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
-	int ret;
+	struct hclgevf_rss_tuple_cfg *tuple_sets;
 	u32 i;
 
+	rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
 	rss_cfg->rss_size = hdev->nic.kinfo.rss_size;
-
+	tuple_sets = &rss_cfg->rss_tuple_sets;
 	if (hdev->pdev->revision >= 0x21) {
 		rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE;
 		memcpy(rss_cfg->rss_hash_key, hclgevf_hash_key,
 		       HCLGEVF_RSS_KEY_SIZE);
 
-		ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
-					       rss_cfg->rss_hash_key);
-		if (ret)
-			return ret;
-
-		rss_cfg->rss_tuple_sets.ipv4_tcp_en =
-					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
-		rss_cfg->rss_tuple_sets.ipv4_udp_en =
-					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
-		rss_cfg->rss_tuple_sets.ipv4_sctp_en =
-					HCLGEVF_RSS_INPUT_TUPLE_SCTP;
-		rss_cfg->rss_tuple_sets.ipv4_fragment_en =
-					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
-		rss_cfg->rss_tuple_sets.ipv6_tcp_en =
-					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
-		rss_cfg->rss_tuple_sets.ipv6_udp_en =
-					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
-		rss_cfg->rss_tuple_sets.ipv6_sctp_en =
-					HCLGEVF_RSS_INPUT_TUPLE_SCTP;
-		rss_cfg->rss_tuple_sets.ipv6_fragment_en =
-					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
-
-		ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg);
-		if (ret)
-			return ret;
+		tuple_sets->ipv4_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+		tuple_sets->ipv4_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+		tuple_sets->ipv4_sctp_en = HCLGEVF_RSS_INPUT_TUPLE_SCTP;
+		tuple_sets->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+		tuple_sets->ipv6_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+		tuple_sets->ipv6_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+		tuple_sets->ipv6_sctp_en = HCLGEVF_RSS_INPUT_TUPLE_SCTP;
+		tuple_sets->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
 	}
 
 	/* Initialize RSS indirect table */
 	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
 		rss_cfg->rss_indirection_tbl[i] = i % rss_cfg->rss_size;
+}
+
+static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
+{
+	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+	int ret;
+
+	if (hdev->pdev->revision >= 0x21) {
+		ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
+					       rss_cfg->rss_hash_key);
+		if (ret)
+			return ret;
+
+		ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg);
+		if (ret)
+			return ret;
+	}
 
 	ret = hclgevf_set_rss_indir_table(hdev);
 	if (ret)
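Splitting hclgevf_rss_init_cfg(), which fills the software RSS state, from hclgevf_rss_init_hw(), which pushes that state to hardware, means a later re-initialisation can re-apply the saved configuration instead of reverting to defaults. The indirection table itself just spreads entries round-robin across the active queues; a small user-space check of that fill (the table size here is illustrative):

    #include <stdio.h>

    #define RSS_IND_TBL_SIZE 512 /* hypothetical; HCLGEVF_RSS_IND_TBL_SIZE in the driver */

    int main(void)
    {
        unsigned char tbl[RSS_IND_TBL_SIZE];
        unsigned int rss_size = 8; /* number of active queues */
        unsigned int i, hist[8] = {0};

        /* Same fill the driver uses: spread table entries evenly
         * across the queues, round-robin. */
        for (i = 0; i < RSS_IND_TBL_SIZE; i++)
            tbl[i] = i % rss_size;

        for (i = 0; i < RSS_IND_TBL_SIZE; i++)
            hist[tbl[i]]++;
        for (i = 0; i < rss_size; i++)
            printf("queue %u: %u entries\n", i, hist[i]);
        return 0;
    }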
@@ -2793,6 +2794,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
 		goto err_config;
 
 	/* Initialize RSS for this VF */
+	hclgevf_rss_init_cfg(hdev);
 	ret = hclgevf_rss_init_hw(hdev);
 	if (ret) {
 		dev_err(&hdev->pdev->dev,
@@ -2967,6 +2969,8 @@ static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
 	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
 		rss_indir[i] = i % kinfo->rss_size;
 
+	hdev->rss_cfg.rss_size = kinfo->rss_size;
+
 	ret = hclgevf_set_rss(handle, rss_indir, NULL, 0);
 	if (ret)
 		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
@@ -3220,7 +3224,7 @@ static int hclgevf_init(void)
 {
 	pr_info("%s is initializing\n", HCLGEVF_NAME);
 
-	hclgevf_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, HCLGEVF_NAME);
+	hclgevf_wq = alloc_workqueue("%s", 0, 0, HCLGEVF_NAME);
 	if (!hclgevf_wq) {
 		pr_err("%s: failed to create workqueue\n", HCLGEVF_NAME);
 		return -ENOMEM;
@@ -3668,6 +3668,7 @@ struct sk_buff *skb_segment_list(struct sk_buff *skb,
 
 		skb_push(nskb, -skb_network_offset(nskb) + offset);
 
+		skb_release_head_state(nskb);
 		__copy_skb_header(nskb, skb);
 
 		skb_headers_offset_update(nskb, skb_headroom(nskb) - skb_headroom(skb));
@@ -2572,6 +2572,7 @@ static int fib_triestat_seq_show(struct seq_file *seq, void *v)
 		   " %zd bytes, size of tnode: %zd bytes.\n",
 		   LEAF_SIZE, TNODE_SIZE(0));
 
+	rcu_read_lock();
 	for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
 		struct hlist_head *head = &net->ipv4.fib_table_hash[h];
 		struct fib_table *tb;
@@ -2591,7 +2592,9 @@ static int fib_triestat_seq_show(struct seq_file *seq, void *v)
 			trie_show_usage(seq, t->stats);
 #endif
 		}
+		cond_resched_rcu();
 	}
+	rcu_read_unlock();
 
 	return 0;
 }
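The two fib_trie hunks above wrap the whole /proc walk in an explicit rcu_read_lock()/rcu_read_unlock() pair and yield between hash buckets with cond_resched_rcu(), which briefly drops the RCU read lock and reschedules if needed, so a huge trie cannot stall the CPU. A sketch of the shape as kernel code (the walker and its callback are hypothetical):

    #include <linux/rcupdate.h>
    #include <linux/sched.h>

    /* Hypothetical walker showing the shape of the fix: hold the RCU
     * read lock across the walk, but yield between buckets so a large
     * table cannot hog the CPU. */
    static void walk_buckets(int nbuckets, void (*show)(int))
    {
        int h;

        rcu_read_lock();
        for (h = 0; h < nbuckets; h++) {
            show(h);            /* inspect one RCU-protected bucket */
            cond_resched_rcu(); /* drop/reacquire RCU lock, maybe resched */
        }
        rcu_read_unlock();
    }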
@@ -142,11 +142,8 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
 			cand = t;
 	}
 
-	if (flags & TUNNEL_NO_KEY)
-		goto skip_key_lookup;
-
 	hlist_for_each_entry_rcu(t, head, hash_node) {
-		if (t->parms.i_key != key ||
+		if ((!(flags & TUNNEL_NO_KEY) && t->parms.i_key != key) ||
 		    t->parms.iph.saddr != 0 ||
 		    t->parms.iph.daddr != 0 ||
 		    !(t->dev->flags & IFF_UP))
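The rewrite folds the old TUNNEL_NO_KEY early exit into the per-entry test: when the flag is set the i_key comparison is simply skipped, so keyless lookups still scan the wildcard-address entries this loop covers instead of jumping past them. A quick exhaustive check that the new predicate matches the old one except for ignoring the key under TUNNEL_NO_KEY (the flag value here is made up; the real one lives in the uapi headers):

    #include <assert.h>
    #include <stdbool.h>

    #define TUNNEL_NO_KEY 0x1 /* hypothetical bit for the demo */

    int main(void)
    {
        unsigned int flags, key, i_key;

        /* old predicate rejected on key mismatch unconditionally;
         * new one ignores the key when TUNNEL_NO_KEY is set. */
        for (flags = 0; flags <= TUNNEL_NO_KEY; flags++)
            for (key = 0; key < 2; key++)
                for (i_key = 0; i_key < 2; i_key++) {
                    bool old_skip = (i_key != key);
                    bool new_skip = !(flags & TUNNEL_NO_KEY) && i_key != key;

                    if (flags & TUNNEL_NO_KEY)
                        assert(!new_skip); /* key ignored */
                    else
                        assert(new_skip == old_skip);
                }
        return 0;
    }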
@@ -158,7 +155,6 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
 			cand = t;
 	}
 
-skip_key_lookup:
 	if (cand)
 		return cand;
 
@@ -453,6 +453,7 @@ struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
 	unsigned int off = skb_gro_offset(skb);
 	int flush = 1;
 
+	NAPI_GRO_CB(skb)->is_flist = 0;
 	if (skb->dev->features & NETIF_F_GRO_FRAGLIST)
 		NAPI_GRO_CB(skb)->is_flist = sk ? !udp_sk(sk)->gro_enabled: 1;
 
@@ -3611,7 +3611,8 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
 	 * Drop unicast frames to unauthorised stations unless they are
 	 * EAPOL frames from the local station.
 	 */
-	if (unlikely(!ieee80211_vif_is_mesh(&tx.sdata->vif) &&
+	if (unlikely(ieee80211_is_data(hdr->frame_control) &&
+		     !ieee80211_vif_is_mesh(&tx.sdata->vif) &&
 		     tx.sdata->vif.type != NL80211_IFTYPE_OCB &&
 		     !is_multicast_ether_addr(hdr->addr1) &&
 		     !test_sta_flag(tx.sta, WLAN_STA_AUTHORIZED) &&
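ieee80211_is_data() tests the frame-type bits of frame_control, so the unauthorised-station drop above now applies only to data frames rather than, say, management frames needed to complete authentication. A user-space model of that test (the constants follow the 802.11 frame-control layout; the real helper compares little-endian values):

    #include <assert.h>
    #include <stdint.h>

    #define FCTL_FTYPE 0x000c /* frame-type field of frame_control */
    #define FTYPE_DATA 0x0008
    #define FTYPE_MGMT 0x0000

    /* Models ieee80211_is_data(): true only for data-type frames. */
    static int is_data(uint16_t fc)
    {
        return (fc & FCTL_FTYPE) == FTYPE_DATA;
    }

    int main(void)
    {
        assert(is_data(FTYPE_DATA));
        assert(!is_data(FTYPE_MGMT)); /* mgmt frames (e.g. auth) don't match */
        return 0;
    }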
@@ -228,7 +228,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
 {
 	struct sctp_association *asoc = t->asoc;
 	struct dst_entry *dst = NULL;
-	struct flowi6 *fl6 = &fl->u.ip6;
+	struct flowi _fl;
+	struct flowi6 *fl6 = &_fl.u.ip6;
 	struct sctp_bind_addr *bp;
 	struct ipv6_pinfo *np = inet6_sk(sk);
 	struct sctp_sockaddr_entry *laddr;
@@ -238,7 +239,7 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
 	enum sctp_scope scope;
 	__u8 matchlen = 0;
 
-	memset(fl6, 0, sizeof(struct flowi6));
+	memset(&_fl, 0, sizeof(_fl));
 	fl6->daddr = daddr->v6.sin6_addr;
 	fl6->fl6_dport = daddr->v6.sin6_port;
 	fl6->flowi6_proto = IPPROTO_SCTP;
@@ -276,8 +277,11 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
 	rcu_read_unlock();
 
 	dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p);
-	if (!asoc || saddr)
+	if (!asoc || saddr) {
+		t->dst = dst;
+		memcpy(fl, &_fl, sizeof(_fl));
 		goto out;
+	}
 
 	bp = &asoc->base.bind_addr;
 	scope = sctp_scope(daddr);
@@ -300,6 +304,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
 			if ((laddr->a.sa.sa_family == AF_INET6) &&
 			    (sctp_v6_cmp_addr(&dst_saddr, &laddr->a))) {
 				rcu_read_unlock();
+				t->dst = dst;
+				memcpy(fl, &_fl, sizeof(_fl));
 				goto out;
 			}
 		}
@@ -338,6 +344,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
 			if (!IS_ERR_OR_NULL(dst))
 				dst_release(dst);
 			dst = bdst;
+			t->dst = dst;
+			memcpy(fl, &_fl, sizeof(_fl));
 			break;
 		}
 
@@ -351,6 +359,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
 			dst_release(dst);
 		dst = bdst;
 		matchlen = bmatchlen;
+		t->dst = dst;
+		memcpy(fl, &_fl, sizeof(_fl));
 	}
 	rcu_read_unlock();
 
@@ -359,14 +369,12 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
 		struct rt6_info *rt;
 
 		rt = (struct rt6_info *)dst;
-		t->dst = dst;
 		t->dst_cookie = rt6_get_cookie(rt);
 		pr_debug("rt6_dst:%pI6/%d rt6_src:%pI6\n",
 			 &rt->rt6i_dst.addr, rt->rt6i_dst.plen,
-			 &fl6->saddr);
+			 &fl->u.ip6.saddr);
 	} else {
 		t->dst = NULL;
-
 		pr_debug("no route\n");
 	}
 }
@@ -409,7 +409,8 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
 {
 	struct sctp_association *asoc = t->asoc;
 	struct rtable *rt;
-	struct flowi4 *fl4 = &fl->u.ip4;
+	struct flowi _fl;
+	struct flowi4 *fl4 = &_fl.u.ip4;
 	struct sctp_bind_addr *bp;
 	struct sctp_sockaddr_entry *laddr;
 	struct dst_entry *dst = NULL;
@@ -419,7 +420,7 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
 
 	if (t->dscp & SCTP_DSCP_SET_MASK)
 		tos = t->dscp & SCTP_DSCP_VAL_MASK;
-	memset(fl4, 0x0, sizeof(struct flowi4));
+	memset(&_fl, 0x0, sizeof(_fl));
 	fl4->daddr = daddr->v4.sin_addr.s_addr;
 	fl4->fl4_dport = daddr->v4.sin_port;
 	fl4->flowi4_proto = IPPROTO_SCTP;
@@ -438,8 +439,11 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
 		 &fl4->saddr);
 
 	rt = ip_route_output_key(sock_net(sk), fl4);
-	if (!IS_ERR(rt))
+	if (!IS_ERR(rt)) {
 		dst = &rt->dst;
+		t->dst = dst;
+		memcpy(fl, &_fl, sizeof(_fl));
+	}
 
 	/* If there is no association or if a source address is passed, no
 	 * more validation is required.
@@ -502,27 +506,33 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
 		odev = __ip_dev_find(sock_net(sk), laddr->a.v4.sin_addr.s_addr,
 				     false);
 		if (!odev || odev->ifindex != fl4->flowi4_oif) {
-			if (!dst)
+			if (!dst) {
 				dst = &rt->dst;
-			else
+				t->dst = dst;
+				memcpy(fl, &_fl, sizeof(_fl));
+			} else {
 				dst_release(&rt->dst);
+			}
 			continue;
 		}
 
 		dst_release(dst);
 		dst = &rt->dst;
+		t->dst = dst;
+		memcpy(fl, &_fl, sizeof(_fl));
 		break;
 	}
 
 out_unlock:
 	rcu_read_unlock();
 out:
-	t->dst = dst;
-	if (dst)
+	if (dst) {
 		pr_debug("rt_dst:%pI4, rt_src:%pI4\n",
-			 &fl4->daddr, &fl4->saddr);
-	else
+			 &fl->u.ip4.daddr, &fl->u.ip4.saddr);
+	} else {
+		t->dst = NULL;
 		pr_debug("no route\n");
+	}
 }
 
 /* For v4, the source address is cached in the route entry(dst). So no need
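All of the sctp_v*_get_dst() hunks follow one pattern: the flow is built in an on-stack struct flowi (_fl) and copied into the caller's fl only at the points where a route is actually accepted, together with t->dst, so a failed or abandoned lookup can no longer leave t->dst and the flow describing different routes. A user-space model of that commit-on-success shape (struct names hypothetical):

    #include <stdio.h>
    #include <string.h>

    struct flow { int daddr, saddr; };
    struct transport { struct flow fl; void *dst; };

    /* Build into a scratch flow; publish to the transport only when a
     * route was found, keeping the flow and dst consistent. */
    static void get_dst(struct transport *t, int daddr, void *route)
    {
        struct flow _fl;

        memset(&_fl, 0, sizeof(_fl));
        _fl.daddr = daddr;

        if (route) { /* lookup succeeded */
            t->dst = route;
            memcpy(&t->fl, &_fl, sizeof(_fl));
        } else {
            t->dst = NULL; /* scratch _fl is discarded */
        }
    }

    int main(void)
    {
        struct transport t = { .fl = { .daddr = 1 } };

        get_dst(&t, 2, NULL);
        printf("dst=%p daddr=%d (old flow kept)\n", t.dst, t.fl.daddr);
        return 0;
    }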
@@ -147,29 +147,44 @@ static void sctp_clear_owner_w(struct sctp_chunk *chunk)
 	skb_orphan(chunk->skb);
 }
 
+#define traverse_and_process()	\
+do {				\
+	msg = chunk->msg;	\
+	if (msg == prev_msg)	\
+		continue;	\
+	list_for_each_entry(c, &msg->chunks, frag_list) {	\
+		if ((clear && asoc->base.sk == c->skb->sk) ||	\
+		    (!clear && asoc->base.sk != c->skb->sk))	\
+			cb(c);	\
+	}			\
+	prev_msg = msg;		\
+} while (0)
+
 static void sctp_for_each_tx_datachunk(struct sctp_association *asoc,
+				       bool clear,
 				       void (*cb)(struct sctp_chunk *))
 
 {
+	struct sctp_datamsg *msg, *prev_msg = NULL;
 	struct sctp_outq *q = &asoc->outqueue;
+	struct sctp_chunk *chunk, *c;
 	struct sctp_transport *t;
-	struct sctp_chunk *chunk;
 
 	list_for_each_entry(t, &asoc->peer.transport_addr_list, transports)
 		list_for_each_entry(chunk, &t->transmitted, transmitted_list)
-			cb(chunk);
+			traverse_and_process();
 
 	list_for_each_entry(chunk, &q->retransmit, transmitted_list)
-		cb(chunk);
+		traverse_and_process();
 
 	list_for_each_entry(chunk, &q->sacked, transmitted_list)
-		cb(chunk);
+		traverse_and_process();
 
 	list_for_each_entry(chunk, &q->abandoned, transmitted_list)
-		cb(chunk);
+		traverse_and_process();
 
 	list_for_each_entry(chunk, &q->out_chunk_list, list)
-		cb(chunk);
+		traverse_and_process();
 }
 
 static void sctp_for_each_rx_skb(struct sctp_association *asoc, struct sock *sk,
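traverse_and_process() relies on chunks of one sctp_datamsg sitting adjacently on each list: the prev_msg check skips repeats, and the inner list_for_each_entry() then visits every chunk of the message once, calling cb() only where skb ownership matches the clear/set direction, so ownership is switched exactly once per chunk. A user-space model of the skip-adjacent-duplicates walk (toy types):

    #include <assert.h>

    struct msg { int id; };

    int main(void)
    {
        /* chunks listed in transmit order; consecutive chunks of one
         * message appear back to back, as in an SCTP outqueue. */
        struct msg a = {1}, b = {2};
        struct msg *chunk_msgs[] = { &a, &a, &a, &b, &b };
        struct msg *prev_msg = NULL;
        int processed = 0, i;

        for (i = 0; i < 5; i++) {
            struct msg *msg = chunk_msgs[i];

            if (msg == prev_msg) /* same datamsg: already handled */
                continue;
            processed++;         /* process msg->chunks once */
            prev_msg = msg;
        }
        assert(processed == 2);
        return 0;
    }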
@@ -9574,9 +9589,9 @@ static int sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
 	 * paths won't try to lock it and then oldsk.
 	 */
 	lock_sock_nested(newsk, SINGLE_DEPTH_NESTING);
-	sctp_for_each_tx_datachunk(assoc, sctp_clear_owner_w);
+	sctp_for_each_tx_datachunk(assoc, true, sctp_clear_owner_w);
 	sctp_assoc_migrate(assoc, newsk);
-	sctp_for_each_tx_datachunk(assoc, sctp_set_owner_w);
+	sctp_for_each_tx_datachunk(assoc, false, sctp_set_owner_w);
 
 	/* If the association on the newsk is already closed before accept()
 	 * is called, set RCV_SHUTDOWN flag.