net: vlan: add protocol argument to packet tagging functions
Add a protocol argument to the VLAN packet tagging functions. In case of HW tagging, we need that protocol available in the ndo_start_xmit functions, so it is stored in a new field in the skb. The new field fits into a hole (on 64 bit) and doesn't increase the skb's size.

Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 1fd9b1fc31
commit 86a9bad3ab
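To make the API change concrete before the diff itself, here is a minimal, self-contained userspace sketch (not kernel code; the struct is reduced to the two skb fields this commit touches, and the helper name is a stand-in) of what the hardware-accelerated tagging path now records: the TPID travels with the skb alongside the TCI, so ndo_start_xmit can later tell which encapsulation protocol the hardware should emit.

/* Minimal userspace sketch of the new tagging semantics; not kernel code.
 * Only the two skb fields this commit touches are modelled. */
#include <arpa/inet.h>   /* htons(), ntohs() */
#include <stdint.h>
#include <stdio.h>

#define ETH_P_8021Q      0x8100  /* 802.1Q TPID */
#define VLAN_TAG_PRESENT 0x1000

struct sk_buff {
	uint16_t vlan_proto; /* network-order TPID, the field added here */
	uint16_t vlan_tci;   /* VLAN_TAG_PRESENT | tag control information */
};

/* mirrors what __vlan_hwaccel_put_tag() does after this change */
static struct sk_buff *put_hwaccel_tag(struct sk_buff *skb,
				       uint16_t vlan_proto, uint16_t vlan_tci)
{
	skb->vlan_proto = vlan_proto;
	skb->vlan_tci = VLAN_TAG_PRESENT | vlan_tci;
	return skb;
}

int main(void)
{
	struct sk_buff skb = { 0, 0 };

	/* what a driver RX path now does when it strips an 802.1Q tag */
	put_hwaccel_tag(&skb, htons(ETH_P_8021Q), 42);
	printf("proto=0x%04x tci=0x%04x\n", ntohs(skb.vlan_proto), skb.vlan_tci);
	return 0;
}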
@@ -2948,7 +2948,7 @@ void nes_nic_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
 nes_debug(NES_DBG_CQ, "%s: Reporting stripped VLAN packet. Tag = 0x%04X\n",
 nesvnic->netdev->name, vlan_tag);

-__vlan_hwaccel_put_tag(rx_skb, vlan_tag);
+__vlan_hwaccel_put_tag(rx_skb, htons(ETH_P_8021Q), vlan_tag);
 }
 if (nes_use_lro)
 lro_receive_skb(&nesvnic->lro_mgr, rx_skb, NULL);
@@ -514,7 +514,7 @@ static void rlb_update_client(struct rlb_client_info *client_info)
 skb->dev = client_info->slave->dev;

 if (client_info->tag) {
-skb = vlan_put_tag(skb, client_info->vlan_id);
+skb = vlan_put_tag(skb, htons(ETH_P_8021Q), client_info->vlan_id);
 if (!skb) {
 pr_err("%s: Error: failed to insert VLAN tag\n",
 client_info->slave->bond->dev->name);
@@ -1014,7 +1014,7 @@ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[])
 continue;
 }

-skb = vlan_put_tag(skb, vlan->vlan_id);
+skb = vlan_put_tag(skb, htons(ETH_P_8021Q), vlan->vlan_id);
 if (!skb) {
 pr_err("%s: Error: failed to insert VLAN tag\n",
 bond->dev->name);
@@ -1690,7 +1690,7 @@ typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile __le32 * read
 skb_checksum_none_assert(new_skb);

 if (rx->rxStatus & TYPHOON_RX_VLAN)
-__vlan_hwaccel_put_tag(new_skb,
+__vlan_hwaccel_put_tag(new_skb, htons(ETH_P_8021Q),
 ntohl(rx->vlanTag) & 0xffff);
 netif_receive_skb(new_skb);

@@ -1498,7 +1498,7 @@ static int __netdev_rx(struct net_device *dev, int *quota)
 printk(KERN_DEBUG " netdev_rx() vlanid = %d\n",
 vlid);
 }
-__vlan_hwaccel_put_tag(skb, vlid);
+__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlid);
 }
 #endif /* VLAN_SUPPORT */
 netif_receive_skb(skb);
@@ -2019,7 +2019,7 @@ static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm)

 /* send it up */
 if ((bd_flags & BD_FLG_VLAN_TAG))
-__vlan_hwaccel_put_tag(skb, retdesc->vlan);
+__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), retdesc->vlan);
 netif_rx(skb);

 dev->stats.rx_packets++;
@@ -793,7 +793,7 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
 #if AMD8111E_VLAN_TAG_USED
 if (vtag == TT_VLAN_TAGGED){
 u16 vlan_tag = le16_to_cpu(lp->rx_ring[rx_index].tag_ctrl_info);
-__vlan_hwaccel_put_tag(skb, vlan_tag);
+__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
 }
 #endif
 netif_receive_skb(skb);
@@ -1809,7 +1809,7 @@ static void atl1c_clean_rx_irq(struct atl1c_adapter *adapter,

 AT_TAG_TO_VLAN(rrs->vlan_tag, vlan);
 vlan = le16_to_cpu(vlan);
-__vlan_hwaccel_put_tag(skb, vlan);
+__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan);
 }
 netif_receive_skb(skb);

@@ -1435,7 +1435,7 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
 netdev_dbg(netdev,
 "RXD VLAN TAG<RRD>=0x%04x\n",
 prrs->vtag);
-__vlan_hwaccel_put_tag(skb, vlan_tag);
+__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
 }
 netif_receive_skb(skb);

@@ -2024,7 +2024,7 @@ static int atl1_intr_rx(struct atl1_adapter *adapter, int budget)
 ((rrd->vlan_tag & 7) << 13) |
 ((rrd->vlan_tag & 8) << 9);

-__vlan_hwaccel_put_tag(skb, vlan_tag);
+__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
 }
 netif_receive_skb(skb);

@@ -452,7 +452,7 @@ static void atl2_intr_rx(struct atl2_adapter *adapter)
 ((rxd->status.vtag&7) << 13) |
 ((rxd->status.vtag&8) << 9);

-__vlan_hwaccel_put_tag(skb, vlan_tag);
+__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
 }
 netif_rx(skb);
 netdev->stats.rx_bytes += rx_size;
@@ -3211,7 +3211,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 }
 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
 !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
-__vlan_hwaccel_put_tag(skb, rx_hdr->l2_fhdr_vlan_tag);
+__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rx_hdr->l2_fhdr_vlan_tag);

 skb->protocol = eth_type_trans(skb, bp->dev);

@@ -719,7 +719,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
 skb, cqe, cqe_idx)) {
 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
-__vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
+__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
 bnx2x_gro_receive(bp, fp, skb);
 } else {
 DP(NETIF_MSG_RX_STATUS,
@@ -994,7 +994,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)

 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
 PARSING_FLAGS_VLAN)
-__vlan_hwaccel_put_tag(skb,
+__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
 le16_to_cpu(cqe_fp->vlan_tag));
 napi_gro_receive(&fp->napi, skb);

@@ -6715,7 +6715,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)

 if (desc->type_flags & RXD_FLAG_VLAN &&
 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
-__vlan_hwaccel_put_tag(skb,
+__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
 desc->err_vlan & RXD_VLAN_MASK);

 napi_gro_receive(&tnapi->napi, skb);
@@ -610,7 +610,7 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
 rcb->rxq->rx_bytes += length;

 if (flags & BNA_CQ_EF_VLAN)
-__vlan_hwaccel_put_tag(skb, ntohs(cmpl->vlan_tag));
+__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cmpl->vlan_tag));

 if (BNAD_RXBUF_IS_PAGE(unmap_q->type))
 napi_gro_frags(&rx_ctrl->napi);
@@ -1386,7 +1386,7 @@ static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)

 if (p->vlan_valid) {
 st->vlan_xtract++;
-__vlan_hwaccel_put_tag(skb, ntohs(p->vlan));
+__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(p->vlan));
 }
 netif_receive_skb(skb);
 }
@@ -2030,7 +2030,7 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,

 if (p->vlan_valid) {
 qs->port_stats[SGE_PSTAT_VLANEX]++;
-__vlan_hwaccel_put_tag(skb, ntohs(p->vlan));
+__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(p->vlan));
 }
 if (rq->polling) {
 if (lro)
@@ -2132,7 +2132,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,

 if (cpl->vlan_valid) {
 qs->port_stats[SGE_PSTAT_VLANEX]++;
-__vlan_hwaccel_put_tag(skb, ntohs(cpl->vlan));
+__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cpl->vlan));
 }
 napi_gro_frags(&qs->napi);
 }
@@ -1633,7 +1633,7 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
 skb->rxhash = (__force u32)pkt->rsshdr.hash_val;

 if (unlikely(pkt->vlan_ex)) {
-__vlan_hwaccel_put_tag(skb, ntohs(pkt->vlan));
+__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
 rxq->stats.vlan_ex++;
 }
 ret = napi_gro_frags(&rxq->rspq.napi);
@@ -1705,7 +1705,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
 skb_checksum_none_assert(skb);

 if (unlikely(pkt->vlan_ex)) {
-__vlan_hwaccel_put_tag(skb, ntohs(pkt->vlan));
+__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
 rxq->stats.vlan_ex++;
 }
 netif_receive_skb(skb);
@@ -1482,7 +1482,8 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
 skb_record_rx_queue(skb, rxq->rspq.idx);

 if (pkt->vlan_ex) {
-__vlan_hwaccel_put_tag(skb, be16_to_cpu(pkt->vlan));
+__vlan_hwaccel_put_tag(skb, cpu_to_be16(ETH_P_8021Q),
+be16_to_cpu(pkt->vlan));
 rxq->stats.vlan_ex++;
 }
 ret = napi_gro_frags(&rxq->rspq.napi);
@@ -1551,7 +1552,7 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,

 if (pkt->vlan_ex) {
 rxq->stats.vlan_ex++;
-__vlan_hwaccel_put_tag(skb, be16_to_cpu(pkt->vlan));
+__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(pkt->vlan));
 }

 netif_receive_skb(skb);
@@ -1300,7 +1300,7 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
 }

 if (vlan_stripped)
-__vlan_hwaccel_put_tag(skb, vlan_tci);
+__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);

 if (netdev->features & NETIF_F_GRO)
 napi_gro_receive(&enic->napi[q_number], skb);
@@ -771,7 +771,7 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,

 if (vlan_tx_tag_present(skb)) {
 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
-__vlan_put_tag(skb, vlan_tag);
+__vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
 skb->vlan_tci = 0;
 }

@@ -1383,7 +1383,7 @@ static void be_rx_compl_process(struct be_rx_obj *rxo,


 if (rxcp->vlanf)
-__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
+__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

 netif_receive_skb(skb);
 }
@@ -1439,7 +1439,7 @@ void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
 skb->rxhash = rxcp->rss_hash;

 if (rxcp->vlanf)
-__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
+__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

 napi_gro_frags(napi);
 }
@@ -4003,7 +4003,7 @@ static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
 if (status & E1000_RXD_STAT_VP) {
 u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;

-__vlan_hwaccel_put_tag(skb, vid);
+__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
 }
 napi_gro_receive(&adapter->napi, skb);
 }
@@ -554,7 +554,7 @@ static void e1000_receive_skb(struct e1000_adapter *adapter,
 skb->protocol = eth_type_trans(skb, netdev);

 if (staterr & E1000_RXD_STAT_VP)
-__vlan_hwaccel_put_tag(skb, tag);
+__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);

 napi_gro_receive(&adapter->napi, skb);
 }
@@ -6683,7 +6683,7 @@ static void igb_process_skb_fields(struct igb_ring *rx_ring,
 else
 vid = le16_to_cpu(rx_desc->wb.upper.vlan);

-__vlan_hwaccel_put_tag(skb, vid);
+__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
 }

 skb_record_rx_queue(skb, rx_ring->queue_index);
@@ -116,7 +116,7 @@ static void igbvf_receive_skb(struct igbvf_adapter *adapter,
 else
 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
 if (test_bit(vid, adapter->active_vlans))
-__vlan_hwaccel_put_tag(skb, vid);
+__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
 }

 napi_gro_receive(&adapter->rx_ring->napi, skb);
@@ -2082,8 +2082,8 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)

 skb->protocol = eth_type_trans(skb, netdev);
 if (status & IXGB_RX_DESC_STATUS_VP)
-__vlan_hwaccel_put_tag(skb,
+__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
 le16_to_cpu(rx_desc->special));

 netif_receive_skb(skb);

@@ -1491,7 +1491,7 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
 ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
 u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
-__vlan_hwaccel_put_tag(skb, vid);
+__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
 }

 skb_record_rx_queue(skb, rx_ring->queue_index);
@@ -291,7 +291,7 @@ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
 u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);

 if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans))
-__vlan_hwaccel_put_tag(skb, tag);
+__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);

 if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
 napi_gro_receive(&q_vector->napi, skb);
@@ -1059,7 +1059,7 @@ jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
 if (rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_TAGON)) {
 u16 vid = le16_to_cpu(rxdesc->descwb.vlan);

-__vlan_hwaccel_put_tag(skb, vid);
+__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
 NET_STAT(jme).rx_bytes += 4;
 }
 jme->jme_rx(skb);
@@ -2713,7 +2713,7 @@ static void sky2_rx_tag(struct sky2_port *sky2, u16 length)
 struct sk_buff *skb;

 skb = sky2->rx_ring[sky2->rx_next].skb;
-__vlan_hwaccel_put_tag(skb, be16_to_cpu(length));
+__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(length));
 }

 static void sky2_rx_hash(struct sky2_port *sky2, u32 status)
@@ -673,7 +673,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) {
 u16 vid = be16_to_cpu(cqe->sl_vid);

-__vlan_hwaccel_put_tag(gro_skb, vid);
+__vlan_hwaccel_put_tag(gro_skb, htons(ETH_P_8021Q), vid);
 }

 if (dev->features & NETIF_F_RXHASH)
@@ -716,7 +716,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud

 if (be32_to_cpu(cqe->vlan_my_qpn) &
 MLX4_CQE_VLAN_PRESENT_MASK)
-__vlan_hwaccel_put_tag(skb, be16_to_cpu(cqe->sl_vid));
+__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(cqe->sl_vid));

 /* Push it up the stack */
 netif_receive_skb(skb);
@@ -1290,7 +1290,7 @@ myri10ge_vlan_rx(struct net_device *dev, void *addr, struct sk_buff *skb)
 skb->csum = csum_sub(skb->csum, vsum);
 }
 /* pop tag */
-__vlan_hwaccel_put_tag(skb, ntohs(veh->h_vlan_TCI));
+__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(veh->h_vlan_TCI));
 memmove(va + VLAN_HLEN, va, 2 * ETH_ALEN);
 skb->len -= VLAN_HLEN;
 skb->data_len -= VLAN_HLEN;
@@ -911,7 +911,7 @@ static void rx_irq(struct net_device *ndev)
 unsigned short tag;

 tag = ntohs(extsts & EXTSTS_VTG_MASK);
-__vlan_hwaccel_put_tag(skb, tag);
+__vlan_hwaccel_put_tag(skb, htons(ETH_P_IPV6), tag);
 }
 #endif
 rx_rc = netif_rx(skb);
@@ -8555,7 +8555,7 @@ static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag)

 skb->protocol = eth_type_trans(skb, dev);
 if (vlan_tag && sp->vlan_strip_flag)
-__vlan_hwaccel_put_tag(skb, vlan_tag);
+__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
 if (sp->config.napi)
 netif_receive_skb(skb);
 else
@@ -312,7 +312,7 @@ vxge_rx_complete(struct vxge_ring *ring, struct sk_buff *skb, u16 vlan,

 if (ext_info->vlan &&
 ring->vlan_tag_strip == VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE)
-__vlan_hwaccel_put_tag(skb, ext_info->vlan);
+__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ext_info->vlan);
 napi_gro_receive(ring->napi_p, skb);

 vxge_debug_entryexit(VXGE_TRACE,
@@ -2969,7 +2969,7 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
 vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
 u16 vid = vlanflags & NV_RX3_VLAN_TAG_MASK;

-__vlan_hwaccel_put_tag(skb, vid);
+__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
 }
 napi_gro_receive(&np->napi, skb);
 u64_stats_update_begin(&np->swstats_rx_syncp);
@@ -1050,7 +1050,7 @@ qlcnic_process_rcv(struct qlcnic_adapter *adapter,
 skb->protocol = eth_type_trans(skb, netdev);

 if (vid != 0xffff)
-__vlan_hwaccel_put_tag(skb, vid);
+__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);

 napi_gro_receive(&sds_ring->napi, skb);

@@ -1153,7 +1153,7 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
 }

 if (vid != 0xffff)
-__vlan_hwaccel_put_tag(skb, vid);
+__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
 netif_receive_skb(skb);

 adapter->stats.lro_pkts++;
@@ -1518,7 +1518,7 @@ qlcnic_83xx_process_rcv(struct qlcnic_adapter *adapter,
 skb->protocol = eth_type_trans(skb, netdev);

 if (vid != 0xffff)
-__vlan_hwaccel_put_tag(skb, vid);
+__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);

 napi_gro_receive(&sds_ring->napi, skb);

@@ -1615,7 +1615,7 @@ qlcnic_83xx_process_lro(struct qlcnic_adapter *adapter,
 }

 if (vid != 0xffff)
-__vlan_hwaccel_put_tag(skb, vid);
+__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);

 netif_receive_skb(skb);

@@ -1498,7 +1498,7 @@ static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
 skb->ip_summed = CHECKSUM_UNNECESSARY;
 skb_record_rx_queue(skb, rx_ring->cq_id);
 if (vlan_id != 0xffff)
-__vlan_hwaccel_put_tag(skb, vlan_id);
+__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
 napi_gro_frags(napi);
 }

@@ -1574,7 +1574,7 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,

 skb_record_rx_queue(skb, rx_ring->cq_id);
 if (vlan_id != 0xffff)
-__vlan_hwaccel_put_tag(skb, vlan_id);
+__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
 napi_gro_receive(napi, skb);
 else
@@ -1670,7 +1670,7 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,

 skb_record_rx_queue(skb, rx_ring->cq_id);
 if (vlan_id != 0xffff)
-__vlan_hwaccel_put_tag(skb, vlan_id);
+__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
 napi_gro_receive(&rx_ring->napi, skb);
 else
@@ -1975,7 +1975,7 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
 rx_ring->rx_bytes += skb->len;
 skb_record_rx_queue(skb, rx_ring->cq_id);
 if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0))
-__vlan_hwaccel_put_tag(skb, vlan_id);
+__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
 napi_gro_receive(&rx_ring->napi, skb);
 else
@@ -431,7 +431,7 @@ static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb,
 cp->dev->stats.rx_bytes += skb->len;

 if (opts2 & RxVlanTagged)
-__vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff));
+__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff));

 napi_gro_receive(&cp->napi, skb);
 }
@@ -1843,7 +1843,7 @@ static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
 u32 opts2 = le32_to_cpu(desc->opts2);

 if (opts2 & RxVlanTag)
-__vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff));
+__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff));
 }

 static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
@@ -1148,7 +1148,7 @@ NETIF_RX_MUX(struct bdx_priv *priv, u32 rxd_val1, u16 rxd_vlan,
 priv->ndev->name,
 GET_RXD_VLAN_ID(rxd_vlan),
 GET_RXD_VTAG(rxd_val1));
-__vlan_hwaccel_put_tag(skb, GET_RXD_VLAN_TCI(rxd_vlan));
+__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), GET_RXD_VLAN_TCI(rxd_vlan));
 }
 netif_receive_skb(skb);
 }
@@ -1936,7 +1936,7 @@ static int rhine_rx(struct net_device *dev, int limit)
 skb->protocol = eth_type_trans(skb, dev);

 if (unlikely(desc_length & DescTag))
-__vlan_hwaccel_put_tag(skb, vlan_tci);
+__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
 netif_receive_skb(skb);

 u64_stats_update_begin(&rp->rx_stats.syncp);
@@ -2080,7 +2080,7 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
 if (rd->rdesc0.RSR & RSR_DETAG) {
 u16 vid = swab16(le16_to_cpu(rd->rdesc1.PQTAG));

-__vlan_hwaccel_put_tag(skb, vid);
+__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
 }
 netif_rx(skb);

@@ -221,7 +221,7 @@ static struct sk_buff *cdc_mbim_process_dgram(struct usbnet *dev, u8 *buf, size_

 /* map MBIM session to VLAN */
 if (tci)
-vlan_put_tag(skb, tci);
+vlan_put_tag(skb, htons(ETH_P_8021Q), tci);
 err:
 return skb;
 }
@@ -1293,7 +1293,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 skb->protocol = eth_type_trans(skb, adapter->netdev);

 if (unlikely(rcd->ts))
-__vlan_hwaccel_put_tag(skb, rcd->tci);
+__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rcd->tci);

 if (adapter->netdev->features & NETIF_F_LRO)
 netif_receive_skb(skb);
@@ -157,9 +157,18 @@ static inline bool vlan_uses_dev(const struct net_device *dev)
 }
 #endif

+static inline bool vlan_hw_offload_capable(netdev_features_t features,
+__be16 proto)
+{
+if (proto == htons(ETH_P_8021Q) && features & NETIF_F_HW_VLAN_CTAG_TX)
+return true;
+return false;
+}
+
 /**
 * vlan_insert_tag - regular VLAN tag inserting
 * @skb: skbuff to tag
+* @vlan_proto: VLAN encapsulation protocol
 * @vlan_tci: VLAN TCI to insert
 *
 * Inserts the VLAN tag into @skb as part of the payload
@@ -170,7 +179,8 @@ static inline bool vlan_uses_dev(const struct net_device *dev)
 *
 * Does not change skb->protocol so this function can be used during receive.
 */
-static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb, u16 vlan_tci)
+static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
+__be16 vlan_proto, u16 vlan_tci)
 {
 struct vlan_ethhdr *veth;

@@ -185,7 +195,7 @@ static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb, u16 vlan_tci)
 skb->mac_header -= VLAN_HLEN;

 /* first, the ethernet type */
-veth->h_vlan_proto = htons(ETH_P_8021Q);
+veth->h_vlan_proto = vlan_proto;

 /* now, the TCI */
 veth->h_vlan_TCI = htons(vlan_tci);
@@ -204,24 +214,28 @@ static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb, u16 vlan_tci)
 * Following the skb_unshare() example, in case of error, the calling function
 * doesn't have to worry about freeing the original skb.
 */
-static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb, u16 vlan_tci)
+static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb,
+__be16 vlan_proto, u16 vlan_tci)
 {
-skb = vlan_insert_tag(skb, vlan_tci);
+skb = vlan_insert_tag(skb, vlan_proto, vlan_tci);
 if (skb)
-skb->protocol = htons(ETH_P_8021Q);
+skb->protocol = vlan_proto;
 return skb;
 }

 /**
 * __vlan_hwaccel_put_tag - hardware accelerated VLAN inserting
 * @skb: skbuff to tag
+* @vlan_proto: VLAN encapsulation protocol
 * @vlan_tci: VLAN TCI to insert
 *
 * Puts the VLAN TCI in @skb->vlan_tci and lets the device do the rest
 */
 static inline struct sk_buff *__vlan_hwaccel_put_tag(struct sk_buff *skb,
+__be16 vlan_proto,
 u16 vlan_tci)
 {
+skb->vlan_proto = vlan_proto;
 skb->vlan_tci = VLAN_TAG_PRESENT | vlan_tci;
 return skb;
 }
@@ -236,12 +250,13 @@ static inline struct sk_buff *__vlan_hwaccel_put_tag(struct sk_buff *skb,
 * Assumes skb->dev is the target that will xmit this frame.
 * Returns a VLAN tagged skb.
 */
-static inline struct sk_buff *vlan_put_tag(struct sk_buff *skb, u16 vlan_tci)
+static inline struct sk_buff *vlan_put_tag(struct sk_buff *skb,
+__be16 vlan_proto, u16 vlan_tci)
 {
-if (skb->dev->features & NETIF_F_HW_VLAN_CTAG_TX) {
-return __vlan_hwaccel_put_tag(skb, vlan_tci);
+if (vlan_hw_offload_capable(skb->dev->features, vlan_proto)) {
+return __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
 } else {
-return __vlan_put_tag(skb, vlan_tci);
+return __vlan_put_tag(skb, vlan_proto, vlan_tci);
 }
 }

@@ -387,6 +387,7 @@ typedef unsigned char *sk_buff_data_t;
 * @secmark: security marking
 * @mark: Generic packet mark
 * @dropcount: total number of sk_receive_queue overflows
+* @vlan_proto: vlan encapsulation protocol
 * @vlan_tci: vlan tag control information
 * @inner_transport_header: Inner transport layer header (encapsulation)
 * @inner_network_header: Network layer header (encapsulation)
@@ -465,6 +466,7 @@ struct sk_buff {

 __u32 rxhash;

+__be16 vlan_proto;
 __u16 vlan_tci;

 #ifdef CONFIG_NET_SCHED
@@ -8,11 +8,12 @@
 bool vlan_do_receive(struct sk_buff **skbp)
 {
 struct sk_buff *skb = *skbp;
+__be16 vlan_proto = skb->vlan_proto;
 u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK;
 struct net_device *vlan_dev;
 struct vlan_pcpu_stats *rx_stats;

-vlan_dev = vlan_find_dev(skb->dev, htons(ETH_P_8021Q), vlan_id);
+vlan_dev = vlan_find_dev(skb->dev, vlan_proto, vlan_id);
 if (!vlan_dev)
 return false;

@@ -38,7 +39,8 @@ bool vlan_do_receive(struct sk_buff **skbp)
 * original position later
 */
 skb_push(skb, offset);
-skb = *skbp = vlan_insert_tag(skb, skb->vlan_tci);
+skb = *skbp = vlan_insert_tag(skb, skb->vlan_proto,
+skb->vlan_tci);
 if (!skb)
 return false;
 skb_pull(skb, offset + VLAN_HLEN);
@@ -127,7 +129,7 @@ struct sk_buff *vlan_untag(struct sk_buff *skb)

 vhdr = (struct vlan_hdr *) skb->data;
 vlan_tci = ntohs(vhdr->h_vlan_TCI);
-__vlan_hwaccel_put_tag(skb, vlan_tci);
+__vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci);

 skb_pull_rcsum(skb, VLAN_HLEN);
 vlan_set_encap_proto(skb, vhdr);
@@ -167,7 +167,7 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
 u16 vlan_tci;
 vlan_tci = vlan->vlan_id;
 vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb);
-skb = __vlan_hwaccel_put_tag(skb, vlan_tci);
+skb = __vlan_hwaccel_put_tag(skb, vlan->vlan_proto, vlan_tci);
 }

 skb->dev = vlan->real_dev;
@@ -341,7 +341,7 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
 }

 if (vid != -1)
-skb = vlan_insert_tag(skb, vid);
+skb = vlan_insert_tag(skb, htons(ETH_P_8021Q), vid);

 skb_reset_mac_header(skb);
 skb->protocol = eth_type_trans(skb, soft_iface);
@@ -535,7 +535,7 @@ static struct net_device *brnf_get_logical_dev(struct sk_buff *skb, const struct
 if (brnf_pass_vlan_indev == 0 || !vlan_tx_tag_present(skb))
 return br;

-vlan = __vlan_find_dev_deep(br, htons(ETH_P_8021Q),
+vlan = __vlan_find_dev_deep(br, skb->vlan_proto,
 vlan_tx_tag_get(skb) & VLAN_VID_MASK);

 return vlan ? vlan : br;
@@ -175,7 +175,7 @@ struct sk_buff *br_handle_vlan(struct net_bridge *br,
 * mac header.
 */
 skb_push(skb, ETH_HLEN);
-skb = __vlan_put_tag(skb, skb->vlan_tci);
+skb = __vlan_put_tag(skb, skb->vlan_proto, skb->vlan_tci);
 if (!skb)
 goto out;
 /* put skb->data back to where it was */
@@ -217,7 +217,7 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
 /* PVID is set on this port. Any untagged ingress
 * frame is considered to belong to this vlan.
 */
-__vlan_hwaccel_put_tag(skb, pvid);
+__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), pvid);
 return true;
 }

@@ -2482,8 +2482,9 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 features = netif_skb_features(skb);

 if (vlan_tx_tag_present(skb) &&
-!(features & NETIF_F_HW_VLAN_CTAG_TX)) {
-skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
+!vlan_hw_offload_capable(features, skb->vlan_proto)) {
+skb = __vlan_put_tag(skb, skb->vlan_proto,
+vlan_tx_tag_get(skb));
 if (unlikely(!skb))
 goto out;

@@ -383,8 +383,9 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
 if (__netif_tx_trylock(txq)) {
 if (!netif_xmit_stopped(txq)) {
 if (vlan_tx_tag_present(skb) &&
-!(netif_skb_features(skb) & NETIF_F_HW_VLAN_CTAG_TX)) {
-skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
+!vlan_hw_offload_capable(netif_skb_features(skb),
+skb->vlan_proto)) {
+skb = __vlan_put_tag(skb, skb->vlan_proto, vlan_tx_tag_get(skb));
 if (unlikely(!skb))
 break;
 skb->vlan_tci = 0;
@@ -707,6 +707,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 new->tc_verd = old->tc_verd;
 #endif
 #endif
+new->vlan_proto = old->vlan_proto;
 new->vlan_tci = old->vlan_tci;

 skb_copy_secmark(new, old);
@@ -98,7 +98,7 @@ static int pop_vlan(struct sk_buff *skb)
 if (unlikely(err))
 return err;

-__vlan_hwaccel_put_tag(skb, ntohs(tci));
+__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(tci));
 return 0;
 }

@@ -110,7 +110,7 @@ static int push_vlan(struct sk_buff *skb, const struct ovs_action_push_vlan *vla
 /* push down current VLAN tag */
 current_tag = vlan_tx_tag_get(skb);

-if (!__vlan_put_tag(skb, current_tag))
+if (!__vlan_put_tag(skb, skb->vlan_proto, current_tag))
 return -ENOMEM;

 if (skb->ip_summed == CHECKSUM_COMPLETE)
@@ -118,7 +118,7 @@ static int push_vlan(struct sk_buff *skb, const struct ovs_action_push_vlan *vla
 + (2 * ETH_ALEN), VLAN_HLEN, 0));

 }
-__vlan_hwaccel_put_tag(skb, ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
+__vlan_hwaccel_put_tag(skb, vlan->vlan_tpid, ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
 return 0;
 }

@@ -401,7 +401,7 @@ static int queue_userspace_packet(struct net *net, int dp_ifindex,
 if (!nskb)
 return -ENOMEM;

-nskb = __vlan_put_tag(nskb, vlan_tx_tag_get(nskb));
+nskb = __vlan_put_tag(nskb, nskb->vlan_proto, vlan_tx_tag_get(nskb));
 if (!nskb)
 return -ENOMEM;

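A short follow-on sketch (again userspace-only; the feature bitmask type and flag are simplified stand-ins) of the dispatch the if_vlan.h hunks introduce: vlan_put_tag() now asks vlan_hw_offload_capable() whether the device can offload the given TPID and only then takes the hardware-accelerated path, otherwise the 4-byte tag is inserted into the payload.

/* Userspace sketch of the protocol-aware dispatch added in if_vlan.h;
 * the feature bitmask and flag name are simplified stand-ins. */
#include <arpa/inet.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ETH_P_8021Q             0x8100
#define NETIF_F_HW_VLAN_CTAG_TX (1u << 0)

static bool vlan_hw_offload_capable(uint32_t features, uint16_t proto)
{
	/* same test as the new kernel helper: only 802.1Q is offloadable */
	return proto == htons(ETH_P_8021Q) &&
	       (features & NETIF_F_HW_VLAN_CTAG_TX);
}

int main(void)
{
	uint32_t features = NETIF_F_HW_VLAN_CTAG_TX;

	if (vlan_hw_offload_capable(features, htons(ETH_P_8021Q)))
		puts("hwaccel path: record TPID and TCI in the skb");
	else
		puts("software path: insert the 4-byte tag into the payload");
	return 0;
}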