mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-11-24 16:30:52 +07:00
net: Fix drivers advertising HW_CSUM feature to use csum_start
Some drivers are using skb_transport_offset(skb) instead of skb->csum_start for NETIF_F_HW_CSUM offload. This does not matter now, but if someone implements checksumming of encapsulated packets then this will break silently. TSO output paths are left as they are, since they are for IP+TCP only (might be worth converting though). Signed-off-by: Michał Mirosław <mirq-linux@rere.qmqm.pl> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
55508d601d
commit
0d0b16727f
@ -2078,7 +2078,7 @@ static int atl1c_tso_csum(struct atl1c_adapter *adapter,
|
||||
check_sum:
|
||||
if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
|
||||
u8 css, cso;
|
||||
cso = skb_transport_offset(skb);
|
||||
cso = skb_checksum_start_offset(skb);
|
||||
|
||||
if (unlikely(cso & 0x1)) {
|
||||
if (netif_msg_tx_err(adapter))
|
||||
|
@ -1649,7 +1649,7 @@ static int atl1e_tso_csum(struct atl1e_adapter *adapter,
|
||||
if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
|
||||
u8 css, cso;
|
||||
|
||||
cso = skb_transport_offset(skb);
|
||||
cso = skb_checksum_start_offset(skb);
|
||||
if (unlikely(cso & 0x1)) {
|
||||
netdev_err(adapter->netdev,
|
||||
"payload offset should not be an odd number\n");
|
||||
|
@ -2788,7 +2788,7 @@ static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
|
||||
|
||||
ctrl = 0;
|
||||
if (skb->ip_summed == CHECKSUM_PARTIAL) {
|
||||
const u64 csum_start_off = skb_transport_offset(skb);
|
||||
const u64 csum_start_off = skb_checksum_start_offset(skb);
|
||||
const u64 csum_stuff_off = csum_start_off + skb->csum_offset;
|
||||
|
||||
ctrl = TX_DESC_CSUM_EN |
|
||||
|
@ -2726,7 +2726,7 @@ static bool e1000_tx_csum(struct e1000_adapter *adapter,
|
||||
break;
|
||||
}
|
||||
|
||||
css = skb_transport_offset(skb);
|
||||
css = skb_checksum_start_offset(skb);
|
||||
|
||||
i = tx_ring->next_to_use;
|
||||
buffer_info = &tx_ring->buffer_info[i];
|
||||
|
@ -4473,7 +4473,7 @@ static bool e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
|
||||
break;
|
||||
}
|
||||
|
||||
css = skb_transport_offset(skb);
|
||||
css = skb_checksum_start_offset(skb);
|
||||
|
||||
i = tx_ring->next_to_use;
|
||||
buffer_info = &tx_ring->buffer_info[i];
|
||||
|
@ -702,7 +702,7 @@ static inline void enic_queue_wq_skb_csum_l4(struct enic *enic,
|
||||
{
|
||||
unsigned int head_len = skb_headlen(skb);
|
||||
unsigned int len_left = skb->len - head_len;
|
||||
unsigned int hdr_len = skb_transport_offset(skb);
|
||||
unsigned int hdr_len = skb_checksum_start_offset(skb);
|
||||
unsigned int csum_offset = hdr_len + skb->csum_offset;
|
||||
int eop = (len_left == 0);
|
||||
|
||||
|
@ -1262,7 +1262,7 @@ ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
|
||||
|
||||
if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
|
||||
struct ixgb_buffer *buffer_info;
|
||||
css = skb_transport_offset(skb);
|
||||
css = skb_checksum_start_offset(skb);
|
||||
cso = css + skb->csum_offset;
|
||||
|
||||
i = adapter->tx_ring.next_to_use;
|
||||
|
@ -692,7 +692,7 @@ static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
|
||||
|
||||
cur_p->app0 = 0;
|
||||
if (skb->ip_summed == CHECKSUM_PARTIAL) {
|
||||
unsigned int csum_start_off = skb_transport_offset(skb);
|
||||
unsigned int csum_start_off = skb_checksum_start_offset(skb);
|
||||
unsigned int csum_index_off = csum_start_off + skb->csum_offset;
|
||||
|
||||
cur_p->app0 |= 1; /* TX Checksum Enabled */
|
||||
|
@ -2736,7 +2736,7 @@ static netdev_tx_t myri10ge_xmit(struct sk_buff *skb,
|
||||
odd_flag = 0;
|
||||
flags = (MXGEFW_FLAGS_NO_TSO | MXGEFW_FLAGS_FIRST);
|
||||
if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
|
||||
cksum_offset = skb_transport_offset(skb);
|
||||
cksum_offset = skb_checksum_start_offset(skb);
|
||||
pseudo_hdr_offset = cksum_offset + skb->csum_offset;
|
||||
/* If the headers are excessively large, then we must
|
||||
* fall back to a software checksum */
|
||||
|
@ -6589,7 +6589,7 @@ static u64 niu_compute_tx_flags(struct sk_buff *skb, struct ethhdr *ehdr,
|
||||
(ip_proto == IPPROTO_UDP ?
|
||||
TXHDR_CSUM_UDP : TXHDR_CSUM_SCTP));
|
||||
|
||||
start = skb_transport_offset(skb) -
|
||||
start = skb_checksum_start_offset(skb) -
|
||||
(pad_bytes + sizeof(struct tx_pkt_hdr));
|
||||
stuff = start + skb->csum_offset;
|
||||
|
||||
|
@ -2764,7 +2764,7 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
|
||||
td->dma_hi = map >> 32;
|
||||
|
||||
if (skb->ip_summed == CHECKSUM_PARTIAL) {
|
||||
const int offset = skb_transport_offset(skb);
|
||||
const int offset = skb_checksum_start_offset(skb);
|
||||
|
||||
/* This seems backwards, but it is what the sk98lin
|
||||
* does. Looks like hardware is wrong?
|
||||
|
@ -1004,7 +1004,7 @@ static netdev_tx_t gem_start_xmit(struct sk_buff *skb,
|
||||
|
||||
ctrl = 0;
|
||||
if (skb->ip_summed == CHECKSUM_PARTIAL) {
|
||||
const u64 csum_start_off = skb_transport_offset(skb);
|
||||
const u64 csum_start_off = skb_checksum_start_offset(skb);
|
||||
const u64 csum_stuff_off = csum_start_off + skb->csum_offset;
|
||||
|
||||
ctrl = (TXDCTRL_CENAB |
|
||||
|
@ -2266,7 +2266,7 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
|
||||
|
||||
tx_flags = TXFLAG_OWN;
|
||||
if (skb->ip_summed == CHECKSUM_PARTIAL) {
|
||||
const u32 csum_start_off = skb_transport_offset(skb);
|
||||
const u32 csum_start_off = skb_checksum_start_offset(skb);
|
||||
const u32 csum_stuff_off = csum_start_off + skb->csum_offset;
|
||||
|
||||
tx_flags = (TXFLAG_OWN | TXFLAG_CSENABLE |
|
||||
|
@ -798,7 +798,7 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
|
||||
{
|
||||
struct Vmxnet3_TxDataDesc *tdd;
|
||||
|
||||
if (ctx->mss) {
|
||||
if (ctx->mss) { /* TSO */
|
||||
ctx->eth_ip_hdr_size = skb_transport_offset(skb);
|
||||
ctx->l4_hdr_size = ((struct tcphdr *)
|
||||
skb_transport_header(skb))->doff * 4;
|
||||
@ -807,7 +807,7 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
|
||||
unsigned int pull_size;
|
||||
|
||||
if (skb->ip_summed == CHECKSUM_PARTIAL) {
|
||||
ctx->eth_ip_hdr_size = skb_transport_offset(skb);
|
||||
ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb);
|
||||
|
||||
if (ctx->ipv4) {
|
||||
struct iphdr *iph = (struct iphdr *)
|
||||
|
Loading…
Reference in New Issue
Block a user