net: Fix drivers advertising HW_CSUM feature to use csum_start

Some drivers are using skb_transport_offset(skb) instead of skb->csum_start
for NETIF_F_HW_CSUM offload.  This does not matter now, but if someone
implements checksumming of encapsulated packets then this will break silently.

TSO output paths are left as they are, since they are for IP+TCP only
(might be worth converting though).
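
For reference, a minimal sketch of the two helpers involved (assumed to mirror
their definitions in include/linux/skbuff.h around this commit; not part of the
diff below).  skb_transport_offset() points at the transport header, while
skb_checksum_start_offset() derives the offset from skb->csum_start, which is
where the stack actually asks the hardware to begin checksumming.  The two
coincide for plain IP+TCP/UDP but can diverge once checksumming of
encapsulated packets is offloaded, hence the conversion:

	static inline int skb_transport_offset(const struct sk_buff *skb)
	{
		/* offset of the transport header from skb->data */
		return skb_transport_header(skb) - skb->data;
	}

	static inline int skb_checksum_start_offset(const struct sk_buff *skb)
	{
		/* offset at which the stack wants checksumming to start */
		return skb->csum_start - skb_headroom(skb);
	}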

Signed-off-by: Michał Mirosław <mirq-linux@rere.qmqm.pl>
Signed-off-by: David S. Miller <davem@davemloft.net>
Author:    Michał Mirosław <mirq-linux@rere.qmqm.pl>
Date:      2010-12-14 15:24:08 +0000
Committer: David S. Miller <davem@davemloft.net>
Commit:    0d0b16727f
Parent:    55508d601d

14 changed files with 15 additions and 15 deletions


@@ -2078,7 +2078,7 @@ static int atl1c_tso_csum(struct atl1c_adapter *adapter,
 check_sum:
	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		u8 css, cso;
-		cso = skb_transport_offset(skb);
+		cso = skb_checksum_start_offset(skb);
		if (unlikely(cso & 0x1)) {
			if (netif_msg_tx_err(adapter))


@@ -1649,7 +1649,7 @@ static int atl1e_tso_csum(struct atl1e_adapter *adapter,
	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		u8 css, cso;
-		cso = skb_transport_offset(skb);
+		cso = skb_checksum_start_offset(skb);
		if (unlikely(cso & 0x1)) {
			netdev_err(adapter->netdev,
				   "payload offset should not ant event number\n");


@@ -2788,7 +2788,7 @@ static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
	ctrl = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
-		const u64 csum_start_off = skb_transport_offset(skb);
+		const u64 csum_start_off = skb_checksum_start_offset(skb);
		const u64 csum_stuff_off = csum_start_off + skb->csum_offset;
		ctrl = TX_DESC_CSUM_EN |


@@ -2726,7 +2726,7 @@ static bool e1000_tx_csum(struct e1000_adapter *adapter,
		break;
	}
-	css = skb_transport_offset(skb);
+	css = skb_checksum_start_offset(skb);
	i = tx_ring->next_to_use;
	buffer_info = &tx_ring->buffer_info[i];


@@ -4473,7 +4473,7 @@ static bool e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
		break;
	}
-	css = skb_transport_offset(skb);
+	css = skb_checksum_start_offset(skb);
	i = tx_ring->next_to_use;
	buffer_info = &tx_ring->buffer_info[i];


@@ -702,7 +702,7 @@ static inline void enic_queue_wq_skb_csum_l4(struct enic *enic,
 {
	unsigned int head_len = skb_headlen(skb);
	unsigned int len_left = skb->len - head_len;
-	unsigned int hdr_len = skb_transport_offset(skb);
+	unsigned int hdr_len = skb_checksum_start_offset(skb);
	unsigned int csum_offset = hdr_len + skb->csum_offset;
	int eop = (len_left == 0);


@@ -1262,7 +1262,7 @@ ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		struct ixgb_buffer *buffer_info;
-		css = skb_transport_offset(skb);
+		css = skb_checksum_start_offset(skb);
		cso = css + skb->csum_offset;
		i = adapter->tx_ring.next_to_use;


@@ -692,7 +692,7 @@ static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
	cur_p->app0 = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
-		unsigned int csum_start_off = skb_transport_offset(skb);
+		unsigned int csum_start_off = skb_checksum_start_offset(skb);
		unsigned int csum_index_off = csum_start_off + skb->csum_offset;
		cur_p->app0 |= 1; /* TX Checksum Enabled */


@@ -2736,7 +2736,7 @@ static netdev_tx_t myri10ge_xmit(struct sk_buff *skb,
	odd_flag = 0;
	flags = (MXGEFW_FLAGS_NO_TSO | MXGEFW_FLAGS_FIRST);
	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
-		cksum_offset = skb_transport_offset(skb);
+		cksum_offset = skb_checksum_start_offset(skb);
		pseudo_hdr_offset = cksum_offset + skb->csum_offset;
		/* If the headers are excessively large, then we must
		 * fall back to a software checksum */


@@ -6589,7 +6589,7 @@ static u64 niu_compute_tx_flags(struct sk_buff *skb, struct ethhdr *ehdr,
			(ip_proto == IPPROTO_UDP ?
			 TXHDR_CSUM_UDP : TXHDR_CSUM_SCTP));
-		start = skb_transport_offset(skb) -
+		start = skb_checksum_start_offset(skb) -
			(pad_bytes + sizeof(struct tx_pkt_hdr));
		stuff = start + skb->csum_offset;


@@ -2764,7 +2764,7 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
	td->dma_hi = map >> 32;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
-		const int offset = skb_transport_offset(skb);
+		const int offset = skb_checksum_start_offset(skb);
		/* This seems backwards, but it is what the sk98lin
		 * does. Looks like hardware is wrong?


@@ -1004,7 +1004,7 @@ static netdev_tx_t gem_start_xmit(struct sk_buff *skb,
	ctrl = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
-		const u64 csum_start_off = skb_transport_offset(skb);
+		const u64 csum_start_off = skb_checksum_start_offset(skb);
		const u64 csum_stuff_off = csum_start_off + skb->csum_offset;
		ctrl = (TXDCTRL_CENAB |


@@ -2266,7 +2266,7 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
	tx_flags = TXFLAG_OWN;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
-		const u32 csum_start_off = skb_transport_offset(skb);
+		const u32 csum_start_off = skb_checksum_start_offset(skb);
		const u32 csum_stuff_off = csum_start_off + skb->csum_offset;
		tx_flags = (TXFLAG_OWN | TXFLAG_CSENABLE |


@@ -798,7 +798,7 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 {
	struct Vmxnet3_TxDataDesc *tdd;
-	if (ctx->mss) {
+	if (ctx->mss) {	/* TSO */
		ctx->eth_ip_hdr_size = skb_transport_offset(skb);
		ctx->l4_hdr_size = ((struct tcphdr *)
				   skb_transport_header(skb))->doff * 4;
@@ -807,7 +807,7 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
		unsigned int pull_size;
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
-			ctx->eth_ip_hdr_size = skb_transport_offset(skb);
+			ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb);
			if (ctx->ipv4) {
				struct iphdr *iph = (struct iphdr *)