Merge branch 'hv_netvsc-cleanups'

Stephen Hemminger says:

====================
Hyper-V network driver cleanups.

The only new functionality is minor extensions to ethtool.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by David S. Miller on 2016-08-23 12:05:38 -07:00
commit 3c90a941ed
5 changed files with 297 additions and 254 deletions

drivers/net/hyperv/hyperv_net.h

@@ -84,8 +84,6 @@ struct ndis_recv_scale_cap { /* NDIS_RECEIVE_SCALE_CAPABILITIES */
#define NDIS_RSS_HASH_SECRET_KEY_MAX_SIZE_REVISION_2 40
#define ITAB_NUM 128
#define HASH_KEYLEN NDIS_RSS_HASH_SECRET_KEY_MAX_SIZE_REVISION_2
extern u8 netvsc_hash_key[];
struct ndis_recv_scale_param { /* NDIS_RECEIVE_SCALE_PARAMETERS */
struct ndis_obj_header hdr;
@@ -175,7 +173,7 @@ struct rndis_device {
struct rndis_message;
struct netvsc_device;
int netvsc_device_add(struct hv_device *device, void *additional_info);
int netvsc_device_remove(struct hv_device *device);
void netvsc_device_remove(struct hv_device *device);
int netvsc_send(struct hv_device *device,
struct hv_netvsc_packet *packet,
struct rndis_message *rndis_msg,
@@ -654,6 +652,14 @@ struct netvsc_stats {
struct u64_stats_sync syncp;
};
struct netvsc_ethtool_stats {
unsigned long tx_scattered;
unsigned long tx_no_memory;
unsigned long tx_no_space;
unsigned long tx_too_big;
unsigned long tx_busy;
};
struct netvsc_reconfig {
struct list_head list;
u32 event;
@@ -683,6 +689,7 @@ struct net_device_context {
/* Ethtool settings */
u8 duplex;
u32 speed;
struct netvsc_ethtool_stats eth_stats;
/* the device is going away */
bool start_remove;

drivers/net/hyperv/netvsc.c

@@ -33,6 +33,89 @@
#include "hyperv_net.h"
/*
* An API to support in-place processing of incoming VMBUS packets.
*/
#define VMBUS_PKT_TRAILER 8
static struct vmpacket_descriptor *
get_next_pkt_raw(struct vmbus_channel *channel)
{
struct hv_ring_buffer_info *ring_info = &channel->inbound;
u32 read_loc = ring_info->priv_read_index;
void *ring_buffer = hv_get_ring_buffer(ring_info);
struct vmpacket_descriptor *cur_desc;
u32 packetlen;
u32 dsize = ring_info->ring_datasize;
u32 delta = read_loc - ring_info->ring_buffer->read_index;
u32 bytes_avail_toread = (hv_get_bytes_to_read(ring_info) - delta);
if (bytes_avail_toread < sizeof(struct vmpacket_descriptor))
return NULL;
if ((read_loc + sizeof(*cur_desc)) > dsize)
return NULL;
cur_desc = ring_buffer + read_loc;
packetlen = cur_desc->len8 << 3;
/*
* If the packet under consideration is wrapping around,
* return failure.
*/
if ((read_loc + packetlen + VMBUS_PKT_TRAILER) > (dsize - 1))
return NULL;
return cur_desc;
}
/*
* A helper function to step through packets "in-place".
* This API is to be called after each successful call to
* get_next_pkt_raw().
*/
static void put_pkt_raw(struct vmbus_channel *channel,
struct vmpacket_descriptor *desc)
{
struct hv_ring_buffer_info *ring_info = &channel->inbound;
u32 read_loc = ring_info->priv_read_index;
u32 packetlen = desc->len8 << 3;
u32 dsize = ring_info->ring_datasize;
BUG_ON((read_loc + packetlen + VMBUS_PKT_TRAILER) > dsize);
/*
* Include the packet trailer.
*/
ring_info->priv_read_index += packetlen + VMBUS_PKT_TRAILER;
}
/*
* This call commits the read index and potentially signals the host.
* Here is the pattern for using the "in-place" consumption APIs:
*
* while (get_next_pkt_raw()) {
* process the packet "in-place";
* put_pkt_raw();
* }
* if (packets processed in place)
* commit_rd_index();
*/
static void commit_rd_index(struct vmbus_channel *channel)
{
struct hv_ring_buffer_info *ring_info = &channel->inbound;
/*
* Make sure all reads are done before we update the read index since
* the writer may start writing to the read area once the read index
* is updated.
*/
virt_rmb();
ring_info->ring_buffer->read_index = ring_info->priv_read_index;
if (hv_need_to_signal_on_read(ring_info))
vmbus_set_event(channel);
}
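
As an illustration (not part of this change), a channel callback might drive these three helpers as follows; the loop mirrors the usage pattern documented above commit_rd_index(), and process_inband_packet() is a hypothetical handler:

static void example_channel_cb(struct vmbus_channel *channel)
{
	struct vmpacket_descriptor *desc;
	bool processed = false;

	/* Walk every complete packet currently in the ring. */
	while ((desc = get_next_pkt_raw(channel)) != NULL) {
		process_inband_packet(desc);	/* hypothetical handler */
		put_pkt_raw(channel, desc);	/* advance priv_read_index */
		processed = true;
	}

	/* Publish the read index once, signaling the host if needed. */
	if (processed)
		commit_rd_index(channel);
}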
/*
* Switch the data path from the synthetic interface to the VF
* interface.
@@ -59,7 +142,6 @@ void netvsc_switch_datapath(struct net_device *ndev, bool vf)
VM_PKT_DATA_INBAND, 0);
}
static struct netvsc_device *alloc_net_device(void)
{
struct netvsc_device *net_device;
@@ -82,6 +164,7 @@ static struct netvsc_device *alloc_net_device(void)
atomic_set(&net_device->open_cnt, 0);
net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
init_completion(&net_device->channel_init_wait);
return net_device;
}
@@ -123,13 +206,12 @@ static struct netvsc_device *get_inbound_net_device(struct hv_device *device)
return net_device;
}
static int netvsc_destroy_buf(struct hv_device *device)
static void netvsc_destroy_buf(struct hv_device *device)
{
struct nvsp_message *revoke_packet;
int ret = 0;
struct net_device *ndev = hv_get_drvdata(device);
struct netvsc_device *net_device = net_device_to_netvsc_device(ndev);
int ret;
/*
* If we got a section count, it means we received a
@@ -159,7 +241,7 @@ static int netvsc_destroy_buf(struct hv_device *device)
if (ret != 0) {
netdev_err(ndev, "unable to send "
"revoke receive buffer to netvsp\n");
return ret;
return;
}
}
@@ -174,7 +256,7 @@ static int netvsc_destroy_buf(struct hv_device *device)
if (ret != 0) {
netdev_err(ndev,
"unable to teardown receive buffer's gpadl\n");
return ret;
return;
}
net_device->recv_buf_gpadl_handle = 0;
}
@@ -218,7 +300,7 @@ static int netvsc_destroy_buf(struct hv_device *device)
if (ret != 0) {
netdev_err(ndev, "unable to send "
"revoke send buffer to netvsp\n");
return ret;
return;
}
}
/* Teardown the gpadl on the vsp end */
@@ -232,7 +314,7 @@ static int netvsc_destroy_buf(struct hv_device *device)
if (ret != 0) {
netdev_err(ndev,
"unable to teardown send buffer's gpadl\n");
return ret;
return;
}
net_device->send_buf_gpadl_handle = 0;
}
@@ -242,8 +324,6 @@ static int netvsc_destroy_buf(struct hv_device *device)
net_device->send_buf = NULL;
}
kfree(net_device->send_section_map);
return ret;
}
static int netvsc_init_buf(struct hv_device *device)
@@ -285,7 +365,6 @@ static int netvsc_init_buf(struct hv_device *device)
goto cleanup;
}
/* Notify the NetVsp of the gpadl handle */
init_packet = &net_device->channel_init_pkt;
@@ -412,7 +491,7 @@
/* Section count is simply the size divided by the section size.
*/
net_device->send_section_cnt =
net_device->send_buf_size/net_device->send_section_size;
net_device->send_buf_size / net_device->send_section_size;
dev_info(&device->device, "Send section size: %d, Section count:%d\n",
net_device->send_section_size, net_device->send_section_cnt);
@@ -421,8 +500,8 @@
net_device->map_words = DIV_ROUND_UP(net_device->send_section_cnt,
BITS_PER_LONG);
net_device->send_section_map =
kzalloc(net_device->map_words * sizeof(ulong), GFP_KERNEL);
net_device->send_section_map = kcalloc(net_device->map_words,
sizeof(ulong), GFP_KERNEL);
if (net_device->send_section_map == NULL) {
ret = -ENOMEM;
goto cleanup;
@@ -437,7 +516,6 @@ static int netvsc_init_buf(struct hv_device *device)
return ret;
}
/* Negotiate NVSP protocol version */
static int negotiate_nvsp_ver(struct hv_device *device,
struct netvsc_device *net_device,
@@ -498,9 +576,10 @@ static int netvsc_connect_vsp(struct hv_device *device)
struct netvsc_device *net_device;
struct nvsp_message *init_packet;
int ndis_version;
u32 ver_list[] = { NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2,
const u32 ver_list[] = {
NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2,
NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5 };
int i, num_ver = 4; /* number of different NVSP versions */
int i;
net_device = get_outbound_net_device(device);
if (!net_device)
@@ -509,7 +588,7 @@ static int netvsc_connect_vsp(struct hv_device *device)
init_packet = &net_device->channel_init_pkt;
/* Negotiate the latest NVSP protocol supported */
for (i = num_ver - 1; i >= 0; i--)
for (i = ARRAY_SIZE(ver_list) - 1; i >= 0; i--)
if (negotiate_nvsp_ver(device, net_device, init_packet,
ver_list[i]) == 0) {
net_device->nvsp_version = ver_list[i];
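
The loop bound above now comes from the array itself instead of the hand-maintained num_ver = 4, so adding an entry to ver_list can no longer fall out of sync with the count. ARRAY_SIZE() is the standard kernel idiom; in simplified form (the real macro in include/linux/kernel.h additionally rejects pointer arguments at compile time):

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))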
@@ -568,7 +647,7 @@ static void netvsc_disconnect_vsp(struct hv_device *device)
/*
* netvsc_device_remove - Callback when the root bus device is removed
*/
int netvsc_device_remove(struct hv_device *device)
void netvsc_device_remove(struct hv_device *device)
{
struct net_device *ndev = hv_get_drvdata(device);
struct net_device_context *net_device_ctx = netdev_priv(ndev);
@@ -590,10 +669,8 @@ int netvsc_device_remove(struct hv_device *device)
/* Release all resources */
vfree(net_device->sub_cb_buf);
free_netvsc_device(net_device);
return 0;
}
#define RING_AVAIL_PERCENT_HIWATER 20
#define RING_AVAIL_PERCENT_LOWATER 10
@@ -617,72 +694,79 @@ static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
sync_change_bit(index, net_device->send_section_map);
}
static void netvsc_send_tx_complete(struct netvsc_device *net_device,
struct vmbus_channel *incoming_channel,
struct hv_device *device,
struct vmpacket_descriptor *packet)
{
struct sk_buff *skb = (struct sk_buff *)(unsigned long)packet->trans_id;
struct net_device *ndev = hv_get_drvdata(device);
struct net_device_context *net_device_ctx = netdev_priv(ndev);
struct vmbus_channel *channel = device->channel;
int num_outstanding_sends;
u16 q_idx = 0;
int queue_sends;
/* Notify the layer above us */
if (likely(skb)) {
struct hv_netvsc_packet *nvsc_packet
= (struct hv_netvsc_packet *)skb->cb;
u32 send_index = nvsc_packet->send_buf_index;
if (send_index != NETVSC_INVALID_INDEX)
netvsc_free_send_slot(net_device, send_index);
q_idx = nvsc_packet->q_idx;
channel = incoming_channel;
dev_kfree_skb_any(skb);
}
num_outstanding_sends =
atomic_dec_return(&net_device->num_outstanding_sends);
queue_sends = atomic_dec_return(&net_device->queue_sends[q_idx]);
if (net_device->destroy && num_outstanding_sends == 0)
wake_up(&net_device->wait_drain);
if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) &&
!net_device_ctx->start_remove &&
(hv_ringbuf_avail_percent(&channel->outbound) > RING_AVAIL_PERCENT_HIWATER ||
queue_sends < 1))
netif_tx_wake_queue(netdev_get_tx_queue(ndev, q_idx));
}
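
The wake-up test above is one half of a simple hysteresis scheme built on the two watermarks defined earlier; a sketch of the intended behavior, assuming (as in this driver) that the send path stops the queue when free ring space runs low:

	/* Send side (sketch): stop once free outbound space drops below 10%. */
	if (hv_ringbuf_avail_percent(&channel->outbound) < RING_AVAIL_PERCENT_LOWATER)
		netif_tx_stop_queue(netdev_get_tx_queue(ndev, q_idx));

	/* Completion side (as above): wake only after recovering past 20%,
	 * so the queue does not flap around a single threshold.
	 */
	if (hv_ringbuf_avail_percent(&channel->outbound) > RING_AVAIL_PERCENT_HIWATER)
		netif_tx_wake_queue(netdev_get_tx_queue(ndev, q_idx));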
static void netvsc_send_completion(struct netvsc_device *net_device,
struct vmbus_channel *incoming_channel,
struct hv_device *device,
struct vmpacket_descriptor *packet)
{
struct nvsp_message *nvsp_packet;
struct hv_netvsc_packet *nvsc_packet;
struct net_device *ndev = hv_get_drvdata(device);
struct net_device_context *net_device_ctx = netdev_priv(ndev);
u32 send_index;
struct sk_buff *skb;
nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
(packet->offset8 << 3));
(packet->offset8 << 3));
if ((nvsp_packet->hdr.msg_type == NVSP_MSG_TYPE_INIT_COMPLETE) ||
(nvsp_packet->hdr.msg_type ==
NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE) ||
(nvsp_packet->hdr.msg_type ==
NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE) ||
(nvsp_packet->hdr.msg_type ==
NVSP_MSG5_TYPE_SUBCHANNEL)) {
switch (nvsp_packet->hdr.msg_type) {
case NVSP_MSG_TYPE_INIT_COMPLETE:
case NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE:
case NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE:
case NVSP_MSG5_TYPE_SUBCHANNEL:
/* Copy the response back */
memcpy(&net_device->channel_init_pkt, nvsp_packet,
sizeof(struct nvsp_message));
complete(&net_device->channel_init_wait);
} else if (nvsp_packet->hdr.msg_type ==
NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE) {
int num_outstanding_sends;
u16 q_idx = 0;
struct vmbus_channel *channel = device->channel;
int queue_sends;
break;
/* Get the send context */
skb = (struct sk_buff *)(unsigned long)packet->trans_id;
case NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE:
netvsc_send_tx_complete(net_device, incoming_channel,
device, packet);
break;
/* Notify the layer above us */
if (skb) {
nvsc_packet = (struct hv_netvsc_packet *) skb->cb;
send_index = nvsc_packet->send_buf_index;
if (send_index != NETVSC_INVALID_INDEX)
netvsc_free_send_slot(net_device, send_index);
q_idx = nvsc_packet->q_idx;
channel = incoming_channel;
dev_kfree_skb_any(skb);
}
num_outstanding_sends =
atomic_dec_return(&net_device->num_outstanding_sends);
queue_sends = atomic_dec_return(&net_device->
queue_sends[q_idx]);
if (net_device->destroy && num_outstanding_sends == 0)
wake_up(&net_device->wait_drain);
if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) &&
!net_device_ctx->start_remove &&
(hv_ringbuf_avail_percent(&channel->outbound) >
RING_AVAIL_PERCENT_HIWATER || queue_sends < 1))
netif_tx_wake_queue(netdev_get_tx_queue(
ndev, q_idx));
} else {
netdev_err(ndev, "Unknown send completion packet type- "
"%d received!!\n", nvsp_packet->hdr.msg_type);
default:
netdev_err(ndev,
"Unknown send completion type %d received!!\n",
nvsp_packet->hdr.msg_type);
}
}
static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
@@ -756,7 +840,7 @@ static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
return msg_size;
}
static inline int netvsc_send_pkt(
static int netvsc_send_pkt(
struct hv_device *device,
struct hv_netvsc_packet *packet,
struct netvsc_device *net_device,
@@ -872,7 +956,7 @@ int netvsc_send(struct hv_device *device,
struct sk_buff *skb)
{
struct netvsc_device *net_device;
int ret = 0, m_ret = 0;
int ret = 0;
struct vmbus_channel *out_channel;
u16 q_idx = packet->q_idx;
u32 pktlen = packet->total_data_buflen, msd_len = 0;
@@ -961,8 +1045,8 @@ int netvsc_send(struct hv_device *device,
}
if (msd_send) {
m_ret = netvsc_send_pkt(device, msd_send, net_device,
NULL, msd_skb);
int m_ret = netvsc_send_pkt(device, msd_send, net_device,
NULL, msd_skb);
if (m_ret != 0) {
netvsc_free_send_slot(net_device,
@@ -1157,7 +1241,6 @@ static void netvsc_receive(struct netvsc_device *net_device,
/* Pass it to the upper layer */
status = rndis_filter_receive(device, netvsc_packet, &data,
channel);
}
if (!net_device->mrc[q_idx].buf) {
@@ -1182,7 +1265,6 @@ static void netvsc_receive(struct netvsc_device *net_device,
rcd->status = status;
}
static void netvsc_send_table(struct hv_device *hdev,
struct nvsp_message *nvmsg)
{
@@ -1263,7 +1345,6 @@ static void netvsc_process_raw_pkt(struct hv_device *device,
}
}
void netvsc_channel_cb(void *context)
{
int ret;
@@ -1320,8 +1401,6 @@ void netvsc_channel_cb(void *context)
ndev,
request_id,
desc);
} else {
/*
* We are done for this pass.
@@ -1350,8 +1429,6 @@ void netvsc_channel_cb(void *context)
kfree(buffer);
netvsc_chk_recv_comp(net_device, channel, q_idx);
return;
}
/*
@@ -1373,9 +1450,6 @@ int netvsc_device_add(struct hv_device *device, void *additional_info)
net_device->ring_size = ring_size;
/* Initialize the NetVSC channel extension */
init_completion(&net_device->channel_init_wait);
set_per_channel_state(device->channel, net_device->cb_buffer);
/* Open the channel */

drivers/net/hyperv/netvsc_drv.c

@@ -40,7 +40,6 @@
#include "hyperv_net.h"
#define RING_SIZE_MIN 64
#define LINKCHANGE_INT (2 * HZ)
#define NETVSC_HW_FEATURES (NETIF_F_RXCSUM | \
@@ -358,18 +357,14 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
struct rndis_message *rndis_msg;
struct rndis_packet *rndis_pkt;
u32 rndis_msg_size;
bool isvlan;
bool linear = false;
struct rndis_per_packet_info *ppi;
struct ndis_tcp_ip_checksum_info *csum_info;
struct ndis_tcp_lso_info *lso_info;
int hdr_offset;
u32 net_trans_info;
u32 hash;
u32 skb_length;
struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT];
struct hv_page_buffer *pb = page_buf;
struct netvsc_stats *tx_stats = this_cpu_ptr(net_device_ctx->tx_stats);
/* We will need at most two pages to describe the rndis
* header. We can only transmit MAX_PAGE_BUFFER_COUNT number
@@ -377,22 +372,20 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
* more pages we try linearizing it.
*/
check_size:
skb_length = skb->len;
num_data_pgs = netvsc_get_slots(skb) + 2;
if (num_data_pgs > MAX_PAGE_BUFFER_COUNT && linear) {
net_alert_ratelimited("packet too big: %u pages (%u bytes)\n",
num_data_pgs, skb->len);
ret = -EFAULT;
goto drop;
} else if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
if (skb_linearize(skb)) {
net_alert_ratelimited("failed to linearize skb\n");
ret = -ENOMEM;
if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) {
++net_device_ctx->eth_stats.tx_scattered;
if (skb_linearize(skb))
goto no_memory;
num_data_pgs = netvsc_get_slots(skb) + 2;
if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
++net_device_ctx->eth_stats.tx_too_big;
goto drop;
}
linear = true;
goto check_size;
}
/*
@@ -401,17 +394,14 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
* structure.
*/
ret = skb_cow_head(skb, RNDIS_AND_PPI_SIZE);
if (ret) {
netdev_err(net, "unable to alloc hv_netvsc_packet\n");
ret = -ENOMEM;
goto drop;
}
if (ret)
goto no_memory;
/* Use the skb control buffer for building up the packet */
BUILD_BUG_ON(sizeof(struct hv_netvsc_packet) >
FIELD_SIZEOF(struct sk_buff, cb));
packet = (struct hv_netvsc_packet *)skb->cb;
packet->q_idx = skb_get_queue_mapping(skb);
packet->total_data_buflen = skb->len;
@@ -420,8 +410,6 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
memset(rndis_msg, 0, RNDIS_AND_PPI_SIZE);
isvlan = skb->vlan_tci & VLAN_TAG_PRESENT;
/* Add the rndis header */
rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
rndis_msg->msg_len = packet->total_data_buflen;
@@ -440,7 +428,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
*(u32 *)((void *)ppi + ppi->ppi_offset) = hash;
}
if (isvlan) {
if (skb_vlan_tag_present(skb)) {
struct ndis_pkt_8021q_info *vlan;
rndis_msg_size += NDIS_VLAN_PPI_SIZE;
@@ -461,8 +449,37 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
* Setup the sendside checksum offload only if this is not a
* GSO packet.
*/
if (skb_is_gso(skb))
goto do_lso;
if (skb_is_gso(skb)) {
struct ndis_tcp_lso_info *lso_info;
rndis_msg_size += NDIS_LSO_PPI_SIZE;
ppi = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
TCP_LARGESEND_PKTINFO);
lso_info = (struct ndis_tcp_lso_info *)((void *)ppi +
ppi->ppi_offset);
lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
if (net_trans_info & (INFO_IPV4 << 16)) {
lso_info->lso_v2_transmit.ip_version =
NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
ip_hdr(skb)->tot_len = 0;
ip_hdr(skb)->check = 0;
tcp_hdr(skb)->check =
~csum_tcpudp_magic(ip_hdr(skb)->saddr,
ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
} else {
lso_info->lso_v2_transmit.ip_version =
NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
ipv6_hdr(skb)->payload_len = 0;
tcp_hdr(skb)->check =
~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
}
lso_info->lso_v2_transmit.tcp_header_offset = hdr_offset;
lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;
goto do_send;
}
if ((skb->ip_summed == CHECKSUM_NONE) ||
(skb->ip_summed == CHECKSUM_UNNECESSARY))
@@ -495,7 +512,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
ret = skb_cow_head(skb, 0);
if (ret)
goto drop;
goto no_memory;
uh = udp_hdr(skb);
udp_len = ntohs(uh->len);
@@ -509,35 +526,6 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
csum_info->transmit.udp_checksum = 0;
}
goto do_send;
do_lso:
rndis_msg_size += NDIS_LSO_PPI_SIZE;
ppi = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
TCP_LARGESEND_PKTINFO);
lso_info = (struct ndis_tcp_lso_info *)((void *)ppi +
ppi->ppi_offset);
lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
if (net_trans_info & (INFO_IPV4 << 16)) {
lso_info->lso_v2_transmit.ip_version =
NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
ip_hdr(skb)->tot_len = 0;
ip_hdr(skb)->check = 0;
tcp_hdr(skb)->check =
~csum_tcpudp_magic(ip_hdr(skb)->saddr,
ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
} else {
lso_info->lso_v2_transmit.ip_version =
NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
ipv6_hdr(skb)->payload_len = 0;
tcp_hdr(skb)->check =
~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
}
lso_info->lso_v2_transmit.tcp_header_offset = hdr_offset;
lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;
do_send:
/* Start filling in the page buffers with the rndis hdr */
@@ -550,21 +538,33 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
skb_tx_timestamp(skb);
ret = netvsc_send(net_device_ctx->device_ctx, packet,
rndis_msg, &pb, skb);
if (likely(ret == 0)) {
struct netvsc_stats *tx_stats = this_cpu_ptr(net_device_ctx->tx_stats);
drop:
if (ret == 0) {
u64_stats_update_begin(&tx_stats->syncp);
tx_stats->packets++;
tx_stats->bytes += skb_length;
u64_stats_update_end(&tx_stats->syncp);
} else {
if (ret != -EAGAIN) {
dev_kfree_skb_any(skb);
net->stats.tx_dropped++;
}
return NETDEV_TX_OK;
}
return (ret == -EAGAIN) ? NETDEV_TX_BUSY : NETDEV_TX_OK;
if (ret == -EAGAIN) {
++net_device_ctx->eth_stats.tx_busy;
return NETDEV_TX_BUSY;
}
if (ret == -ENOSPC)
++net_device_ctx->eth_stats.tx_no_space;
drop:
dev_kfree_skb_any(skb);
net->stats.tx_dropped++;
return NETDEV_TX_OK;
no_memory:
++net_device_ctx->eth_stats.tx_no_memory;
goto drop;
}
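
To summarize the restructured exit paths, each outcome of the transmit attempt now increments exactly one counter (return-code semantics as used in this function):

	ret == 0	packets/bytes added to the per-cpu tx_stats
	ret == -EAGAIN	eth_stats.tx_busy++, return NETDEV_TX_BUSY (requeued)
	ret == -ENOSPC	eth_stats.tx_no_space++, then drop
	no_memory:	eth_stats.tx_no_memory++, then drop (allocation failures)
	oversized	eth_stats.tx_scattered++ before linearizing; tx_too_big++
			and drop if still over MAX_PAGE_BUFFER_COUNT afterwards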
/*
@@ -617,7 +617,6 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj,
schedule_delayed_work(&ndev_ctx->dwork, 0);
}
static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
struct hv_netvsc_packet *packet,
struct ndis_tcp_ip_checksum_info *csum_info,
@@ -741,8 +740,12 @@ int netvsc_recv_callback(struct hv_device *device_obj,
static void netvsc_get_drvinfo(struct net_device *net,
struct ethtool_drvinfo *info)
{
struct net_device_context *net_device_ctx = netdev_priv(net);
struct hv_device *dev = net_device_ctx->device_ctx;
strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
strlcpy(info->bus_info, vmbus_dev_name(dev), sizeof(info->bus_info));
}
static void netvsc_get_channels(struct net_device *net,
@@ -1018,6 +1021,51 @@ static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
return err;
}
static const struct {
char name[ETH_GSTRING_LEN];
u16 offset;
} netvsc_stats[] = {
{ "tx_scattered", offsetof(struct netvsc_ethtool_stats, tx_scattered) },
{ "tx_no_memory", offsetof(struct netvsc_ethtool_stats, tx_no_memory) },
{ "tx_no_space", offsetof(struct netvsc_ethtool_stats, tx_no_space) },
{ "tx_too_big", offsetof(struct netvsc_ethtool_stats, tx_too_big) },
{ "tx_busy", offsetof(struct netvsc_ethtool_stats, tx_busy) },
};
static int netvsc_get_sset_count(struct net_device *dev, int string_set)
{
switch (string_set) {
case ETH_SS_STATS:
return ARRAY_SIZE(netvsc_stats);
default:
return -EINVAL;
}
}
static void netvsc_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
struct net_device_context *ndc = netdev_priv(dev);
const void *nds = &ndc->eth_stats;
int i;
for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++)
data[i] = *(unsigned long *)(nds + netvsc_stats[i].offset);
}
static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
int i;
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++)
memcpy(data + i * ETH_GSTRING_LEN,
netvsc_stats[i].name, ETH_GSTRING_LEN);
break;
}
}
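
With the three hooks registered in ethtool_ops below, the new counters surface to userspace. Hypothetically, on an idle interface named eth0, the output would look like:

$ ethtool -S eth0
NIC statistics:
     tx_scattered: 0
     tx_no_memory: 0
     tx_no_space: 0
     tx_too_big: 0
     tx_busy: 0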
#ifdef CONFIG_NET_POLL_CONTROLLER
static void netvsc_poll_controller(struct net_device *net)
{
@@ -1030,6 +1078,9 @@ static void netvsc_poll_controller(struct net_device *net)
static const struct ethtool_ops ethtool_ops = {
.get_drvinfo = netvsc_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_ethtool_stats = netvsc_get_ethtool_stats,
.get_sset_count = netvsc_get_sset_count,
.get_strings = netvsc_get_strings,
.get_channels = netvsc_get_channels,
.set_channels = netvsc_set_channels,
.get_ts_info = ethtool_op_get_ts_info,
@@ -1167,9 +1218,8 @@ static void netvsc_free_netdev(struct net_device *netdev)
static struct net_device *get_netvsc_net_device(char *mac)
{
struct net_device *dev, *found = NULL;
int rtnl_locked;
rtnl_locked = rtnl_trylock();
ASSERT_RTNL();
for_each_netdev(&init_net, dev) {
if (memcmp(dev->dev_addr, mac, ETH_ALEN) == 0) {
@@ -1179,8 +1229,6 @@ static struct net_device *get_netvsc_net_device(char *mac)
break;
}
}
if (rtnl_locked)
rtnl_unlock();
return found;
}
@@ -1274,7 +1322,6 @@ static int netvsc_vf_up(struct net_device *vf_netdev)
return NOTIFY_OK;
}
static int netvsc_vf_down(struct net_device *vf_netdev)
{
struct net_device *ndev;
@@ -1308,7 +1355,6 @@ static int netvsc_vf_down(struct net_device *vf_netdev)
return NOTIFY_OK;
}
static int netvsc_unregister_vf(struct net_device *vf_netdev)
{
struct net_device *ndev;
@@ -1436,7 +1482,6 @@ static int netvsc_remove(struct hv_device *dev)
return 0;
}
ndev_ctx = netdev_priv(net);
net_device = ndev_ctx->nvdev;
@@ -1483,7 +1528,6 @@ static struct hv_driver netvsc_drv = {
.remove = netvsc_remove,
};
/*
* On Hyper-V, every VF interface is matched with a corresponding
* synthetic interface. The synthetic interface is presented first

drivers/net/hyperv/rndis_filter.c

@@ -663,13 +663,14 @@ rndis_filter_set_offload_params(struct net_device *ndev,
return ret;
}
u8 netvsc_hash_key[HASH_KEYLEN] = {
static const u8 netvsc_hash_key[] = {
0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
};
#define HASH_KEYLEN ARRAY_SIZE(netvsc_hash_key)
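
This redefinition is behavior-preserving: the key literal above is five rows of eight bytes, so ARRAY_SIZE(netvsc_hash_key) = 5 × 8 = 40, the same value as the NDIS_RSS_HASH_SECRET_KEY_MAX_SIZE_REVISION_2 definition removed from hyperv_net.h, and the RNDIS message layout is unchanged.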
static int rndis_filter_set_rss_param(struct rndis_device *rdev, int num_queue)
{
@@ -720,7 +721,6 @@ static int rndis_filter_set_rss_param(struct rndis_device *rdev, int num_queue)
for (i = 0; i < HASH_KEYLEN; i++)
keyp[i] = netvsc_hash_key[i];
ret = rndis_filter_send_request(rdev, request);
if (ret != 0)
goto cleanup;
@@ -738,7 +738,6 @@ static int rndis_filter_set_rss_param(struct rndis_device *rdev, int num_queue)
return ret;
}
static int rndis_filter_query_device_link_status(struct rndis_device *dev)
{
u32 size = sizeof(u32);
@@ -814,7 +813,6 @@ int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter)
return ret;
}
static int rndis_filter_init_device(struct rndis_device *dev)
{
struct rndis_request *request;
@@ -902,7 +900,6 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
if (request)
put_rndis_request(dev, request);
return;
}
static int rndis_filter_open_device(struct rndis_device *dev)
@@ -972,7 +969,7 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
}
int rndis_filter_device_add(struct hv_device *dev,
void *additional_info)
void *additional_info)
{
int ret;
struct net_device *net = hv_get_drvdata(dev);
@@ -1054,7 +1051,6 @@ int rndis_filter_device_add(struct hv_device *dev,
offloads.udp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
offloads.lso_v2_ipv4 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
ret = rndis_filter_set_offload_params(net, &offloads);
if (ret)
goto err_dev_remv;
@@ -1180,7 +1176,6 @@ void rndis_filter_device_remove(struct hv_device *dev)
netvsc_device_remove(dev);
}
int rndis_filter_open(struct netvsc_device *nvdev)
{
if (!nvdev)

include/linux/hyperv.h

@@ -1114,6 +1114,13 @@ int __must_check __vmbus_driver_register(struct hv_driver *hv_driver,
const char *mod_name);
void vmbus_driver_unregister(struct hv_driver *hv_driver);
static inline const char *vmbus_dev_name(const struct hv_device *device_obj)
{
const struct kobject *kobj = &device_obj->device.kobj;
return kobj->name;
}
void vmbus_hvsock_device_unregister(struct vmbus_channel *channel);
int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
@@ -1422,88 +1429,4 @@ static inline bool hv_need_to_signal_on_read(struct hv_ring_buffer_info *rbi)
return false;
}
/*
* An API to support in-place processing of incoming VMBUS packets.
*/
#define VMBUS_PKT_TRAILER 8
static inline struct vmpacket_descriptor *
get_next_pkt_raw(struct vmbus_channel *channel)
{
struct hv_ring_buffer_info *ring_info = &channel->inbound;
u32 read_loc = ring_info->priv_read_index;
void *ring_buffer = hv_get_ring_buffer(ring_info);
struct vmpacket_descriptor *cur_desc;
u32 packetlen;
u32 dsize = ring_info->ring_datasize;
u32 delta = read_loc - ring_info->ring_buffer->read_index;
u32 bytes_avail_toread = (hv_get_bytes_to_read(ring_info) - delta);
if (bytes_avail_toread < sizeof(struct vmpacket_descriptor))
return NULL;
if ((read_loc + sizeof(*cur_desc)) > dsize)
return NULL;
cur_desc = ring_buffer + read_loc;
packetlen = cur_desc->len8 << 3;
/*
* If the packet under consideration is wrapping around,
* return failure.
*/
if ((read_loc + packetlen + VMBUS_PKT_TRAILER) > (dsize - 1))
return NULL;
return cur_desc;
}
/*
* A helper function to step through packets "in-place".
* This API is to be called after each successful call to
* get_next_pkt_raw().
*/
static inline void put_pkt_raw(struct vmbus_channel *channel,
struct vmpacket_descriptor *desc)
{
struct hv_ring_buffer_info *ring_info = &channel->inbound;
u32 read_loc = ring_info->priv_read_index;
u32 packetlen = desc->len8 << 3;
u32 dsize = ring_info->ring_datasize;
if ((read_loc + packetlen + VMBUS_PKT_TRAILER) > dsize)
BUG();
/*
* Include the packet trailer.
*/
ring_info->priv_read_index += packetlen + VMBUS_PKT_TRAILER;
}
/*
* This call commits the read index and potentially signals the host.
* Here is the pattern for using the "in-place" consumption APIs:
*
* while (get_next_pkt_raw()) {
* process the packet "in-place";
* put_pkt_raw();
* }
* if (packets processed in place)
* commit_rd_index();
*/
static inline void commit_rd_index(struct vmbus_channel *channel)
{
struct hv_ring_buffer_info *ring_info = &channel->inbound;
/*
* Make sure all reads are done before we update the read index since
* the writer may start writing to the read area once the read index
* is updated.
*/
virt_rmb();
ring_info->ring_buffer->read_index = ring_info->priv_read_index;
if (hv_need_to_signal_on_read(ring_info))
vmbus_set_event(channel);
}
#endif /* _HYPERV_H */