linux_dsm_epyc7002/drivers/net/virtio_net.c


/* A simple network driver using virtio.
*
* Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
static int napi_weight = 128;
module_param(napi_weight, int, 0444);
static int csum = 1, gso = 1;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);
/* FIXME: MTU in config. */
#define MAX_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
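/* With mergeable receive buffers, up to this many bytes of payload are
 * copied into the skb's linear area so that small packets carry no
 * paged data at all; any remainder stays in the page fragment. */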
#define GOOD_COPY_LEN 128
#define VIRTNET_SEND_COMMAND_SG_MAX 2
struct virtnet_info
{
struct virtio_device *vdev;
struct virtqueue *rvq, *svq, *cvq;
struct net_device *dev;
struct napi_struct napi;
unsigned int status;
/* The skb we couldn't send because buffers were full. */
struct sk_buff *last_xmit_skb;
/* If we need to free in a timer, this is it. */
struct timer_list xmit_free_timer;
/* Number of input buffers, and max we've ever had. */
unsigned int num, max;
/* For cleaning up after transmission. */
struct tasklet_struct tasklet;
bool free_in_tasklet;
/* I like... big packets and I cannot lie! */
bool big_packets;
/* Host will merge rx buffers for big packets (shake it! shake it!) */
bool mergeable_rx_bufs;
/* Receive & send queues. */
struct sk_buff_head recv;
struct sk_buff_head send;
/* Chain pages by the private ptr. */
struct page *pages;
};
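/* Each packet's virtio_net header is stashed in the skb control buffer
 * (skb->cb) rather than in the packet data itself. */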
static inline void *skb_vnet_hdr(struct sk_buff *skb)
{
return (struct virtio_net_hdr *)skb->cb;
}
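/* Receive pages are recycled through a simple free list chained via
 * page->private; get_a_page() falls back to alloc_page() when the list
 * is empty. */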
static void give_a_page(struct virtnet_info *vi, struct page *page)
{
page->private = (unsigned long)vi->pages;
vi->pages = page;
}
static void trim_pages(struct virtnet_info *vi, struct sk_buff *skb)
{
unsigned int i;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
give_a_page(vi, skb_shinfo(skb)->frags[i].page);
skb_shinfo(skb)->nr_frags = 0;
skb->data_len = 0;
}
static struct page *get_a_page(struct virtnet_info *vi, gfp_t gfp_mask)
{
struct page *p = vi->pages;
if (p)
vi->pages = (struct page *)p->private;
else
p = alloc_page(gfp_mask);
return p;
}
static void skb_xmit_done(struct virtqueue *svq)
{
struct virtnet_info *vi = svq->vdev->priv;
/* Suppress further interrupts. */
svq->vq_ops->disable_cb(svq);
/* We were probably waiting for more output buffers. */
netif_wake_queue(vi->dev);
/* Make sure we re-xmit last_xmit_skb: if there are no more packets
* queued, start_xmit won't be called. */
tasklet_schedule(&vi->tasklet);
}
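/* Hand a completed receive buffer to the stack. With mergeable rx
 * buffers the first page holds the virtio_net_hdr_mrg_rxbuf plus the
 * start of the packet: copy what fits into the linear area, keep any
 * remainder as a fragment, then pull num_buffers - 1 further buffers
 * off the receive queue and append their pages as extra fragments.
 * Without mergeable buffers, just trim unused pages and excess length. */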
static void receive_skb(struct net_device *dev, struct sk_buff *skb,
unsigned len)
{
struct virtnet_info *vi = netdev_priv(dev);
struct virtio_net_hdr *hdr = skb_vnet_hdr(skb);
int err;
int i;
if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
pr_debug("%s: short packet %i\n", dev->name, len);
dev->stats.rx_length_errors++;
goto drop;
}
if (vi->mergeable_rx_bufs) {
struct virtio_net_hdr_mrg_rxbuf *mhdr = skb_vnet_hdr(skb);
unsigned int copy;
char *p = page_address(skb_shinfo(skb)->frags[0].page);
if (len > PAGE_SIZE)
len = PAGE_SIZE;
len -= sizeof(struct virtio_net_hdr_mrg_rxbuf);
memcpy(hdr, p, sizeof(*mhdr));
p += sizeof(*mhdr);
copy = len;
if (copy > skb_tailroom(skb))
copy = skb_tailroom(skb);
memcpy(skb_put(skb, copy), p, copy);
len -= copy;
if (!len) {
give_a_page(vi, skb_shinfo(skb)->frags[0].page);
skb_shinfo(skb)->nr_frags--;
} else {
skb_shinfo(skb)->frags[0].page_offset +=
sizeof(*mhdr) + copy;
skb_shinfo(skb)->frags[0].size = len;
skb->data_len += len;
skb->len += len;
}
while (--mhdr->num_buffers) {
struct sk_buff *nskb;
i = skb_shinfo(skb)->nr_frags;
if (i >= MAX_SKB_FRAGS) {
pr_debug("%s: packet too long %d\n", dev->name,
len);
dev->stats.rx_length_errors++;
goto drop;
}
nskb = vi->rvq->vq_ops->get_buf(vi->rvq, &len);
if (!nskb) {
pr_debug("%s: rx error: %d buffers missing\n",
dev->name, mhdr->num_buffers);
dev->stats.rx_length_errors++;
goto drop;
}
__skb_unlink(nskb, &vi->recv);
vi->num--;
skb_shinfo(skb)->frags[i] = skb_shinfo(nskb)->frags[0];
skb_shinfo(nskb)->nr_frags = 0;
kfree_skb(nskb);
if (len > PAGE_SIZE)
len = PAGE_SIZE;
skb_shinfo(skb)->frags[i].size = len;
skb_shinfo(skb)->nr_frags++;
skb->data_len += len;
skb->len += len;
}
} else {
len -= sizeof(struct virtio_net_hdr);
if (len <= MAX_PACKET_LEN)
trim_pages(vi, skb);
err = pskb_trim(skb, len);
if (err) {
pr_debug("%s: pskb_trim failed %i %d\n", dev->name,
len, err);
dev->stats.rx_dropped++;
goto drop;
}
}
skb->truesize += skb->data_len;
dev->stats.rx_bytes += skb->len;
dev->stats.rx_packets++;
if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
pr_debug("Needs csum!\n");
if (!skb_partial_csum_set(skb, hdr->csum_start, hdr->csum_offset))
goto frame_err;
}
skb->protocol = eth_type_trans(skb, dev);
pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
ntohs(skb->protocol), skb->len, skb->pkt_type);
if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
pr_debug("GSO!\n");
switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
case VIRTIO_NET_HDR_GSO_TCPV4:
skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
break;
case VIRTIO_NET_HDR_GSO_UDP:
skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
break;
case VIRTIO_NET_HDR_GSO_TCPV6:
skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
break;
default:
if (net_ratelimit())
printk(KERN_WARNING "%s: bad gso type %u.\n",
dev->name, hdr->gso_type);
goto frame_err;
}
if (hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN)
skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
skb_shinfo(skb)->gso_size = hdr->gso_size;
if (skb_shinfo(skb)->gso_size == 0) {
if (net_ratelimit())
printk(KERN_WARNING "%s: zero gso size.\n",
dev->name);
goto frame_err;
}
/* Header must be checked, and gso_segs computed. */
skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
skb_shinfo(skb)->gso_segs = 0;
}
netif_receive_skb(skb);
return;
frame_err:
dev->stats.rx_frame_errors++;
drop:
dev_kfree_skb(skb);
}
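/* Post full-sized receive buffers: a linear skb of MAX_PACKET_LEN and,
 * when big packets are negotiated, up to MAX_SKB_FRAGS whole pages,
 * all described by one scatterlist headed by the virtio_net_hdr. */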
static void try_fill_recv_maxbufs(struct virtnet_info *vi)
{
struct sk_buff *skb;
struct scatterlist sg[2+MAX_SKB_FRAGS];
int num, err, i;
sg_init_table(sg, 2+MAX_SKB_FRAGS);
for (;;) {
struct virtio_net_hdr *hdr;
skb = netdev_alloc_skb(vi->dev, MAX_PACKET_LEN);
if (unlikely(!skb))
break;
skb_put(skb, MAX_PACKET_LEN);
hdr = skb_vnet_hdr(skb);
sg_set_buf(sg, hdr, sizeof(*hdr));
if (vi->big_packets) {
for (i = 0; i < MAX_SKB_FRAGS; i++) {
skb_frag_t *f = &skb_shinfo(skb)->frags[i];
f->page = get_a_page(vi, GFP_ATOMIC);
if (!f->page)
break;
f->page_offset = 0;
f->size = PAGE_SIZE;
skb->data_len += PAGE_SIZE;
skb->len += PAGE_SIZE;
skb_shinfo(skb)->nr_frags++;
}
}
num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
skb_queue_head(&vi->recv, skb);
err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, num, skb);
if (err) {
skb_unlink(skb, &vi->recv);
trim_pages(vi, skb);
kfree_skb(skb);
break;
}
vi->num++;
}
if (unlikely(vi->num > vi->max))
vi->max = vi->num;
vi->rvq->vq_ops->kick(vi->rvq);
}
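/* With mergeable rx buffers each ring entry is a single page; the skb
 * allocated here only reserves GOOD_COPY_LEN of linear space for the
 * copy performed later in receive_skb(). */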
static void try_fill_recv(struct virtnet_info *vi)
{
struct sk_buff *skb;
struct scatterlist sg[1];
int err;
if (!vi->mergeable_rx_bufs) {
try_fill_recv_maxbufs(vi);
return;
}
for (;;) {
skb_frag_t *f;
skb = netdev_alloc_skb(vi->dev, GOOD_COPY_LEN + NET_IP_ALIGN);
if (unlikely(!skb))
break;
skb_reserve(skb, NET_IP_ALIGN);
f = &skb_shinfo(skb)->frags[0];
f->page = get_a_page(vi, GFP_ATOMIC);
if (!f->page) {
kfree_skb(skb);
break;
}
f->page_offset = 0;
f->size = PAGE_SIZE;
skb_shinfo(skb)->nr_frags++;
sg_init_one(sg, page_address(f->page), PAGE_SIZE);
skb_queue_head(&vi->recv, skb);
err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, 1, skb);
if (err) {
skb_unlink(skb, &vi->recv);
kfree_skb(skb);
break;
}
vi->num++;
}
if (unlikely(vi->num > vi->max))
vi->max = vi->num;
vi->rvq->vq_ops->kick(vi->rvq);
}
static void skb_recv_done(struct virtqueue *rvq)
{
struct virtnet_info *vi = rvq->vdev->priv;
/* Schedule NAPI; suppress further interrupts if successful. */
if (napi_schedule_prep(&vi->napi)) {
rvq->vq_ops->disable_cb(rvq);
__napi_schedule(&vi->napi);
}
}
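/* NAPI poll: drain completed receive buffers, refill the ring once it
 * falls below half of its high-water mark, then re-enable the callback;
 * if more buffers arrived in the meantime, reschedule and keep polling. */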
static int virtnet_poll(struct napi_struct *napi, int budget)
{
struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi);
struct sk_buff *skb = NULL;
unsigned int len, received = 0;
again:
while (received < budget &&
(skb = vi->rvq->vq_ops->get_buf(vi->rvq, &len)) != NULL) {
__skb_unlink(skb, &vi->recv);
receive_skb(vi->dev, skb, len);
vi->num--;
received++;
}
/* FIXME: If we oom and completely run out of inbufs, we need
* to start a timer trying to fill more. */
if (vi->num < vi->max / 2)
try_fill_recv(vi);
/* Out of packets? */
if (received < budget) {
napi_complete(napi);
if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq))
&& napi_schedule_prep(napi)) {
vi->rvq->vq_ops->disable_cb(vi->rvq);
__napi_schedule(napi);
goto again;
}
}
return received;
}
static void free_old_xmit_skbs(struct virtnet_info *vi)
{
struct sk_buff *skb;
unsigned int len;
while ((skb = vi->svq->vq_ops->get_buf(vi->svq, &len)) != NULL) {
pr_debug("Sent skb %p\n", skb);
__skb_unlink(skb, &vi->send);
vi->dev->stats.tx_bytes += skb->len;
vi->dev->stats.tx_packets++;
kfree_skb(skb);
}
}
/* If the virtio transport doesn't always notify us when all in-flight packets
* are consumed, we fall back to using this function on a timer to free them. */
static void xmit_free(unsigned long data)
{
struct virtnet_info *vi = (void *)data;
netif_tx_lock(vi->dev);
free_old_xmit_skbs(vi);
if (!skb_queue_empty(&vi->send))
mod_timer(&vi->xmit_free_timer, jiffies + (HZ/10));
netif_tx_unlock(vi->dev);
}
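/* Build the virtio_net_hdr (or its mergeable-rxbuf variant) from the
 * skb's checksum and GSO state and add header plus packet data to the
 * send queue as one scatter-gather buffer. */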
static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
{
int num, err;
struct scatterlist sg[2+MAX_SKB_FRAGS];
struct virtio_net_hdr_mrg_rxbuf *mhdr = skb_vnet_hdr(skb);
struct virtio_net_hdr *hdr = skb_vnet_hdr(skb);
const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
sg_init_table(sg, 2+MAX_SKB_FRAGS);
pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
if (skb->ip_summed == CHECKSUM_PARTIAL) {
hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
hdr->csum_start = skb->csum_start - skb_headroom(skb);
hdr->csum_offset = skb->csum_offset;
} else {
hdr->flags = 0;
hdr->csum_offset = hdr->csum_start = 0;
}
if (skb_is_gso(skb)) {
hdr->hdr_len = skb_transport_header(skb) - skb->data;
hdr->gso_size = skb_shinfo(skb)->gso_size;
if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
else
BUG();
if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
} else {
hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;
hdr->gso_size = hdr->hdr_len = 0;
}
mhdr->num_buffers = 0;
/* Encode metadata header at front. */
if (vi->mergeable_rx_bufs)
sg_set_buf(sg, mhdr, sizeof(*mhdr));
else
sg_set_buf(sg, hdr, sizeof(*hdr));
num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
err = vi->svq->vq_ops->add_buf(vi->svq, sg, num, 0, skb);
if (!err && !vi->free_in_tasklet)
mod_timer(&vi->xmit_free_timer, jiffies + (HZ/10));
return err;
}
static void xmit_tasklet(unsigned long data)
{
struct virtnet_info *vi = (void *)data;
netif_tx_lock_bh(vi->dev);
if (vi->last_xmit_skb && xmit_skb(vi, vi->last_xmit_skb) == 0) {
vi->svq->vq_ops->kick(vi->svq);
vi->last_xmit_skb = NULL;
}
if (vi->free_in_tasklet)
free_old_xmit_skbs(vi);
netif_tx_unlock_bh(vi->dev);
}
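/* Transmit path: at most one skb may be deferred in last_xmit_skb.
 * Retry that one first; if the new skb cannot be queued either, it is
 * stashed (or dropped if a deferred skb already exists) and the queue
 * is stopped, after re-checking for completions that raced with us. */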
static int start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct virtnet_info *vi = netdev_priv(dev);
again:
/* Free up any pending old buffers before queueing new ones. */
free_old_xmit_skbs(vi);
/* If we have a buffer left over from last time, send it now. */
if (unlikely(vi->last_xmit_skb) &&
xmit_skb(vi, vi->last_xmit_skb) != 0)
goto stop_queue;
vi->last_xmit_skb = NULL;
/* Put the new skb in the send queue and transmit it */
if (likely(skb)) {
__skb_queue_head(&vi->send, skb);
if (xmit_skb(vi, skb) != 0) {
vi->last_xmit_skb = skb;
skb = NULL;
goto stop_queue;
}
}
done:
vi->svq->vq_ops->kick(vi->svq);
return NETDEV_TX_OK;
stop_queue:
pr_debug("%s: virtio not prepared to send\n", dev->name);
netif_stop_queue(dev);
/* Activate callback for using skbs: if this returns false it
* means some were used in the meantime. */
if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
vi->svq->vq_ops->disable_cb(vi->svq);
netif_start_queue(dev);
goto again;
}
if (skb) {
/* Drop this skb: we only queue one. */
vi->dev->stats.tx_dropped++;
kfree_skb(skb);
}
goto done;
}
static int virtnet_set_mac_address(struct net_device *dev, void *p)
{
struct virtnet_info *vi = netdev_priv(dev);
struct virtio_device *vdev = vi->vdev;
int ret;
ret = eth_mac_addr(dev, p);
if (ret)
return ret;
vdev->config->set(vdev, offsetof(struct virtio_net_config, mac),
dev->dev_addr, dev->addr_len);
return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void virtnet_netpoll(struct net_device *dev)
{
struct virtnet_info *vi = netdev_priv(dev);
napi_schedule(&vi->napi);
}
#endif
static int virtnet_open(struct net_device *dev)
{
struct virtnet_info *vi = netdev_priv(dev);
napi_enable(&vi->napi);
/* If all buffers were filled by other side before we napi_enabled, we
* won't get another interrupt, so process any outstanding packets
* now. virtnet_poll wants to re-enable the queue, so we disable here.
* We synchronize against interrupts via NAPI_STATE_SCHED */
if (napi_schedule_prep(&vi->napi)) {
vi->rvq->vq_ops->disable_cb(vi->rvq);
__napi_schedule(&vi->napi);
}
return 0;
}
/*
* Send command via the control virtqueue and check status. Commands
* supported by the hypervisor, as indicated by feature bits, should
* never fail unless improperly formatted.
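*
* Callers pack the command payload into a scatterlist and pass it in;
* for example, virtnet_set_rx_mode() below sends a one-byte "out" entry:
*
*	sg_set_buf(sg, &promisc, sizeof(promisc));
*	virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
*			     VIRTIO_NET_CTRL_RX_PROMISC, sg, 1, 0);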
*/
static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
struct scatterlist *data, int out, int in)
{
struct scatterlist sg[VIRTNET_SEND_COMMAND_SG_MAX + 2];
struct virtio_net_ctrl_hdr ctrl;
virtio_net_ctrl_ack status = ~0;
unsigned int tmp;
if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) {
BUG(); /* Caller should know better */
return false;
}
BUG_ON(out + in > VIRTNET_SEND_COMMAND_SG_MAX);
out++; /* Add header */
in++; /* Add return status */
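/*
 * Resulting layout: sg[0] holds the control header, the caller's
 * entries fill sg[1]..sg[out + in - 2], and sg[out + in - 1] is the
 * status byte the host writes back.
 */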
ctrl.class = class;
ctrl.cmd = cmd;
sg_init_table(sg, out + in);
sg_set_buf(&sg[0], &ctrl, sizeof(ctrl));
memcpy(&sg[1], data, sizeof(struct scatterlist) * (out + in - 2));
sg_set_buf(&sg[out + in - 1], &status, sizeof(status));
if (vi->cvq->vq_ops->add_buf(vi->cvq, sg, out, in, vi) != 0)
BUG();
vi->cvq->vq_ops->kick(vi->cvq);
/*
* Spin for a response, the kick causes an ioport write, trapping
* into the hypervisor, so the request should be handled immediately.
*/
while (!vi->cvq->vq_ops->get_buf(vi->cvq, &tmp))
cpu_relax();
return status == VIRTIO_NET_OK;
}
static int virtnet_close(struct net_device *dev)
{
struct virtnet_info *vi = netdev_priv(dev);
napi_disable(&vi->napi);
return 0;
}
static int virtnet_set_tx_csum(struct net_device *dev, u32 data)
{
struct virtnet_info *vi = netdev_priv(dev);
struct virtio_device *vdev = vi->vdev;
if (data && !virtio_has_feature(vdev, VIRTIO_NET_F_CSUM))
return -ENOSYS;
return ethtool_op_set_tx_hw_csum(dev, data);
}
static void virtnet_set_rx_mode(struct net_device *dev)
{
struct virtnet_info *vi = netdev_priv(dev);
struct scatterlist sg[2];
u8 promisc, allmulti;
struct virtio_net_ctrl_mac *mac_data;
struct dev_addr_list *addr;
void *buf;
int i;
/* We can't dynamically set ndo_set_rx_mode, so return gracefully */
if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
return;
promisc = ((dev->flags & IFF_PROMISC) != 0);
allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
sg_set_buf(sg, &promisc, sizeof(promisc));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
VIRTIO_NET_CTRL_RX_PROMISC,
sg, 1, 0))
dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
promisc ? "en" : "dis");
sg_set_buf(sg, &allmulti, sizeof(allmulti));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
VIRTIO_NET_CTRL_RX_ALLMULTI,
sg, 1, 0))
dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
allmulti ? "en" : "dis");
/* MAC filter - use one buffer for both lists */
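/* ndo_set_rx_mode runs in atomic context (with the address list lock
 * held), hence GFP_ATOMIC for the allocation below. */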
mac_data = buf = kzalloc(((dev->uc_count + dev->mc_count) * ETH_ALEN) +
(2 * sizeof(mac_data->entries)), GFP_ATOMIC);
if (!buf) {
dev_warn(&dev->dev, "No memory for MAC address buffer\n");
return;
}
/* Store the unicast list and count in the front of the buffer */
mac_data->entries = dev->uc_count;
addr = dev->uc_list;
for (i = 0; i < dev->uc_count; i++, addr = addr->next)
memcpy(&mac_data->macs[i][0], addr->da_addr, ETH_ALEN);
sg_set_buf(&sg[0], mac_data,
sizeof(mac_data->entries) + (dev->uc_count * ETH_ALEN));
/* multicast list and count fill the end */
mac_data = (void *)&mac_data->macs[dev->uc_count][0];
mac_data->entries = dev->mc_count;
addr = dev->mc_list;
for (i = 0; i < dev->mc_count; i++, addr = addr->next)
memcpy(&mac_data->macs[i][0], addr->da_addr, ETH_ALEN);
sg_set_buf(&sg[1], mac_data,
sizeof(mac_data->entries) + (dev->mc_count * ETH_ALEN));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
VIRTIO_NET_CTRL_MAC_TABLE_SET,
sg, 2, 0))
dev_warn(&dev->dev, "Failed to set MAC fitler table.\n");
kfree(buf);
}
static void virnet_vlan_rx_add_vid(struct net_device *dev, u16 vid)
{
struct virtnet_info *vi = netdev_priv(dev);
struct scatterlist sg;
sg_set_buf(&sg, &vid, sizeof(vid));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
VIRTIO_NET_CTRL_VLAN_ADD, &sg, 1, 0))
dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
}
static void virnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
{
struct virtnet_info *vi = netdev_priv(dev);
struct scatterlist sg;
sg_set_buf(&sg, &vid, sizeof(vid));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
VIRTIO_NET_CTRL_VLAN_DEL, &sg, 1, 0))
dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
}
static struct ethtool_ops virtnet_ethtool_ops = {
.set_tx_csum = virtnet_set_tx_csum,
.set_sg = ethtool_op_set_sg,
.set_tso = ethtool_op_set_tso,
.get_link = ethtool_op_get_link,
};
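/* 68 is the minimum MTU required by IPv4 (RFC 791); 65535 is the
 * largest possible IP datagram. */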
#define MIN_MTU 68
#define MAX_MTU 65535
static int virtnet_change_mtu(struct net_device *dev, int new_mtu)
{
if (new_mtu < MIN_MTU || new_mtu > MAX_MTU)
return -EINVAL;
dev->mtu = new_mtu;
return 0;
}
static const struct net_device_ops virtnet_netdev = {
.ndo_open = virtnet_open,
.ndo_stop = virtnet_close,
.ndo_start_xmit = start_xmit,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = virtnet_set_mac_address,
.ndo_set_rx_mode = virtnet_set_rx_mode,
.ndo_change_mtu = virtnet_change_mtu,
.ndo_vlan_rx_add_vid = virnet_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = virnet_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = virtnet_netpoll,
#endif
};
static void virtnet_update_status(struct virtnet_info *vi)
{
u16 v;
if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS))
return;
vi->vdev->config->get(vi->vdev,
offsetof(struct virtio_net_config, status),
&v, sizeof(v));
/* Ignore unknown (future) status bits */
v &= VIRTIO_NET_S_LINK_UP;
if (vi->status == v)
return;
vi->status = v;
if (vi->status & VIRTIO_NET_S_LINK_UP) {
netif_carrier_on(vi->dev);
netif_wake_queue(vi->dev);
} else {
netif_carrier_off(vi->dev);
netif_stop_queue(vi->dev);
}
}
static void virtnet_config_changed(struct virtio_device *vdev)
{
struct virtnet_info *vi = vdev->priv;
virtnet_update_status(vi);
}
static int virtnet_probe(struct virtio_device *vdev)
{
int err;
struct net_device *dev;
struct virtnet_info *vi;
/* Allocate ourselves a network device with room for our info */
dev = alloc_etherdev(sizeof(struct virtnet_info));
if (!dev)
return -ENOMEM;
/* Set up network device as normal. */
dev->netdev_ops = &virtnet_netdev;
dev->features = NETIF_F_HIGHDMA;
SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops);
SET_NETDEV_DEV(dev, &vdev->dev);
/* Do we support "hardware" checksums? */
if (csum && virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
/* This opens up the world of extra features. */
dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
dev->features |= NETIF_F_TSO | NETIF_F_UFO
| NETIF_F_TSO_ECN | NETIF_F_TSO6;
}
/* Individual feature bits: what can host handle? */
if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
dev->features |= NETIF_F_TSO;
if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
dev->features |= NETIF_F_TSO6;
if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
dev->features |= NETIF_F_TSO_ECN;
if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
dev->features |= NETIF_F_UFO;
}
/* Configuration may specify what MAC to use. Otherwise random. */
if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
vdev->config->get(vdev,
offsetof(struct virtio_net_config, mac),
dev->dev_addr, dev->addr_len);
} else {
random_ether_addr(dev->dev_addr);
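/* Write the generated address back to config space so the host
 * knows which MAC the guest is using. */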
vdev->config->set(vdev, offsetof(struct virtio_net_config, mac),
dev->dev_addr, dev->addr_len);
}
/* Set up our device-specific information */
vi = netdev_priv(dev);
netif_napi_add(dev, &vi->napi, virtnet_poll, napi_weight);
vi->dev = dev;
vi->vdev = vdev;
vdev->priv = vi;
vi->pages = NULL;
/* If they give us a callback when all buffers are done, we don't need
* the timer. */
vi->free_in_tasklet = virtio_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY);
/* If we can receive ANY GSO packets, we must allocate large ones. */
if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4)
|| virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6)
|| virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
vi->big_packets = true;
if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
vi->mergeable_rx_bufs = true;
/* We expect two virtqueues, receive then send. */
vi->rvq = vdev->config->find_vq(vdev, 0, skb_recv_done);
if (IS_ERR(vi->rvq)) {
err = PTR_ERR(vi->rvq);
goto free;
}
vi->svq = vdev->config->find_vq(vdev, 1, skb_xmit_done);
if (IS_ERR(vi->svq)) {
err = PTR_ERR(vi->svq);
goto free_recv;
}
if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) {
vi->cvq = vdev->config->find_vq(vdev, 2, NULL);
if (IS_ERR(vi->cvq)) {
err = PTR_ERR(vi->cvq);
goto free_send;
}
if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
dev->features |= NETIF_F_HW_VLAN_FILTER;
}
/* Initialize our empty receive and send queues. */
skb_queue_head_init(&vi->recv);
skb_queue_head_init(&vi->send);
tasklet_init(&vi->tasklet, xmit_tasklet, (unsigned long)vi);
if (!vi->free_in_tasklet)
setup_timer(&vi->xmit_free_timer, xmit_free, (unsigned long)vi);
err = register_netdev(dev);
if (err) {
pr_debug("virtio_net: registering device failed\n");
goto free_ctrl;
}
/* Last of all, set up some receive buffers. */
try_fill_recv(vi);
/* If we didn't even get one input buffer, we're useless. */
if (vi->num == 0) {
err = -ENOMEM;
goto unregister;
}
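/* Assume the link is up; virtnet_update_status() overrides this from
 * config space when the host reports VIRTIO_NET_F_STATUS. */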
vi->status = VIRTIO_NET_S_LINK_UP;
virtnet_update_status(vi);
netif_carrier_on(dev);
pr_debug("virtnet: registered device %s\n", dev->name);
return 0;
unregister:
unregister_netdev(dev);
free_ctrl:
if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ))
vdev->config->del_vq(vi->cvq);
free_send:
vdev->config->del_vq(vi->svq);
free_recv:
vdev->config->del_vq(vi->rvq);
free:
free_netdev(dev);
return err;
}
static void virtnet_remove(struct virtio_device *vdev)
{
struct virtnet_info *vi = vdev->priv;
struct sk_buff *skb;
/* Stop all the virtqueues. */
vdev->config->reset(vdev);
if (!vi->free_in_tasklet)
del_timer_sync(&vi->xmit_free_timer);
/* Free our skbs in send and recv queues, if any. */
while ((skb = __skb_dequeue(&vi->recv)) != NULL) {
kfree_skb(skb);
vi->num--;
}
__skb_queue_purge(&vi->send);
BUG_ON(vi->num != 0);
vdev->config->del_vq(vi->svq);
vdev->config->del_vq(vi->rvq);
if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ))
vdev->config->del_vq(vi->cvq);
unregister_netdev(vi->dev);
while (vi->pages)
__free_pages(get_a_page(vi, GFP_KERNEL), 0);
free_netdev(vi->dev);
}
static struct virtio_device_id id_table[] = {
{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
{ 0 },
};
static unsigned int features[] = {
VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
VIRTIO_NET_F_GUEST_ECN, /* We don't yet handle UFO input. */
VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
VIRTIO_F_NOTIFY_ON_EMPTY,
};
static struct virtio_driver virtio_net = {
.feature_table = features,
.feature_table_size = ARRAY_SIZE(features),
.driver.name = KBUILD_MODNAME,
.driver.owner = THIS_MODULE,
.id_table = id_table,
.probe = virtnet_probe,
.remove = __devexit_p(virtnet_remove),
.config_changed = virtnet_config_changed,
};
static int __init init(void)
{
return register_virtio_driver(&virtio_net);
}
static void __exit fini(void)
{
unregister_virtio_driver(&virtio_net);
}
module_init(init);
module_exit(fini);
MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio network driver");
MODULE_LICENSE("GPL");