// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2019 Google, Inc.
 */

#include <linux/cpumask.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <net/sch_generic.h>
#include "gve.h"
#include "gve_adminq.h"
#include "gve_register.h"

#define GVE_DEFAULT_RX_COPYBREAK	(256)

#define DEFAULT_MSG_LEVEL	(NETIF_MSG_DRV | NETIF_MSG_LINK)
#define GVE_VERSION		"1.0.0"
#define GVE_VERSION_PREFIX	"GVE-"

const char gve_version_str[] = GVE_VERSION;
static const char gve_version_prefix[] = GVE_VERSION_PREFIX;

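/* Aggregate per-ring stats into the netdev's rtnl_link_stats64. Each ring's
 * counters are read under a u64_stats retry loop so a concurrent writer on
 * another CPU cannot tear the 64-bit values.
 */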
static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
{
	struct gve_priv *priv = netdev_priv(dev);
	unsigned int start;
	int ring;

	if (priv->rx) {
		for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
			do {
				start =
				  u64_stats_fetch_begin(&priv->rx[ring].statss);
				s->rx_packets += priv->rx[ring].rpackets;
				s->rx_bytes += priv->rx[ring].rbytes;
			} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
						       start));
		}
	}
	if (priv->tx) {
		for (ring = 0; ring < priv->tx_cfg.num_queues; ring++) {
			do {
				start =
				  u64_stats_fetch_begin(&priv->tx[ring].statss);
				s->tx_packets += priv->tx[ring].pkt_done;
				s->tx_bytes += priv->tx[ring].bytes_done;
			} while (u64_stats_fetch_retry(&priv->tx[ring].statss,
						       start));
		}
	}
}

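/* The event counter array is DMA-coherent memory the device writes and the
 * driver reads to track completed work.
 */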
static int gve_alloc_counter_array(struct gve_priv *priv)
{
	priv->counter_array =
		dma_alloc_coherent(&priv->pdev->dev,
				   priv->num_event_counters *
				   sizeof(*priv->counter_array),
				   &priv->counter_array_bus, GFP_KERNEL);
	if (!priv->counter_array)
		return -ENOMEM;

	return 0;
}

static void gve_free_counter_array(struct gve_priv *priv)
{
	dma_free_coherent(&priv->pdev->dev,
			  priv->num_event_counters *
			  sizeof(*priv->counter_array),
			  priv->counter_array, priv->counter_array_bus);
	priv->counter_array = NULL;
}

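/* Management interrupt handler: the device raises this vector for status
 * changes (e.g. a reset request), so just kick the service task.
 */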
static irqreturn_t gve_mgmnt_intr(int irq, void *arg)
{
	struct gve_priv *priv = arg;

	queue_work(priv->gve_wq, &priv->service_task);
	return IRQ_HANDLED;
}

static irqreturn_t gve_intr(int irq, void *arg)
{
	struct gve_notify_block *block = arg;
	struct gve_priv *priv = block->priv;

	iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block));
	napi_schedule_irqoff(&block->napi);
	return IRQ_HANDLED;
}

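/* NAPI poll: service the TX and/or RX ring bound to this notify block, then
 * unmask the interrupt via the IRQ doorbell and re-check for work that may
 * have raced with the unmask.
 */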
static int gve_napi_poll(struct napi_struct *napi, int budget)
{
	struct gve_notify_block *block;
	__be32 __iomem *irq_doorbell;
	bool reschedule = false;
	struct gve_priv *priv;

	block = container_of(napi, struct gve_notify_block, napi);
	priv = block->priv;

	if (block->tx)
		reschedule |= gve_tx_poll(block, budget);
	if (block->rx)
		reschedule |= gve_rx_poll(block, budget);

	if (reschedule)
		return budget;

	napi_complete(napi);
	irq_doorbell = gve_irq_doorbell(priv, block);
	iowrite32be(GVE_IRQ_ACK | GVE_IRQ_EVENT, irq_doorbell);

	/* Double check we have no extra work.
	 * Ensure unmask synchronizes with checking for work.
	 */
	dma_rmb();
	if (block->tx)
		reschedule |= gve_tx_poll(block, -1);
	if (block->rx)
		reschedule |= gve_rx_poll(block, -1);
	if (reschedule && napi_reschedule(napi))
		iowrite32be(GVE_IRQ_MASK, irq_doorbell);

	return 0;
}

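/* Enable MSI-X (one vector per notify block plus one management vector),
 * request the IRQs, allocate the DMA-coherent notify block array, and pin
 * each block's vector to a CPU. If fewer vectors are granted than requested,
 * shrink the TX/RX queue limits to fit.
 */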
static int gve_alloc_notify_blocks(struct gve_priv *priv)
{
	int num_vecs_requested = priv->num_ntfy_blks + 1;
	char *name = priv->dev->name;
	unsigned int active_cpus;
	int vecs_enabled;
	int i, j;
	int err;

	priv->msix_vectors = kvzalloc(num_vecs_requested *
				      sizeof(*priv->msix_vectors), GFP_KERNEL);
	if (!priv->msix_vectors)
		return -ENOMEM;
	for (i = 0; i < num_vecs_requested; i++)
		priv->msix_vectors[i].entry = i;
	vecs_enabled = pci_enable_msix_range(priv->pdev, priv->msix_vectors,
					     GVE_MIN_MSIX, num_vecs_requested);
	if (vecs_enabled < 0) {
		dev_err(&priv->pdev->dev, "Could not enable min msix %d/%d\n",
			GVE_MIN_MSIX, vecs_enabled);
		err = vecs_enabled;
		goto abort_with_msix_vectors;
	}
	if (vecs_enabled != num_vecs_requested) {
		int new_num_ntfy_blks = (vecs_enabled - 1) & ~0x1;
		int vecs_per_type = new_num_ntfy_blks / 2;
		int vecs_left = new_num_ntfy_blks % 2;

		priv->num_ntfy_blks = new_num_ntfy_blks;
		priv->tx_cfg.max_queues = min_t(int, priv->tx_cfg.max_queues,
						vecs_per_type);
		priv->rx_cfg.max_queues = min_t(int, priv->rx_cfg.max_queues,
						vecs_per_type + vecs_left);
		dev_err(&priv->pdev->dev,
			"Could not enable desired msix, only enabled %d, adjusting tx max queues to %d, and rx max queues to %d\n",
			vecs_enabled, priv->tx_cfg.max_queues,
			priv->rx_cfg.max_queues);
		if (priv->tx_cfg.num_queues > priv->tx_cfg.max_queues)
			priv->tx_cfg.num_queues = priv->tx_cfg.max_queues;
		if (priv->rx_cfg.num_queues > priv->rx_cfg.max_queues)
			priv->rx_cfg.num_queues = priv->rx_cfg.max_queues;
	}
	/* Half the notification blocks go to TX and half to RX */
	active_cpus = min_t(int, priv->num_ntfy_blks / 2, num_online_cpus());

	/* Setup Management Vector - the last vector */
	snprintf(priv->mgmt_msix_name, sizeof(priv->mgmt_msix_name), "%s-mgmnt",
		 name);
	err = request_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector,
			  gve_mgmnt_intr, 0, priv->mgmt_msix_name, priv);
	if (err) {
		dev_err(&priv->pdev->dev, "Did not receive management vector.\n");
		goto abort_with_msix_enabled;
	}
	priv->ntfy_blocks =
		dma_alloc_coherent(&priv->pdev->dev,
				   priv->num_ntfy_blks *
				   sizeof(*priv->ntfy_blocks),
				   &priv->ntfy_block_bus, GFP_KERNEL);
	if (!priv->ntfy_blocks) {
		err = -ENOMEM;
		goto abort_with_mgmt_vector;
	}
	/* Setup the other blocks - the first n-1 vectors */
	for (i = 0; i < priv->num_ntfy_blks; i++) {
		struct gve_notify_block *block = &priv->ntfy_blocks[i];
		int msix_idx = i;

		snprintf(block->name, sizeof(block->name), "%s-ntfy-block.%d",
			 name, i);
		block->priv = priv;
		err = request_irq(priv->msix_vectors[msix_idx].vector,
				  gve_intr, 0, block->name, block);
		if (err) {
			dev_err(&priv->pdev->dev,
				"Failed to receive msix vector %d\n", i);
			goto abort_with_some_ntfy_blocks;
		}
		irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
				      get_cpu_mask(i % active_cpus));
	}
	return 0;
abort_with_some_ntfy_blocks:
	for (j = 0; j < i; j++) {
		struct gve_notify_block *block = &priv->ntfy_blocks[j];
		int msix_idx = j;

		irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
				      NULL);
		free_irq(priv->msix_vectors[msix_idx].vector, block);
	}
	dma_free_coherent(&priv->pdev->dev, priv->num_ntfy_blks *
			  sizeof(*priv->ntfy_blocks),
			  priv->ntfy_blocks, priv->ntfy_block_bus);
	priv->ntfy_blocks = NULL;
abort_with_mgmt_vector:
	free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
abort_with_msix_enabled:
	pci_disable_msix(priv->pdev);
abort_with_msix_vectors:
	kvfree(priv->msix_vectors);
	priv->msix_vectors = NULL;
	return err;
}

static void gve_free_notify_blocks(struct gve_priv *priv)
{
	int i;

	/* Free the irqs */
	for (i = 0; i < priv->num_ntfy_blks; i++) {
		struct gve_notify_block *block = &priv->ntfy_blocks[i];
		int msix_idx = i;

		irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
				      NULL);
		free_irq(priv->msix_vectors[msix_idx].vector, block);
	}
	dma_free_coherent(&priv->pdev->dev,
			  priv->num_ntfy_blks * sizeof(*priv->ntfy_blocks),
			  priv->ntfy_blocks, priv->ntfy_block_bus);
	priv->ntfy_blocks = NULL;
	free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
	pci_disable_msix(priv->pdev);
	kvfree(priv->msix_vectors);
	priv->msix_vectors = NULL;
}

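/* Allocate the counter array and notify blocks, then hand their bus
 * addresses to the device over the admin queue.
 */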
static int gve_setup_device_resources(struct gve_priv *priv)
{
	int err;

	err = gve_alloc_counter_array(priv);
	if (err)
		return err;
	err = gve_alloc_notify_blocks(priv);
	if (err)
		goto abort_with_counter;
	err = gve_adminq_configure_device_resources(priv,
						    priv->counter_array_bus,
						    priv->num_event_counters,
						    priv->ntfy_block_bus,
						    priv->num_ntfy_blks);
	if (unlikely(err)) {
		dev_err(&priv->pdev->dev,
			"could not setup device_resources: err=%d\n", err);
		err = -ENXIO;
		goto abort_with_ntfy_blocks;
	}
	gve_set_device_resources_ok(priv);
	return 0;
abort_with_ntfy_blocks:
	gve_free_notify_blocks(priv);
abort_with_counter:
	gve_free_counter_array(priv);
	return err;
}

static void gve_trigger_reset(struct gve_priv *priv);

static void gve_teardown_device_resources(struct gve_priv *priv)
{
	int err;

	/* Tell device its resources are being freed */
	if (gve_get_device_resources_ok(priv)) {
		err = gve_adminq_deconfigure_device_resources(priv);
		if (err) {
			dev_err(&priv->pdev->dev,
				"Could not deconfigure device resources: err=%d\n",
				err);
			gve_trigger_reset(priv);
		}
	}
	gve_free_counter_array(priv);
	gve_free_notify_blocks(priv);
	gve_clear_device_resources_ok(priv);
}

static void gve_add_napi(struct gve_priv *priv, int ntfy_idx)
{
	struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

	netif_napi_add(priv->dev, &block->napi, gve_napi_poll,
		       NAPI_POLL_WEIGHT);
}

static void gve_remove_napi(struct gve_priv *priv, int ntfy_idx)
{
	struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

	netif_napi_del(&block->napi);
}

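/* Register every queue page list (QPL) with the device. QPLs are the
 * pre-registered page pools that TX and RX rings DMA through.
 */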
static int gve_register_qpls(struct gve_priv *priv)
{
	int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
	int err;
	int i;

	for (i = 0; i < num_qpls; i++) {
		err = gve_adminq_register_page_list(priv, &priv->qpls[i]);
		if (err) {
			netif_err(priv, drv, priv->dev,
				  "failed to register queue page list %d\n",
				  priv->qpls[i].id);
			/* This failure will trigger a reset - no need to clean
			 * up
			 */
			return err;
		}
	}
	return 0;
}

static int gve_unregister_qpls(struct gve_priv *priv)
{
	int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
	int err;
	int i;

	for (i = 0; i < num_qpls; i++) {
		err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id);
		/* This failure will trigger a reset - no need to clean up */
		if (err) {
			netif_err(priv, drv, priv->dev,
				  "Failed to unregister queue page list %d\n",
				  priv->qpls[i].id);
			return err;
		}
	}
	return 0;
}

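/* Ask the device to instantiate each TX and RX queue over the admin queue.
 * RX rings were prefilled at allocation time, so ring the doorbell right
 * away to hand their buffers to the NIC.
 */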
static int gve_create_rings(struct gve_priv *priv)
{
	int err;
	int i;

	for (i = 0; i < priv->tx_cfg.num_queues; i++) {
		err = gve_adminq_create_tx_queue(priv, i);
		if (err) {
			netif_err(priv, drv, priv->dev, "failed to create tx queue %d\n",
				  i);
			/* This failure will trigger a reset - no need to clean
			 * up
			 */
			return err;
		}
		netif_dbg(priv, drv, priv->dev, "created tx queue %d\n", i);
	}
	for (i = 0; i < priv->rx_cfg.num_queues; i++) {
		err = gve_adminq_create_rx_queue(priv, i);
		if (err) {
			netif_err(priv, drv, priv->dev, "failed to create rx queue %d\n",
				  i);
			/* This failure will trigger a reset - no need to clean
			 * up
			 */
			return err;
		}
		/* Rx data ring has been prefilled with packet buffers at
		 * queue allocation time.
		 * Write the doorbell to provide descriptor slots and packet
		 * buffers to the NIC.
		 */
		gve_rx_write_doorbell(priv, &priv->rx[i]);
		netif_dbg(priv, drv, priv->dev, "created rx queue %d\n", i);
	}

	return 0;
}

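/* Allocate host-side ring state and attach a NAPI context to each ring's
 * notify block; gve_destroy_rings() and gve_free_rings() below undo this in
 * reverse order.
 */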
static int gve_alloc_rings(struct gve_priv *priv)
{
	int ntfy_idx;
	int err;
	int i;

	/* Setup tx rings */
	priv->tx = kvzalloc(priv->tx_cfg.num_queues * sizeof(*priv->tx),
			    GFP_KERNEL);
	if (!priv->tx)
		return -ENOMEM;
	err = gve_tx_alloc_rings(priv);
	if (err)
		goto free_tx;
	/* Setup rx rings */
	priv->rx = kvzalloc(priv->rx_cfg.num_queues * sizeof(*priv->rx),
			    GFP_KERNEL);
	if (!priv->rx) {
		err = -ENOMEM;
		goto free_tx_queue;
	}
	err = gve_rx_alloc_rings(priv);
	if (err)
		goto free_rx;
	/* Add tx napi & init sync stats */
	for (i = 0; i < priv->tx_cfg.num_queues; i++) {
		u64_stats_init(&priv->tx[i].statss);
		ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
		gve_add_napi(priv, ntfy_idx);
	}
	/* Add rx napi & init sync stats */
	for (i = 0; i < priv->rx_cfg.num_queues; i++) {
		u64_stats_init(&priv->rx[i].statss);
		ntfy_idx = gve_rx_idx_to_ntfy(priv, i);
		gve_add_napi(priv, ntfy_idx);
	}

	return 0;

free_rx:
	kvfree(priv->rx);
	priv->rx = NULL;
free_tx_queue:
	gve_tx_free_rings(priv);
free_tx:
	kvfree(priv->tx);
	priv->tx = NULL;
	return err;
}

static int gve_destroy_rings(struct gve_priv *priv)
{
	int err;
	int i;

	for (i = 0; i < priv->tx_cfg.num_queues; i++) {
		err = gve_adminq_destroy_tx_queue(priv, i);
		if (err) {
			netif_err(priv, drv, priv->dev,
				  "failed to destroy tx queue %d\n",
				  i);
			/* This failure will trigger a reset - no need to clean
			 * up
			 */
			return err;
		}
		netif_dbg(priv, drv, priv->dev, "destroyed tx queue %d\n", i);
	}
	for (i = 0; i < priv->rx_cfg.num_queues; i++) {
		err = gve_adminq_destroy_rx_queue(priv, i);
		if (err) {
			netif_err(priv, drv, priv->dev,
				  "failed to destroy rx queue %d\n",
				  i);
			/* This failure will trigger a reset - no need to clean
			 * up
			 */
			return err;
		}
		netif_dbg(priv, drv, priv->dev, "destroyed rx queue %d\n", i);
	}
	return 0;
}

static void gve_free_rings(struct gve_priv *priv)
{
	int ntfy_idx;
	int i;

	if (priv->tx) {
		for (i = 0; i < priv->tx_cfg.num_queues; i++) {
			ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
			gve_remove_napi(priv, ntfy_idx);
		}
		gve_tx_free_rings(priv);
		kvfree(priv->tx);
		priv->tx = NULL;
	}
	if (priv->rx) {
		for (i = 0; i < priv->rx_cfg.num_queues; i++) {
			ntfy_idx = gve_rx_idx_to_ntfy(priv, i);
			gve_remove_napi(priv, ntfy_idx);
		}
		gve_rx_free_rings(priv);
		kvfree(priv->rx);
		priv->rx = NULL;
	}
}

int gve_alloc_page(struct device *dev, struct page **page, dma_addr_t *dma,
		   enum dma_data_direction dir)
{
	*page = alloc_page(GFP_KERNEL);
	if (!*page)
		return -ENOMEM;
	*dma = dma_map_page(dev, *page, 0, PAGE_SIZE, dir);
	if (dma_mapping_error(dev, *dma)) {
		put_page(*page);
		return -ENOMEM;
	}
	return 0;
}

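/* Allocate and DMA-map one queue page list. qpl->num_entries is advanced
 * page by page so a partial allocation can be torn down accurately by the
 * caller.
 */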
static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id,
				     int pages)
{
	struct gve_queue_page_list *qpl = &priv->qpls[id];
	int err;
	int i;

	if (pages + priv->num_registered_pages > priv->max_registered_pages) {
		netif_err(priv, drv, priv->dev,
			  "Reached max number of registered pages %llu > %llu\n",
			  pages + priv->num_registered_pages,
			  priv->max_registered_pages);
		return -EINVAL;
	}

	qpl->id = id;
	qpl->num_entries = 0;
	qpl->pages = kvzalloc(pages * sizeof(*qpl->pages), GFP_KERNEL);
	/* caller handles clean up */
	if (!qpl->pages)
		return -ENOMEM;
	qpl->page_buses = kvzalloc(pages * sizeof(*qpl->page_buses),
				   GFP_KERNEL);
	/* caller handles clean up */
	if (!qpl->page_buses)
		return -ENOMEM;

	for (i = 0; i < pages; i++) {
		err = gve_alloc_page(&priv->pdev->dev, &qpl->pages[i],
				     &qpl->page_buses[i],
				     gve_qpl_dma_dir(priv, id));
		/* caller handles clean up */
		if (err)
			return -ENOMEM;
		qpl->num_entries++;
	}
	priv->num_registered_pages += pages;

	return 0;
}

void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
		   enum dma_data_direction dir)
{
	if (!dma_mapping_error(dev, dma))
		dma_unmap_page(dev, dma, PAGE_SIZE, dir);
	if (page)
		put_page(page);
}

static void gve_free_queue_page_list(struct gve_priv *priv, int id)
{
	struct gve_queue_page_list *qpl = &priv->qpls[id];
	int i;

	if (!qpl->pages)
		return;
	if (!qpl->page_buses)
		goto free_pages;

	for (i = 0; i < qpl->num_entries; i++)
		gve_free_page(&priv->pdev->dev, qpl->pages[i],
			      qpl->page_buses[i], gve_qpl_dma_dir(priv, id));

	kvfree(qpl->page_buses);
free_pages:
	kvfree(qpl->pages);
	priv->num_registered_pages -= qpl->num_entries;
}

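/* TX QPLs occupy ids [0, num_tx_qpls) and RX QPLs follow them; the id bitmap
 * tracks which lists are currently assigned to queues.
 */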
static int gve_alloc_qpls(struct gve_priv *priv)
{
	int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
	int i, j;
	int err;

	priv->qpls = kvzalloc(num_qpls * sizeof(*priv->qpls), GFP_KERNEL);
	if (!priv->qpls)
		return -ENOMEM;

	for (i = 0; i < gve_num_tx_qpls(priv); i++) {
		err = gve_alloc_queue_page_list(priv, i,
						priv->tx_pages_per_qpl);
		if (err)
			goto free_qpls;
	}
	for (; i < num_qpls; i++) {
		err = gve_alloc_queue_page_list(priv, i,
						priv->rx_pages_per_qpl);
		if (err)
			goto free_qpls;
	}

	priv->qpl_cfg.qpl_map_size = BITS_TO_LONGS(num_qpls) *
				     sizeof(unsigned long) * BITS_PER_BYTE;
	priv->qpl_cfg.qpl_id_map = kvzalloc(BITS_TO_LONGS(num_qpls) *
					    sizeof(unsigned long), GFP_KERNEL);
	if (!priv->qpl_cfg.qpl_id_map) {
		err = -ENOMEM;
		goto free_qpls;
	}

	return 0;

free_qpls:
	for (j = 0; j <= i; j++)
		gve_free_queue_page_list(priv, j);
	kvfree(priv->qpls);
	return err;
}

static void gve_free_qpls(struct gve_priv *priv)
{
	int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
	int i;

	kvfree(priv->qpl_cfg.qpl_id_map);

	for (i = 0; i < num_qpls; i++)
		gve_free_queue_page_list(priv, i);

	kvfree(priv->qpls);
}

/* Use this to schedule a reset when the device is capable of continuing
 * to handle other requests in its current state. If it is not, do a reset
 * in thread instead.
 */
void gve_schedule_reset(struct gve_priv *priv)
{
	gve_set_do_reset(priv);
	queue_work(priv->gve_wq, &priv->service_task);
}

static void gve_reset_and_teardown(struct gve_priv *priv, bool was_up);
static int gve_reset_recovery(struct gve_priv *priv, bool was_up);
static void gve_turndown(struct gve_priv *priv);
static void gve_turnup(struct gve_priv *priv);

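/* ndo_open: allocate QPLs and rings, size the real TX/RX queue counts, then
 * register pages and create queues on the device. Failures after the device
 * has state (the "reset" label) are unwound with a full reset rather than
 * piecemeal cleanup.
 */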
static int gve_open(struct net_device *dev)
{
	struct gve_priv *priv = netdev_priv(dev);
	int err;

	err = gve_alloc_qpls(priv);
	if (err)
		return err;
	err = gve_alloc_rings(priv);
	if (err)
		goto free_qpls;

	err = netif_set_real_num_tx_queues(dev, priv->tx_cfg.num_queues);
	if (err)
		goto free_rings;
	err = netif_set_real_num_rx_queues(dev, priv->rx_cfg.num_queues);
	if (err)
		goto free_rings;

	err = gve_register_qpls(priv);
	if (err)
		goto reset;
	err = gve_create_rings(priv);
	if (err)
		goto reset;
	gve_set_device_rings_ok(priv);

	gve_turnup(priv);
	netif_carrier_on(dev);
	return 0;

free_rings:
	gve_free_rings(priv);
free_qpls:
	gve_free_qpls(priv);
	return err;

reset:
	/* This must have been called from a reset due to the rtnl lock
	 * so just return at this point.
	 */
	if (gve_get_reset_in_progress(priv))
		return err;
	/* Otherwise reset before returning */
	gve_reset_and_teardown(priv, true);
	/* if this fails there is nothing we can do so just ignore the return */
	gve_reset_recovery(priv, false);
	/* return the original error */
	return err;
}

static int gve_close(struct net_device *dev)
{
	struct gve_priv *priv = netdev_priv(dev);
	int err;

	netif_carrier_off(dev);
	if (gve_get_device_rings_ok(priv)) {
		gve_turndown(priv);
		err = gve_destroy_rings(priv);
		if (err)
			goto err;
		err = gve_unregister_qpls(priv);
		if (err)
			goto err;
		gve_clear_device_rings_ok(priv);
	}

	gve_free_rings(priv);
	gve_free_qpls(priv);
	return 0;

err:
	/* This must have been called from a reset due to the rtnl lock
	 * so just return at this point.
	 */
	if (gve_get_reset_in_progress(priv))
		return err;
	/* Otherwise reset before returning */
	gve_reset_and_teardown(priv, true);
	return gve_reset_recovery(priv, false);
}

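/* Apply a new queue configuration. If the interface is up this is a full
 * close/reopen cycle; otherwise the new counts simply take effect on the
 * next open.
 */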
int gve_adjust_queues(struct gve_priv *priv,
		      struct gve_queue_config new_rx_config,
		      struct gve_queue_config new_tx_config)
{
	int err;

	if (netif_carrier_ok(priv->dev)) {
		/* To make this process as simple as possible we teardown the
		 * device, set the new configuration, and then bring the device
		 * up again.
		 */
		err = gve_close(priv->dev);
		/* we have already tried to reset in close,
		 * just fail at this point
		 */
		if (err)
			return err;
		priv->tx_cfg = new_tx_config;
		priv->rx_cfg = new_rx_config;

		err = gve_open(priv->dev);
		if (err)
			goto err;

		return 0;
	}
	/* Set the config for the next up. */
	priv->tx_cfg = new_tx_config;
	priv->rx_cfg = new_rx_config;

	return 0;
err:
	netif_err(priv, drv, priv->dev,
		  "Adjust queues failed! !!! DISABLING ALL QUEUES !!!\n");
	gve_turndown(priv);
	return err;
}

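/* Quiesce the data path: drop the carrier, disable NAPI on every notify
 * block, and stop the TX queues. gve_turnup() reverses this and writes 0 to
 * each IRQ doorbell to unmask interrupts.
 */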
static void gve_turndown(struct gve_priv *priv)
{
	int idx;

	if (netif_carrier_ok(priv->dev))
		netif_carrier_off(priv->dev);

	if (!gve_get_napi_enabled(priv))
		return;

	/* Disable napi to prevent more work from coming in */
	for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
		int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
		struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

		napi_disable(&block->napi);
	}
	for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
		int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
		struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

		napi_disable(&block->napi);
	}

	/* Stop tx queues */
	netif_tx_disable(priv->dev);

	gve_clear_napi_enabled(priv);
}

static void gve_turnup(struct gve_priv *priv)
{
	int idx;

	/* Start the tx queues */
	netif_tx_start_all_queues(priv->dev);

	/* Enable napi and unmask interrupts for all queues */
	for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
		int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
		struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

		napi_enable(&block->napi);
		iowrite32be(0, gve_irq_doorbell(priv, block));
	}
	for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
		int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
		struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

		napi_enable(&block->napi);
		iowrite32be(0, gve_irq_doorbell(priv, block));
	}

	gve_set_napi_enabled(priv);
}

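/* ndo_tx_timeout: the stack reports a stuck TX queue (the txqueue index is
 * currently unused here), so schedule a device reset and count the event.
 */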
static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct gve_priv *priv = netdev_priv(dev);

	gve_schedule_reset(priv);
	priv->tx_timeo_cnt++;
}

static const struct net_device_ops gve_netdev_ops = {
	.ndo_start_xmit		=	gve_tx,
	.ndo_open		=	gve_open,
	.ndo_stop		=	gve_close,
	.ndo_get_stats64	=	gve_get_stats,
	.ndo_tx_timeout		=	gve_tx_timeout,
};

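/* Service-task plumbing: translate device status register bits into reset
 * requests and run the resets under the rtnl lock.
 */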
static void gve_handle_status(struct gve_priv *priv, u32 status)
{
	if (GVE_DEVICE_STATUS_RESET_MASK & status) {
		dev_info(&priv->pdev->dev, "Device requested reset.\n");
		gve_set_do_reset(priv);
	}
}

static void gve_handle_reset(struct gve_priv *priv)
{
	/* A service task will be scheduled at the end of probe to catch any
	 * resets that need to happen, and we don't want to reset until
	 * probe is done.
	 */
	if (gve_get_probe_in_progress(priv))
		return;

	if (gve_get_do_reset(priv)) {
		rtnl_lock();
		gve_reset(priv, false);
		rtnl_unlock();
	}
}

/* Handle NIC status register changes and reset requests */
static void gve_service_task(struct work_struct *work)
{
	struct gve_priv *priv = container_of(work, struct gve_priv,
					     service_task);

	gve_handle_status(priv,
			  ioread32be(&priv->reg_bar0->device_status));

	gve_handle_reset(priv);
}

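/* One-time (and post-reset) device initialization: bring up the admin queue,
 * describe the device, clamp the MTU to a page, size the notify blocks from
 * the available MSI-X vectors, and set up device resources. With
 * skip_describe_device (the reset path) the previously negotiated
 * configuration is kept.
 */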
static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
{
	int num_ntfy;
	int err;

	/* Set up the adminq */
	err = gve_adminq_alloc(&priv->pdev->dev, priv);
	if (err) {
		dev_err(&priv->pdev->dev,
			"Failed to alloc admin queue: err=%d\n", err);
		return err;
	}

	if (skip_describe_device)
		goto setup_device;

	/* Get the initial information we need from the device */
	err = gve_adminq_describe_device(priv);
	if (err) {
		dev_err(&priv->pdev->dev,
			"Could not get device information: err=%d\n", err);
		goto err;
	}
	if (priv->dev->max_mtu > PAGE_SIZE) {
		priv->dev->max_mtu = PAGE_SIZE;
		err = gve_adminq_set_mtu(priv, priv->dev->mtu);
		if (err) {
			netif_err(priv, drv, priv->dev, "Could not set mtu");
			goto err;
		}
	}
	priv->dev->mtu = priv->dev->max_mtu;
	num_ntfy = pci_msix_vec_count(priv->pdev);
	if (num_ntfy <= 0) {
		dev_err(&priv->pdev->dev,
			"could not count MSI-x vectors: err=%d\n", num_ntfy);
		err = num_ntfy;
		goto err;
	} else if (num_ntfy < GVE_MIN_MSIX) {
		dev_err(&priv->pdev->dev, "gve needs at least %d MSI-x vectors, but only has %d\n",
			GVE_MIN_MSIX, num_ntfy);
		err = -EINVAL;
		goto err;
	}

	priv->num_registered_pages = 0;
	priv->rx_copybreak = GVE_DEFAULT_RX_COPYBREAK;
	/* gvnic has one Notification Block per MSI-x vector, except for the
	 * management vector
	 */
	priv->num_ntfy_blks = (num_ntfy - 1) & ~0x1;
	priv->mgmt_msix_idx = priv->num_ntfy_blks;

	priv->tx_cfg.max_queues =
		min_t(int, priv->tx_cfg.max_queues, priv->num_ntfy_blks / 2);
	priv->rx_cfg.max_queues =
		min_t(int, priv->rx_cfg.max_queues, priv->num_ntfy_blks / 2);

	priv->tx_cfg.num_queues = priv->tx_cfg.max_queues;
	priv->rx_cfg.num_queues = priv->rx_cfg.max_queues;
	if (priv->default_num_queues > 0) {
		priv->tx_cfg.num_queues = min_t(int, priv->default_num_queues,
						priv->tx_cfg.num_queues);
		priv->rx_cfg.num_queues = min_t(int, priv->default_num_queues,
						priv->rx_cfg.num_queues);
	}

	netif_info(priv, drv, priv->dev, "TX queues %d, RX queues %d\n",
		   priv->tx_cfg.num_queues, priv->rx_cfg.num_queues);
	netif_info(priv, drv, priv->dev, "Max TX queues %d, Max RX queues %d\n",
		   priv->tx_cfg.max_queues, priv->rx_cfg.max_queues);

setup_device:
	err = gve_setup_device_resources(priv);
	if (!err)
		return 0;
err:
	gve_adminq_free(&priv->pdev->dev, priv);
	return err;
}

static void gve_teardown_priv_resources(struct gve_priv *priv)
{
	gve_teardown_device_resources(priv);
	gve_adminq_free(&priv->pdev->dev, priv);
}

static void gve_trigger_reset(struct gve_priv *priv)
{
	/* Reset the device by releasing the AQ */
	gve_adminq_release(priv);
}

static void gve_reset_and_teardown(struct gve_priv *priv, bool was_up)
{
	gve_trigger_reset(priv);
	/* With the reset having already happened, close cannot fail */
	if (was_up)
		gve_close(priv->dev);
	gve_teardown_priv_resources(priv);
}

static int gve_reset_recovery(struct gve_priv *priv, bool was_up)
{
	int err;

	err = gve_init_priv(priv, true);
	if (err)
		goto err;
	if (was_up) {
		err = gve_open(priv->dev);
		if (err)
			goto err;
	}
	return 0;
err:
	dev_err(&priv->pdev->dev, "Reset failed! !!! DISABLING ALL QUEUES !!!\n");
	gve_turndown(priv);
	return err;
}

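/* Full device reset. With attempt_teardown the driver first tries a clean
 * close; without it (or if close fails) it yanks the admin queue and tears
 * everything down before rebuilding.
 */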
int gve_reset(struct gve_priv *priv, bool attempt_teardown)
{
	bool was_up = netif_carrier_ok(priv->dev);
	int err;

	dev_info(&priv->pdev->dev, "Performing reset\n");
	gve_clear_do_reset(priv);
	gve_set_reset_in_progress(priv);
	/* If we aren't attempting to teardown normally, just go turndown and
	 * reset right away.
	 */
	if (!attempt_teardown) {
		gve_turndown(priv);
		gve_reset_and_teardown(priv, was_up);
	} else {
		/* Otherwise attempt to close normally */
		if (was_up) {
			err = gve_close(priv->dev);
			/* If that fails reset as we did above */
			if (err)
				gve_reset_and_teardown(priv, was_up);
		}
		/* Clean up any remaining resources */
		gve_teardown_priv_resources(priv);
	}

	/* Set it all back up */
	err = gve_reset_recovery(priv, was_up);
	gve_clear_reset_in_progress(priv);
	return err;
}

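/* Write "GVE-<version>\n" byte by byte into the driver version register so
 * the device learns which driver version is attached.
 */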
static void gve_write_version(u8 __iomem *driver_version_register)
{
	const char *c = gve_version_prefix;

	while (*c) {
		writeb(*c, driver_version_register);
		c++;
	}

	c = gve_version_str;
	while (*c) {
		writeb(*c, driver_version_register);
		c++;
	}
	writeb('\n', driver_version_register);
}

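/* PCI probe: map the register and doorbell BARs, read the device's queue
 * limits, allocate the multi-queue netdev, and initialize the private state
 * before registering with the stack.
 */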
static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int max_tx_queues, max_rx_queues;
	struct net_device *dev;
	__be32 __iomem *db_bar;
	struct gve_registers __iomem *reg_bar;
	struct gve_priv *priv;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return -ENXIO;

	err = pci_request_regions(pdev, "gvnic-cfg");
	if (err)
		goto abort_with_enabled;

	pci_set_master(pdev);

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_err(&pdev->dev, "Failed to set dma mask: err=%d\n", err);
		goto abort_with_pci_region;
	}

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_err(&pdev->dev,
			"Failed to set consistent dma mask: err=%d\n", err);
		goto abort_with_pci_region;
	}

	reg_bar = pci_iomap(pdev, GVE_REGISTER_BAR, 0);
	if (!reg_bar) {
		dev_err(&pdev->dev, "Failed to map pci bar!\n");
		err = -ENOMEM;
		goto abort_with_pci_region;
	}

	db_bar = pci_iomap(pdev, GVE_DOORBELL_BAR, 0);
	if (!db_bar) {
		dev_err(&pdev->dev, "Failed to map doorbell bar!\n");
		err = -ENOMEM;
		goto abort_with_reg_bar;
	}

	gve_write_version(&reg_bar->driver_version);
	/* Get max queues to alloc etherdev */
	max_tx_queues = ioread32be(&reg_bar->max_tx_queues);
	max_rx_queues = ioread32be(&reg_bar->max_rx_queues);
	/* Alloc and setup the netdev and priv */
	dev = alloc_etherdev_mqs(sizeof(*priv), max_tx_queues, max_rx_queues);
	if (!dev) {
		dev_err(&pdev->dev, "could not allocate netdev\n");
		goto abort_with_db_bar;
	}
	SET_NETDEV_DEV(dev, &pdev->dev);
	pci_set_drvdata(pdev, dev);
	dev->ethtool_ops = &gve_ethtool_ops;
	dev->netdev_ops = &gve_netdev_ops;
	/* advertise features */
	dev->hw_features = NETIF_F_HIGHDMA;
	dev->hw_features |= NETIF_F_SG;
	dev->hw_features |= NETIF_F_HW_CSUM;
	dev->hw_features |= NETIF_F_TSO;
	dev->hw_features |= NETIF_F_TSO6;
	dev->hw_features |= NETIF_F_TSO_ECN;
	dev->hw_features |= NETIF_F_RXCSUM;
	dev->hw_features |= NETIF_F_RXHASH;
	dev->features = dev->hw_features;
	dev->watchdog_timeo = 5 * HZ;
	dev->min_mtu = ETH_MIN_MTU;
	netif_carrier_off(dev);

	priv = netdev_priv(dev);
	priv->dev = dev;
	priv->pdev = pdev;
	priv->msg_enable = DEFAULT_MSG_LEVEL;
	priv->reg_bar0 = reg_bar;
	priv->db_bar2 = db_bar;
	priv->service_task_flags = 0x0;
	priv->state_flags = 0x0;

	gve_set_probe_in_progress(priv);
	priv->gve_wq = alloc_ordered_workqueue("gve", 0);
	if (!priv->gve_wq) {
		dev_err(&pdev->dev, "Could not allocate workqueue");
		err = -ENOMEM;
		goto abort_with_netdev;
	}
	INIT_WORK(&priv->service_task, gve_service_task);
	priv->tx_cfg.max_queues = max_tx_queues;
	priv->rx_cfg.max_queues = max_rx_queues;

	err = gve_init_priv(priv, false);
	if (err)
		goto abort_with_wq;

	err = register_netdev(dev);
	if (err)
		goto abort_with_wq;

	dev_info(&pdev->dev, "GVE version %s\n", gve_version_str);
	gve_clear_probe_in_progress(priv);
	queue_work(priv->gve_wq, &priv->service_task);
	return 0;

abort_with_wq:
	destroy_workqueue(priv->gve_wq);

abort_with_netdev:
	free_netdev(dev);

abort_with_db_bar:
	pci_iounmap(pdev, db_bar);

abort_with_reg_bar:
	pci_iounmap(pdev, reg_bar);

abort_with_pci_region:
	pci_release_regions(pdev);

abort_with_enabled:
	pci_disable_device(pdev);
	return -ENXIO;
}

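/* PCI remove: unwind probe in reverse order. */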
static void gve_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct gve_priv *priv = netdev_priv(netdev);
	__be32 __iomem *db_bar = priv->db_bar2;
	void __iomem *reg_bar = priv->reg_bar0;

	unregister_netdev(netdev);
	gve_teardown_priv_resources(priv);
	destroy_workqueue(priv->gve_wq);
	free_netdev(netdev);
	pci_iounmap(pdev, db_bar);
	pci_iounmap(pdev, reg_bar);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static const struct pci_device_id gve_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_GOOGLE, PCI_DEV_ID_GVNIC) },
	{ }
};

static struct pci_driver gvnic_driver = {
	.name		= "gvnic",
	.id_table	= gve_id_table,
	.probe		= gve_probe,
	.remove		= gve_remove,
};

module_pci_driver(gvnic_driver);

MODULE_DEVICE_TABLE(pci, gve_id_table);
MODULE_AUTHOR("Google, Inc.");
MODULE_DESCRIPTION("gVNIC Driver");
MODULE_LICENSE("Dual MIT/GPL");
MODULE_VERSION(GVE_VERSION);