/*
 * Cadence MACB/GEM Ethernet Controller driver
 *
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
#include <linux/crc32.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/circ_buf.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <linux/platform_data/macb.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include "macb.h"

#define MACB_RX_BUFFER_SIZE	128
#define RX_BUFFER_MULTIPLE	64	/* bytes */

#define DEFAULT_RX_RING_SIZE	512	/* must be power of 2 */
#define MIN_RX_RING_SIZE	64
#define MAX_RX_RING_SIZE	8192
#define RX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\
				 * (bp)->rx_ring_size)

#define DEFAULT_TX_RING_SIZE	512	/* must be power of 2 */
#define MIN_TX_RING_SIZE	64
#define MAX_TX_RING_SIZE	4096
#define TX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\
				 * (bp)->tx_ring_size)

/* level of occupied TX descriptors under which we wake up TX process */
#define MACB_TX_WAKEUP_THRESH(bp)	(3 * (bp)->tx_ring_size / 4)

#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
				 | MACB_BIT(ISR_ROVR))
#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
					| MACB_BIT(ISR_RLE)		\
					| MACB_BIT(TXERR))
#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP)	\
					| MACB_BIT(TXUBR))

/* Max length of transmit frame must be a multiple of 8 bytes */
#define MACB_TX_LEN_ALIGN	8
#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))
#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))
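
/* Worked example (illustrative only): with the default 512-entry rings and
 * the basic two-word descriptor (see the layout comment further down),
 * RX_RING_BYTES()/TX_RING_BYTES() come to 512 * 8 = 4096 bytes and
 * MACB_TX_WAKEUP_THRESH() works out to 3 * 512 / 4 = 384 descriptors.
 */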

#define GEM_MTU_MIN_SIZE	ETH_MIN_MTU
#define MACB_NETIF_LSO		NETIF_F_TSO

#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
#define MACB_WOL_ENABLED		(0x1 << 1)

/* Graceful stop timeouts in us. We should allow up to
 * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
 */
#define MACB_HALT_TIMEOUT	1230

/* DMA buffer descriptors might be of different size,
 * depending on hardware configuration:
 *
 * 1. dma address width 32 bits:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *
 * 2. dma address width 64 bits:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *    word 3: upper 32 bit address of Data Buffer
 *    word 4: unused
 *
 * 3. dma address width 32 bits with hardware timestamping:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *    word 3: timestamp word 1
 *    word 4: timestamp word 2
 *
 * 4. dma address width 64 bits with hardware timestamping:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *    word 3: upper 32 bit address of Data Buffer
 *    word 4: unused
 *    word 5: timestamp word 1
 *    word 6: timestamp word 2
 */
static unsigned int macb_dma_desc_get_size(struct macb *bp)
{
#ifdef MACB_EXT_DESC
	unsigned int desc_size;

	switch (bp->hw_dma_cap) {
	case HW_DMA_CAP_64B:
		desc_size = sizeof(struct macb_dma_desc)
			+ sizeof(struct macb_dma_desc_64);
		break;
	case HW_DMA_CAP_PTP:
		desc_size = sizeof(struct macb_dma_desc)
			+ sizeof(struct macb_dma_desc_ptp);
		break;
	case HW_DMA_CAP_64B_PTP:
		desc_size = sizeof(struct macb_dma_desc)
			+ sizeof(struct macb_dma_desc_64)
			+ sizeof(struct macb_dma_desc_ptp);
		break;
	default:
		desc_size = sizeof(struct macb_dma_desc);
	}
	return desc_size;
#endif
	return sizeof(struct macb_dma_desc);
}
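
/* Map a logical descriptor index to its slot in the descriptor array:
 * extended descriptors occupy two (64-bit or PTP) or three (64-bit + PTP)
 * base-sized slots, so e.g. logical index 3 becomes slot 6 or slot 9.
 */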
static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int desc_idx)
{
#ifdef MACB_EXT_DESC
	switch (bp->hw_dma_cap) {
	case HW_DMA_CAP_64B:
	case HW_DMA_CAP_PTP:
		desc_idx <<= 1;
		break;
	case HW_DMA_CAP_64B_PTP:
		desc_idx *= 3;
		break;
	default:
		break;
	}
#endif
	return desc_idx;
}

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
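/* Return the 64-bit address extension that immediately follows a base
 * descriptor, or NULL when the controller is not using 64-bit DMA.
 */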
static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc)
{
	if (bp->hw_dma_cap & HW_DMA_CAP_64B)
		return (struct macb_dma_desc_64 *)((void *)desc + sizeof(struct macb_dma_desc));
	return NULL;
}
#endif

/* Ring buffer accessors */
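/* The ring sizes are powers of two, so wrapping an index is a simple mask;
 * the descriptor accessors additionally rescale the index when extended
 * (64-bit and/or PTP) descriptors are in use.
 */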
static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
{
	return index & (bp->tx_ring_size - 1);
}

static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
					  unsigned int index)
{
	index = macb_tx_ring_wrap(queue->bp, index);
	index = macb_adj_dma_desc_idx(queue->bp, index);
	return &queue->tx_ring[index];
}

static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
				       unsigned int index)
{
	return &queue->tx_skb[macb_tx_ring_wrap(queue->bp, index)];
}

static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
{
	dma_addr_t offset;

	offset = macb_tx_ring_wrap(queue->bp, index) *
		 macb_dma_desc_get_size(queue->bp);

	return queue->tx_ring_dma + offset;
}

static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index)
{
	return index & (bp->rx_ring_size - 1);
}

static struct macb_dma_desc *macb_rx_desc(struct macb_queue *queue, unsigned int index)
{
	index = macb_rx_ring_wrap(queue->bp, index);
	index = macb_adj_dma_desc_idx(queue->bp, index);
	return &queue->rx_ring[index];
}

static void *macb_rx_buffer(struct macb_queue *queue, unsigned int index)
{
	return queue->rx_buffers + queue->bp->rx_buffer_size *
	       macb_rx_ring_wrap(queue->bp, index);
}

/* I/O accessors */
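/* The "native" accessors use __raw I/O and therefore follow CPU byte order,
 * while the plain accessors are always little-endian; the driver selects one
 * pair at probe time based on hw_is_native_io() below.
 */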
static u32 hw_readl_native(struct macb *bp, int offset)
{
	return __raw_readl(bp->regs + offset);
}

static void hw_writel_native(struct macb *bp, int offset, u32 value)
{
	__raw_writel(value, bp->regs + offset);
}

static u32 hw_readl(struct macb *bp, int offset)
{
	return readl_relaxed(bp->regs + offset);
}

static void hw_writel(struct macb *bp, int offset, u32 value)
{
	writel_relaxed(value, bp->regs + offset);
}

/* Find the CPU endianness by using the loopback bit of the NCR register. When
 * the CPU is big-endian, we need to program swapped mode for management
 * descriptor access.
 */
static bool hw_is_native_io(void __iomem *addr)
{
	u32 value = MACB_BIT(LLB);

	__raw_writel(value, addr + MACB_NCR);
	value = __raw_readl(addr + MACB_NCR);

	/* Write 0 back to disable everything */
	__raw_writel(0, addr + MACB_NCR);

	return value == MACB_BIT(LLB);
}
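
/* GEM and MACB share a register layout; they are told apart by the module
 * identification field, where GEM reports an IDNUM of 0x2 or higher.
 */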
static bool hw_is_gem(void __iomem *addr, bool native_io)
{
	u32 id;

	if (native_io)
		id = __raw_readl(addr + MACB_MID);
	else
		id = readl_relaxed(addr + MACB_MID);

	return MACB_BFEXT(IDNUM, id) >= 0x2;
}
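
/* Program the MAC address into specific-address register pair 1 and clear
 * the remaining pairs so that stale addresses cannot match.
 */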
static void macb_set_hwaddr(struct macb *bp)
{
	u32 bottom;
	u16 top;

	bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
	macb_or_gem_writel(bp, SA1B, bottom);
	top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
	macb_or_gem_writel(bp, SA1T, top);

	/* Clear unused address register sets */
	macb_or_gem_writel(bp, SA2B, 0);
	macb_or_gem_writel(bp, SA2T, 0);
	macb_or_gem_writel(bp, SA3B, 0);
	macb_or_gem_writel(bp, SA3T, 0);
	macb_or_gem_writel(bp, SA4B, 0);
	macb_or_gem_writel(bp, SA4T, 0);
}

static void macb_get_hwaddr(struct macb *bp)
{
	struct macb_platform_data *pdata;
	u32 bottom;
	u16 top;
	u8 addr[6];
	int i;

	pdata = dev_get_platdata(&bp->pdev->dev);

	/* Check all 4 address registers for a valid address */
	for (i = 0; i < 4; i++) {
		bottom = macb_or_gem_readl(bp, SA1B + i * 8);
		top = macb_or_gem_readl(bp, SA1T + i * 8);

		if (pdata && pdata->rev_eth_addr) {
			addr[5] = bottom & 0xff;
			addr[4] = (bottom >> 8) & 0xff;
			addr[3] = (bottom >> 16) & 0xff;
			addr[2] = (bottom >> 24) & 0xff;
			addr[1] = top & 0xff;
			addr[0] = (top & 0xff00) >> 8;
		} else {
			addr[0] = bottom & 0xff;
			addr[1] = (bottom >> 8) & 0xff;
			addr[2] = (bottom >> 16) & 0xff;
			addr[3] = (bottom >> 24) & 0xff;
			addr[4] = top & 0xff;
			addr[5] = (top >> 8) & 0xff;
		}

		if (is_valid_ether_addr(addr)) {
			memcpy(bp->dev->dev_addr, addr, sizeof(addr));
			return;
		}
	}

	dev_info(&bp->pdev->dev, "invalid hw address, using random\n");
	eth_hw_addr_random(bp->dev);
}
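
/* MDIO (IEEE 802.3 Clause 22) accessors: a management frame is shifted out
 * through the MAN register, and NSR.IDLE is polled for completion.
 */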
static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	struct macb *bp = bus->priv;
	int value;

	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
			      | MACB_BF(RW, MACB_MAN_READ)
			      | MACB_BF(PHYA, mii_id)
			      | MACB_BF(REGA, regnum)
			      | MACB_BF(CODE, MACB_MAN_CODE)));

	/* wait for end of transfer */
	while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
		cpu_relax();

	value = MACB_BFEXT(DATA, macb_readl(bp, MAN));

	return value;
}

static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
			   u16 value)
{
	struct macb *bp = bus->priv;

	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
			      | MACB_BF(RW, MACB_MAN_WRITE)
			      | MACB_BF(PHYA, mii_id)
			      | MACB_BF(REGA, regnum)
			      | MACB_BF(CODE, MACB_MAN_CODE)
			      | MACB_BF(DATA, value)));

	/* wait for end of transfer */
	while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
		cpu_relax();

	return 0;
}

/**
 * macb_set_tx_clk() - Set a clock to a new frequency
 * @clk: Pointer to the clock to change
 * @speed: New link speed, used to derive the target frequency
 * @dev: Pointer to the struct net_device
 */
static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
{
	long ferr, rate, rate_rounded;

	if (!clk)
		return;

	switch (speed) {
	case SPEED_10:
		rate = 2500000;
		break;
	case SPEED_100:
		rate = 25000000;
		break;
	case SPEED_1000:
		rate = 125000000;
		break;
	default:
		return;
	}

	rate_rounded = clk_round_rate(clk, rate);
	if (rate_rounded < 0)
		return;

	/* RGMII allows 50 ppm frequency error. Test and warn if this limit
	 * is not satisfied.
	 */
	ferr = abs(rate_rounded - rate);
	ferr = DIV_ROUND_UP(ferr, rate / 100000);
	if (ferr > 5)
		netdev_warn(dev, "unable to generate target frequency: %ld Hz\n",
			    rate);

	if (clk_set_rate(clk, rate_rounded))
		netdev_err(dev, "adjusting tx_clk failed.\n");
}
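
/* PHY framework callback: mirror the PHY's current speed/duplex into the
 * NCFGR register, retune the TX clock, and log carrier transitions.
 */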
static void macb_handle_link_change(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	unsigned long flags;
	int status_change = 0;

	spin_lock_irqsave(&bp->lock, flags);

	if (phydev->link) {
		if ((bp->speed != phydev->speed) ||
		    (bp->duplex != phydev->duplex)) {
			u32 reg;

			reg = macb_readl(bp, NCFGR);
			reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
			if (macb_is_gem(bp))
				reg &= ~GEM_BIT(GBE);

			if (phydev->duplex)
				reg |= MACB_BIT(FD);
			if (phydev->speed == SPEED_100)
				reg |= MACB_BIT(SPD);
			if (phydev->speed == SPEED_1000 &&
			    bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
				reg |= GEM_BIT(GBE);

			macb_or_gem_writel(bp, NCFGR, reg);

			bp->speed = phydev->speed;
			bp->duplex = phydev->duplex;
			status_change = 1;
		}
	}

	if (phydev->link != bp->link) {
		if (!phydev->link) {
			bp->speed = 0;
			bp->duplex = -1;
		}
		bp->link = phydev->link;

		status_change = 1;
	}

	spin_unlock_irqrestore(&bp->lock, flags);

	if (status_change) {
		if (phydev->link) {
			/* Update the TX clock rate if and only if the link is
			 * up and there has been a link change.
			 */
			macb_set_tx_clk(bp->tx_clk, phydev->speed, dev);

			netif_carrier_on(dev);
			netdev_info(dev, "link up (%d/%s)\n",
				    phydev->speed,
				    phydev->duplex == DUPLEX_FULL ?
				    "Full" : "Half");
		} else {
			netif_carrier_off(dev);
			netdev_info(dev, "link down\n");
		}
	}
}

/* based on au1000_eth.c */
static int macb_mii_probe(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct macb_platform_data *pdata;
	struct phy_device *phydev;
	struct device_node *np;
	int phy_irq, ret, i;

	pdata = dev_get_platdata(&bp->pdev->dev);
	np = bp->pdev->dev.of_node;
	ret = 0;

	if (np) {
		if (of_phy_is_fixed_link(np)) {
			bp->phy_node = of_node_get(np);
		} else {
			bp->phy_node = of_parse_phandle(np, "phy-handle", 0);
			/* fall back to standard PHY registration if no
			 * phy-handle was found and no PHY was found during
			 * DT PHY registration
			 */
			if (!bp->phy_node && !phy_find_first(bp->mii_bus)) {
				for (i = 0; i < PHY_MAX_ADDR; i++) {
					struct phy_device *phydev;

					phydev = mdiobus_scan(bp->mii_bus, i);
					if (IS_ERR(phydev) &&
					    PTR_ERR(phydev) != -ENODEV) {
						ret = PTR_ERR(phydev);
						break;
					}
				}

				if (ret)
					return -ENODEV;
			}
		}
	}

	if (bp->phy_node) {
		phydev = of_phy_connect(dev, bp->phy_node,
					&macb_handle_link_change, 0,
					bp->phy_interface);
		if (!phydev)
			return -ENODEV;
	} else {
		phydev = phy_find_first(bp->mii_bus);
		if (!phydev) {
			netdev_err(dev, "no PHY found\n");
			return -ENXIO;
		}

		if (pdata) {
			if (gpio_is_valid(pdata->phy_irq_pin)) {
				ret = devm_gpio_request(&bp->pdev->dev,
							pdata->phy_irq_pin, "phy int");
				if (!ret) {
					phy_irq = gpio_to_irq(pdata->phy_irq_pin);
					phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq;
				}
			} else {
				phydev->irq = PHY_POLL;
			}
		}

		/* attach the mac to the phy */
		ret = phy_connect_direct(dev, phydev, &macb_handle_link_change,
					 bp->phy_interface);
		if (ret) {
			netdev_err(dev, "Could not attach to PHY\n");
			return ret;
		}
	}

	/* mask with MAC supported features */
	if (macb_is_gem(bp) && bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
		phy_set_max_speed(phydev, SPEED_1000);
	else
		phy_set_max_speed(phydev, SPEED_100);

	if (bp->caps & MACB_CAPS_NO_GIGABIT_HALF)
		phy_remove_link_mode(phydev,
				     ETHTOOL_LINK_MODE_1000baseT_Half_BIT);

	bp->link = 0;
	bp->speed = 0;
	bp->duplex = -1;

	return 0;
}
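
/* Allocate and register the MDIO bus, honouring a devicetree fixed-link
 * node when one is present, then probe for the PHY.
 */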
static int macb_mii_init(struct macb *bp)
{
	struct macb_platform_data *pdata;
	struct device_node *np;
	int err = -ENXIO;

	/* Enable management port */
	macb_writel(bp, NCR, MACB_BIT(MPE));

	bp->mii_bus = mdiobus_alloc();
	if (!bp->mii_bus) {
		err = -ENOMEM;
		goto err_out;
	}

	bp->mii_bus->name = "MACB_mii_bus";
	bp->mii_bus->read = &macb_mdio_read;
	bp->mii_bus->write = &macb_mdio_write;
	snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		 bp->pdev->name, bp->pdev->id);
	bp->mii_bus->priv = bp;
	bp->mii_bus->parent = &bp->pdev->dev;
	pdata = dev_get_platdata(&bp->pdev->dev);

	dev_set_drvdata(&bp->dev->dev, bp->mii_bus);

	np = bp->pdev->dev.of_node;
	if (np && of_phy_is_fixed_link(np)) {
		if (of_phy_register_fixed_link(np) < 0) {
			dev_err(&bp->pdev->dev,
				"broken fixed-link specification %pOF\n", np);
			goto err_out_free_mdiobus;
		}

		err = mdiobus_register(bp->mii_bus);
	} else {
		if (pdata)
			bp->mii_bus->phy_mask = pdata->phy_mask;

		err = of_mdiobus_register(bp->mii_bus, np);
	}

	if (err)
		goto err_out_free_fixed_link;

	err = macb_mii_probe(bp->dev);
	if (err)
		goto err_out_unregister_bus;

	return 0;

err_out_unregister_bus:
	mdiobus_unregister(bp->mii_bus);
err_out_free_fixed_link:
	if (np && of_phy_is_fixed_link(np))
		of_phy_deregister_fixed_link(np);
err_out_free_mdiobus:
	of_node_put(bp->phy_node);
	mdiobus_free(bp->mii_bus);
err_out:
	return err;
}
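
/* Fold the hardware statistics registers (MACB_PFR through MACB_TPF) into
 * the running software counters.
 */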
static void macb_update_stats(struct macb *bp)
{
	u32 *p = &bp->hw_stats.macb.rx_pause_frames;
	u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
	int offset = MACB_PFR;

	WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);

	for (; p < end; p++, offset += 4)
		*p += bp->macb_reg_readl(bp, offset);
}
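
/* Ask the controller to halt transmission and poll TSR until the transmit-go
 * bit clears or MACB_HALT_TIMEOUT elapses.
 */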
static int macb_halt_tx(struct macb *bp)
{
	unsigned long halt_time, timeout;
	u32 status;

	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));

	timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
	do {
		halt_time = jiffies;
		status = macb_readl(bp, TSR);
		if (!(status & MACB_BIT(TGO)))
			return 0;

		udelay(250);
	} while (time_before(halt_time, timeout));

	return -ETIMEDOUT;
}
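
/* Release a TX buffer's DMA mapping and free the attached skb, if any. */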
static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
{
	if (tx_skb->mapping) {
		if (tx_skb->mapped_as_page)
			dma_unmap_page(&bp->pdev->dev, tx_skb->mapping,
				       tx_skb->size, DMA_TO_DEVICE);
		else
			dma_unmap_single(&bp->pdev->dev, tx_skb->mapping,
					 tx_skb->size, DMA_TO_DEVICE);
		tx_skb->mapping = 0;
	}

	if (tx_skb->skb) {
		dev_kfree_skb_any(tx_skb->skb);
		tx_skb->skb = NULL;
	}
}
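
/* Descriptor address helpers: with 64-bit DMA the buffer address is split
 * across two descriptor words, and the low word of an RX address also holds
 * the RX_USED bit, hence the write ordering in macb_set_addr().
 */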
static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_t addr)
{
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	struct macb_dma_desc_64 *desc_64;

	if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
		desc_64 = macb_64b_desc(bp, desc);
		desc_64->addrh = upper_32_bits(addr);
		/* The low bits of RX address contain the RX_USED bit, clearing
		 * of which allows packet RX. Make sure the high bits are also
		 * visible to HW at that point.
		 */
		dma_wmb();
	}
#endif
	desc->addr = lower_32_bits(addr);
}

static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc)
{
	dma_addr_t addr = 0;
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	struct macb_dma_desc_64 *desc_64;

	if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
		desc_64 = macb_64b_desc(bp, desc);
		addr = ((u64)(desc_64->addrh) << 32);
	}
#endif
	addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
	return addr;
}
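
/* Work item scheduled when a TX error interrupt occurs: halt transmission,
 * clean up the faulting queue, then restart TX.
 */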
|
|
|
|
|
2012-10-31 13:04:57 +07:00
|
|
|
static void macb_tx_error_task(struct work_struct *work)
|
|
|
|
{
|
net/macb: add TX multiqueue support for gem
gem devices designed with multiqueue CANNOT work without this patch.
When probing a gem device, the driver must first prepare and enable the
peripheral clock before accessing I/O registers. The second step is to read the
MID register to find whether the device is a gem or an old macb IP.
For gem devices, it reads the Design Configuration Register 6 (DCFG6) to
compute to total number of queues, whereas macb devices always have a single
queue.
Only then it can call alloc_etherdev_mq() with the correct number of queues.
This is the reason why the order of some initializations has been changed in
macb_probe().
Eventually, the dedicated IRQ and TX ring buffer descriptors are initialized
for each queue.
For backward compatibility reasons, queue0 uses the legacy registers ISR, IER,
IDR, IMR, TBQP and RBQP. On the other hand, the other queues use new registers
ISR[1..7], IER[1..7], IDR[1..7], IMR[1..7], TBQP[1..7] and RBQP[1..7].
Except this hardware detail there is no real difference between queue0 and the
others. The driver hides that thanks to the struct macb_queue.
This structure allows us to share a common set of functions for all the queues.
Besides when a TX error occurs, the gem MUST be halted before writing any of
the TBQP registers to reset the relevant queue. An immediate side effect is
that the other queues too aren't processed anymore by the gem.
So macb_tx_error_task() calls netif_tx_stop_all_queues() to notify the Linux
network engine that all transmissions are stopped.
Also macb_tx_error_task() now calls spin_lock_irqsave() to prevent the
interrupt handlers of the other queues from running as each of them may wake
its associated queue up (please refer to macb_tx_interrupt()).
Finally, as all queues have previously been stopped, they should be restarted
calling netif_tx_start_all_queues() and setting the TSTART bit into the Network
Control Register. Before this patch, when dealing with a single queue, the
driver used to defer the reset of the faulting queue and the write of the
TSTART bit until the next call of macb_start_xmit().
As explained before, this bit is now set by macb_tx_error_task() too. That's
why the faulting queue MUST be reset by setting the TX_USED bit in its first
buffer descriptor before writing the TSTART bit.
Queue 0 always exits and is the lowest priority when other queues are available.
The higher the index of the queue is, the higher its priority is.
When transmitting frames, the TX queue is selected by the skb->queue_mapping
value. So queue discipline can be used to define the queue priority policy.
Signed-off-by: Cyrille Pitchen <cyrille.pitchen@atmel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2014-12-12 19:26:44 +07:00
|
|
|
struct macb_queue *queue = container_of(work, struct macb_queue,
|
|
|
|
tx_error_task);
|
|
|
|
struct macb *bp = queue->bp;
|
2012-10-31 13:04:57 +07:00
|
|
|
struct macb_tx_skb *tx_skb;
|
net/macb: add TX multiqueue support for gem
gem devices designed with multiqueue CANNOT work without this patch.
When probing a gem device, the driver must first prepare and enable the
peripheral clock before accessing I/O registers. The second step is to read the
MID register to find whether the device is a gem or an old macb IP.
For gem devices, it reads the Design Configuration Register 6 (DCFG6) to
compute to total number of queues, whereas macb devices always have a single
queue.
Only then it can call alloc_etherdev_mq() with the correct number of queues.
This is the reason why the order of some initializations has been changed in
macb_probe().
Eventually, the dedicated IRQ and TX ring buffer descriptors are initialized
for each queue.
For backward compatibility reasons, queue0 uses the legacy registers ISR, IER,
IDR, IMR, TBQP and RBQP. On the other hand, the other queues use new registers
ISR[1..7], IER[1..7], IDR[1..7], IMR[1..7], TBQP[1..7] and RBQP[1..7].
Except this hardware detail there is no real difference between queue0 and the
others. The driver hides that thanks to the struct macb_queue.
This structure allows us to share a common set of functions for all the queues.
Besides when a TX error occurs, the gem MUST be halted before writing any of
the TBQP registers to reset the relevant queue. An immediate side effect is
that the other queues too aren't processed anymore by the gem.
So macb_tx_error_task() calls netif_tx_stop_all_queues() to notify the Linux
network engine that all transmissions are stopped.
Also macb_tx_error_task() now calls spin_lock_irqsave() to prevent the
interrupt handlers of the other queues from running as each of them may wake
its associated queue up (please refer to macb_tx_interrupt()).
Finally, as all queues have previously been stopped, they should be restarted
calling netif_tx_start_all_queues() and setting the TSTART bit into the Network
Control Register. Before this patch, when dealing with a single queue, the
driver used to defer the reset of the faulting queue and the write of the
TSTART bit until the next call of macb_start_xmit().
As explained before, this bit is now set by macb_tx_error_task() too. That's
why the faulting queue MUST be reset by setting the TX_USED bit in its first
buffer descriptor before writing the TSTART bit.
Queue 0 always exits and is the lowest priority when other queues are available.
The higher the index of the queue is, the higher its priority is.
When transmitting frames, the TX queue is selected by the skb->queue_mapping
value. So queue discipline can be used to define the queue priority policy.
Signed-off-by: Cyrille Pitchen <cyrille.pitchen@atmel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2014-12-12 19:26:44 +07:00
|
|
|
struct macb_dma_desc *desc;
|
2012-10-31 13:04:57 +07:00
|
|
|
struct sk_buff *skb;
|
|
|
|
unsigned int tail;
|
net/macb: add TX multiqueue support for gem
gem devices designed with multiqueue CANNOT work without this patch.
When probing a gem device, the driver must first prepare and enable the
peripheral clock before accessing I/O registers. The second step is to read the
MID register to find whether the device is a gem or an old macb IP.
For gem devices, it reads the Design Configuration Register 6 (DCFG6) to
compute to total number of queues, whereas macb devices always have a single
queue.
Only then it can call alloc_etherdev_mq() with the correct number of queues.
This is the reason why the order of some initializations has been changed in
macb_probe().
Eventually, the dedicated IRQ and TX ring buffer descriptors are initialized
for each queue.
For backward compatibility reasons, queue0 uses the legacy registers ISR, IER,
IDR, IMR, TBQP and RBQP. On the other hand, the other queues use new registers
ISR[1..7], IER[1..7], IDR[1..7], IMR[1..7], TBQP[1..7] and RBQP[1..7].
Except this hardware detail there is no real difference between queue0 and the
others. The driver hides that thanks to the struct macb_queue.
This structure allows us to share a common set of functions for all the queues.
Besides when a TX error occurs, the gem MUST be halted before writing any of
the TBQP registers to reset the relevant queue. An immediate side effect is
that the other queues too aren't processed anymore by the gem.
So macb_tx_error_task() calls netif_tx_stop_all_queues() to notify the Linux
network engine that all transmissions are stopped.
Also macb_tx_error_task() now calls spin_lock_irqsave() to prevent the
interrupt handlers of the other queues from running as each of them may wake
its associated queue up (please refer to macb_tx_interrupt()).
Finally, as all queues have previously been stopped, they should be restarted
calling netif_tx_start_all_queues() and setting the TSTART bit into the Network
Control Register. Before this patch, when dealing with a single queue, the
driver used to defer the reset of the faulting queue and the write of the
TSTART bit until the next call of macb_start_xmit().
As explained before, this bit is now set by macb_tx_error_task() too. That's
why the faulting queue MUST be reset by setting the TX_USED bit in its first
buffer descriptor before writing the TSTART bit.
Queue 0 always exits and is the lowest priority when other queues are available.
The higher the index of the queue is, the higher its priority is.
When transmitting frames, the TX queue is selected by the skb->queue_mapping
value. So queue discipline can be used to define the queue priority policy.
Signed-off-by: Cyrille Pitchen <cyrille.pitchen@atmel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2014-12-12 19:26:44 +07:00
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n",
|
|
|
|
(unsigned int)(queue - bp->queues),
|
|
|
|
queue->tx_tail, queue->tx_head);
|
2007-12-20 00:23:44 +07:00
|
|
|
|
net/macb: add TX multiqueue support for gem
gem devices designed with multiqueue CANNOT work without this patch.
When probing a gem device, the driver must first prepare and enable the
peripheral clock before accessing I/O registers. The second step is to read the
MID register to find whether the device is a gem or an old macb IP.
For gem devices, it reads the Design Configuration Register 6 (DCFG6) to
compute to total number of queues, whereas macb devices always have a single
queue.
Only then it can call alloc_etherdev_mq() with the correct number of queues.
This is the reason why the order of some initializations has been changed in
macb_probe().
Eventually, the dedicated IRQ and TX ring buffer descriptors are initialized
for each queue.
For backward compatibility reasons, queue0 uses the legacy registers ISR, IER,
IDR, IMR, TBQP and RBQP. On the other hand, the other queues use new registers
ISR[1..7], IER[1..7], IDR[1..7], IMR[1..7], TBQP[1..7] and RBQP[1..7].
Except this hardware detail there is no real difference between queue0 and the
others. The driver hides that thanks to the struct macb_queue.
This structure allows us to share a common set of functions for all the queues.
Besides when a TX error occurs, the gem MUST be halted before writing any of
the TBQP registers to reset the relevant queue. An immediate side effect is
that the other queues too aren't processed anymore by the gem.
So macb_tx_error_task() calls netif_tx_stop_all_queues() to notify the Linux
network engine that all transmissions are stopped.
Also macb_tx_error_task() now calls spin_lock_irqsave() to prevent the
interrupt handlers of the other queues from running as each of them may wake
its associated queue up (please refer to macb_tx_interrupt()).
Finally, as all queues have previously been stopped, they should be restarted
calling netif_tx_start_all_queues() and setting the TSTART bit into the Network
Control Register. Before this patch, when dealing with a single queue, the
driver used to defer the reset of the faulting queue and the write of the
TSTART bit until the next call of macb_start_xmit().
As explained before, this bit is now set by macb_tx_error_task() too. That's
why the faulting queue MUST be reset by setting the TX_USED bit in its first
buffer descriptor before writing the TSTART bit.
Queue 0 always exits and is the lowest priority when other queues are available.
The higher the index of the queue is, the higher its priority is.
When transmitting frames, the TX queue is selected by the skb->queue_mapping
value. So queue discipline can be used to define the queue priority policy.
Signed-off-by: Cyrille Pitchen <cyrille.pitchen@atmel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2014-12-12 19:26:44 +07:00
|
|
|
/* Prevent the queue IRQ handlers from running: each of them may call
|
|
|
|
* macb_tx_interrupt(), which in turn may call netif_wake_subqueue().
|
|
|
|
* As explained below, we have to halt the transmission before updating
|
|
|
|
* TBQP registers so we call netif_tx_stop_all_queues() to notify the
|
|
|
|
* network engine about the macb/gem being halted.
|
|
|
|
*/
|
|
|
|
spin_lock_irqsave(&bp->lock, flags);
|
2007-12-20 00:23:44 +07:00
|
|
|
|
2012-10-31 13:04:57 +07:00
|
|
|
/* Make sure nobody is trying to queue up new packets */
|
2014-12-12 19:26:44 +07:00
|
|
|
netif_tx_stop_all_queues(bp->dev);
|
2011-08-04 05:11:47 +07:00
|
|
|
|
2016-03-30 09:11:12 +07:00
|
|
|
/* Stop transmission now
|
2012-10-31 13:04:57 +07:00
|
|
|
* (in case we have just queued new packets)
|
2014-12-12 19:26:44 +07:00
|
|
|
* macb/gem must be halted to write TBQP register
|
2012-10-31 13:04:57 +07:00
|
|
|
*/
|
|
|
|
if (macb_halt_tx(bp))
|
|
|
|
/* Just complain for now, reinitializing TX path can be good */
|
|
|
|
netdev_err(bp->dev, "BUG: halt tx timed out\n");
|
2007-12-20 00:23:44 +07:00
|
|
|
|
2016-03-30 09:11:12 +07:00
|
|
|
/* Treat frames in TX queue including the ones that caused the error.
|
2012-10-31 13:04:57 +07:00
|
|
|
* Free transmit buffers in upper layer.
|
|
|
|
*/
|
2014-12-12 19:26:44 +07:00
|
|
|
for (tail = queue->tx_tail; tail != queue->tx_head; tail++) {
|
|
|
|
u32 ctrl;
|
2012-10-31 13:04:55 +07:00
|
|
|
|
2014-12-12 19:26:44 +07:00
|
|
|
desc = macb_tx_desc(queue, tail);
|
2012-10-31 13:04:57 +07:00
|
|
|
ctrl = desc->ctrl;
|
2014-12-12 19:26:44 +07:00
|
|
|
tx_skb = macb_tx_skb(queue, tail);
|
2012-10-31 13:04:57 +07:00
|
|
|
skb = tx_skb->skb;
|
2007-12-20 00:23:44 +07:00
|
|
|
|
2012-10-31 13:04:57 +07:00
|
|
|
if (ctrl & MACB_BIT(TX_USED)) {
|
2014-07-24 18:50:59 +07:00
|
|
|
/* skb is set for the last buffer of the frame */
|
|
|
|
while (!skb) {
|
|
|
|
macb_tx_unmap(bp, tx_skb);
|
|
|
|
tail++;
|
2014-12-12 19:26:44 +07:00
|
|
|
tx_skb = macb_tx_skb(queue, tail);
|
2014-07-24 18:50:59 +07:00
|
|
|
skb = tx_skb->skb;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* ctrl still refers to the first buffer descriptor
|
|
|
|
* since it's the only one written back by the hardware
|
|
|
|
*/
|
|
|
|
if (!(ctrl & MACB_BIT(TX_BUF_EXHAUSTED))) {
|
|
|
|
netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
|
2016-10-19 21:56:57 +07:00
|
|
|
macb_tx_ring_wrap(bp, tail),
|
|
|
|
skb->data);
|
2017-04-07 15:17:30 +07:00
|
|
|
bp->dev->stats.tx_packets++;
|
2017-12-01 01:19:56 +07:00
|
|
|
queue->stats.tx_packets++;
|
2017-04-07 15:17:30 +07:00
|
|
|
bp->dev->stats.tx_bytes += skb->len;
|
2017-12-01 01:19:56 +07:00
|
|
|
queue->stats.tx_bytes += skb->len;
|
2014-07-24 18:50:59 +07:00
|
|
|
}
|
2012-10-31 13:04:57 +07:00
|
|
|
} else {
|
2016-03-30 09:11:12 +07:00
|
|
|
/* "Buffers exhausted mid-frame" errors may only happen
|
|
|
|
* if the driver is buggy, so complain loudly about
|
|
|
|
* those. Statistics are updated by hardware.
|
2012-10-31 13:04:57 +07:00
|
|
|
*/
|
|
|
|
if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
|
|
|
|
netdev_err(bp->dev,
|
|
|
|
"BUG: TX buffers exhausted mid-frame\n");
|
2009-01-19 12:57:35 +07:00
|
|
|
|
2012-10-31 13:04:57 +07:00
|
|
|
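/* Mark the failed frame's descriptor as used so it is accounted as
 * processed before the ring is reset below.
 */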
desc->ctrl = ctrl | MACB_BIT(TX_USED);
|
|
|
|
}
|
|
|
|
|
2014-07-24 18:50:59 +07:00
|
|
|
macb_tx_unmap(bp, tx_skb);
|
2006-11-09 20:51:17 +07:00
|
|
|
}
|
|
|
|
|
2014-12-12 19:26:44 +07:00
|
|
|
/* Set end of TX queue */
|
|
|
|
desc = macb_tx_desc(queue, 0);
|
2017-01-27 22:08:20 +07:00
|
|
|
macb_set_addr(bp, desc, 0);
|
2014-12-12 19:26:44 +07:00
|
|
|
desc->ctrl = MACB_BIT(TX_USED);
|
|
|
|
|
2012-10-31 13:04:57 +07:00
|
|
|
/* Make descriptor updates visible to hardware */
|
|
|
|
wmb();
|
|
|
|
|
|
|
|
/* Reinitialize the TX desc queue */
|
2017-01-27 22:08:20 +07:00
|
|
|
queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
|
2016-08-09 14:45:53 +07:00
|
|
|
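/* With 64-bit DMA addressing, the upper half of the ring address is
 * programmed through TBQPH.
 */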
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
|
2017-06-29 13:12:51 +07:00
|
|
|
if (bp->hw_dma_cap & HW_DMA_CAP_64B)
|
2017-01-27 22:08:20 +07:00
|
|
|
queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
|
2016-08-09 14:45:53 +07:00
|
|
|
#endif
|
2012-10-31 13:04:57 +07:00
|
|
|
/* Make TX ring reflect state of hardware */
|
2014-12-12 19:26:44 +07:00
|
|
|
queue->tx_head = 0;
|
|
|
|
queue->tx_tail = 0;
|
2012-10-31 13:04:57 +07:00
|
|
|
|
|
|
|
/* Housework before enabling TX IRQ */
|
|
|
|
macb_writel(bp, TSR, macb_readl(bp, TSR));
|
2014-12-12 19:26:44 +07:00
|
|
|
queue_writel(queue, IER, MACB_TX_INT_FLAGS);
|
|
|
|
|
|
|
|
/* Now we are ready to start transmission again */
|
|
|
|
netif_tx_start_all_queues(bp->dev);
|
|
|
|
macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
|
|
|
|
|
|
|
|
spin_unlock_irqrestore(&bp->lock, flags);
|
2012-10-31 13:04:57 +07:00
|
|
|
}
|
|
|
|
|
2014-12-12 19:26:44 +07:00
|
|
|
static void macb_tx_interrupt(struct macb_queue *queue)
|
2012-10-31 13:04:57 +07:00
|
|
|
{
|
|
|
|
unsigned int tail;
|
|
|
|
unsigned int head;
|
|
|
|
u32 status;
|
2014-12-12 19:26:44 +07:00
|
|
|
struct macb *bp = queue->bp;
|
|
|
|
u16 queue_index = queue - bp->queues;
|
2012-10-31 13:04:57 +07:00
|
|
|
|
|
|
|
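/* TSR bits are write-one-to-clear: read the pending status, then write
 * it back to acknowledge it.
 */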
status = macb_readl(bp, TSR);
|
|
|
|
macb_writel(bp, TSR, status);
|
|
|
|
|
2013-05-14 10:00:16 +07:00
|
|
|
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
|
2014-12-12 19:26:44 +07:00
|
|
|
queue_writel(queue, ISR, MACB_BIT(TCOMP));
|
2013-03-28 06:07:05 +07:00
|
|
|
|
2012-10-31 13:04:57 +07:00
|
|
|
netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
|
2016-03-30 09:11:13 +07:00
|
|
|
(unsigned long)status);
|
2006-11-09 20:51:17 +07:00
|
|
|
|
2014-12-12 19:26:44 +07:00
|
|
|
head = queue->tx_head;
|
|
|
|
for (tail = queue->tx_tail; tail != head; tail++) {
|
2012-10-31 13:04:55 +07:00
|
|
|
struct macb_tx_skb *tx_skb;
|
|
|
|
struct sk_buff *skb;
|
|
|
|
struct macb_dma_desc *desc;
|
|
|
|
u32 ctrl;
|
2006-11-09 20:51:17 +07:00
|
|
|
|
2014-12-12 19:26:44 +07:00
|
|
|
desc = macb_tx_desc(queue, tail);
|
2006-11-09 20:51:17 +07:00
|
|
|
|
2012-10-31 13:04:51 +07:00
|
|
|
/* Make hw descriptor updates visible to CPU */
|
2006-11-09 20:51:17 +07:00
|
|
|
rmb();
|
2012-10-31 13:04:51 +07:00
|
|
|
|
2012-10-31 13:04:55 +07:00
|
|
|
ctrl = desc->ctrl;
|
2006-11-09 20:51:17 +07:00
|
|
|
|
2014-07-24 18:50:59 +07:00
|
|
|
/* TX_USED bit is only set by hardware on the very first buffer
|
|
|
|
* descriptor of the transmitted frame.
|
|
|
|
*/
|
2012-10-31 13:04:55 +07:00
|
|
|
if (!(ctrl & MACB_BIT(TX_USED)))
|
2006-11-09 20:51:17 +07:00
|
|
|
break;
|
|
|
|
|
2014-07-24 18:50:59 +07:00
|
|
|
/* Process all buffers of the current transmitted frame */
|
|
|
|
for (;; tail++) {
|
2014-12-12 19:26:44 +07:00
|
|
|
tx_skb = macb_tx_skb(queue, tail);
|
2014-07-24 18:50:59 +07:00
|
|
|
skb = tx_skb->skb;
|
|
|
|
|
|
|
|
/* First, update TX stats if needed */
|
|
|
|
if (skb) {
|
2017-06-29 13:14:16 +07:00
|
|
|
if (gem_ptp_do_txstamp(queue, skb, desc) == 0) {
|
|
|
|
/* skb now belongs to timestamp buffer
|
|
|
|
* and will be removed later
|
|
|
|
*/
|
|
|
|
tx_skb->skb = NULL;
|
|
|
|
}
|
2014-07-24 18:50:59 +07:00
|
|
|
netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
|
2016-10-19 21:56:57 +07:00
|
|
|
macb_tx_ring_wrap(bp, tail),
|
|
|
|
skb->data);
|
2017-04-07 15:17:30 +07:00
|
|
|
bp->dev->stats.tx_packets++;
|
2017-12-01 01:19:56 +07:00
|
|
|
queue->stats.tx_packets++;
|
2017-04-07 15:17:30 +07:00
|
|
|
bp->dev->stats.tx_bytes += skb->len;
|
2017-12-01 01:19:56 +07:00
|
|
|
queue->stats.tx_bytes += skb->len;
|
2014-07-24 18:50:59 +07:00
|
|
|
}
|
2012-10-31 13:04:55 +07:00
|
|
|
|
2014-07-24 18:50:59 +07:00
|
|
|
/* Now we can safely release resources */
|
|
|
|
macb_tx_unmap(bp, tx_skb);
|
|
|
|
|
|
|
|
/* skb is set only for the last buffer of the frame.
|
|
|
|
* WARNING: at this point skb has been freed by
|
|
|
|
* macb_tx_unmap().
|
|
|
|
*/
|
|
|
|
if (skb)
|
|
|
|
break;
|
|
|
|
}
|
2006-11-09 20:51:17 +07:00
|
|
|
}
|
|
|
|
|
2014-12-12 19:26:44 +07:00
|
|
|
queue->tx_tail = tail;
|
|
|
|
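/* CIRC_CNT() counts the descriptors still in flight; wake the subqueue
 * once that count drops to the wakeup threshold.
 */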
if (__netif_subqueue_stopped(bp->dev, queue_index) &&
|
|
|
|
CIRC_CNT(queue->tx_head, queue->tx_tail,
|
2016-10-19 21:56:57 +07:00
|
|
|
bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp))
|
2014-12-12 19:26:44 +07:00
|
|
|
netif_wake_subqueue(bp->dev, queue_index);
|
2006-11-09 20:51:17 +07:00
|
|
|
}
|
|
|
|
|
2017-12-01 01:19:15 +07:00
|
|
|
static void gem_rx_refill(struct macb_queue *queue)
|
2013-06-05 04:57:12 +07:00
|
|
|
{
|
|
|
|
unsigned int entry;
|
|
|
|
struct sk_buff *skb;
|
|
|
|
dma_addr_t paddr;
|
2017-12-01 01:19:15 +07:00
|
|
|
struct macb *bp = queue->bp;
|
2017-01-27 22:08:20 +07:00
|
|
|
struct macb_dma_desc *desc;
|
2013-06-05 04:57:12 +07:00
|
|
|
|
2017-12-01 01:19:15 +07:00
|
|
|
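/* CIRC_SPACE() reports how many free entries remain between the prepared
 * head and the tail still owned by hardware.
 */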
while (CIRC_SPACE(queue->rx_prepared_head, queue->rx_tail,
|
|
|
|
bp->rx_ring_size) > 0) {
|
|
|
|
entry = macb_rx_ring_wrap(bp, queue->rx_prepared_head);
|
2013-06-05 04:57:12 +07:00
|
|
|
|
|
|
|
/* Make hw descriptor updates visible to CPU */
|
|
|
|
rmb();
|
|
|
|
|
2017-12-01 01:19:15 +07:00
|
|
|
queue->rx_prepared_head++;
|
|
|
|
desc = macb_rx_desc(queue, entry);
|
2013-06-05 04:57:12 +07:00
|
|
|
|
2017-12-01 01:19:15 +07:00
|
|
|
if (!queue->rx_skbuff[entry]) {
|
2013-06-05 04:57:12 +07:00
|
|
|
/* allocate sk_buff for this free entry in ring */
|
|
|
|
skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
|
2016-03-30 09:11:13 +07:00
|
|
|
if (unlikely(!skb)) {
|
2013-06-05 04:57:12 +07:00
|
|
|
netdev_err(bp->dev,
|
|
|
|
"Unable to allocate sk_buff\n");
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* now fill corresponding descriptor entry */
|
|
|
|
paddr = dma_map_single(&bp->pdev->dev, skb->data,
|
2016-03-30 09:11:12 +07:00
|
|
|
bp->rx_buffer_size,
|
|
|
|
DMA_FROM_DEVICE);
|
2014-03-04 23:46:39 +07:00
|
|
|
if (dma_mapping_error(&bp->pdev->dev, paddr)) {
|
|
|
|
dev_kfree_skb(skb);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2017-12-01 01:19:15 +07:00
|
|
|
queue->rx_skbuff[entry] = skb;
|
2013-06-05 04:57:12 +07:00
|
|
|
|
2016-10-19 21:56:57 +07:00
|
|
|
if (entry == bp->rx_ring_size - 1)
|
2013-06-05 04:57:12 +07:00
|
|
|
paddr |= MACB_BIT(RX_WRAP);
|
2017-01-27 22:08:20 +07:00
|
|
|
desc->ctrl = 0;
|
2018-12-17 20:05:40 +07:00
|
|
|
/* Setting addr clears RX_USED and allows reception,
|
|
|
|
* make sure ctrl is cleared first to avoid a race.
|
|
|
|
*/
|
|
|
|
dma_wmb();
|
|
|
|
macb_set_addr(bp, desc, paddr);
|
2013-06-05 04:57:12 +07:00
|
|
|
|
|
|
|
/* properly align Ethernet header */
|
|
|
|
skb_reserve(skb, NET_IP_ALIGN);
|
2015-04-29 10:04:46 +07:00
|
|
|
} else {
|
2017-01-27 22:08:20 +07:00
|
|
|
desc->ctrl = 0;
|
2018-12-17 20:05:40 +07:00
|
|
|
dma_wmb();
|
|
|
|
desc->addr &= ~MACB_BIT(RX_USED);
|
2013-06-05 04:57:12 +07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Make descriptor updates visible to hardware */
|
|
|
|
wmb();
|
|
|
|
|
2017-12-01 01:19:15 +07:00
|
|
|
netdev_vdbg(bp->dev, "rx ring: queue: %p, prepared head %d, tail %d\n",
|
|
|
|
queue, queue->rx_prepared_head, queue->rx_tail);
|
2013-06-05 04:57:12 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Mark DMA descriptors from begin up to and not including end as unused */
|
2017-12-01 01:19:15 +07:00
|
|
|
static void discard_partial_frame(struct macb_queue *queue, unsigned int begin,
|
2013-06-05 04:57:12 +07:00
|
|
|
unsigned int end)
|
|
|
|
{
|
|
|
|
unsigned int frag;
|
|
|
|
|
|
|
|
for (frag = begin; frag != end; frag++) {
|
2017-12-01 01:19:15 +07:00
|
|
|
struct macb_dma_desc *desc = macb_rx_desc(queue, frag);
|
2016-03-30 09:11:12 +07:00
|
|
|
|
2013-06-05 04:57:12 +07:00
|
|
|
desc->addr &= ~MACB_BIT(RX_USED);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Make descriptor updates visible to hardware */
|
|
|
|
wmb();
|
|
|
|
|
2016-03-30 09:11:12 +07:00
|
|
|
/* When this happens, the hardware stats registers for
|
2013-06-05 04:57:12 +07:00
|
|
|
* whatever caused this are updated, so we don't have to record
|
|
|
|
* anything.
|
|
|
|
*/
|
|
|
|
}
|
|
|
|
|
2017-12-01 01:19:15 +07:00
|
|
|
static int gem_rx(struct macb_queue *queue, int budget)
|
2013-06-05 04:57:12 +07:00
|
|
|
{
|
2017-12-01 01:19:15 +07:00
|
|
|
struct macb *bp = queue->bp;
|
2013-06-05 04:57:12 +07:00
|
|
|
unsigned int len;
|
|
|
|
unsigned int entry;
|
|
|
|
struct sk_buff *skb;
|
|
|
|
struct macb_dma_desc *desc;
|
|
|
|
int count = 0;
|
|
|
|
|
|
|
|
while (count < budget) {
|
2016-08-09 14:45:53 +07:00
|
|
|
u32 ctrl;
|
|
|
|
dma_addr_t addr;
|
|
|
|
bool rxused;
|
2013-06-05 04:57:12 +07:00
|
|
|
|
2017-12-01 01:19:15 +07:00
|
|
|
entry = macb_rx_ring_wrap(bp, queue->rx_tail);
|
|
|
|
desc = macb_rx_desc(queue, entry);
|
2013-06-05 04:57:12 +07:00
|
|
|
|
|
|
|
/* Make hw descriptor updates visible to CPU */
|
|
|
|
rmb();
|
|
|
|
|
2016-08-09 14:45:53 +07:00
|
|
|
rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false;
|
2017-01-27 22:08:20 +07:00
|
|
|
addr = macb_get_addr(bp, desc);
|
2013-06-05 04:57:12 +07:00
|
|
|
|
2016-08-09 14:45:53 +07:00
|
|
|
if (!rxused)
|
2013-06-05 04:57:12 +07:00
|
|
|
break;
|
|
|
|
|
2018-12-17 20:05:41 +07:00
|
|
|
/* Ensure ctrl is at least as up-to-date as rxused */
|
|
|
|
dma_rmb();
|
|
|
|
|
|
|
|
ctrl = desc->ctrl;
|
|
|
|
|
2017-12-01 01:19:15 +07:00
|
|
|
queue->rx_tail++;
|
2013-06-05 04:57:12 +07:00
|
|
|
count++;
|
|
|
|
|
|
|
|
if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) {
|
|
|
|
netdev_err(bp->dev,
|
|
|
|
"not whole frame pointed by descriptor\n");
|
2017-04-07 15:17:30 +07:00
|
|
|
bp->dev->stats.rx_dropped++;
|
2017-12-01 01:19:56 +07:00
|
|
|
queue->stats.rx_dropped++;
|
2013-06-05 04:57:12 +07:00
|
|
|
break;
|
|
|
|
}
|
2017-12-01 01:19:15 +07:00
|
|
|
skb = queue->rx_skbuff[entry];
|
2013-06-05 04:57:12 +07:00
|
|
|
if (unlikely(!skb)) {
|
|
|
|
netdev_err(bp->dev,
|
|
|
|
"inconsistent Rx descriptor chain\n");
|
2017-04-07 15:17:30 +07:00
|
|
|
bp->dev->stats.rx_dropped++;
|
2017-12-01 01:19:56 +07:00
|
|
|
queue->stats.rx_dropped++;
|
2013-06-05 04:57:12 +07:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
/* now everything is ready for receiving the packet */
|
2017-12-01 01:19:15 +07:00
|
|
|
queue->rx_skbuff[entry] = NULL;
|
2015-05-06 23:57:17 +07:00
|
|
|
len = ctrl & bp->rx_frm_len_mask;
|
2013-06-05 04:57:12 +07:00
|
|
|
|
|
|
|
netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);
|
|
|
|
|
|
|
|
skb_put(skb, len);
|
|
|
|
dma_unmap_single(&bp->pdev->dev, addr,
|
2014-03-04 23:46:40 +07:00
|
|
|
bp->rx_buffer_size, DMA_FROM_DEVICE);
|
2013-06-05 04:57:12 +07:00
|
|
|
|
|
|
|
skb->protocol = eth_type_trans(skb, bp->dev);
|
|
|
|
skb_checksum_none_assert(skb);
|
2014-07-24 18:51:01 +07:00
|
|
|
if (bp->dev->features & NETIF_F_RXCSUM &&
|
|
|
|
!(bp->dev->flags & IFF_PROMISC) &&
|
|
|
|
GEM_BFEXT(RX_CSUM, ctrl) & GEM_RX_CSUM_CHECKED_MASK)
|
|
|
|
skb->ip_summed = CHECKSUM_UNNECESSARY;
|
2013-06-05 04:57:12 +07:00
|
|
|
|
2017-04-07 15:17:30 +07:00
|
|
|
bp->dev->stats.rx_packets++;
|
2017-12-01 01:19:56 +07:00
|
|
|
queue->stats.rx_packets++;
|
2017-04-07 15:17:30 +07:00
|
|
|
bp->dev->stats.rx_bytes += skb->len;
|
2017-12-01 01:19:56 +07:00
|
|
|
queue->stats.rx_bytes += skb->len;
|
2013-06-05 04:57:12 +07:00
|
|
|
|
2017-06-29 13:14:16 +07:00
|
|
|
gem_ptp_do_rxstamp(bp, skb, desc);
|
|
|
|
|
2013-06-05 04:57:12 +07:00
|
|
|
#if defined(DEBUG) && defined(VERBOSE_DEBUG)
|
|
|
|
netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
|
|
|
|
skb->len, skb->csum);
|
|
|
|
print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1,
|
2014-12-11 17:15:54 +07:00
|
|
|
skb_mac_header(skb), 16, true);
|
2013-06-05 04:57:12 +07:00
|
|
|
print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1,
|
|
|
|
skb->data, 32, true);
|
|
|
|
#endif
|
|
|
|
|
|
|
|
netif_receive_skb(skb);
|
|
|
|
}
|
|
|
|
|
2017-12-01 01:19:15 +07:00
|
|
|
gem_rx_refill(queue);
|
2013-06-05 04:57:12 +07:00
|
|
|
|
|
|
|
return count;
|
|
|
|
}
|
|
|
|
|
2017-12-01 01:19:15 +07:00
|
|
|
static int macb_rx_frame(struct macb_queue *queue, unsigned int first_frag,
|
2006-11-09 20:51:17 +07:00
|
|
|
unsigned int last_frag)
|
|
|
|
{
|
|
|
|
unsigned int len;
|
|
|
|
unsigned int frag;
|
2012-10-31 13:04:58 +07:00
|
|
|
unsigned int offset;
|
2006-11-09 20:51:17 +07:00
|
|
|
struct sk_buff *skb;
|
2012-10-31 13:04:55 +07:00
|
|
|
struct macb_dma_desc *desc;
|
2017-12-01 01:19:15 +07:00
|
|
|
struct macb *bp = queue->bp;
|
2006-11-09 20:51:17 +07:00
|
|
|
|
2017-12-01 01:19:15 +07:00
|
|
|
desc = macb_rx_desc(queue, last_frag);
|
2015-05-06 23:57:17 +07:00
|
|
|
len = desc->ctrl & bp->rx_frm_len_mask;
|
2006-11-09 20:51:17 +07:00
|
|
|
|
2012-10-31 13:04:52 +07:00
|
|
|
netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
|
2016-10-19 21:56:57 +07:00
|
|
|
macb_rx_ring_wrap(bp, first_frag),
|
|
|
|
macb_rx_ring_wrap(bp, last_frag), len);
|
2006-11-09 20:51:17 +07:00
|
|
|
|
2016-03-30 09:11:12 +07:00
|
|
|
/* The ethernet header starts NET_IP_ALIGN bytes into the
|
2012-10-31 13:04:58 +07:00
|
|
|
* first buffer. Since the header is 14 bytes, this makes the
|
|
|
|
* payload word-aligned.
|
|
|
|
*
|
|
|
|
* Instead of calling skb_reserve(NET_IP_ALIGN), we just copy
|
|
|
|
* the two padding bytes into the skb so that we avoid hitting
|
|
|
|
* the slowpath in memcpy(), and pull them off afterwards.
|
|
|
|
*/
|
|
|
|
skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN);
|
2006-11-09 20:51:17 +07:00
|
|
|
if (!skb) {
|
2017-04-07 15:17:30 +07:00
|
|
|
bp->dev->stats.rx_dropped++;
|
2012-10-31 13:04:55 +07:00
|
|
|
for (frag = first_frag; ; frag++) {
|
2017-12-01 01:19:15 +07:00
|
|
|
desc = macb_rx_desc(queue, frag);
|
2012-10-31 13:04:55 +07:00
|
|
|
desc->addr &= ~MACB_BIT(RX_USED);
|
2006-11-09 20:51:17 +07:00
|
|
|
if (frag == last_frag)
|
|
|
|
break;
|
|
|
|
}
|
2012-10-31 13:04:51 +07:00
|
|
|
|
|
|
|
/* Make descriptor updates visible to hardware */
|
2006-11-09 20:51:17 +07:00
|
|
|
wmb();
|
2012-10-31 13:04:51 +07:00
|
|
|
|
2006-11-09 20:51:17 +07:00
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
2012-10-31 13:04:58 +07:00
|
|
|
offset = 0;
|
|
|
|
len += NET_IP_ALIGN;
|
2010-09-03 03:07:41 +07:00
|
|
|
skb_checksum_none_assert(skb);
|
2006-11-09 20:51:17 +07:00
|
|
|
skb_put(skb, len);
|
|
|
|
|
2012-10-31 13:04:55 +07:00
|
|
|
for (frag = first_frag; ; frag++) {
|
2013-06-05 04:57:11 +07:00
|
|
|
unsigned int frag_len = bp->rx_buffer_size;
|
2006-11-09 20:51:17 +07:00
|
|
|
|
|
|
|
if (offset + frag_len > len) {
|
2016-03-25 16:37:34 +07:00
|
|
|
if (unlikely(frag != last_frag)) {
|
|
|
|
dev_kfree_skb_any(skb);
|
|
|
|
return -1;
|
|
|
|
}
|
2006-11-09 20:51:17 +07:00
|
|
|
frag_len = len - offset;
|
|
|
|
}
|
2007-03-31 21:55:19 +07:00
|
|
|
skb_copy_to_linear_data_offset(skb, offset,
|
2017-12-01 01:19:15 +07:00
|
|
|
macb_rx_buffer(queue, frag),
|
2016-03-30 09:11:13 +07:00
|
|
|
frag_len);
|
2013-06-05 04:57:11 +07:00
|
|
|
offset += bp->rx_buffer_size;
|
2017-12-01 01:19:15 +07:00
|
|
|
desc = macb_rx_desc(queue, frag);
|
2012-10-31 13:04:55 +07:00
|
|
|
desc->addr &= ~MACB_BIT(RX_USED);
|
2006-11-09 20:51:17 +07:00
|
|
|
|
|
|
|
if (frag == last_frag)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2012-10-31 13:04:51 +07:00
|
|
|
/* Make descriptor updates visible to hardware */
|
|
|
|
wmb();
|
|
|
|
|
2012-10-31 13:04:58 +07:00
|
|
|
__skb_pull(skb, NET_IP_ALIGN);
|
2006-11-09 20:51:17 +07:00
|
|
|
skb->protocol = eth_type_trans(skb, bp->dev);
|
|
|
|
|
2017-04-07 15:17:30 +07:00
|
|
|
bp->dev->stats.rx_packets++;
|
|
|
|
bp->dev->stats.rx_bytes += skb->len;
|
2012-10-31 13:04:52 +07:00
|
|
|
netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
|
2016-03-30 09:11:13 +07:00
|
|
|
skb->len, skb->csum);
|
2006-11-09 20:51:17 +07:00
|
|
|
netif_receive_skb(skb);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-12-01 01:19:15 +07:00
|
|
|
static inline void macb_init_rx_ring(struct macb_queue *queue)
|
2016-03-25 16:37:34 +07:00
|
|
|
{
|
2017-12-01 01:19:15 +07:00
|
|
|
struct macb *bp = queue->bp;
|
2016-03-25 16:37:34 +07:00
|
|
|
dma_addr_t addr;
|
2017-01-27 22:08:20 +07:00
|
|
|
struct macb_dma_desc *desc = NULL;
|
2016-03-25 16:37:34 +07:00
|
|
|
int i;
|
|
|
|
|
2017-12-01 01:19:15 +07:00
|
|
|
addr = queue->rx_buffers_dma;
|
2016-10-19 21:56:57 +07:00
|
|
|
for (i = 0; i < bp->rx_ring_size; i++) {
|
2017-12-01 01:19:15 +07:00
|
|
|
desc = macb_rx_desc(queue, i);
|
2017-01-27 22:08:20 +07:00
|
|
|
macb_set_addr(bp, desc, addr);
|
|
|
|
desc->ctrl = 0;
|
2016-03-25 16:37:34 +07:00
|
|
|
addr += bp->rx_buffer_size;
|
|
|
|
}
|
2017-01-27 22:08:20 +07:00
|
|
|
desc->addr |= MACB_BIT(RX_WRAP);
|
2017-12-01 01:19:15 +07:00
|
|
|
queue->rx_tail = 0;
|
2016-03-25 16:37:34 +07:00
|
|
|
}
|
|
|
|
|
2017-12-01 01:19:15 +07:00
|
|
|
static int macb_rx(struct macb_queue *queue, int budget)
|
2006-11-09 20:51:17 +07:00
|
|
|
{
|
2017-12-01 01:19:15 +07:00
|
|
|
struct macb *bp = queue->bp;
|
2016-03-25 16:37:34 +07:00
|
|
|
bool reset_rx_queue = false;
|
2006-11-09 20:51:17 +07:00
|
|
|
int received = 0;
|
2012-10-31 13:04:55 +07:00
|
|
|
unsigned int tail;
|
2006-11-09 20:51:17 +07:00
|
|
|
int first_frag = -1;
|
|
|
|
|
2017-12-01 01:19:15 +07:00
|
|
|
for (tail = queue->rx_tail; budget > 0; tail++) {
|
|
|
|
struct macb_dma_desc *desc = macb_rx_desc(queue, tail);
|
2017-01-27 22:08:20 +07:00
|
|
|
u32 ctrl;
|
2006-11-09 20:51:17 +07:00
|
|
|
|
2012-10-31 13:04:51 +07:00
|
|
|
/* Make hw descriptor updates visible to CPU */
|
2006-11-09 20:51:17 +07:00
|
|
|
rmb();
|
2012-10-31 13:04:51 +07:00
|
|
|
|
2017-01-27 22:08:20 +07:00
|
|
|
if (!(desc->addr & MACB_BIT(RX_USED)))
|
2006-11-09 20:51:17 +07:00
|
|
|
break;
|
|
|
|
|
2018-12-17 20:05:41 +07:00
|
|
|
/* Ensure ctrl is at least as up-to-date as addr */
|
|
|
|
dma_rmb();
|
|
|
|
|
|
|
|
ctrl = desc->ctrl;
|
|
|
|
|
2006-11-09 20:51:17 +07:00
|
|
|
if (ctrl & MACB_BIT(RX_SOF)) {
|
|
|
|
if (first_frag != -1)
|
2017-12-01 01:19:15 +07:00
|
|
|
discard_partial_frame(queue, first_frag, tail);
|
2006-11-09 20:51:17 +07:00
|
|
|
first_frag = tail;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (ctrl & MACB_BIT(RX_EOF)) {
|
|
|
|
int dropped;
|
2016-03-25 16:37:34 +07:00
|
|
|
|
|
|
|
if (unlikely(first_frag == -1)) {
|
|
|
|
reset_rx_queue = true;
|
|
|
|
continue;
|
|
|
|
}
|
2006-11-09 20:51:17 +07:00
|
|
|
|
2017-12-01 01:19:15 +07:00
|
|
|
dropped = macb_rx_frame(queue, first_frag, tail);
|
2006-11-09 20:51:17 +07:00
|
|
|
first_frag = -1;
|
2016-03-25 16:37:34 +07:00
|
|
|
if (unlikely(dropped < 0)) {
|
|
|
|
reset_rx_queue = true;
|
|
|
|
continue;
|
|
|
|
}
|
2006-11-09 20:51:17 +07:00
|
|
|
if (!dropped) {
|
|
|
|
received++;
|
|
|
|
budget--;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-03-25 16:37:34 +07:00
|
|
|
if (unlikely(reset_rx_queue)) {
|
|
|
|
unsigned long flags;
|
|
|
|
u32 ctrl;
|
|
|
|
|
|
|
|
netdev_err(bp->dev, "RX queue corruption: reset it\n");
|
|
|
|
|
|
|
|
spin_lock_irqsave(&bp->lock, flags);
|
|
|
|
|
|
|
|
ctrl = macb_readl(bp, NCR);
|
|
|
|
macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
|
|
|
|
|
2017-12-01 01:19:15 +07:00
|
|
|
macb_init_rx_ring(queue);
|
|
|
|
queue_writel(queue, RBQP, queue->rx_ring_dma);
|
2016-03-25 16:37:34 +07:00
|
|
|
|
|
|
|
macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
|
|
|
|
|
|
|
|
spin_unlock_irqrestore(&bp->lock, flags);
|
|
|
|
return received;
|
|
|
|
}
|
|
|
|
|
2006-11-09 20:51:17 +07:00
|
|
|
if (first_frag != -1)
|
2017-12-01 01:19:15 +07:00
|
|
|
queue->rx_tail = first_frag;
|
2006-11-09 20:51:17 +07:00
|
|
|
else
|
2017-12-01 01:19:15 +07:00
|
|
|
queue->rx_tail = tail;
|
2006-11-09 20:51:17 +07:00
|
|
|
|
|
|
|
return received;
|
|
|
|
}
|
|
|
|
|
[NET]: Make NAPI polling independent of struct net_device objects.
Several devices have multiple independent RX queues per net
device, and some have a single interrupt doorbell for several
queues.
In either case, it's easier to support layouts like that if the
structure representing the poll is independent from the net
device itself.
The signature of the ->poll() callback goes from:
int foo_poll(struct net_device *dev, int *budget)
to
int foo_poll(struct napi_struct *napi, int budget)
The caller is returned the number of RX packets processed (or
the number of "NAPI credits" consumed, if you want to get
abstract). The callee no longer messes around bumping
dev->quota, *budget, etc., because that is all handled in the
caller upon return.
The napi_struct is to be embedded in the device driver private data
structures.
Furthermore, it is the driver's responsibility to disable all NAPI
instances in its ->stop() device close handler. Since the
napi_struct is privatized into the driver's private data structures,
only the driver knows how to get at all of the napi_struct instances
it may have per-device.
With lots of help and suggestions from Rusty Russell, Roland Dreier,
Michael Chan, Jeff Garzik, and Jamal Hadi Salim.
Bug fixes from Thomas Graf, Roland Dreier, Peter Zijlstra,
Joseph Fannin, Scott Wood, Hans J. Koch, and Michael Chan.
[ Ported to current tree and all drivers converted. Integrated
Stephen's follow-on kerneldoc additions, and restored poll_list
handling to the old style to fix mutual exclusion issues. -DaveM ]
Signed-off-by: Stephen Hemminger <shemminger@linux-foundation.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
2007-10-04 06:41:36 +07:00
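To make the contract above concrete before the interleaved macb_poll() below,
here is a hedged, generic sketch of a poll callback under this scheme (the
function name is illustrative; the queue and register names follow this
driver):

static int sketch_poll(struct napi_struct *napi, int budget)
{
	/* The napi_struct is embedded in the driver's own queue state. */
	struct macb_queue *queue = container_of(napi, struct macb_queue, napi);
	int work_done = 0;

	/* ... process at most 'budget' received frames, bumping work_done ... */

	if (work_done < budget) {
		/* Ring is drained: leave polled mode and re-enable the
		 * RX interrupts that the IRQ handler masked off.
		 */
		napi_complete_done(napi, work_done);
		queue_writel(queue, IER, MACB_RX_INT_FLAGS);
	}

	return work_done;
}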
|
|
|
static int macb_poll(struct napi_struct *napi, int budget)
|
2006-11-09 20:51:17 +07:00
|
|
|
{
|
2017-12-01 01:19:15 +07:00
|
|
|
struct macb_queue *queue = container_of(napi, struct macb_queue, napi);
|
|
|
|
struct macb *bp = queue->bp;
|
2007-10-04 06:41:36 +07:00
|
|
|
int work_done;
|
2006-11-09 20:51:17 +07:00
|
|
|
u32 status;
|
|
|
|
|
|
|
|
status = macb_readl(bp, RSR);
|
|
|
|
macb_writel(bp, RSR, status);
|
|
|
|
|
2012-10-31 13:04:52 +07:00
|
|
|
netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
|
2016-03-30 09:11:13 +07:00
|
|
|
(unsigned long)status, budget);
|
2006-11-09 20:51:17 +07:00
|
|
|
|
2017-12-01 01:19:15 +07:00
|
|
|
work_done = bp->macbgem_ops.mog_rx(queue, budget);
|
2010-10-25 08:44:22 +07:00
|
|
|
if (work_done < budget) {
|
2017-01-30 23:22:01 +07:00
|
|
|
napi_complete_done(napi, work_done);
|
2006-11-09 20:51:17 +07:00
|
|
|
|
2013-02-12 17:08:48 +07:00
|
|
|
/* Packets received while interrupts were disabled */
|
|
|
|
status = macb_readl(bp, RSR);
|
2014-05-05 05:43:01 +07:00
|
|
|
if (status) {
|
2014-05-05 05:43:00 +07:00
|
|
|
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
|
2017-12-01 01:19:15 +07:00
|
|
|
queue_writel(queue, ISR, MACB_BIT(RCOMP));
|
2013-02-12 17:08:48 +07:00
|
|
|
napi_reschedule(napi);
|
2014-05-05 05:43:00 +07:00
|
|
|
} else {
|
2017-12-01 01:19:15 +07:00
|
|
|
queue_writel(queue, IER, MACB_RX_INT_FLAGS);
|
2014-05-05 05:43:00 +07:00
|
|
|
}
|
2010-10-25 08:44:22 +07:00
|
|
|
}
|
2006-11-09 20:51:17 +07:00
|
|
|
|
|
|
|
/* TODO: Handle errors */
|
|
|
|
|
2007-10-04 06:41:36 +07:00
|
|
|
return work_done;
|
2006-11-09 20:51:17 +07:00
|
|
|
}
|
|
|
|
|
2018-01-27 13:39:01 +07:00
|
|
|
static void macb_hresp_error_task(unsigned long data)
|
|
|
|
{
|
|
|
|
struct macb *bp = (struct macb *)data;
|
|
|
|
struct net_device *dev = bp->dev;
|
|
|
|
struct macb_queue *queue = bp->queues;
|
|
|
|
unsigned int q;
|
|
|
|
u32 ctrl;
|
|
|
|
|
|
|
|
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
|
|
|
|
queue_writel(queue, IDR, MACB_RX_INT_FLAGS |
|
|
|
|
MACB_TX_INT_FLAGS |
|
|
|
|
MACB_BIT(HRESP));
|
|
|
|
}
|
|
|
|
ctrl = macb_readl(bp, NCR);
|
|
|
|
ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE));
|
|
|
|
macb_writel(bp, NCR, ctrl);
|
|
|
|
|
|
|
|
netif_tx_stop_all_queues(dev);
|
|
|
|
netif_carrier_off(dev);
|
|
|
|
|
|
|
|
bp->macbgem_ops.mog_init_rings(bp);
|
|
|
|
|
|
|
|
/* Initialize TX and RX buffers */
|
|
|
|
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
|
|
|
|
queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
|
|
|
|
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
|
|
|
|
if (bp->hw_dma_cap & HW_DMA_CAP_64B)
|
|
|
|
queue_writel(queue, RBQPH,
|
|
|
|
upper_32_bits(queue->rx_ring_dma));
|
|
|
|
#endif
|
|
|
|
queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
|
|
|
|
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
|
|
|
|
if (bp->hw_dma_cap & HW_DMA_CAP_64B)
|
|
|
|
queue_writel(queue, TBQPH,
|
|
|
|
upper_32_bits(queue->tx_ring_dma));
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/* Enable interrupts */
|
|
|
|
queue_writel(queue, IER,
|
|
|
|
MACB_RX_INT_FLAGS |
|
|
|
|
MACB_TX_INT_FLAGS |
|
|
|
|
MACB_BIT(HRESP));
|
|
|
|
}
|
|
|
|
|
|
|
|
ctrl |= MACB_BIT(RE) | MACB_BIT(TE);
|
|
|
|
macb_writel(bp, NCR, ctrl);
|
|
|
|
|
|
|
|
netif_carrier_on(dev);
|
|
|
|
netif_tx_start_all_queues(dev);
|
|
|
|
}
|
|
|
|
|
2018-12-17 17:02:42 +07:00
|
|
|
static void macb_tx_restart(struct macb_queue *queue)
|
|
|
|
{
|
|
|
|
unsigned int head = queue->tx_head;
|
|
|
|
unsigned int tail = queue->tx_tail;
|
|
|
|
struct macb *bp = queue->bp;
|
|
|
|
|
|
|
|
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
|
|
|
|
queue_writel(queue, ISR, MACB_BIT(TXUBR));
|
|
|
|
|
|
|
|
if (head == tail)
|
|
|
|
return;
|
|
|
|
|
|
|
|
macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
|
|
|
|
}
|
|
|
|
|
2006-11-09 20:51:17 +07:00
|
|
|
static irqreturn_t macb_interrupt(int irq, void *dev_id)
|
|
|
|
{
|
2014-12-12 19:26:44 +07:00
|
|
|
struct macb_queue *queue = dev_id;
|
|
|
|
struct macb *bp = queue->bp;
|
|
|
|
struct net_device *dev = bp->dev;
|
2015-05-06 03:00:25 +07:00
|
|
|
u32 status, ctrl;
|
2006-11-09 20:51:17 +07:00
|
|
|
|
2014-12-12 19:26:44 +07:00
|
|
|
status = queue_readl(queue, ISR);
|
2006-11-09 20:51:17 +07:00
|
|
|
|
|
|
|
if (unlikely(!status))
|
|
|
|
return IRQ_NONE;
|
|
|
|
|
|
|
|
spin_lock(&bp->lock);
|
|
|
|
|
|
|
|
while (status) {
|
|
|
|
/* close possible race with dev_close */
|
|
|
|
if (unlikely(!netif_running(dev))) {
|
2014-12-12 19:26:44 +07:00
|
|
|
queue_writel(queue, IDR, -1);
|
2016-01-15 02:27:27 +07:00
|
|
|
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
|
|
|
|
queue_writel(queue, ISR, -1);
|
2006-11-09 20:51:17 +07:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2014-12-12 19:26:44 +07:00
|
|
|
netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n",
|
|
|
|
(unsigned int)(queue - bp->queues),
|
|
|
|
(unsigned long)status);
|
2012-10-31 13:04:52 +07:00
|
|
|
|
2006-11-09 20:51:17 +07:00
|
|
|
if (status & MACB_RX_INT_FLAGS) {
|
2016-03-30 09:11:12 +07:00
|
|
|
/* There's no point taking any more interrupts
|
2010-10-25 08:44:22 +07:00
|
|
|
* until we have processed the buffers. The
|
|
|
|
* scheduling call may fail if the poll routine
|
|
|
|
* is already scheduled, so disable interrupts
|
|
|
|
* now.
|
|
|
|
*/
|
2014-12-12 19:26:44 +07:00
|
|
|
queue_writel(queue, IDR, MACB_RX_INT_FLAGS);
|
2013-05-14 10:00:16 +07:00
|
|
|
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
|
2014-12-12 19:26:44 +07:00
|
|
|
queue_writel(queue, ISR, MACB_BIT(RCOMP));
|
2010-10-25 08:44:22 +07:00
|
|
|
|
2017-12-01 01:19:15 +07:00
|
|
|
if (napi_schedule_prep(&queue->napi)) {
|
2012-10-31 13:04:52 +07:00
|
|
|
netdev_vdbg(bp->dev, "scheduling RX softirq\n");
|
2017-12-01 01:19:15 +07:00
|
|
|
__napi_schedule(&queue->napi);
|
2006-11-09 20:51:17 +07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-10-31 13:04:57 +07:00
|
|
|
if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
|
2014-12-12 19:26:44 +07:00
|
|
|
queue_writel(queue, IDR, MACB_TX_INT_FLAGS);
|
|
|
|
schedule_work(&queue->tx_error_task);
|
2014-05-05 05:42:59 +07:00
|
|
|
|
|
|
|
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
|
2014-12-12 19:26:44 +07:00
|
|
|
queue_writel(queue, ISR, MACB_TX_ERR_FLAGS);
|
2014-05-05 05:42:59 +07:00
|
|
|
|
2012-10-31 13:04:57 +07:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (status & MACB_BIT(TCOMP))
|
2014-12-12 19:26:44 +07:00
|
|
|
macb_tx_interrupt(queue);
|
2006-11-09 20:51:17 +07:00
|
|
|
|
2018-12-17 17:02:42 +07:00
|
|
|
if (status & MACB_BIT(TXUBR))
|
|
|
|
macb_tx_restart(queue);
|
|
|
|
|
2016-03-30 09:11:12 +07:00
|
|
|
/* Link change detection isn't possible with RMII, so we'll
|
2006-11-09 20:51:17 +07:00
|
|
|
* add that if/when we get our hands on a full-blown MII PHY.
|
|
|
|
*/
|
|
|
|
|
2015-05-14 05:01:36 +07:00
|
|
|
/* There is a hardware issue under heavy load where DMA can
|
|
|
|
* stop, this causes endless "used buffer descriptor read"
|
|
|
|
* interrupts but it can be cleared by re-enabling RX. See
|
|
|
|
* the at91 manual, section 41.3.1 or the Zynq manual
|
|
|
|
* section 16.7.4 for details.
|
|
|
|
*/
|
2015-05-06 03:00:25 +07:00
|
|
|
if (status & MACB_BIT(RXUBR)) {
|
|
|
|
ctrl = macb_readl(bp, NCR);
|
|
|
|
macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
|
2016-11-28 20:55:00 +07:00
|
|
|
wmb();
|
2015-05-06 03:00:25 +07:00
|
|
|
macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
|
|
|
|
|
|
|
|
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
|
2016-03-24 21:40:04 +07:00
|
|
|
queue_writel(queue, ISR, MACB_BIT(RXUBR));
|
2015-05-06 03:00:25 +07:00
|
|
|
}
|
|
|
|
|
2011-04-13 12:03:24 +07:00
|
|
|
if (status & MACB_BIT(ISR_ROVR)) {
|
|
|
|
/* We missed at least one packet */
|
2011-11-08 17:12:32 +07:00
|
|
|
if (macb_is_gem(bp))
|
|
|
|
bp->hw_stats.gem.rx_overruns++;
|
|
|
|
else
|
|
|
|
bp->hw_stats.macb.rx_overruns++;
|
2014-05-05 05:42:59 +07:00
|
|
|
|
|
|
|
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
|
2014-12-12 19:26:44 +07:00
|
|
|
queue_writel(queue, ISR, MACB_BIT(ISR_ROVR));
|
2011-04-13 12:03:24 +07:00
|
|
|
}
|
|
|
|
|
2006-11-09 20:51:17 +07:00
|
|
|
if (status & MACB_BIT(HRESP)) {
|
2018-01-27 13:39:01 +07:00
|
|
|
tasklet_schedule(&bp->hresp_err_tasklet);
|
2011-03-09 03:27:08 +07:00
|
|
|
netdev_err(dev, "DMA bus error: HRESP not OK\n");
|
2014-05-05 05:42:59 +07:00
|
|
|
|
|
|
|
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
|
2014-12-12 19:26:44 +07:00
|
|
|
queue_writel(queue, ISR, MACB_BIT(HRESP));
|
2006-11-09 20:51:17 +07:00
|
|
|
}
|
2014-12-12 19:26:44 +07:00
|
|
|
status = queue_readl(queue, ISR);
|
2006-11-09 20:51:17 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
spin_unlock(&bp->lock);
|
|
|
|
|
|
|
|
return IRQ_HANDLED;
|
|
|
|
}
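
The multiqueue commit message earlier in this log notes that queue 0 keeps the legacy ISR/IER/IDR/IMR/TBQP/RBQP registers while queues 1..7 use the indexed variants, with struct macb_queue hiding the difference. A minimal sketch of that indirection, assuming illustrative SKETCH_* names and register offsets (not the driver's actual macros or values):

#define SKETCH_ISR0	0x0024			/* assumed legacy ISR offset */
#define SKETCH_ISR_N(n)	(0x0400 + ((n) - 1) * 4)	/* assumed ISR[1..7] offsets */

struct sketch_queue {
	void __iomem	*regs;	/* controller base address */
	unsigned int	ISR;	/* per-queue ISR offset */
};

static void sketch_queue_init(struct sketch_queue *q, void __iomem *regs,
			      unsigned int hw_q)
{
	q->regs = regs;
	/* queue 0 keeps the legacy register; higher queues use ISR[n] */
	q->ISR = hw_q ? SKETCH_ISR_N(hw_q) : SKETCH_ISR0;
}

static inline u32 sketch_queue_isr_read(struct sketch_queue *q)
{
	/* one accessor serves every queue, whatever its register bank */
	return readl_relaxed(q->regs + q->ISR);
}

One accessor per register lets a single code path iterate over bp->queues, which is exactly the pattern macb_poll_controller() below relies on.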
|
|
|
|
|
2009-05-05 01:08:41 +07:00
|
|
|
#ifdef CONFIG_NET_POLL_CONTROLLER
|
2016-03-30 09:11:12 +07:00
|
|
|
/* Polling receive - used by netconsole and other diagnostic tools
|
2009-05-05 01:08:41 +07:00
|
|
|
* to allow network i/o with interrupts disabled.
|
|
|
|
*/
|
|
|
|
static void macb_poll_controller(struct net_device *dev)
|
|
|
|
{
|
2014-12-12 19:26:44 +07:00
|
|
|
struct macb *bp = netdev_priv(dev);
|
|
|
|
struct macb_queue *queue;
|
2009-05-05 01:08:41 +07:00
|
|
|
unsigned long flags;
|
2014-12-12 19:26:44 +07:00
|
|
|
unsigned int q;
|
2009-05-05 01:08:41 +07:00
|
|
|
|
|
|
|
local_irq_save(flags);
|
2014-12-12 19:26:44 +07:00
|
|
|
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
|
|
|
|
macb_interrupt(dev->irq, queue);
|
2009-05-05 01:08:41 +07:00
|
|
|
local_irq_restore(flags);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2014-07-24 18:50:59 +07:00
|
|
|
static unsigned int macb_tx_map(struct macb *bp,
|
2014-12-12 19:26:44 +07:00
|
|
|
struct macb_queue *queue,
|
2016-11-16 17:02:34 +07:00
|
|
|
struct sk_buff *skb,
|
|
|
|
unsigned int hdrlen)
|
2006-11-09 20:51:17 +07:00
|
|
|
{
|
|
|
|
dma_addr_t mapping;
|
2014-12-12 19:26:44 +07:00
|
|
|
unsigned int len, entry, i, tx_head = queue->tx_head;
|
2014-07-24 18:50:59 +07:00
|
|
|
struct macb_tx_skb *tx_skb = NULL;
|
2012-10-31 13:04:55 +07:00
|
|
|
struct macb_dma_desc *desc;
|
2014-07-24 18:50:59 +07:00
|
|
|
unsigned int offset, size, count = 0;
|
|
|
|
unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags;
|
2016-11-16 17:02:34 +07:00
|
|
|
unsigned int eof = 1, mss_mfs = 0;
|
|
|
|
u32 ctrl, lso_ctrl = 0, seq_ctrl = 0;
|
|
|
|
|
|
|
|
/* LSO */
|
|
|
|
if (skb_shinfo(skb)->gso_size != 0) {
|
|
|
|
if (ip_hdr(skb)->protocol == IPPROTO_UDP)
|
|
|
|
/* UDP - UFO */
|
|
|
|
lso_ctrl = MACB_LSO_UFO_ENABLE;
|
|
|
|
else
|
|
|
|
/* TCP - TSO */
|
|
|
|
lso_ctrl = MACB_LSO_TSO_ENABLE;
|
|
|
|
}
|
2014-07-24 18:50:59 +07:00
|
|
|
|
|
|
|
/* First, map non-paged data */
|
|
|
|
len = skb_headlen(skb);
|
2016-11-16 17:02:34 +07:00
|
|
|
|
|
|
|
/* first buffer length */
|
|
|
|
size = hdrlen;
|
|
|
|
|
2014-07-24 18:50:59 +07:00
|
|
|
offset = 0;
|
|
|
|
while (len) {
|
2016-10-19 21:56:57 +07:00
|
|
|
entry = macb_tx_ring_wrap(bp, tx_head);
|
2014-12-12 19:26:44 +07:00
|
|
|
tx_skb = &queue->tx_skb[entry];
|
2014-07-24 18:50:59 +07:00
|
|
|
|
|
|
|
mapping = dma_map_single(&bp->pdev->dev,
|
|
|
|
skb->data + offset,
|
|
|
|
size, DMA_TO_DEVICE);
|
|
|
|
if (dma_mapping_error(&bp->pdev->dev, mapping))
|
|
|
|
goto dma_error;
|
|
|
|
|
|
|
|
/* Save info to properly release resources */
|
|
|
|
tx_skb->skb = NULL;
|
|
|
|
tx_skb->mapping = mapping;
|
|
|
|
tx_skb->size = size;
|
|
|
|
tx_skb->mapped_as_page = false;
|
|
|
|
|
|
|
|
len -= size;
|
|
|
|
offset += size;
|
|
|
|
count++;
|
|
|
|
tx_head++;
|
2016-11-16 17:02:34 +07:00
|
|
|
|
|
|
|
size = min(len, bp->max_tx_length);
|
2014-07-24 18:50:59 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Then, map paged data from fragments */
|
|
|
|
for (f = 0; f < nr_frags; f++) {
|
|
|
|
const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
|
|
|
|
|
|
|
|
len = skb_frag_size(frag);
|
|
|
|
offset = 0;
|
|
|
|
while (len) {
|
|
|
|
size = min(len, bp->max_tx_length);
|
2016-10-19 21:56:57 +07:00
|
|
|
entry = macb_tx_ring_wrap(bp, tx_head);
|
2014-12-12 19:26:44 +07:00
|
|
|
tx_skb = &queue->tx_skb[entry];
|
2014-07-24 18:50:59 +07:00
|
|
|
|
|
|
|
mapping = skb_frag_dma_map(&bp->pdev->dev, frag,
|
|
|
|
offset, size, DMA_TO_DEVICE);
|
|
|
|
if (dma_mapping_error(&bp->pdev->dev, mapping))
|
|
|
|
goto dma_error;
|
|
|
|
|
|
|
|
/* Save info to properly release resources */
|
|
|
|
tx_skb->skb = NULL;
|
|
|
|
tx_skb->mapping = mapping;
|
|
|
|
tx_skb->size = size;
|
|
|
|
tx_skb->mapped_as_page = true;
|
|
|
|
|
|
|
|
len -= size;
|
|
|
|
offset += size;
|
|
|
|
count++;
|
|
|
|
tx_head++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Should never happen */
|
2016-03-30 09:11:13 +07:00
|
|
|
if (unlikely(!tx_skb)) {
|
2014-07-24 18:50:59 +07:00
|
|
|
netdev_err(bp->dev, "BUG! empty skb!\n");
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* This is the last buffer of the frame: save socket buffer */
|
|
|
|
tx_skb->skb = skb;
|
|
|
|
|
|
|
|
/* Update TX ring: update buffer descriptors in reverse order
|
|
|
|
* to avoid a race condition
|
|
|
|
*/
|
|
|
|
|
|
|
|
/* Set 'TX_USED' bit in buffer descriptor at tx_head position
|
|
|
|
* to mark the end of the TX queue
|
|
|
|
*/
|
|
|
|
i = tx_head;
|
2016-10-19 21:56:57 +07:00
|
|
|
entry = macb_tx_ring_wrap(bp, i);
|
2014-07-24 18:50:59 +07:00
|
|
|
ctrl = MACB_BIT(TX_USED);
|
2017-01-27 22:08:20 +07:00
|
|
|
desc = macb_tx_desc(queue, entry);
|
2014-07-24 18:50:59 +07:00
|
|
|
desc->ctrl = ctrl;
|
|
|
|
|
2016-11-16 17:02:34 +07:00
|
|
|
if (lso_ctrl) {
|
|
|
|
if (lso_ctrl == MACB_LSO_UFO_ENABLE)
|
|
|
|
/* include header and FCS in value given to h/w */
|
|
|
|
mss_mfs = skb_shinfo(skb)->gso_size +
|
|
|
|
skb_transport_offset(skb) +
|
|
|
|
ETH_FCS_LEN;
|
|
|
|
else /* TSO */ {
|
|
|
|
mss_mfs = skb_shinfo(skb)->gso_size;
|
|
|
|
/* TCP Sequence Number Source Select
|
|
|
|
* can be set only for TSO
|
|
|
|
*/
|
|
|
|
seq_ctrl = 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-07-24 18:50:59 +07:00
|
|
|
do {
|
|
|
|
i--;
|
2016-10-19 21:56:57 +07:00
|
|
|
entry = macb_tx_ring_wrap(bp, i);
|
2014-12-12 19:26:44 +07:00
|
|
|
tx_skb = &queue->tx_skb[entry];
|
2017-01-27 22:08:20 +07:00
|
|
|
desc = macb_tx_desc(queue, entry);
|
2014-07-24 18:50:59 +07:00
|
|
|
|
|
|
|
ctrl = (u32)tx_skb->size;
|
|
|
|
if (eof) {
|
|
|
|
ctrl |= MACB_BIT(TX_LAST);
|
|
|
|
eof = 0;
|
|
|
|
}
|
2016-10-19 21:56:57 +07:00
|
|
|
if (unlikely(entry == (bp->tx_ring_size - 1)))
|
2014-07-24 18:50:59 +07:00
|
|
|
ctrl |= MACB_BIT(TX_WRAP);
|
|
|
|
|
2016-11-16 17:02:34 +07:00
|
|
|
/* First descriptor is header descriptor */
|
|
|
|
if (i == queue->tx_head) {
|
|
|
|
ctrl |= MACB_BF(TX_LSO, lso_ctrl);
|
|
|
|
ctrl |= MACB_BF(TX_TCP_SEQ_SRC, seq_ctrl);
|
2018-08-07 16:25:14 +07:00
|
|
|
if ((bp->dev->features & NETIF_F_HW_CSUM) &&
|
|
|
|
skb->ip_summed != CHECKSUM_PARTIAL && !lso_ctrl)
|
|
|
|
ctrl |= MACB_BIT(TX_NOCRC);
|
2016-11-16 17:02:34 +07:00
|
|
|
} else
|
|
|
|
/* Only set MSS/MFS on payload descriptors
|
|
|
|
* (second or later descriptor)
|
|
|
|
*/
|
|
|
|
ctrl |= MACB_BF(MSS_MFS, mss_mfs);
|
|
|
|
|
2014-07-24 18:50:59 +07:00
|
|
|
/* Set TX buffer descriptor */
|
2017-01-27 22:08:20 +07:00
|
|
|
macb_set_addr(bp, desc, tx_skb->mapping);
|
2014-07-24 18:50:59 +07:00
|
|
|
/* desc->addr must be visible to hardware before clearing
|
|
|
|
* 'TX_USED' bit in desc->ctrl.
|
|
|
|
*/
|
|
|
|
wmb();
|
|
|
|
desc->ctrl = ctrl;
|
2014-12-12 19:26:44 +07:00
|
|
|
} while (i != queue->tx_head);
|
2014-07-24 18:50:59 +07:00
|
|
|
|
2014-12-12 19:26:44 +07:00
|
|
|
queue->tx_head = tx_head;
|
2014-07-24 18:50:59 +07:00
|
|
|
|
|
|
|
return count;
|
|
|
|
|
|
|
|
dma_error:
|
|
|
|
netdev_err(bp->dev, "TX DMA map failed\n");
|
|
|
|
|
2014-12-12 19:26:44 +07:00
|
|
|
for (i = queue->tx_head; i != tx_head; i++) {
|
|
|
|
tx_skb = macb_tx_skb(queue, i);
|
2014-07-24 18:50:59 +07:00
|
|
|
|
|
|
|
macb_tx_unmap(bp, tx_skb);
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-11-16 17:02:34 +07:00
|
|
|
static netdev_features_t macb_features_check(struct sk_buff *skb,
|
|
|
|
struct net_device *dev,
|
|
|
|
netdev_features_t features)
|
|
|
|
{
|
|
|
|
unsigned int nr_frags, f;
|
|
|
|
unsigned int hdrlen;
|
|
|
|
|
|
|
|
/* Validate LSO compatibility */
|
|
|
|
|
|
|
|
/* there is only one buffer */
|
|
|
|
if (!skb_is_nonlinear(skb))
|
|
|
|
return features;
|
|
|
|
|
|
|
|
/* length of header */
|
|
|
|
hdrlen = skb_transport_offset(skb);
|
|
|
|
if (ip_hdr(skb)->protocol == IPPROTO_TCP)
|
|
|
|
hdrlen += tcp_hdrlen(skb);
|
|
|
|
|
|
|
|
/* For LSO:
|
|
|
|
* When software supplies two or more payload buffers, all payload buffers
|
|
|
|
* apart from the last must be a multiple of 8 bytes in size.
|
|
|
|
*/
|
|
|
|
if (!IS_ALIGNED(skb_headlen(skb) - hdrlen, MACB_TX_LEN_ALIGN))
|
|
|
|
return features & ~MACB_NETIF_LSO;
|
|
|
|
|
|
|
|
nr_frags = skb_shinfo(skb)->nr_frags;
|
|
|
|
/* No need to check last fragment */
|
|
|
|
nr_frags--;
|
|
|
|
for (f = 0; f < nr_frags; f++) {
|
|
|
|
const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
|
|
|
|
|
|
|
|
if (!IS_ALIGNED(skb_frag_size(frag), MACB_TX_LEN_ALIGN))
|
|
|
|
return features & ~MACB_NETIF_LSO;
|
|
|
|
}
|
|
|
|
return features;
|
|
|
|
}
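
macb_features_check() above encodes the LSO buffer rule in two IS_ALIGNED() tests. A standalone restatement of the same rule, assuming MACB_TX_LEN_ALIGN is 8 (sketch_lso_payload_ok() is a hypothetical helper, not driver code):

static bool sketch_lso_payload_ok(size_t headlen, size_t hdrlen,
				  const size_t *frag_size, size_t nr_frags)
{
	/* payload in the linear area is never the last buffer when
	 * fragments follow, so it must be a multiple of 8 bytes too
	 */
	if ((headlen - hdrlen) % 8)
		return false;
	/* every fragment except the last must be a multiple of 8 bytes */
	for (size_t f = 0; f + 1 < nr_frags; f++)
		if (frag_size[f] % 8)
			return false;
	return true;
}

For example, headlen = 130 with hdrlen = 66 leaves 64 linear payload bytes (allowed), and fragment sizes 1448, 1448, 1000 pass because the trailing fragment is exempt from the alignment check.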
|
|
|
|
|
2016-09-04 23:09:47 +07:00
|
|
|
static inline int macb_clear_csum(struct sk_buff *skb)
|
|
|
|
{
|
|
|
|
/* no change for packets without checksum offloading */
|
|
|
|
if (skb->ip_summed != CHECKSUM_PARTIAL)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
/* make sure we can modify the header */
|
|
|
|
if (unlikely(skb_cow_head(skb, 0)))
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
/* initialize checksum field
|
|
|
|
* This is required - at least for Zynq, which otherwise calculates
|
|
|
|
* wrong UDP header checksums for UDP packets with UDP data length <= 2
|
|
|
|
*/
|
|
|
|
*(__sum16 *)(skb_checksum_start(skb) + skb->csum_offset) = 0;
|
|
|
|
return 0;
|
|
|
|
}
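
The store above writes through skb_checksum_start() + skb->csum_offset. For a UDP packet with CHECKSUM_PARTIAL those resolve to the UDP header's check field, so the effect is equivalent to this sketch (illustrative only, not driver code):

static inline void sketch_zero_udp_csum(struct sk_buff *skb)
{
	/* same field macb_clear_csum() zeroes via csum_offset */
	udp_hdr(skb)->check = 0;
}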
|
|
|
|
|
2018-08-07 16:25:14 +07:00
|
|
|
static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
|
|
|
|
{
|
|
|
|
bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb);
|
|
|
|
int padlen = ETH_ZLEN - (*skb)->len;
|
|
|
|
int headroom = skb_headroom(*skb);
|
|
|
|
int tailroom = skb_tailroom(*skb);
|
|
|
|
struct sk_buff *nskb;
|
|
|
|
u32 fcs;
|
|
|
|
|
|
|
|
if (!(ndev->features & NETIF_F_HW_CSUM) ||
|
|
|
|
(*skb)->ip_summed == CHECKSUM_PARTIAL ||
|
|
|
|
skb_shinfo(*skb)->gso_size) /* Not available for GSO */
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (padlen <= 0) {
|
|
|
|
/* FCS could be appended to tailroom. */
|
|
|
|
if (tailroom >= ETH_FCS_LEN)
|
|
|
|
goto add_fcs;
|
|
|
|
/* FCS could be appended by moving data to headroom. */
|
|
|
|
else if (!cloned && headroom + tailroom >= ETH_FCS_LEN)
|
|
|
|
padlen = 0;
|
|
|
|
/* No room for FCS, need to reallocate skb. */
|
|
|
|
else
|
2018-10-25 04:51:23 +07:00
|
|
|
padlen = ETH_FCS_LEN;
|
2018-08-07 16:25:14 +07:00
|
|
|
} else {
|
|
|
|
/* Add room for FCS. */
|
|
|
|
padlen += ETH_FCS_LEN;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!cloned && headroom + tailroom >= padlen) {
|
|
|
|
(*skb)->data = memmove((*skb)->head, (*skb)->data, (*skb)->len);
|
|
|
|
skb_set_tail_pointer(*skb, (*skb)->len);
|
|
|
|
} else {
|
|
|
|
nskb = skb_copy_expand(*skb, 0, padlen, GFP_ATOMIC);
|
|
|
|
if (!nskb)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
dev_kfree_skb_any(*skb);
|
|
|
|
*skb = nskb;
|
|
|
|
}
|
|
|
|
|
2019-01-03 21:59:35 +07:00
|
|
|
if (padlen > ETH_FCS_LEN)
|
|
|
|
skb_put_zero(*skb, padlen - ETH_FCS_LEN);
|
2018-08-07 16:25:14 +07:00
|
|
|
|
|
|
|
add_fcs:
|
|
|
|
/* set FCS to packet */
|
|
|
|
fcs = crc32_le(~0, (*skb)->data, (*skb)->len);
|
|
|
|
fcs = ~fcs;
|
|
|
|
|
|
|
|
skb_put_u8(*skb, fcs & 0xff);
|
|
|
|
skb_put_u8(*skb, (fcs >> 8) & 0xff);
|
|
|
|
skb_put_u8(*skb, (fcs >> 16) & 0xff);
|
|
|
|
skb_put_u8(*skb, (fcs >> 24) & 0xff);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
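
macb_pad_and_fcs() above computes the FCS with the kernel's crc32_le() and appends it least-significant byte first. A self-contained userspace sketch of the same computation, using a bitwise reflected CRC-32 with polynomial 0xEDB88320 (purely illustrative):

#include <stdint.h>
#include <stddef.h>

/* mirrors crc32_le() semantics: seed passed in, no inversion inside */
static uint32_t sketch_crc32_le(uint32_t crc, const uint8_t *buf, size_t len)
{
	while (len--) {
		crc ^= *buf++;
		for (int k = 0; k < 8; k++)
			crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1u));
	}
	return crc;
}

/* append the complemented CRC exactly as the add_fcs path does above */
static size_t sketch_append_fcs(uint8_t *frame, size_t len)
{
	uint32_t fcs = ~sketch_crc32_le(~0u, frame, len);

	frame[len + 0] = fcs & 0xff;
	frame[len + 1] = (fcs >> 8) & 0xff;
	frame[len + 2] = (fcs >> 16) & 0xff;
	frame[len + 3] = (fcs >> 24) & 0xff;
	return len + 4;
}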
|
|
|
|
|
2018-08-07 16:25:12 +07:00
|
|
|
static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
2014-07-24 18:50:59 +07:00
|
|
|
{
|
2014-12-12 19:26:44 +07:00
|
|
|
u16 queue_index = skb_get_queue_mapping(skb);
|
2014-07-24 18:50:59 +07:00
|
|
|
struct macb *bp = netdev_priv(dev);
|
2014-12-12 19:26:44 +07:00
|
|
|
struct macb_queue *queue = &bp->queues[queue_index];
|
2009-08-24 09:49:07 +07:00
|
|
|
unsigned long flags;
|
2016-11-16 17:02:34 +07:00
|
|
|
unsigned int desc_cnt, nr_frags, frag_size, f;
|
|
|
|
unsigned int hdrlen;
|
|
|
|
bool is_lso, is_udp = 0;
|
2018-08-07 16:25:12 +07:00
|
|
|
netdev_tx_t ret = NETDEV_TX_OK;
|
2016-11-16 17:02:34 +07:00
|
|
|
|
2018-08-07 16:25:13 +07:00
|
|
|
if (macb_clear_csum(skb)) {
|
|
|
|
dev_kfree_skb_any(skb);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2018-08-07 16:25:14 +07:00
|
|
|
if (macb_pad_and_fcs(&skb, dev)) {
|
|
|
|
dev_kfree_skb_any(skb);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2016-11-16 17:02:34 +07:00
|
|
|
is_lso = (skb_shinfo(skb)->gso_size != 0);
|
|
|
|
|
|
|
|
if (is_lso) {
|
|
|
|
is_udp = !!(ip_hdr(skb)->protocol == IPPROTO_UDP);
|
|
|
|
|
|
|
|
/* length of headers */
|
|
|
|
if (is_udp)
|
|
|
|
/* only queue eth + ip headers separately for UDP */
|
|
|
|
hdrlen = skb_transport_offset(skb);
|
|
|
|
else
|
|
|
|
hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
|
|
|
|
if (skb_headlen(skb) < hdrlen) {
|
|
|
|
netdev_err(bp->dev, "Error - LSO headers fragmented!!!\n");
|
|
|
|
/* if this is required, would need to copy to single buffer */
|
|
|
|
return NETDEV_TX_BUSY;
|
|
|
|
}
|
|
|
|
} else
|
|
|
|
hdrlen = min(skb_headlen(skb), bp->max_tx_length);
|
2006-11-09 20:51:17 +07:00
|
|
|
|
2012-10-31 13:04:52 +07:00
|
|
|
#if defined(DEBUG) && defined(VERBOSE_DEBUG)
|
|
|
|
netdev_vdbg(bp->dev,
|
2016-03-30 09:11:13 +07:00
|
|
|
"start_xmit: queue %hu len %u head %p data %p tail %p end %p\n",
|
|
|
|
queue_index, skb->len, skb->head, skb->data,
|
|
|
|
skb_tail_pointer(skb), skb_end_pointer(skb));
|
2011-03-09 03:27:08 +07:00
|
|
|
print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
|
|
|
|
skb->data, 16, true);
|
2006-11-09 20:51:17 +07:00
|
|
|
#endif
|
|
|
|
|
2014-07-24 18:50:59 +07:00
|
|
|
/* Count how many TX buffer descriptors are needed to send this
|
|
|
|
* socket buffer: skb fragments of jumbo frames may need to be
|
2016-03-30 09:11:13 +07:00
|
|
|
* split into many buffer descriptors.
|
2014-07-24 18:50:59 +07:00
|
|
|
*/
|
2016-11-16 17:02:34 +07:00
|
|
|
if (is_lso && (skb_headlen(skb) > hdrlen))
|
|
|
|
/* extra header descriptor if also payload in first buffer */
|
|
|
|
desc_cnt = DIV_ROUND_UP((skb_headlen(skb) - hdrlen), bp->max_tx_length) + 1;
|
|
|
|
else
|
|
|
|
desc_cnt = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length);
|
2014-07-24 18:50:59 +07:00
|
|
|
nr_frags = skb_shinfo(skb)->nr_frags;
|
|
|
|
for (f = 0; f < nr_frags; f++) {
|
|
|
|
frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
|
2016-11-16 17:02:34 +07:00
|
|
|
desc_cnt += DIV_ROUND_UP(frag_size, bp->max_tx_length);
|
2014-07-24 18:50:59 +07:00
|
|
|
}
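
/* Worked example (assuming max_tx_length = 4096; sizes are hypothetical):
 * a TSO skb with hdrlen = 66 and skb_headlen() = 1514 needs one header
 * descriptor plus DIV_ROUND_UP(1448, 4096) = 1 for the linear payload,
 * and two 16 kB fragments add 2 * DIV_ROUND_UP(16384, 4096) = 8, so
 * desc_cnt = 10 descriptors must fit in the ring checked below.
 */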
|
|
|
|
|
2009-08-24 09:49:07 +07:00
|
|
|
spin_lock_irqsave(&bp->lock, flags);
|
2006-11-09 20:51:17 +07:00
|
|
|
|
|
|
|
/* This is a hard error, log it. */
|
2016-10-19 21:56:57 +07:00
|
|
|
if (CIRC_SPACE(queue->tx_head, queue->tx_tail,
|
2016-11-16 17:02:34 +07:00
|
|
|
bp->tx_ring_size) < desc_cnt) {
|
2014-12-12 19:26:44 +07:00
|
|
|
netif_stop_subqueue(dev, queue_index);
|
2009-08-24 09:49:07 +07:00
|
|
|
spin_unlock_irqrestore(&bp->lock, flags);
|
2011-03-09 03:27:08 +07:00
|
|
|
netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
|
2014-12-12 19:26:44 +07:00
|
|
|
queue->tx_head, queue->tx_tail);
|
2009-06-12 13:22:29 +07:00
|
|
|
return NETDEV_TX_BUSY;
|
2006-11-09 20:51:17 +07:00
|
|
|
}
|
|
|
|
|
2014-07-24 18:50:59 +07:00
|
|
|
/* Map socket buffer for DMA transfer */
|
2016-11-16 17:02:34 +07:00
|
|
|
if (!macb_tx_map(bp, queue, skb, hdrlen)) {
|
2014-03-16 06:08:27 +07:00
|
|
|
dev_kfree_skb_any(skb);
|
2014-03-04 23:46:39 +07:00
|
|
|
goto unlock;
|
|
|
|
}
|
2012-10-31 13:04:55 +07:00
|
|
|
|
2012-10-31 13:04:51 +07:00
|
|
|
/* Make newly initialized descriptor visible to hardware */
|
2006-11-09 20:51:17 +07:00
|
|
|
wmb();
|
2011-06-20 04:51:28 +07:00
|
|
|
skb_tx_timestamp(skb);
|
|
|
|
|
2006-11-09 20:51:17 +07:00
|
|
|
macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
|
|
|
|
|
2016-10-19 21:56:57 +07:00
|
|
|
if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1)
|
2014-12-12 19:26:44 +07:00
|
|
|
netif_stop_subqueue(dev, queue_index);
|
2006-11-09 20:51:17 +07:00
|
|
|
|
2014-03-04 23:46:39 +07:00
|
|
|
unlock:
|
2009-08-24 09:49:07 +07:00
|
|
|
spin_unlock_irqrestore(&bp->lock, flags);
|
2006-11-09 20:51:17 +07:00
|
|
|
|
2018-08-07 16:25:12 +07:00
|
|
|
return ret;
|
2006-11-09 20:51:17 +07:00
|
|
|
}
|
|
|
|
|
2013-06-05 04:57:12 +07:00
|
|
|
static void macb_init_rx_buffer_size(struct macb *bp, size_t size)
|
2013-06-05 04:57:11 +07:00
|
|
|
{
|
|
|
|
if (!macb_is_gem(bp)) {
|
|
|
|
bp->rx_buffer_size = MACB_RX_BUFFER_SIZE;
|
|
|
|
} else {
|
2013-06-05 04:57:12 +07:00
|
|
|
bp->rx_buffer_size = size;
|
2013-06-05 04:57:11 +07:00
|
|
|
|
|
|
|
if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) {
|
2013-06-05 04:57:12 +07:00
|
|
|
netdev_dbg(bp->dev,
|
2016-03-30 09:11:13 +07:00
|
|
|
"RX buffer must be multiple of %d bytes, expanding\n",
|
|
|
|
RX_BUFFER_MULTIPLE);
|
2013-06-05 04:57:11 +07:00
|
|
|
bp->rx_buffer_size =
|
2013-06-05 04:57:12 +07:00
|
|
|
roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE);
|
2013-06-05 04:57:11 +07:00
|
|
|
}
|
|
|
|
}
|
2013-06-05 04:57:12 +07:00
|
|
|
|
2017-02-28 05:30:02 +07:00
|
|
|
netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%zu]\n",
|
2013-06-05 04:57:12 +07:00
|
|
|
bp->dev->mtu, bp->rx_buffer_size);
|
2013-06-05 04:57:11 +07:00
|
|
|
}
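As a quick sanity check of the rounding above, a standalone C sketch (plain
userspace code, not part of the driver) using equivalent roundup() arithmetic:

/* Illustration of the RX_BUFFER_MULTIPLE rounding performed by
 * macb_init_rx_buffer_size(); roundup() is re-defined here for userspace.
 */
#include <stdio.h>

#define RX_BUFFER_MULTIPLE 64
#define roundup(x, y) ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	printf("%d -> %d\n", 1536, roundup(1536, RX_BUFFER_MULTIPLE));
	printf("%d -> %d\n", 1518, roundup(1518, RX_BUFFER_MULTIPLE));
	/* prints "1536 -> 1536" and "1518 -> 1536" */
	return 0;
}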
|
|
|
|
|
2013-06-05 04:57:12 +07:00
|
|
|
static void gem_free_rx_buffers(struct macb *bp)
|
|
|
|
{
|
|
|
|
struct sk_buff *skb;
|
|
|
|
struct macb_dma_desc *desc;
|
2017-12-01 01:19:15 +07:00
|
|
|
struct macb_queue *queue;
|
2013-06-05 04:57:12 +07:00
|
|
|
dma_addr_t addr;
|
2017-12-01 01:19:15 +07:00
|
|
|
unsigned int q;
|
2013-06-05 04:57:12 +07:00
|
|
|
int i;
|
|
|
|
|
2017-12-01 01:19:15 +07:00
|
|
|
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
|
|
|
|
if (!queue->rx_skbuff)
|
|
|
|
continue;
|
2013-06-05 04:57:12 +07:00
|
|
|
|
2017-12-01 01:19:15 +07:00
|
|
|
for (i = 0; i < bp->rx_ring_size; i++) {
|
|
|
|
skb = queue->rx_skbuff[i];
|
2013-06-05 04:57:12 +07:00
|
|
|
|
2017-12-01 01:19:15 +07:00
|
|
|
if (!skb)
|
|
|
|
continue;
|
2013-06-05 04:57:12 +07:00
|
|
|
|
2017-12-01 01:19:15 +07:00
|
|
|
desc = macb_rx_desc(queue, i);
|
|
|
|
addr = macb_get_addr(bp, desc);
|
2017-01-27 22:08:20 +07:00
|
|
|
|
2017-12-01 01:19:15 +07:00
|
|
|
dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
|
|
|
|
DMA_FROM_DEVICE);
|
|
|
|
dev_kfree_skb_any(skb);
|
|
|
|
skb = NULL;
|
|
|
|
}
|
2013-06-05 04:57:12 +07:00
|
|
|
|
2017-12-01 01:19:15 +07:00
|
|
|
kfree(queue->rx_skbuff);
|
|
|
|
queue->rx_skbuff = NULL;
|
|
|
|
}
|
2013-06-05 04:57:12 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
static void macb_free_rx_buffers(struct macb *bp)
|
|
|
|
{
|
2017-12-01 01:19:15 +07:00
|
|
|
struct macb_queue *queue = &bp->queues[0];
|
|
|
|
|
|
|
|
if (queue->rx_buffers) {
|
2013-06-05 04:57:12 +07:00
|
|
|
dma_free_coherent(&bp->pdev->dev,
|
2016-10-19 21:56:57 +07:00
|
|
|
bp->rx_ring_size * bp->rx_buffer_size,
|
2017-12-01 01:19:15 +07:00
|
|
|
queue->rx_buffers, queue->rx_buffers_dma);
|
|
|
|
queue->rx_buffers = NULL;
|
2013-06-05 04:57:12 +07:00
|
|
|
}
|
|
|
|
}
|
2013-06-05 04:57:11 +07:00
|
|
|
|
2006-11-09 20:51:17 +07:00
|
|
|
static void macb_free_consistent(struct macb *bp)
|
|
|
|
{
|
2014-12-12 19:26:44 +07:00
|
|
|
struct macb_queue *queue;
|
|
|
|
unsigned int q;
|
2018-07-06 13:48:58 +07:00
|
|
|
int size;
|
2014-12-12 19:26:44 +07:00
|
|
|
|
2013-06-05 04:57:12 +07:00
|
|
|
bp->macbgem_ops.mog_free_rx_buffers(bp);
|
2014-12-12 19:26:44 +07:00
|
|
|
|
|
|
|
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
|
|
|
|
kfree(queue->tx_skb);
|
|
|
|
queue->tx_skb = NULL;
|
|
|
|
if (queue->tx_ring) {
|
2018-07-06 13:48:58 +07:00
|
|
|
size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
|
|
|
|
dma_free_coherent(&bp->pdev->dev, size,
|
2014-12-12 19:26:44 +07:00
|
|
|
queue->tx_ring, queue->tx_ring_dma);
|
|
|
|
queue->tx_ring = NULL;
|
|
|
|
}
|
2018-07-06 13:48:57 +07:00
|
|
|
if (queue->rx_ring) {
|
2018-07-06 13:48:58 +07:00
|
|
|
size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
|
|
|
|
dma_free_coherent(&bp->pdev->dev, size,
|
2018-07-06 13:48:57 +07:00
|
|
|
queue->rx_ring, queue->rx_ring_dma);
|
|
|
|
queue->rx_ring = NULL;
|
|
|
|
}
|
2006-11-09 20:51:17 +07:00
|
|
|
}
|
2013-06-05 04:57:12 +07:00
|
|
|
}
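The mog_free_rx_buffers() indirection used above comes from a small ops table
chosen once at init time; the following sketch, loosely modeled on
struct macb_or_gem_ops from macb.h (field list assumed, selection helper
hypothetical), shows the idea:

/* Sketch of the macb-vs-gem buffer-handling dispatch assumed by
 * macb_free_consistent() and macb_alloc_consistent().
 */
struct macb_or_gem_ops_sketch {
	int	(*mog_alloc_rx_buffers)(struct macb *bp);
	void	(*mog_free_rx_buffers)(struct macb *bp);
	void	(*mog_init_rings)(struct macb *bp);
};

static void macb_select_buffer_ops_sketch(struct macb *bp)
{
	if (macb_is_gem(bp)) {
		bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers;
		bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers;
		bp->macbgem_ops.mog_init_rings = gem_init_rings;
	} else {
		bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers;
		bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers;
		bp->macbgem_ops.mog_init_rings = macb_init_rings;
	}
}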
|
|
|
|
|
|
|
|
static int gem_alloc_rx_buffers(struct macb *bp)
|
|
|
|
{
|
2017-12-01 01:19:15 +07:00
|
|
|
struct macb_queue *queue;
|
|
|
|
unsigned int q;
|
2013-06-05 04:57:12 +07:00
|
|
|
int size;
|
|
|
|
|
2017-12-01 01:19:15 +07:00
|
|
|
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
|
|
|
|
size = bp->rx_ring_size * sizeof(struct sk_buff *);
|
|
|
|
queue->rx_skbuff = kzalloc(size, GFP_KERNEL);
|
|
|
|
if (!queue->rx_skbuff)
|
|
|
|
return -ENOMEM;
|
|
|
|
else
|
|
|
|
netdev_dbg(bp->dev,
|
|
|
|
"Allocated %d RX struct sk_buff entries at %p\n",
|
|
|
|
bp->rx_ring_size, queue->rx_skbuff);
|
|
|
|
}
|
2013-06-05 04:57:12 +07:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int macb_alloc_rx_buffers(struct macb *bp)
|
|
|
|
{
|
2017-12-01 01:19:15 +07:00
|
|
|
struct macb_queue *queue = &bp->queues[0];
|
2013-06-05 04:57:12 +07:00
|
|
|
int size;
|
|
|
|
|
2016-10-19 21:56:57 +07:00
|
|
|
size = bp->rx_ring_size * bp->rx_buffer_size;
|
2017-12-01 01:19:15 +07:00
|
|
|
queue->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
|
|
|
|
&queue->rx_buffers_dma, GFP_KERNEL);
|
|
|
|
if (!queue->rx_buffers)
|
2013-06-05 04:57:12 +07:00
|
|
|
return -ENOMEM;
|
2016-03-30 09:11:12 +07:00
|
|
|
|
|
|
|
netdev_dbg(bp->dev,
|
|
|
|
"Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
|
2017-12-01 01:19:15 +07:00
|
|
|
size, (unsigned long)queue->rx_buffers_dma, queue->rx_buffers);
|
2013-06-05 04:57:12 +07:00
|
|
|
return 0;
|
2006-11-09 20:51:17 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
static int macb_alloc_consistent(struct macb *bp)
|
|
|
|
{
|
2014-12-12 19:26:44 +07:00
|
|
|
struct macb_queue *queue;
|
|
|
|
unsigned int q;
|
2006-11-09 20:51:17 +07:00
|
|
|
int size;
|
|
|
|
|
2014-12-12 19:26:44 +07:00
|
|
|
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
|
2018-07-06 13:48:58 +07:00
|
|
|
size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
|
2014-12-12 19:26:44 +07:00
|
|
|
queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
|
|
|
|
&queue->tx_ring_dma,
|
|
|
|
GFP_KERNEL);
|
|
|
|
if (!queue->tx_ring)
|
|
|
|
goto out_err;
|
|
|
|
netdev_dbg(bp->dev,
|
|
|
|
"Allocated TX ring for queue %u of %d bytes at %08lx (mapped %p)\n",
|
|
|
|
q, size, (unsigned long)queue->tx_ring_dma,
|
|
|
|
queue->tx_ring);
|
|
|
|
|
2016-10-19 21:56:57 +07:00
|
|
|
size = bp->tx_ring_size * sizeof(struct macb_tx_skb);
|
2014-12-12 19:26:44 +07:00
|
|
|
queue->tx_skb = kmalloc(size, GFP_KERNEL);
|
|
|
|
if (!queue->tx_skb)
|
|
|
|
goto out_err;
|
2006-11-09 20:51:17 +07:00
|
|
|
|
2018-07-06 13:48:58 +07:00
|
|
|
size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
|
2017-12-01 01:19:15 +07:00
|
|
|
queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
|
|
|
|
&queue->rx_ring_dma, GFP_KERNEL);
|
|
|
|
if (!queue->rx_ring)
|
|
|
|
goto out_err;
|
|
|
|
netdev_dbg(bp->dev,
|
|
|
|
"Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
|
|
|
|
size, (unsigned long)queue->rx_ring_dma, queue->rx_ring);
|
|
|
|
}
|
2013-06-05 04:57:12 +07:00
|
|
|
if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
|
2006-11-09 20:51:17 +07:00
|
|
|
goto out_err;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
out_err:
|
|
|
|
macb_free_consistent(bp);
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
2013-06-05 04:57:12 +07:00
|
|
|
static void gem_init_rings(struct macb *bp)
|
|
|
|
{
|
2014-12-12 19:26:44 +07:00
|
|
|
struct macb_queue *queue;
|
2017-01-27 22:08:20 +07:00
|
|
|
struct macb_dma_desc *desc = NULL;
|
2014-12-12 19:26:44 +07:00
|
|
|
unsigned int q;
|
2013-06-05 04:57:12 +07:00
|
|
|
int i;
|
|
|
|
|
2014-12-12 19:26:44 +07:00
|
|
|
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
|
2016-10-19 21:56:57 +07:00
|
|
|
for (i = 0; i < bp->tx_ring_size; i++) {
|
2017-01-27 22:08:20 +07:00
|
|
|
desc = macb_tx_desc(queue, i);
|
|
|
|
macb_set_addr(bp, desc, 0);
|
|
|
|
desc->ctrl = MACB_BIT(TX_USED);
|
2014-12-12 19:26:44 +07:00
|
|
|
}
|
2017-01-27 22:08:20 +07:00
|
|
|
desc->ctrl |= MACB_BIT(TX_WRAP);
|
2014-12-12 19:26:44 +07:00
|
|
|
queue->tx_head = 0;
|
|
|
|
queue->tx_tail = 0;
|
2013-06-05 04:57:12 +07:00
|
|
|
|
2017-12-01 01:19:15 +07:00
|
|
|
queue->rx_tail = 0;
|
|
|
|
queue->rx_prepared_head = 0;
|
|
|
|
|
|
|
|
gem_rx_refill(queue);
|
|
|
|
}
|
2013-06-05 04:57:12 +07:00
|
|
|
|
|
|
|
}
|
|
|
|
|
2006-11-09 20:51:17 +07:00
|
|
|
static void macb_init_rings(struct macb *bp)
|
|
|
|
{
|
|
|
|
int i;
|
2017-01-27 22:08:20 +07:00
|
|
|
struct macb_dma_desc *desc = NULL;
|
2006-11-09 20:51:17 +07:00
|
|
|
|
2017-12-01 01:19:15 +07:00
|
|
|
macb_init_rx_ring(&bp->queues[0]);
|
2006-11-09 20:51:17 +07:00
|
|
|
|
2016-10-19 21:56:57 +07:00
|
|
|
for (i = 0; i < bp->tx_ring_size; i++) {
|
2017-01-27 22:08:20 +07:00
|
|
|
desc = macb_tx_desc(&bp->queues[0], i);
|
|
|
|
macb_set_addr(bp, desc, 0);
|
|
|
|
desc->ctrl = MACB_BIT(TX_USED);
|
2006-11-09 20:51:17 +07:00
|
|
|
}
|
2015-04-23 05:28:54 +07:00
|
|
|
bp->queues[0].tx_head = 0;
|
|
|
|
bp->queues[0].tx_tail = 0;
|
2017-01-27 22:08:20 +07:00
|
|
|
desc->ctrl |= MACB_BIT(TX_WRAP);
|
2006-11-09 20:51:17 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
static void macb_reset_hw(struct macb *bp)
|
|
|
|
{
|
2014-12-12 19:26:44 +07:00
|
|
|
struct macb_queue *queue;
|
|
|
|
unsigned int q;
|
2018-08-23 14:45:22 +07:00
|
|
|
u32 ctrl = macb_readl(bp, NCR);
|
2014-12-12 19:26:44 +07:00
|
|
|
|
2016-03-30 09:11:12 +07:00
|
|
|
/* Disable RX and TX (XXX: Should we halt the transmission
|
2006-11-09 20:51:17 +07:00
|
|
|
* more gracefully?)
|
|
|
|
*/
|
2018-08-23 14:45:22 +07:00
|
|
|
ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE));
|
2006-11-09 20:51:17 +07:00
|
|
|
|
|
|
|
/* Clear the stats registers (XXX: Update stats first?) */
|
2018-08-23 14:45:22 +07:00
|
|
|
ctrl |= MACB_BIT(CLRSTAT);
|
|
|
|
|
|
|
|
macb_writel(bp, NCR, ctrl);
|
2006-11-09 20:51:17 +07:00
|
|
|
|
|
|
|
/* Clear all status flags */
|
2012-10-22 15:45:31 +07:00
|
|
|
macb_writel(bp, TSR, -1);
|
|
|
|
macb_writel(bp, RSR, -1);
|
2006-11-09 20:51:17 +07:00
|
|
|
|
|
|
|
/* Disable all interrupts */
|
2014-12-12 19:26:44 +07:00
|
|
|
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
|
|
|
|
queue_writel(queue, IDR, -1);
|
|
|
|
queue_readl(queue, ISR);
|
2016-01-15 02:27:27 +07:00
|
|
|
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
|
|
|
|
queue_writel(queue, ISR, -1);
|
2014-12-12 19:26:44 +07:00
|
|
|
}
|
2006-11-09 20:51:17 +07:00
|
|
|
}
|
|
|
|
|
2011-03-09 23:22:54 +07:00
|
|
|
static u32 gem_mdc_clk_div(struct macb *bp)
|
|
|
|
{
|
|
|
|
u32 config;
|
|
|
|
unsigned long pclk_hz = clk_get_rate(bp->pclk);
|
|
|
|
|
|
|
|
if (pclk_hz <= 20000000)
|
|
|
|
config = GEM_BF(CLK, GEM_CLK_DIV8);
|
|
|
|
else if (pclk_hz <= 40000000)
|
|
|
|
config = GEM_BF(CLK, GEM_CLK_DIV16);
|
|
|
|
else if (pclk_hz <= 80000000)
|
|
|
|
config = GEM_BF(CLK, GEM_CLK_DIV32);
|
|
|
|
else if (pclk_hz <= 120000000)
|
|
|
|
config = GEM_BF(CLK, GEM_CLK_DIV48);
|
|
|
|
else if (pclk_hz <= 160000000)
|
|
|
|
config = GEM_BF(CLK, GEM_CLK_DIV64);
|
|
|
|
else
|
|
|
|
config = GEM_BF(CLK, GEM_CLK_DIV96);
|
|
|
|
|
|
|
|
return config;
|
|
|
|
}
|
|
|
|
|
|
|
|
static u32 macb_mdc_clk_div(struct macb *bp)
|
|
|
|
{
|
|
|
|
u32 config;
|
|
|
|
unsigned long pclk_hz;
|
|
|
|
|
|
|
|
if (macb_is_gem(bp))
|
|
|
|
return gem_mdc_clk_div(bp);
|
|
|
|
|
|
|
|
pclk_hz = clk_get_rate(bp->pclk);
|
|
|
|
if (pclk_hz <= 20000000)
|
|
|
|
config = MACB_BF(CLK, MACB_CLK_DIV8);
|
|
|
|
else if (pclk_hz <= 40000000)
|
|
|
|
config = MACB_BF(CLK, MACB_CLK_DIV16);
|
|
|
|
else if (pclk_hz <= 80000000)
|
|
|
|
config = MACB_BF(CLK, MACB_CLK_DIV32);
|
|
|
|
else
|
|
|
|
config = MACB_BF(CLK, MACB_CLK_DIV64);
|
|
|
|
|
|
|
|
return config;
|
|
|
|
}
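A quick sanity check of the divisor tables above (the pclk value is an
arbitrary example; 2.5 MHz is the customary IEEE 802.3 MDC ceiling):

/* Example: with a 133 MHz pclk, the GEM path falls into the <= 160 MHz
 * bucket and selects GEM_CLK_DIV64, so MDC = 133 MHz / 64 ~= 2.08 MHz,
 * safely below the 2.5 MHz limit. The MACB path, with its shorter
 * table, likewise ends up at MACB_CLK_DIV64 for anything above 80 MHz.
 */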
|
|
|
|
|
2016-03-30 09:11:12 +07:00
|
|
|
/* Get the DMA bus width field of the network configuration register that we
|
2011-03-09 23:29:59 +07:00
|
|
|
* should program. We find the width from decoding the design configuration
|
|
|
|
* register to find the maximum supported data bus width.
|
|
|
|
*/
|
|
|
|
static u32 macb_dbw(struct macb *bp)
|
|
|
|
{
|
|
|
|
if (!macb_is_gem(bp))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) {
|
|
|
|
case 4:
|
|
|
|
return GEM_BF(DBW, GEM_DBW128);
|
|
|
|
case 2:
|
|
|
|
return GEM_BF(DBW, GEM_DBW64);
|
|
|
|
case 1:
|
|
|
|
default:
|
|
|
|
return GEM_BF(DBW, GEM_DBW32);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-03-30 09:11:12 +07:00
|
|
|
/* Configure the receive DMA engine
|
2012-11-23 10:49:01 +07:00
|
|
|
* - use the correct receive buffer size
|
2014-07-24 18:50:58 +07:00
|
|
|
* - set best burst length for DMA operations
|
2012-11-23 10:49:01 +07:00
|
|
|
* (if not supported by FIFO, it will fallback to default)
|
|
|
|
* - set both rx/tx packet buffers to full memory size
|
|
|
|
* These are configurable parameters for GEM.
|
2011-03-15 00:38:30 +07:00
|
|
|
*/
|
|
|
|
static void macb_configure_dma(struct macb *bp)
|
|
|
|
{
|
2017-12-01 01:19:15 +07:00
|
|
|
struct macb_queue *queue;
|
|
|
|
u32 buffer_size;
|
|
|
|
unsigned int q;
|
2011-03-15 00:38:30 +07:00
|
|
|
u32 dmacfg;
|
|
|
|
|
2017-12-01 01:19:15 +07:00
|
|
|
buffer_size = bp->rx_buffer_size / RX_BUFFER_MULTIPLE;
|
2011-03-15 00:38:30 +07:00
|
|
|
if (macb_is_gem(bp)) {
|
|
|
|
dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
|
2017-12-01 01:19:15 +07:00
|
|
|
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
|
|
|
|
if (q)
|
|
|
|
queue_writel(queue, RBQS, buffer_size);
|
|
|
|
else
|
|
|
|
dmacfg |= GEM_BF(RXBS, buffer_size);
|
|
|
|
}
|
2014-07-24 18:50:58 +07:00
|
|
|
if (bp->dma_burst_length)
|
|
|
|
dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg);
|
2012-11-23 10:49:01 +07:00
|
|
|
dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
|
2015-02-18 18:29:35 +07:00
|
|
|
dmacfg &= ~GEM_BIT(ENDIA_PKT);
|
2015-03-01 13:08:02 +07:00
|
|
|
|
2015-07-25 01:23:59 +07:00
|
|
|
if (bp->native_io)
|
2015-03-01 13:08:02 +07:00
|
|
|
dmacfg &= ~GEM_BIT(ENDIA_DESC);
|
|
|
|
else
|
|
|
|
dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */
|
|
|
|
|
2014-07-24 18:51:00 +07:00
|
|
|
if (bp->dev->features & NETIF_F_HW_CSUM)
|
|
|
|
dmacfg |= GEM_BIT(TXCOEN);
|
|
|
|
else
|
|
|
|
dmacfg &= ~GEM_BIT(TXCOEN);
|
2016-08-09 14:45:53 +07:00
|
|
|
|
2018-09-25 13:32:50 +07:00
|
|
|
dmacfg &= ~GEM_BIT(ADDR64);
|
2016-08-09 14:45:53 +07:00
|
|
|
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
|
2017-06-29 13:12:51 +07:00
|
|
|
if (bp->hw_dma_cap & HW_DMA_CAP_64B)
|
2017-01-27 22:08:20 +07:00
|
|
|
dmacfg |= GEM_BIT(ADDR64);
|
2017-06-29 13:12:51 +07:00
|
|
|
#endif
|
|
|
|
#ifdef CONFIG_MACB_USE_HWSTAMP
|
|
|
|
if (bp->hw_dma_cap & HW_DMA_CAP_PTP)
|
|
|
|
dmacfg |= GEM_BIT(RXEXT) | GEM_BIT(TXEXT);
|
2016-08-09 14:45:53 +07:00
|
|
|
#endif
|
2014-07-24 18:50:58 +07:00
|
|
|
netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
|
|
|
|
dmacfg);
|
2011-03-15 00:38:30 +07:00
|
|
|
gem_writel(bp, DMACFG, dmacfg);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2006-11-09 20:51:17 +07:00
|
|
|
static void macb_init_hw(struct macb *bp)
|
|
|
|
{
|
2014-12-12 19:26:44 +07:00
|
|
|
struct macb_queue *queue;
|
|
|
|
unsigned int q;
|
|
|
|
|
2006-11-09 20:51:17 +07:00
|
|
|
u32 config;
|
|
|
|
|
|
|
|
macb_reset_hw(bp);
|
2012-11-07 15:14:52 +07:00
|
|
|
macb_set_hwaddr(bp);
|
2006-11-09 20:51:17 +07:00
|
|
|
|
2011-03-09 23:22:54 +07:00
|
|
|
config = macb_mdc_clk_div(bp);
|
2015-11-18 10:33:50 +07:00
|
|
|
if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
|
|
|
|
config |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
|
2012-10-31 13:04:58 +07:00
|
|
|
config |= MACB_BF(RBOF, NET_IP_ALIGN); /* Make eth data aligned */
|
2006-11-09 20:51:17 +07:00
|
|
|
config |= MACB_BIT(PAE); /* PAuse Enable */
|
|
|
|
config |= MACB_BIT(DRFCS); /* Discard Rx FCS */
|
2015-05-13 01:15:24 +07:00
|
|
|
if (bp->caps & MACB_CAPS_JUMBO)
|
2015-05-06 23:57:17 +07:00
|
|
|
config |= MACB_BIT(JFRAME); /* Enable jumbo frames */
|
|
|
|
else
|
|
|
|
config |= MACB_BIT(BIG); /* Receive oversized frames */
|
2006-11-09 20:51:17 +07:00
|
|
|
if (bp->dev->flags & IFF_PROMISC)
|
|
|
|
config |= MACB_BIT(CAF); /* Copy All Frames */
|
2014-07-24 18:51:01 +07:00
|
|
|
else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM)
|
|
|
|
config |= GEM_BIT(RXCOEN);
|
2006-11-09 20:51:17 +07:00
|
|
|
if (!(bp->dev->flags & IFF_BROADCAST))
|
|
|
|
config |= MACB_BIT(NBC); /* No BroadCast */
|
2011-03-09 23:29:59 +07:00
|
|
|
config |= macb_dbw(bp);
|
2006-11-09 20:51:17 +07:00
|
|
|
macb_writel(bp, NCFGR, config);
|
2015-05-13 01:15:24 +07:00
|
|
|
if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len)
|
2015-05-06 23:57:17 +07:00
|
|
|
gem_writel(bp, JML, bp->jumbo_max_len);
|
2012-11-02 14:09:24 +07:00
|
|
|
bp->speed = SPEED_10;
|
|
|
|
bp->duplex = DUPLEX_HALF;
|
2015-05-06 23:57:17 +07:00
|
|
|
bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK;
|
2015-05-13 01:15:24 +07:00
|
|
|
if (bp->caps & MACB_CAPS_JUMBO)
|
2015-05-06 23:57:17 +07:00
|
|
|
bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK;
|
2006-11-09 20:51:17 +07:00
|
|
|
|
2011-03-15 00:38:30 +07:00
|
|
|
macb_configure_dma(bp);
|
|
|
|
|
2006-11-09 20:51:17 +07:00
|
|
|
/* Initialize TX and RX buffers */
|
2017-12-01 01:19:15 +07:00
|
|
|
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
|
|
|
|
queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
|
2016-08-09 14:45:53 +07:00
|
|
|
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
|
2017-12-01 01:19:15 +07:00
|
|
|
if (bp->hw_dma_cap & HW_DMA_CAP_64B)
|
|
|
|
queue_writel(queue, RBQPH, upper_32_bits(queue->rx_ring_dma));
|
2016-08-09 14:45:53 +07:00
|
|
|
#endif
|
2017-01-27 22:08:20 +07:00
|
|
|
queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
|
2016-08-09 14:45:53 +07:00
|
|
|
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
|
2017-06-29 13:12:51 +07:00
|
|
|
if (bp->hw_dma_cap & HW_DMA_CAP_64B)
|
2017-01-27 22:08:20 +07:00
|
|
|
queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
|
2016-08-09 14:45:53 +07:00
|
|
|
#endif
|
2014-12-12 19:26:44 +07:00
|
|
|
|
|
|
|
/* Enable interrupts */
|
|
|
|
queue_writel(queue, IER,
|
|
|
|
MACB_RX_INT_FLAGS |
|
|
|
|
MACB_TX_INT_FLAGS |
|
|
|
|
MACB_BIT(HRESP));
|
|
|
|
}
|
2006-11-09 20:51:17 +07:00
|
|
|
|
|
|
|
/* Enable TX and RX */
|
2018-08-23 14:45:22 +07:00
|
|
|
macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE));
|
2006-11-09 20:51:17 +07:00
|
|
|
}
|
|
|
|
|
2016-03-30 09:11:12 +07:00
|
|
|
/* The hash address register is 64 bits long and takes up two
|
2007-07-13 00:07:25 +07:00
|
|
|
* locations in the memory map. The least significant bits are stored
|
|
|
|
* in EMAC_HSL and the most significant bits in EMAC_HSH.
|
|
|
|
*
|
|
|
|
* The unicast hash enable and the multicast hash enable bits in the
|
|
|
|
* network configuration register enable the reception of hash matched
|
|
|
|
* frames. The destination address is reduced to a 6 bit index into
|
|
|
|
* the 64 bit hash register using the following hash function. The
|
|
|
|
* hash function is an exclusive or of every sixth bit of the
|
|
|
|
* destination address.
|
|
|
|
*
|
|
|
|
* hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47]
|
|
|
|
* hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46]
|
|
|
|
* hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45]
|
|
|
|
* hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44]
|
|
|
|
* hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43]
|
|
|
|
* hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42]
|
|
|
|
*
|
|
|
|
* da[0] represents the least significant bit of the first byte
|
|
|
|
* received, that is, the multicast/unicast indicator, and da[47]
|
|
|
|
* represents the most significant bit of the last byte received. If
|
|
|
|
* the hash index, hi[n], points to a bit that is set in the hash
|
|
|
|
* register then the frame will be matched according to whether the
|
|
|
|
* frame is multicast or unicast. A multicast match will be signalled
|
|
|
|
* if the multicast hash enable bit is set, da[0] is 1 and the hash
|
|
|
|
* index points to a bit set in the hash register. A unicast match
|
|
|
|
* will be signalled if the unicast hash enable bit is set, da[0] is 0
|
|
|
|
* and the hash index points to a bit set in the hash register. To
|
|
|
|
* receive all multicast frames, the hash register should be set with
|
|
|
|
* all ones and the multicast hash enable bit should be set in the
|
|
|
|
* network configuration register.
|
|
|
|
*/
|
|
|
|
|
|
|
|
static inline int hash_bit_value(int bitnr, __u8 *addr)
|
|
|
|
{
|
|
|
|
if (addr[bitnr / 8] & (1 << (bitnr % 8)))
|
|
|
|
return 1;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-03-30 09:11:12 +07:00
|
|
|
/* Return the hash index value for the specified address. */
|
2007-07-13 00:07:25 +07:00
|
|
|
static int hash_get_index(__u8 *addr)
|
|
|
|
{
|
|
|
|
int i, j, bitval;
|
|
|
|
int hash_index = 0;
|
|
|
|
|
|
|
|
for (j = 0; j < 6; j++) {
|
|
|
|
for (i = 0, bitval = 0; i < 8; i++)
|
2015-01-16 04:55:19 +07:00
|
|
|
bitval ^= hash_bit_value(i * 6 + j, addr);
|
2007-07-13 00:07:25 +07:00
|
|
|
|
|
|
|
hash_index |= (bitval << j);
|
|
|
|
}
|
|
|
|
|
|
|
|
return hash_index;
|
|
|
|
}
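The comment and the two helpers above translate directly into a standalone
program, handy for predicting which HRB/HRT bit a given address selects. A
minimal sketch (the sample address is arbitrary):

#include <stdio.h>

static int bit(const unsigned char *addr, int bitnr)
{
	/* same test as hash_bit_value(): LSB-first within each byte */
	return (addr[bitnr / 8] >> (bitnr % 8)) & 1;
}

int main(void)
{
	const unsigned char da[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	int i, j, hash = 0;

	for (j = 0; j < 6; j++) {
		int bitval = 0;

		/* XOR of every sixth bit, exactly as in hash_get_index() */
		for (i = 0; i < 8; i++)
			bitval ^= bit(da, i * 6 + j);
		hash |= bitval << j;
	}

	/* index 0..31 lands in HRB, 32..63 in HRT */
	printf("hash index: %d\n", hash);
	return 0;
}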
|
|
|
|
|
2016-03-30 09:11:12 +07:00
|
|
|
/* Add multicast addresses to the internal multicast-hash table. */
|
2007-07-13 00:07:25 +07:00
|
|
|
static void macb_sethashtable(struct net_device *dev)
|
|
|
|
{
|
2010-04-02 04:22:57 +07:00
|
|
|
struct netdev_hw_addr *ha;
|
2007-07-13 00:07:25 +07:00
|
|
|
unsigned long mc_filter[2];
|
2010-02-23 16:19:49 +07:00
|
|
|
unsigned int bitnr;
|
2007-07-13 00:07:25 +07:00
|
|
|
struct macb *bp = netdev_priv(dev);
|
|
|
|
|
2016-03-30 09:11:13 +07:00
|
|
|
mc_filter[0] = 0;
|
|
|
|
mc_filter[1] = 0;
|
2007-07-13 00:07:25 +07:00
|
|
|
|
2010-04-02 04:22:57 +07:00
|
|
|
netdev_for_each_mc_addr(ha, dev) {
|
|
|
|
bitnr = hash_get_index(ha->addr);
|
2007-07-13 00:07:25 +07:00
|
|
|
mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
|
|
|
|
}
|
|
|
|
|
2011-11-08 17:12:32 +07:00
|
|
|
macb_or_gem_writel(bp, HRB, mc_filter[0]);
|
|
|
|
macb_or_gem_writel(bp, HRT, mc_filter[1]);
|
2007-07-13 00:07:25 +07:00
|
|
|
}
|
|
|
|
|
2016-03-30 09:11:12 +07:00
|
|
|
/* Enable/Disable promiscuous and multicast modes. */
|
2015-03-07 13:23:32 +07:00
|
|
|
static void macb_set_rx_mode(struct net_device *dev)
|
2007-07-13 00:07:25 +07:00
|
|
|
{
|
|
|
|
unsigned long cfg;
|
|
|
|
struct macb *bp = netdev_priv(dev);
|
|
|
|
|
|
|
|
cfg = macb_readl(bp, NCFGR);
|
|
|
|
|
2014-07-24 18:51:01 +07:00
|
|
|
if (dev->flags & IFF_PROMISC) {
|
2007-07-13 00:07:25 +07:00
|
|
|
/* Enable promiscuous mode */
|
|
|
|
cfg |= MACB_BIT(CAF);
|
2014-07-24 18:51:01 +07:00
|
|
|
|
|
|
|
/* Disable RX checksum offload */
|
|
|
|
if (macb_is_gem(bp))
|
|
|
|
cfg &= ~GEM_BIT(RXCOEN);
|
|
|
|
} else {
|
|
|
|
/* Disable promiscuous mode */
|
2007-07-13 00:07:25 +07:00
|
|
|
cfg &= ~MACB_BIT(CAF);
|
|
|
|
|
2014-07-24 18:51:01 +07:00
|
|
|
/* Enable RX checksum offload only if requested */
|
|
|
|
if (macb_is_gem(bp) && dev->features & NETIF_F_RXCSUM)
|
|
|
|
cfg |= GEM_BIT(RXCOEN);
|
|
|
|
}
|
|
|
|
|
2007-07-13 00:07:25 +07:00
|
|
|
if (dev->flags & IFF_ALLMULTI) {
|
|
|
|
/* Enable all multicast mode */
|
2011-11-08 17:12:32 +07:00
|
|
|
macb_or_gem_writel(bp, HRB, -1);
|
|
|
|
macb_or_gem_writel(bp, HRT, -1);
|
2007-07-13 00:07:25 +07:00
|
|
|
cfg |= MACB_BIT(NCFGR_MTI);
|
2010-02-08 11:30:35 +07:00
|
|
|
} else if (!netdev_mc_empty(dev)) {
|
2007-07-13 00:07:25 +07:00
|
|
|
/* Enable specific multicasts */
|
|
|
|
macb_sethashtable(dev);
|
|
|
|
cfg |= MACB_BIT(NCFGR_MTI);
|
|
|
|
} else if (dev->flags & (~IFF_ALLMULTI)) {
|
|
|
|
/* Disable all multicast mode */
|
2011-11-08 17:12:32 +07:00
|
|
|
macb_or_gem_writel(bp, HRB, 0);
|
|
|
|
macb_or_gem_writel(bp, HRT, 0);
|
2007-07-13 00:07:25 +07:00
|
|
|
cfg &= ~MACB_BIT(NCFGR_MTI);
|
|
|
|
}
|
|
|
|
|
|
|
|
macb_writel(bp, NCFGR, cfg);
|
|
|
|
}
|
|
|
|
|
2006-11-09 20:51:17 +07:00
|
|
|
static int macb_open(struct net_device *dev)
|
|
|
|
{
|
|
|
|
struct macb *bp = netdev_priv(dev);
|
2013-06-05 04:57:12 +07:00
|
|
|
size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN;
|
2017-12-01 01:19:15 +07:00
|
|
|
struct macb_queue *queue;
|
|
|
|
unsigned int q;
|
2006-11-09 20:51:17 +07:00
|
|
|
int err;
|
|
|
|
|
2011-03-09 03:27:08 +07:00
|
|
|
netdev_dbg(bp->dev, "open\n");
|
2006-11-09 20:51:17 +07:00
|
|
|
|
2012-07-04 06:14:13 +07:00
|
|
|
/* carrier starts down */
|
|
|
|
netif_carrier_off(dev);
|
|
|
|
|
2007-07-13 00:07:24 +07:00
|
|
|
/* if the phy is not yet registered, retry later */
|
2016-06-22 05:32:35 +07:00
|
|
|
if (!dev->phydev)
|
2007-07-13 00:07:24 +07:00
|
|
|
return -EAGAIN;
|
2013-06-05 04:57:11 +07:00
|
|
|
|
|
|
|
/* RX buffers initialization */
|
2013-06-05 04:57:12 +07:00
|
|
|
macb_init_rx_buffer_size(bp, bufsz);
|
2007-07-13 00:07:24 +07:00
|
|
|
|
2006-11-09 20:51:17 +07:00
|
|
|
err = macb_alloc_consistent(bp);
|
|
|
|
if (err) {
|
2011-03-09 03:27:08 +07:00
|
|
|
netdev_err(dev, "Unable to allocate DMA memory (error %d)\n",
|
|
|
|
err);
|
2006-11-09 20:51:17 +07:00
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2013-06-05 04:57:12 +07:00
|
|
|
bp->macbgem_ops.mog_init_rings(bp);
|
2006-11-09 20:51:17 +07:00
|
|
|
macb_init_hw(bp);
|
|
|
|
|
2017-12-01 01:19:15 +07:00
|
|
|
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
|
|
|
|
napi_enable(&queue->napi);
|
|
|
|
|
2007-07-13 00:07:24 +07:00
|
|
|
/* schedule a link state check */
|
2016-06-22 05:32:35 +07:00
|
|
|
phy_start(dev->phydev);
|
2006-11-09 20:51:17 +07:00
|
|
|
|
2014-12-12 19:26:44 +07:00
|
|
|
netif_tx_start_all_queues(dev);
|
2006-11-09 20:51:17 +07:00
|
|
|
|
2017-01-19 22:56:15 +07:00
|
|
|
if (bp->ptp_info)
|
|
|
|
bp->ptp_info->ptp_init(dev);
|
|
|
|
|
2006-11-09 20:51:17 +07:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int macb_close(struct net_device *dev)
|
|
|
|
{
|
|
|
|
struct macb *bp = netdev_priv(dev);
|
2017-12-01 01:19:15 +07:00
|
|
|
struct macb_queue *queue;
|
2006-11-09 20:51:17 +07:00
|
|
|
unsigned long flags;
|
2017-12-01 01:19:15 +07:00
|
|
|
unsigned int q;
|
2006-11-09 20:51:17 +07:00
|
|
|
|
2014-12-12 19:26:44 +07:00
|
|
|
netif_tx_stop_all_queues(dev);
|
2017-12-01 01:19:15 +07:00
|
|
|
|
|
|
|
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
|
|
|
|
napi_disable(&queue->napi);
|
2006-11-09 20:51:17 +07:00
|
|
|
|
2016-06-22 05:32:35 +07:00
|
|
|
if (dev->phydev)
|
|
|
|
phy_stop(dev->phydev);
|
2007-07-13 00:07:24 +07:00
|
|
|
|
2006-11-09 20:51:17 +07:00
|
|
|
spin_lock_irqsave(&bp->lock, flags);
|
|
|
|
macb_reset_hw(bp);
|
|
|
|
netif_carrier_off(dev);
|
|
|
|
spin_unlock_irqrestore(&bp->lock, flags);
|
|
|
|
|
|
|
|
macb_free_consistent(bp);
|
|
|
|
|
2017-01-19 22:56:15 +07:00
|
|
|
if (bp->ptp_info)
|
|
|
|
bp->ptp_info->ptp_remove(dev);
|
|
|
|
|
2006-11-09 20:51:17 +07:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2015-05-06 23:57:18 +07:00
|
|
|
static int macb_change_mtu(struct net_device *dev, int new_mtu)
|
|
|
|
{
|
|
|
|
if (netif_running(dev))
|
|
|
|
return -EBUSY;
|
|
|
|
|
|
|
|
dev->mtu = new_mtu;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2011-03-09 23:26:35 +07:00
|
|
|
static void gem_update_stats(struct macb *bp)
|
|
|
|
{
|
2017-12-01 01:19:56 +07:00
|
|
|
struct macb_queue *queue;
|
|
|
|
unsigned int i, q, idx;
|
|
|
|
unsigned long *stat;
|
|
|
|
|
2011-03-09 23:26:35 +07:00
|
|
|
u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
|
|
|
|
|
2015-01-14 05:15:51 +07:00
|
|
|
for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
|
|
|
|
u32 offset = gem_statistics[i].offset;
|
2015-07-28 04:24:48 +07:00
|
|
|
u64 val = bp->macb_reg_readl(bp, offset);
|
2015-01-14 05:15:51 +07:00
|
|
|
|
|
|
|
bp->ethtool_stats[i] += val;
|
|
|
|
*p += val;
|
|
|
|
|
|
|
|
if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) {
|
|
|
|
/* Add GEM_OCTTXH, GEM_OCTRXH */
|
2015-07-28 04:24:48 +07:00
|
|
|
val = bp->macb_reg_readl(bp, offset + 4);
|
2015-01-16 04:55:19 +07:00
|
|
|
bp->ethtool_stats[i] += ((u64)val) << 32;
|
2015-01-14 05:15:51 +07:00
|
|
|
*(++p) += val;
|
|
|
|
}
|
|
|
|
}
|
2017-12-01 01:19:56 +07:00
|
|
|
|
|
|
|
idx = GEM_STATS_LEN;
|
|
|
|
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
|
|
|
|
for (i = 0, stat = &queue->stats.first; i < QUEUE_STATS_LEN; ++i, ++stat)
|
|
|
|
bp->ethtool_stats[idx++] = *stat;
|
2011-03-09 23:26:35 +07:00
|
|
|
}
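A note on the 64-bit fold in the loop above (descriptive only; register names
as in the code):

/* For the two octet counters the low word lives at GEM_OCTTXL/GEM_OCTRXL
 * and the high word at the adjacent offset + 4 (GEM_OCTTXH/GEM_OCTRXH);
 * the second read is shifted left by 32 and folded in, reassembling the
 * full 64-bit hardware count before it is accumulated. */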
|
|
|
|
|
|
|
|
static struct net_device_stats *gem_get_stats(struct macb *bp)
|
|
|
|
{
|
|
|
|
struct gem_stats *hwstat = &bp->hw_stats.gem;
|
2017-04-07 15:17:30 +07:00
|
|
|
struct net_device_stats *nstat = &bp->dev->stats;
|
2011-03-09 23:26:35 +07:00
|
|
|
|
|
|
|
gem_update_stats(bp);
|
|
|
|
|
|
|
|
nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
|
|
|
|
hwstat->rx_alignment_errors +
|
|
|
|
hwstat->rx_resource_errors +
|
|
|
|
hwstat->rx_overruns +
|
|
|
|
hwstat->rx_oversize_frames +
|
|
|
|
hwstat->rx_jabbers +
|
|
|
|
hwstat->rx_undersized_frames +
|
|
|
|
hwstat->rx_length_field_frame_errors);
|
|
|
|
nstat->tx_errors = (hwstat->tx_late_collisions +
|
|
|
|
hwstat->tx_excessive_collisions +
|
|
|
|
hwstat->tx_underrun +
|
|
|
|
hwstat->tx_carrier_sense_errors);
|
|
|
|
nstat->multicast = hwstat->rx_multicast_frames;
|
|
|
|
nstat->collisions = (hwstat->tx_single_collision_frames +
|
|
|
|
hwstat->tx_multiple_collision_frames +
|
|
|
|
hwstat->tx_excessive_collisions);
|
|
|
|
nstat->rx_length_errors = (hwstat->rx_oversize_frames +
|
|
|
|
hwstat->rx_jabbers +
|
|
|
|
hwstat->rx_undersized_frames +
|
|
|
|
hwstat->rx_length_field_frame_errors);
|
|
|
|
nstat->rx_over_errors = hwstat->rx_resource_errors;
|
|
|
|
nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors;
|
|
|
|
nstat->rx_frame_errors = hwstat->rx_alignment_errors;
|
|
|
|
nstat->rx_fifo_errors = hwstat->rx_overruns;
|
|
|
|
nstat->tx_aborted_errors = hwstat->tx_excessive_collisions;
|
|
|
|
nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
|
|
|
|
nstat->tx_fifo_errors = hwstat->tx_underrun;
|
|
|
|
|
|
|
|
return nstat;
|
|
|
|
}
|
|
|
|
|
2015-01-14 05:15:51 +07:00
|
|
|
static void gem_get_ethtool_stats(struct net_device *dev,
|
|
|
|
struct ethtool_stats *stats, u64 *data)
|
|
|
|
{
|
|
|
|
struct macb *bp;
|
|
|
|
|
|
|
|
bp = netdev_priv(dev);
|
|
|
|
gem_update_stats(bp);
|
2017-12-01 01:19:56 +07:00
|
|
|
memcpy(data, &bp->ethtool_stats, sizeof(u64)
|
|
|
|
* (GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES));
|
2015-01-14 05:15:51 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
static int gem_get_sset_count(struct net_device *dev, int sset)
|
|
|
|
{
|
2017-12-01 01:19:56 +07:00
|
|
|
struct macb *bp = netdev_priv(dev);
|
|
|
|
|
2015-01-14 05:15:51 +07:00
|
|
|
switch (sset) {
|
|
|
|
case ETH_SS_STATS:
|
2017-12-01 01:19:56 +07:00
|
|
|
return GEM_STATS_LEN + bp->num_queues * QUEUE_STATS_LEN;
|
2015-01-14 05:15:51 +07:00
|
|
|
default:
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
|
|
|
|
{
|
2017-12-01 01:19:56 +07:00
|
|
|
char stat_string[ETH_GSTRING_LEN];
|
|
|
|
struct macb *bp = netdev_priv(dev);
|
|
|
|
struct macb_queue *queue;
|
2015-07-25 01:24:02 +07:00
|
|
|
unsigned int i;
|
2017-12-01 01:19:56 +07:00
|
|
|
unsigned int q;
|
2015-01-14 05:15:51 +07:00
|
|
|
|
|
|
|
switch (sset) {
|
|
|
|
case ETH_SS_STATS:
|
|
|
|
for (i = 0; i < GEM_STATS_LEN; i++, p += ETH_GSTRING_LEN)
|
|
|
|
memcpy(p, gem_statistics[i].stat_string,
|
|
|
|
ETH_GSTRING_LEN);
|
2017-12-01 01:19:56 +07:00
|
|
|
|
|
|
|
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
|
|
|
|
for (i = 0; i < QUEUE_STATS_LEN; i++, p += ETH_GSTRING_LEN) {
|
|
|
|
snprintf(stat_string, ETH_GSTRING_LEN, "q%d_%s",
|
|
|
|
q, queue_statistics[i].stat_string);
|
|
|
|
memcpy(p, stat_string, ETH_GSTRING_LEN);
|
|
|
|
}
|
|
|
|
}
|
2015-01-14 05:15:51 +07:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-03-07 13:23:32 +07:00
|
|
|
static struct net_device_stats *macb_get_stats(struct net_device *dev)
|
2006-11-09 20:51:17 +07:00
|
|
|
{
|
|
|
|
struct macb *bp = netdev_priv(dev);
|
2017-04-07 15:17:30 +07:00
|
|
|
struct net_device_stats *nstat = &bp->dev->stats;
|
2011-03-09 23:26:35 +07:00
|
|
|
struct macb_stats *hwstat = &bp->hw_stats.macb;
|
|
|
|
|
|
|
|
if (macb_is_gem(bp))
|
|
|
|
return gem_get_stats(bp);
|
2006-11-09 20:51:17 +07:00
|
|
|
|
2007-07-13 00:07:24 +07:00
|
|
|
/* read stats from hardware */
|
|
|
|
macb_update_stats(bp);
|
|
|
|
|
2006-11-09 20:51:17 +07:00
|
|
|
/* Convert HW stats into netdevice stats */
|
|
|
|
nstat->rx_errors = (hwstat->rx_fcs_errors +
|
|
|
|
hwstat->rx_align_errors +
|
|
|
|
hwstat->rx_resource_errors +
|
|
|
|
hwstat->rx_overruns +
|
|
|
|
hwstat->rx_oversize_pkts +
|
|
|
|
hwstat->rx_jabbers +
|
|
|
|
hwstat->rx_undersize_pkts +
|
|
|
|
hwstat->rx_length_mismatch);
|
|
|
|
nstat->tx_errors = (hwstat->tx_late_cols +
|
|
|
|
hwstat->tx_excessive_cols +
|
|
|
|
hwstat->tx_underruns +
|
2015-04-10 16:42:56 +07:00
|
|
|
hwstat->tx_carrier_errors +
|
|
|
|
hwstat->sqe_test_errors);
|
2006-11-09 20:51:17 +07:00
|
|
|
nstat->collisions = (hwstat->tx_single_cols +
|
|
|
|
hwstat->tx_multiple_cols +
|
|
|
|
hwstat->tx_excessive_cols);
|
|
|
|
nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
|
|
|
|
hwstat->rx_jabbers +
|
|
|
|
hwstat->rx_undersize_pkts +
|
|
|
|
hwstat->rx_length_mismatch);
|
2011-04-13 12:03:24 +07:00
|
|
|
nstat->rx_over_errors = hwstat->rx_resource_errors +
|
|
|
|
hwstat->rx_overruns;
|
2006-11-09 20:51:17 +07:00
|
|
|
nstat->rx_crc_errors = hwstat->rx_fcs_errors;
|
|
|
|
nstat->rx_frame_errors = hwstat->rx_align_errors;
|
|
|
|
nstat->rx_fifo_errors = hwstat->rx_overruns;
|
|
|
|
/* XXX: What does "missed" mean? */
|
|
|
|
nstat->tx_aborted_errors = hwstat->tx_excessive_cols;
|
|
|
|
nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
|
|
|
|
nstat->tx_fifo_errors = hwstat->tx_underruns;
|
|
|
|
/* Don't know about heartbeat or window errors... */
|
|
|
|
|
|
|
|
return nstat;
|
|
|
|
}
|
|
|
|
|
2012-10-31 13:04:56 +07:00
|
|
|
static int macb_get_regs_len(struct net_device *netdev)
|
|
|
|
{
|
|
|
|
return MACB_GREGS_NBR * sizeof(u32);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
|
|
|
|
void *p)
|
|
|
|
{
|
|
|
|
struct macb *bp = netdev_priv(dev);
|
|
|
|
unsigned int tail, head;
|
|
|
|
u32 *regs_buff = p;
|
|
|
|
|
|
|
|
regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1))
|
|
|
|
| MACB_GREGS_VERSION;
|
|
|
|
|
2016-10-19 21:56:57 +07:00
|
|
|
tail = macb_tx_ring_wrap(bp, bp->queues[0].tx_tail);
|
|
|
|
head = macb_tx_ring_wrap(bp, bp->queues[0].tx_head);
|
2012-10-31 13:04:56 +07:00
|
|
|
|
|
|
|
regs_buff[0] = macb_readl(bp, NCR);
|
|
|
|
regs_buff[1] = macb_or_gem_readl(bp, NCFGR);
|
|
|
|
regs_buff[2] = macb_readl(bp, NSR);
|
|
|
|
regs_buff[3] = macb_readl(bp, TSR);
|
|
|
|
regs_buff[4] = macb_readl(bp, RBQP);
|
|
|
|
regs_buff[5] = macb_readl(bp, TBQP);
|
|
|
|
regs_buff[6] = macb_readl(bp, RSR);
|
|
|
|
regs_buff[7] = macb_readl(bp, IMR);
|
|
|
|
|
|
|
|
regs_buff[8] = tail;
|
|
|
|
regs_buff[9] = head;
|
2014-12-12 19:26:44 +07:00
|
|
|
regs_buff[10] = macb_tx_dma(&bp->queues[0], tail);
|
|
|
|
regs_buff[11] = macb_tx_dma(&bp->queues[0], head);
|
2012-10-31 13:04:56 +07:00
|
|
|
|
2016-01-05 20:39:16 +07:00
|
|
|
if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
|
|
|
|
regs_buff[12] = macb_or_gem_readl(bp, USRIO);
|
2016-03-30 09:11:12 +07:00
|
|
|
if (macb_is_gem(bp))
|
2012-10-31 13:04:56 +07:00
|
|
|
regs_buff[13] = gem_readl(bp, DMACFG);
|
|
|
|
}
|
|
|
|
|
2016-02-09 21:07:16 +07:00
|
|
|
static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
|
|
|
|
{
|
|
|
|
struct macb *bp = netdev_priv(netdev);
|
|
|
|
|
|
|
|
wol->supported = 0;
|
|
|
|
wol->wolopts = 0;
|
|
|
|
|
|
|
|
if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) {
|
|
|
|
wol->supported = WAKE_MAGIC;
|
|
|
|
|
|
|
|
if (bp->wol & MACB_WOL_ENABLED)
|
|
|
|
wol->wolopts |= WAKE_MAGIC;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
|
|
|
|
{
|
|
|
|
struct macb *bp = netdev_priv(netdev);
|
|
|
|
|
|
|
|
if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) ||
|
|
|
|
(wol->wolopts & ~WAKE_MAGIC))
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
|
|
|
|
if (wol->wolopts & WAKE_MAGIC)
|
|
|
|
bp->wol |= MACB_WOL_ENABLED;
|
|
|
|
else
|
|
|
|
bp->wol &= ~MACB_WOL_ENABLED;
|
|
|
|
|
|
|
|
device_set_wakeup_enable(&bp->pdev->dev, bp->wol & MACB_WOL_ENABLED);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-10-19 21:56:58 +07:00
|
|
|
static void macb_get_ringparam(struct net_device *netdev,
|
|
|
|
struct ethtool_ringparam *ring)
|
|
|
|
{
|
|
|
|
struct macb *bp = netdev_priv(netdev);
|
|
|
|
|
|
|
|
ring->rx_max_pending = MAX_RX_RING_SIZE;
|
|
|
|
ring->tx_max_pending = MAX_TX_RING_SIZE;
|
|
|
|
|
|
|
|
ring->rx_pending = bp->rx_ring_size;
|
|
|
|
ring->tx_pending = bp->tx_ring_size;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int macb_set_ringparam(struct net_device *netdev,
|
|
|
|
struct ethtool_ringparam *ring)
|
|
|
|
{
|
|
|
|
struct macb *bp = netdev_priv(netdev);
|
|
|
|
u32 new_rx_size, new_tx_size;
|
|
|
|
unsigned int reset = 0;
|
|
|
|
|
|
|
|
if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
new_rx_size = clamp_t(u32, ring->rx_pending,
|
|
|
|
MIN_RX_RING_SIZE, MAX_RX_RING_SIZE);
|
|
|
|
new_rx_size = roundup_pow_of_two(new_rx_size);
|
|
|
|
|
|
|
|
new_tx_size = clamp_t(u32, ring->tx_pending,
|
|
|
|
MIN_TX_RING_SIZE, MAX_TX_RING_SIZE);
|
|
|
|
new_tx_size = roundup_pow_of_two(new_tx_size);
|
|
|
|
|
|
|
|
if ((new_tx_size == bp->tx_ring_size) &&
|
|
|
|
(new_rx_size == bp->rx_ring_size)) {
|
|
|
|
/* nothing to do */
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (netif_running(bp->dev)) {
|
|
|
|
reset = 1;
|
|
|
|
macb_close(bp->dev);
|
|
|
|
}
|
|
|
|
|
|
|
|
bp->rx_ring_size = new_rx_size;
|
|
|
|
bp->tx_ring_size = new_tx_size;
|
|
|
|
|
|
|
|
if (reset)
|
|
|
|
macb_open(bp->dev);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
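A worked example of the sizing policy above (the request value is arbitrary):

/* A request of 100 RX descriptors (e.g. "ethtool -G <dev> rx 100") is
 * clamped into [MIN_RX_RING_SIZE, MAX_RX_RING_SIZE] = [64, 8192], which
 * leaves it at 100, and then roundup_pow_of_two() raises it to 128; the
 * installed ring can therefore be larger than the one requested. If the
 * interface was running, it is closed and reopened around the resize. */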
|
|
|
|
|
2017-06-29 13:14:16 +07:00
|
|
|
#ifdef CONFIG_MACB_USE_HWSTAMP
|
|
|
|
static unsigned int gem_get_tsu_rate(struct macb *bp)
|
|
|
|
{
|
|
|
|
struct clk *tsu_clk;
|
|
|
|
unsigned int tsu_rate;
|
|
|
|
|
|
|
|
tsu_clk = devm_clk_get(&bp->pdev->dev, "tsu_clk");
|
|
|
|
if (!IS_ERR(tsu_clk))
|
|
|
|
tsu_rate = clk_get_rate(tsu_clk);
|
|
|
|
/* try pclk instead */
|
|
|
|
else if (!IS_ERR(bp->pclk)) {
|
|
|
|
tsu_clk = bp->pclk;
|
|
|
|
tsu_rate = clk_get_rate(tsu_clk);
|
|
|
|
} else
|
|
|
|
return -ENOTSUPP;
|
|
|
|
return tsu_rate;
|
|
|
|
}
|
|
|
|
|
|
|
|
static s32 gem_get_ptp_max_adj(void)
|
|
|
|
{
|
|
|
|
return 64000000;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int gem_get_ts_info(struct net_device *dev,
|
|
|
|
struct ethtool_ts_info *info)
|
|
|
|
{
|
|
|
|
struct macb *bp = netdev_priv(dev);
|
|
|
|
|
|
|
|
if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0) {
|
|
|
|
ethtool_op_get_ts_info(dev, info);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
info->so_timestamping =
|
|
|
|
SOF_TIMESTAMPING_TX_SOFTWARE |
|
|
|
|
SOF_TIMESTAMPING_RX_SOFTWARE |
|
|
|
|
SOF_TIMESTAMPING_SOFTWARE |
|
|
|
|
SOF_TIMESTAMPING_TX_HARDWARE |
|
|
|
|
SOF_TIMESTAMPING_RX_HARDWARE |
|
|
|
|
SOF_TIMESTAMPING_RAW_HARDWARE;
|
|
|
|
info->tx_types =
|
|
|
|
(1 << HWTSTAMP_TX_ONESTEP_SYNC) |
|
|
|
|
(1 << HWTSTAMP_TX_OFF) |
|
|
|
|
(1 << HWTSTAMP_TX_ON);
|
|
|
|
info->rx_filters =
|
|
|
|
(1 << HWTSTAMP_FILTER_NONE) |
|
|
|
|
(1 << HWTSTAMP_FILTER_ALL);
|
|
|
|
|
|
|
|
info->phc_index = bp->ptp_clock ? ptp_clock_index(bp->ptp_clock) : -1;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct macb_ptp_info gem_ptp_info = {
|
|
|
|
.ptp_init = gem_ptp_init,
|
|
|
|
.ptp_remove = gem_ptp_remove,
|
|
|
|
.get_ptp_max_adj = gem_get_ptp_max_adj,
|
|
|
|
.get_tsu_rate = gem_get_tsu_rate,
|
|
|
|
.get_ts_info = gem_get_ts_info,
|
|
|
|
.get_hwtst = gem_get_hwtst,
|
|
|
|
.set_hwtst = gem_set_hwtst,
|
|
|
|
};
|
|
|
|
#endif
|
|
|
|
|
2017-01-19 22:56:15 +07:00
|
|
|
static int macb_get_ts_info(struct net_device *netdev,
|
|
|
|
struct ethtool_ts_info *info)
|
|
|
|
{
|
|
|
|
struct macb *bp = netdev_priv(netdev);
|
|
|
|
|
|
|
|
if (bp->ptp_info)
|
|
|
|
return bp->ptp_info->get_ts_info(netdev, info);
|
|
|
|
|
|
|
|
return ethtool_op_get_ts_info(netdev, info);
|
|
|
|
}
|
|
|
|
|
2017-12-01 01:20:44 +07:00
|
|
|
static void gem_enable_flow_filters(struct macb *bp, bool enable)
|
|
|
|
{
|
|
|
|
struct ethtool_rx_fs_item *item;
|
|
|
|
u32 t2_scr;
|
|
|
|
int num_t2_scr;
|
|
|
|
|
|
|
|
num_t2_scr = GEM_BFEXT(T2SCR, gem_readl(bp, DCFG8));
|
|
|
|
|
|
|
|
list_for_each_entry(item, &bp->rx_fs_list.list, list) {
|
|
|
|
struct ethtool_rx_flow_spec *fs = &item->fs;
|
|
|
|
struct ethtool_tcpip4_spec *tp4sp_m;
|
|
|
|
|
|
|
|
if (fs->location >= num_t2_scr)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
t2_scr = gem_readl_n(bp, SCRT2, fs->location);
|
|
|
|
|
|
|
|
/* enable/disable screener regs for the flow entry */
|
|
|
|
t2_scr = GEM_BFINS(ETHTEN, enable, t2_scr);
|
|
|
|
|
|
|
|
/* only enable fields with no masking */
|
|
|
|
tp4sp_m = &(fs->m_u.tcp_ip4_spec);
|
|
|
|
|
|
|
|
if (enable && (tp4sp_m->ip4src == 0xFFFFFFFF))
|
|
|
|
t2_scr = GEM_BFINS(CMPAEN, 1, t2_scr);
|
|
|
|
else
|
|
|
|
t2_scr = GEM_BFINS(CMPAEN, 0, t2_scr);
|
|
|
|
|
|
|
|
if (enable && (tp4sp_m->ip4dst == 0xFFFFFFFF))
|
|
|
|
t2_scr = GEM_BFINS(CMPBEN, 1, t2_scr);
|
|
|
|
else
|
|
|
|
t2_scr = GEM_BFINS(CMPBEN, 0, t2_scr);
|
|
|
|
|
|
|
|
if (enable && ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)))
|
|
|
|
t2_scr = GEM_BFINS(CMPCEN, 1, t2_scr);
|
|
|
|
else
|
|
|
|
t2_scr = GEM_BFINS(CMPCEN, 0, t2_scr);
|
|
|
|
|
|
|
|
gem_writel_n(bp, SCRT2, fs->location, t2_scr);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void gem_prog_cmp_regs(struct macb *bp, struct ethtool_rx_flow_spec *fs)
|
|
|
|
{
|
|
|
|
struct ethtool_tcpip4_spec *tp4sp_v, *tp4sp_m;
|
|
|
|
uint16_t index = fs->location;
|
|
|
|
u32 w0, w1, t2_scr;
|
|
|
|
bool cmp_a = false;
|
|
|
|
bool cmp_b = false;
|
|
|
|
bool cmp_c = false;
|
|
|
|
|
|
|
|
tp4sp_v = &(fs->h_u.tcp_ip4_spec);
|
|
|
|
tp4sp_m = &(fs->m_u.tcp_ip4_spec);
|
|
|
|
|
|
|
|
/* ignore field if any masking set */
|
|
|
|
if (tp4sp_m->ip4src == 0xFFFFFFFF) {
|
|
|
|
/* 1st compare reg - IP source address */
|
|
|
|
w0 = 0;
|
|
|
|
w1 = 0;
|
|
|
|
w0 = tp4sp_v->ip4src;
|
|
|
|
w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */
|
|
|
|
w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1);
|
|
|
|
w1 = GEM_BFINS(T2OFST, ETYPE_SRCIP_OFFSET, w1);
|
|
|
|
gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w0);
|
|
|
|
gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w1);
|
|
|
|
cmp_a = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* ignore field if any masking set */
|
|
|
|
if (tp4sp_m->ip4dst == 0xFFFFFFFF) {
|
|
|
|
/* 2nd compare reg - IP destination address */
|
|
|
|
w0 = 0;
|
|
|
|
w1 = 0;
|
|
|
|
w0 = tp4sp_v->ip4dst;
|
|
|
|
w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */
|
|
|
|
w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1);
|
|
|
|
w1 = GEM_BFINS(T2OFST, ETYPE_DSTIP_OFFSET, w1);
|
|
|
|
gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4DST_CMP(index)), w0);
|
|
|
|
gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4DST_CMP(index)), w1);
|
|
|
|
cmp_b = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* ignore both port fields if masking set in both */
|
|
|
|
if ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)) {
|
|
|
|
/* 3rd compare reg - source port, destination port */
|
|
|
|
w0 = 0;
|
|
|
|
w1 = 0;
|
|
|
|
w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_IPHDR, w1);
|
|
|
|
if (tp4sp_m->psrc == tp4sp_m->pdst) {
|
|
|
|
w0 = GEM_BFINS(T2MASK, tp4sp_v->psrc, w0);
|
|
|
|
w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0);
|
|
|
|
w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */
|
|
|
|
w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1);
|
|
|
|
} else {
|
|
|
|
/* only one port definition */
|
|
|
|
w1 = GEM_BFINS(T2DISMSK, 0, w1); /* 16-bit compare */
|
|
|
|
w0 = GEM_BFINS(T2MASK, 0xFFFF, w0);
|
|
|
|
if (tp4sp_m->psrc == 0xFFFF) { /* src port */
|
|
|
|
w0 = GEM_BFINS(T2CMP, tp4sp_v->psrc, w0);
|
|
|
|
w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1);
|
|
|
|
} else { /* dst port */
|
|
|
|
w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0);
|
|
|
|
w1 = GEM_BFINS(T2OFST, IPHDR_DSTPORT_OFFSET, w1);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_PORT_CMP(index)), w0);
|
|
|
|
gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_PORT_CMP(index)), w1);
|
|
|
|
cmp_c = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
t2_scr = 0;
|
|
|
|
t2_scr = GEM_BFINS(QUEUE, (fs->ring_cookie) & 0xFF, t2_scr);
|
|
|
|
t2_scr = GEM_BFINS(ETHT2IDX, SCRT2_ETHT, t2_scr);
|
|
|
|
if (cmp_a)
|
|
|
|
t2_scr = GEM_BFINS(CMPA, GEM_IP4SRC_CMP(index), t2_scr);
|
|
|
|
if (cmp_b)
|
|
|
|
t2_scr = GEM_BFINS(CMPB, GEM_IP4DST_CMP(index), t2_scr);
|
|
|
|
if (cmp_c)
|
|
|
|
t2_scr = GEM_BFINS(CMPC, GEM_PORT_CMP(index), t2_scr);
|
|
|
|
gem_writel_n(bp, SCRT2, index, t2_scr);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int gem_add_flow_filter(struct net_device *netdev,
|
|
|
|
struct ethtool_rxnfc *cmd)
|
|
|
|
{
|
|
|
|
struct macb *bp = netdev_priv(netdev);
|
|
|
|
struct ethtool_rx_flow_spec *fs = &cmd->fs;
|
|
|
|
struct ethtool_rx_fs_item *item, *newfs;
|
2017-12-06 07:02:49 +07:00
|
|
|
unsigned long flags;
|
2017-12-01 01:20:44 +07:00
|
|
|
int ret = -EINVAL;
|
|
|
|
bool added = false;
|
|
|
|
|
2017-12-06 07:02:50 +07:00
|
|
|
newfs = kmalloc(sizeof(*newfs), GFP_KERNEL);
|
2017-12-01 01:20:44 +07:00
|
|
|
if (newfs == NULL)
|
|
|
|
return -ENOMEM;
|
|
|
|
memcpy(&newfs->fs, fs, sizeof(newfs->fs));
|
|
|
|
|
|
|
|
netdev_dbg(netdev,
|
|
|
|
"Adding flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n",
|
|
|
|
fs->flow_type, (int)fs->ring_cookie, fs->location,
|
|
|
|
htonl(fs->h_u.tcp_ip4_spec.ip4src),
|
|
|
|
htonl(fs->h_u.tcp_ip4_spec.ip4dst),
|
|
|
|
htons(fs->h_u.tcp_ip4_spec.psrc), htons(fs->h_u.tcp_ip4_spec.pdst));
|
|
|
|
|
2017-12-06 07:02:49 +07:00
|
|
|
spin_lock_irqsave(&bp->rx_fs_lock, flags);
|
|
|
|
|
2017-12-01 01:20:44 +07:00
|
|
|
/* find correct place to add in list */
|
2017-12-06 07:02:48 +07:00
|
|
|
list_for_each_entry(item, &bp->rx_fs_list.list, list) {
|
|
|
|
if (item->fs.location > newfs->fs.location) {
|
|
|
|
list_add_tail(&newfs->list, &item->list);
|
|
|
|
added = true;
|
|
|
|
break;
|
|
|
|
} else if (item->fs.location == fs->location) {
|
|
|
|
netdev_err(netdev, "Rule not added: location %d not free!\n",
|
|
|
|
fs->location);
|
|
|
|
ret = -EBUSY;
|
|
|
|
goto err;
|
2017-12-01 01:20:44 +07:00
|
|
|
}
|
|
|
|
}
|
2017-12-06 07:02:48 +07:00
|
|
|
if (!added)
|
|
|
|
list_add_tail(&newfs->list, &bp->rx_fs_list.list);
|
2017-12-01 01:20:44 +07:00
|
|
|
|
|
|
|
gem_prog_cmp_regs(bp, fs);
|
|
|
|
bp->rx_fs_list.count++;
|
|
|
|
/* enable filtering if NTUPLE on */
|
|
|
|
if (netdev->features & NETIF_F_NTUPLE)
|
|
|
|
gem_enable_flow_filters(bp, 1);
|
|
|
|
|
2017-12-06 07:02:49 +07:00
|
|
|
spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
|
2017-12-01 01:20:44 +07:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
err:
|
2017-12-06 07:02:49 +07:00
|
|
|
spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
|
2017-12-01 01:20:44 +07:00
|
|
|
kfree(newfs);
|
|
|
|
return ret;
|
|
|
|
}

static int gem_del_flow_filter(struct net_device *netdev,
		struct ethtool_rxnfc *cmd)
{
	struct macb *bp = netdev_priv(netdev);
	struct ethtool_rx_fs_item *item;
	struct ethtool_rx_flow_spec *fs;
	unsigned long flags;

	spin_lock_irqsave(&bp->rx_fs_lock, flags);

	list_for_each_entry(item, &bp->rx_fs_list.list, list) {
		if (item->fs.location == cmd->fs.location) {
			/* disable screener regs for the flow entry */
			fs = &(item->fs);
			netdev_dbg(netdev,
					"Deleting flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n",
					fs->flow_type, (int)fs->ring_cookie, fs->location,
					htonl(fs->h_u.tcp_ip4_spec.ip4src),
					htonl(fs->h_u.tcp_ip4_spec.ip4dst),
					htons(fs->h_u.tcp_ip4_spec.psrc),
					htons(fs->h_u.tcp_ip4_spec.pdst));

			gem_writel_n(bp, SCRT2, fs->location, 0);

			list_del(&item->list);
			bp->rx_fs_list.count--;
			spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
			kfree(item);
			return 0;
		}
	}

	spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
	return -EINVAL;
}
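
/* Illustrative deletion (same assumptions as above): removing the rule at
 * screener location 0 reaches gem_del_flow_filter() via
 *
 *   ethtool -N eth0 delete 0
 */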

static int gem_get_flow_entry(struct net_device *netdev,
		struct ethtool_rxnfc *cmd)
{
	struct macb *bp = netdev_priv(netdev);
	struct ethtool_rx_fs_item *item;

	list_for_each_entry(item, &bp->rx_fs_list.list, list) {
		if (item->fs.location == cmd->fs.location) {
			memcpy(&cmd->fs, &item->fs, sizeof(cmd->fs));
			return 0;
		}
	}
	return -EINVAL;
}

static int gem_get_all_flow_entries(struct net_device *netdev,
		struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
	struct macb *bp = netdev_priv(netdev);
	struct ethtool_rx_fs_item *item;
	uint32_t cnt = 0;

	list_for_each_entry(item, &bp->rx_fs_list.list, list) {
		if (cnt == cmd->rule_cnt)
			return -EMSGSIZE;
		rule_locs[cnt] = item->fs.location;
		cnt++;
	}
	cmd->data = bp->max_tuples;
	cmd->rule_cnt = cnt;

	return 0;
}

static int gem_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
		u32 *rule_locs)
{
	struct macb *bp = netdev_priv(netdev);
	int ret = 0;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = bp->num_queues;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = bp->rx_fs_list.count;
		break;
	case ETHTOOL_GRXCLSRULE:
		ret = gem_get_flow_entry(netdev, cmd);
		break;
	case ETHTOOL_GRXCLSRLALL:
		ret = gem_get_all_flow_entries(netdev, cmd, rule_locs);
		break;
	default:
		netdev_err(netdev,
			  "Command parameter %d is not supported\n", cmd->cmd);
		ret = -EOPNOTSUPP;
	}

	return ret;
}

static int gem_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
{
	struct macb *bp = netdev_priv(netdev);
	int ret;

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		if ((cmd->fs.location >= bp->max_tuples)
				|| (cmd->fs.ring_cookie >= bp->num_queues)) {
			ret = -EINVAL;
			break;
		}
		ret = gem_add_flow_filter(netdev, cmd);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		ret = gem_del_flow_filter(netdev, cmd);
		break;
	default:
		netdev_err(netdev,
			  "Command parameter %d is not supported\n", cmd->cmd);
		ret = -EOPNOTSUPP;
	}

	return ret;
}
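
/* Hedged userspace sketch (example only, never compiled into the driver):
 * the rxnfc entry points above are reached through the SIOCETHTOOL ioctl
 * carrying a struct ethtool_rxnfc. Interface name, port number and queue
 * index below are illustrative assumptions, not values from this driver.
 */
#if 0
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int install_tcp4_rule(const char *ifname)
{
	struct ethtool_rxnfc nfc;
	struct ifreq ifr;
	int fd, ret;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;

	memset(&nfc, 0, sizeof(nfc));
	nfc.cmd = ETHTOOL_SRXCLSRLINS;		/* handled by gem_set_rxnfc() */
	nfc.fs.flow_type = TCP_V4_FLOW;
	nfc.fs.location = 0;			/* screener index */
	nfc.fs.ring_cookie = 1;			/* steer matches to RX queue 1 */
	nfc.fs.h_u.tcp_ip4_spec.pdst = htons(5001);
	nfc.fs.m_u.tcp_ip4_spec.pdst = 0xFFFF;	/* compare all dst-port bits */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&nfc;
	ret = ioctl(fd, SIOCETHTOOL, &ifr);
	close(fd);
	return ret;
}
#endif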

static const struct ethtool_ops macb_ethtool_ops = {
	.get_regs_len		= macb_get_regs_len,
	.get_regs		= macb_get_regs,
	.get_link		= ethtool_op_get_link,
	.get_ts_info		= ethtool_op_get_ts_info,
	.get_wol		= macb_get_wol,
	.set_wol		= macb_set_wol,
	.get_link_ksettings     = phy_ethtool_get_link_ksettings,
	.set_link_ksettings     = phy_ethtool_set_link_ksettings,
	.get_ringparam		= macb_get_ringparam,
	.set_ringparam		= macb_set_ringparam,
};

static const struct ethtool_ops gem_ethtool_ops = {
	.get_regs_len		= macb_get_regs_len,
	.get_regs		= macb_get_regs,
	.get_link		= ethtool_op_get_link,
	.get_ts_info		= macb_get_ts_info,
	.get_ethtool_stats	= gem_get_ethtool_stats,
	.get_strings		= gem_get_ethtool_strings,
	.get_sset_count		= gem_get_sset_count,
	.get_link_ksettings     = phy_ethtool_get_link_ksettings,
	.set_link_ksettings     = phy_ethtool_set_link_ksettings,
	.get_ringparam		= macb_get_ringparam,
	.set_ringparam		= macb_set_ringparam,
	.get_rxnfc			= gem_get_rxnfc,
	.set_rxnfc			= gem_set_rxnfc,
};

static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct phy_device *phydev = dev->phydev;
	struct macb *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (!phydev)
		return -ENODEV;

	if (!bp->ptp_info)
		return phy_mii_ioctl(phydev, rq, cmd);

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return bp->ptp_info->set_hwtst(dev, rq, cmd);
	case SIOCGHWTSTAMP:
		return bp->ptp_info->get_hwtst(dev, rq);
	default:
		return phy_mii_ioctl(phydev, rq, cmd);
	}
}

static int macb_set_features(struct net_device *netdev,
			     netdev_features_t features)
{
	struct macb *bp = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	/* TX checksum offload */
	if ((changed & NETIF_F_HW_CSUM) && macb_is_gem(bp)) {
		u32 dmacfg;

		dmacfg = gem_readl(bp, DMACFG);
		if (features & NETIF_F_HW_CSUM)
			dmacfg |= GEM_BIT(TXCOEN);
		else
			dmacfg &= ~GEM_BIT(TXCOEN);
		gem_writel(bp, DMACFG, dmacfg);
	}

	/* RX checksum offload */
	if ((changed & NETIF_F_RXCSUM) && macb_is_gem(bp)) {
		u32 netcfg;

		netcfg = gem_readl(bp, NCFGR);
		if (features & NETIF_F_RXCSUM &&
		    !(netdev->flags & IFF_PROMISC))
			netcfg |= GEM_BIT(RXCOEN);
		else
			netcfg &= ~GEM_BIT(RXCOEN);
		gem_writel(bp, NCFGR, netcfg);
	}

	/* RX Flow Filters */
	if ((changed & NETIF_F_NTUPLE) && macb_is_gem(bp)) {
		bool turn_on = features & NETIF_F_NTUPLE;

		gem_enable_flow_filters(bp, turn_on);
	}

	return 0;
}
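
/* Illustrative mapping (interface name assumed): the three branches above
 * correspond to runtime feature toggles such as
 *
 *   ethtool -K eth0 tx on       -> NETIF_F_HW_CSUM  (DMACFG.TXCOEN)
 *   ethtool -K eth0 rx on       -> NETIF_F_RXCSUM   (NCFGR.RXCOEN)
 *   ethtool -K eth0 ntuple on   -> NETIF_F_NTUPLE   (screener enable)
 */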

static const struct net_device_ops macb_netdev_ops = {
	.ndo_open		= macb_open,
	.ndo_stop		= macb_close,
	.ndo_start_xmit		= macb_start_xmit,
	.ndo_set_rx_mode	= macb_set_rx_mode,
	.ndo_get_stats		= macb_get_stats,
	.ndo_do_ioctl		= macb_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= macb_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= macb_poll_controller,
#endif
	.ndo_set_features	= macb_set_features,
	.ndo_features_check	= macb_features_check,
};

/* Configure peripheral capabilities according to device tree
 * and integration options used
 */
static void macb_configure_caps(struct macb *bp,
				const struct macb_config *dt_conf)
{
	u32 dcfg;

	if (dt_conf)
		bp->caps = dt_conf->caps;

	if (hw_is_gem(bp->regs, bp->native_io)) {
		bp->caps |= MACB_CAPS_MACB_IS_GEM;

		dcfg = gem_readl(bp, DCFG1);
		if (GEM_BFEXT(IRQCOR, dcfg) == 0)
			bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
		dcfg = gem_readl(bp, DCFG2);
		if ((dcfg & (GEM_BIT(RX_PKT_BUFF) | GEM_BIT(TX_PKT_BUFF))) == 0)
			bp->caps |= MACB_CAPS_FIFO_MODE;
#ifdef CONFIG_MACB_USE_HWSTAMP
		if (gem_has_ptp(bp)) {
			if (!GEM_BFEXT(TSU, gem_readl(bp, DCFG5)))
				pr_err("GEM doesn't support hardware ptp.\n");
			else {
				bp->hw_dma_cap |= HW_DMA_CAP_PTP;
				bp->ptp_info = &gem_ptp_info;
			}
		}
#endif
	}

	dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps);
}
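
/* Worked example of the capability probe above: on a GEM whose DCFG2 has
 * both RX_PKT_BUFF and TX_PKT_BUFF clear, MACB_CAPS_FIFO_MODE gets set,
 * and macb_init() below will consequently not advertise NETIF_F_HW_CSUM
 * or NETIF_F_RXCSUM, since checksum offload requires the packet buffer.
 */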

/* net/macb: add TX multiqueue support for gem
 *
 * gem devices designed with multiqueue CANNOT work without this support.
 * When probing a gem device, the driver must first prepare and enable the
 * peripheral clock before accessing I/O registers. The second step is to
 * read the MID register to find out whether the device is a gem or an old
 * macb IP. For gem devices, it reads the Design Configuration Register 6
 * (DCFG6) to compute the total number of queues, whereas macb devices
 * always have a single queue. Only then can it call alloc_etherdev_mq()
 * with the correct number of queues; this is why the order of some
 * initializations has been changed in macb_probe(). Eventually, the
 * dedicated IRQ and TX ring buffer descriptors are initialized for each
 * queue.
 *
 * For backward compatibility, queue0 uses the legacy registers ISR, IER,
 * IDR, IMR, TBQP and RBQP, while the other queues use the new registers
 * ISR[1..7], IER[1..7], IDR[1..7], IMR[1..7], TBQP[1..7] and RBQP[1..7].
 * Apart from this hardware detail there is no real difference between
 * queue0 and the others; the driver hides it behind struct macb_queue,
 * which lets all queues share a common set of functions.
 *
 * Besides, when a TX error occurs, the gem MUST be halted before writing
 * any of the TBQP registers to reset the relevant queue. An immediate
 * side effect is that the other queues are no longer processed by the gem
 * either, so macb_tx_error_task() calls netif_tx_stop_all_queues() to
 * tell the Linux network engine that all transmissions are stopped. It
 * also calls spin_lock_irqsave() to prevent the interrupt handlers of the
 * other queues from running, as each of them may wake its associated
 * queue up (see macb_tx_interrupt()). Finally, since all queues have been
 * stopped, they are restarted by calling netif_tx_start_all_queues() and
 * setting the TSTART bit in the Network Control Register. With a single
 * queue, the driver used to defer the reset of the faulting queue and the
 * write of the TSTART bit until the next call of macb_start_xmit(); as
 * this bit is now set by macb_tx_error_task() too, the faulting queue
 * MUST be reset by setting the TX_USED bit in its first buffer descriptor
 * before writing the TSTART bit.
 *
 * Queue 0 always exists and has the lowest priority when other queues are
 * available; the higher the queue index, the higher its priority. When
 * transmitting frames, the TX queue is selected by the skb->queue_mapping
 * value, so a queueing discipline can be used to define the queue
 * priority policy.
 *
 * Signed-off-by: Cyrille Pitchen <cyrille.pitchen@atmel.com>
 * Signed-off-by: David S. Miller <davem@davemloft.net>
 */
static void macb_probe_queues(void __iomem *mem,
			      bool native_io,
			      unsigned int *queue_mask,
			      unsigned int *num_queues)
{
	unsigned int hw_q;

	*queue_mask = 0x1;
	*num_queues = 1;

	/* is it macb or gem ?
	 *
	 * We need to read directly from the hardware here because
	 * we are early in the probe process and don't have the
	 * MACB_CAPS_MACB_IS_GEM flag set yet
	 */
	if (!hw_is_gem(mem, native_io))
		return;

	/* bit 0 is never set but queue 0 always exists */
	*queue_mask = readl_relaxed(mem + GEM_DCFG6) & 0xff;

	*queue_mask |= 0x1;

	for (hw_q = 1; hw_q < MACB_MAX_QUEUES; ++hw_q)
		if (*queue_mask & (1 << hw_q))
			(*num_queues)++;
}
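
/* Worked example (register value is illustrative): if DCFG6[7:0] reads
 * 0x0E, forcing bit 0 yields *queue_mask = 0x0F, and the loop counts
 * hardware queues 1-3 on top of the initial queue 0, so *num_queues = 4.
 */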

static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
			 struct clk **hclk, struct clk **tx_clk,
			 struct clk **rx_clk)
{
	struct macb_platform_data *pdata;
	int err;

	pdata = dev_get_platdata(&pdev->dev);
	if (pdata) {
		*pclk = pdata->pclk;
		*hclk = pdata->hclk;
	} else {
		*pclk = devm_clk_get(&pdev->dev, "pclk");
		*hclk = devm_clk_get(&pdev->dev, "hclk");
	}

	if (IS_ERR(*pclk)) {
		err = PTR_ERR(*pclk);
		dev_err(&pdev->dev, "failed to get macb_clk (%u)\n", err);
		return err;
	}

	if (IS_ERR(*hclk)) {
		err = PTR_ERR(*hclk);
		dev_err(&pdev->dev, "failed to get hclk (%u)\n", err);
		return err;
	}

	*tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
	if (IS_ERR(*tx_clk))
		*tx_clk = NULL;

	*rx_clk = devm_clk_get(&pdev->dev, "rx_clk");
	if (IS_ERR(*rx_clk))
		*rx_clk = NULL;

	err = clk_prepare_enable(*pclk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err);
		return err;
	}

	err = clk_prepare_enable(*hclk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable hclk (%u)\n", err);
		goto err_disable_pclk;
	}

	err = clk_prepare_enable(*tx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err);
		goto err_disable_hclk;
	}

	err = clk_prepare_enable(*rx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable rx_clk (%u)\n", err);
		goto err_disable_txclk;
	}

	return 0;

err_disable_txclk:
	clk_disable_unprepare(*tx_clk);

err_disable_hclk:
	clk_disable_unprepare(*hclk);

err_disable_pclk:
	clk_disable_unprepare(*pclk);

	return err;
}
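
/* Hedged device-tree sketch matching the clock lookups above; the node
 * name, address and clock phandles are assumptions for illustration only:
 *
 *	ethernet@f0028000 {
 *		compatible = "cdns,macb";
 *		reg = <0xf0028000 0x100>;
 *		clock-names = "pclk", "hclk", "tx_clk", "rx_clk";
 *		clocks = <&macb_clk>, <&macb_clk>, <&tx_clk>, <&rx_clk>;
 *	};
 *
 * Only "pclk" and "hclk" are mandatory; tx_clk and rx_clk fall back to
 * NULL when absent, and clk_prepare_enable(NULL) is a no-op.
 */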

static int macb_init(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	unsigned int hw_q, q;
	struct macb *bp = netdev_priv(dev);
	struct macb_queue *queue;
	int err;
	u32 val, reg;

	bp->tx_ring_size = DEFAULT_TX_RING_SIZE;
	bp->rx_ring_size = DEFAULT_RX_RING_SIZE;

	/* set the queue register mapping once for all: queue0 has a special
	 * register mapping but we don't want to test the queue index then
	 * compute the corresponding register offset at run time.
	 */
	for (hw_q = 0, q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) {
		if (!(bp->queue_mask & (1 << hw_q)))
			continue;

		queue = &bp->queues[q];
		queue->bp = bp;
		netif_napi_add(dev, &queue->napi, macb_poll, 64);
		if (hw_q) {
			queue->ISR  = GEM_ISR(hw_q - 1);
			queue->IER  = GEM_IER(hw_q - 1);
			queue->IDR  = GEM_IDR(hw_q - 1);
			queue->IMR  = GEM_IMR(hw_q - 1);
			queue->TBQP = GEM_TBQP(hw_q - 1);
			queue->RBQP = GEM_RBQP(hw_q - 1);
			queue->RBQS = GEM_RBQS(hw_q - 1);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
			if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
				queue->TBQPH = GEM_TBQPH(hw_q - 1);
				queue->RBQPH = GEM_RBQPH(hw_q - 1);
			}
#endif
		} else {
			/* queue0 uses legacy registers */
			queue->ISR  = MACB_ISR;
			queue->IER  = MACB_IER;
			queue->IDR  = MACB_IDR;
			queue->IMR  = MACB_IMR;
			queue->TBQP = MACB_TBQP;
			queue->RBQP = MACB_RBQP;
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
			if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
				queue->TBQPH = MACB_TBQPH;
				queue->RBQPH = MACB_RBQPH;
			}
#endif
		}

		/* get irq: here we use the linux queue index, not the hardware
		 * queue index. the queue irq definitions in the device tree
		 * must remove the optional gaps that could exist in the
		 * hardware queue mask.
		 */
		queue->irq = platform_get_irq(pdev, q);
		err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt,
				       IRQF_SHARED, dev->name, queue);
		if (err) {
			dev_err(&pdev->dev,
				"Unable to request IRQ %d (error %d)\n",
				queue->irq, err);
			return err;
		}

		INIT_WORK(&queue->tx_error_task, macb_tx_error_task);
		q++;
	}
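
	/* Worked example of the mapping above (illustrative mask): with
	 * bp->queue_mask == 0x0D, linux queue 0 keeps the legacy MACB_*
	 * registers for hardware queue 0, linux queue 1 maps to GEM_ISR(1),
	 * GEM_TBQP(1), ... for hardware queue 2, and linux queue 2 maps to
	 * hardware queue 3; the device tree must then list exactly three
	 * interrupts, in that gap-free linux order.
	 */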

	dev->netdev_ops = &macb_netdev_ops;

	/* setup appropriate routines according to adapter type */
	if (macb_is_gem(bp)) {
		bp->max_tx_length = GEM_MAX_TX_LEN;
		bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers;
		bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers;
		bp->macbgem_ops.mog_init_rings = gem_init_rings;
		bp->macbgem_ops.mog_rx = gem_rx;
		dev->ethtool_ops = &gem_ethtool_ops;
	} else {
		bp->max_tx_length = MACB_MAX_TX_LEN;
		bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers;
		bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers;
		bp->macbgem_ops.mog_init_rings = macb_init_rings;
		bp->macbgem_ops.mog_rx = macb_rx;
		dev->ethtool_ops = &macb_ethtool_ops;
	}

	/* Set features */
	dev->hw_features = NETIF_F_SG;

	/* Check LSO capability */
	if (GEM_BFEXT(PBUF_LSO, gem_readl(bp, DCFG6)))
		dev->hw_features |= MACB_NETIF_LSO;

	/* Checksum offload is only available on gem with packet buffer */
	if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE))
		dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
	if (bp->caps & MACB_CAPS_SG_DISABLED)
		dev->hw_features &= ~NETIF_F_SG;
	dev->features = dev->hw_features;

	/* Check RX Flow Filters support.
	 * Max Rx flows set by availability of screeners & compare regs:
	 * each 4-tuple define requires 1 T2 screener reg + 3 compare regs
	 */
	reg = gem_readl(bp, DCFG8);
	bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3),
			GEM_BFEXT(T2SCR, reg));
	if (bp->max_tuples > 0) {
		/* also needs one ethtype match to check IPv4 */
		if (GEM_BFEXT(SCR2ETH, reg) > 0) {
			/* program this reg now */
			reg = 0;
			reg = GEM_BFINS(ETHTCMP, (uint16_t)ETH_P_IP, reg);
			gem_writel_n(bp, ETHT, SCRT2_ETHT, reg);
			/* Filtering is supported in hw but don't enable it in kernel now */
			dev->hw_features |= NETIF_F_NTUPLE;
			/* init Rx flow definitions */
			INIT_LIST_HEAD(&bp->rx_fs_list.list);
			bp->rx_fs_list.count = 0;
			spin_lock_init(&bp->rx_fs_lock);
		} else
			bp->max_tuples = 0;
	}
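
	/* Worked example (register values are illustrative): with DCFG8
	 * reporting 24 type-2 compare registers (SCR2CMP) and 16 screeners
	 * (T2SCR), bp->max_tuples = min(24 / 3, 16) = 8 concurrent 4-tuple
	 * rules.
	 */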

	if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) {
		val = 0;
		if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
			val = GEM_BIT(RGMII);
		else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII &&
			 (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
			val = MACB_BIT(RMII);
		else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
			val = MACB_BIT(MII);

		if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN)
			val |= MACB_BIT(CLKEN);

		macb_or_gem_writel(bp, USRIO, val);
	}

	/* Set MII management clock divider */
	val = macb_mdc_clk_div(bp);
	val |= macb_dbw(bp);
	if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
		val |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
	macb_writel(bp, NCFGR, val);

	return 0;
}
|
|
|
|
|
|
|
|
#if defined(CONFIG_OF)
|
|
|
|
/* 1518 rounded up */
|
|
|
|
#define AT91ETHER_MAX_RBUFF_SZ 0x600
|
|
|
|
/* max number of receive buffers */
|
|
|
|
#define AT91ETHER_MAX_RX_DESCR 9
|
|
|
|
|
|
|
|
/* Initialize and start the Receiver and Transmit subsystems */
|
|
|
|
static int at91ether_start(struct net_device *dev)
|
|
|
|
{
|
|
|
|
struct macb *lp = netdev_priv(dev);
|
2017-12-01 01:19:15 +07:00
|
|
|
struct macb_queue *q = &lp->queues[0];
|
2017-01-27 22:08:20 +07:00
|
|
|
struct macb_dma_desc *desc;
|
2015-03-07 13:23:32 +07:00
|
|
|
dma_addr_t addr;
|
|
|
|
u32 ctl;
|
|
|
|
int i;
|
|
|
|
|
2017-12-01 01:19:15 +07:00
|
|
|
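/* Allocate a coherent DMA region for the RX descriptor ring */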
q->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
|
2015-03-07 13:23:32 +07:00
|
|
|
(AT91ETHER_MAX_RX_DESCR *
|
2017-01-27 22:08:20 +07:00
|
|
|
macb_dma_desc_get_size(lp)),
|
2017-12-01 01:19:15 +07:00
|
|
|
&q->rx_ring_dma, GFP_KERNEL);
|
|
|
|
if (!q->rx_ring)
|
2015-03-07 13:23:32 +07:00
|
|
|
return -ENOMEM;
|
|
|
|
|
2017-12-01 01:19:15 +07:00
|
|
|
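/* Allocate one contiguous area holding all RX data buffers */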
q->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
|
2015-03-07 13:23:32 +07:00
|
|
|
AT91ETHER_MAX_RX_DESCR *
|
|
|
|
AT91ETHER_MAX_RBUFF_SZ,
|
2017-12-01 01:19:15 +07:00
|
|
|
&q->rx_buffers_dma, GFP_KERNEL);
|
|
|
|
if (!q->rx_buffers) {
|
2015-03-07 13:23:32 +07:00
|
|
|
dma_free_coherent(&lp->pdev->dev,
|
|
|
|
AT91ETHER_MAX_RX_DESCR *
|
2017-01-27 22:08:20 +07:00
|
|
|
macb_dma_desc_get_size(lp),
|
2017-12-01 01:19:15 +07:00
|
|
|
q->rx_ring, q->rx_ring_dma);
|
|
|
|
q->rx_ring = NULL;
|
2015-03-07 13:23:32 +07:00
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
2017-12-01 01:19:15 +07:00
|
|
|
addr = q->rx_buffers_dma;
|
2015-03-07 13:23:32 +07:00
|
|
|
for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
|
2017-12-01 01:19:15 +07:00
|
|
|
desc = macb_rx_desc(q, i);
|
2017-01-27 22:08:20 +07:00
|
|
|
macb_set_addr(lp, desc, addr);
|
|
|
|
desc->ctrl = 0;
|
2015-03-07 13:23:32 +07:00
|
|
|
addr += AT91ETHER_MAX_RBUFF_SZ;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Set the Wrap bit on the last descriptor */
|
2017-01-27 22:08:20 +07:00
|
|
|
desc->addr |= MACB_BIT(RX_WRAP);
|
2015-03-07 13:23:32 +07:00
|
|
|
|
|
|
|
/* Reset buffer index */
|
2017-12-01 01:19:15 +07:00
|
|
|
q->rx_tail = 0;
|
2015-03-07 13:23:32 +07:00
|
|
|
|
|
|
|
/* Program address of descriptor list in Rx Buffer Queue register */
|
2017-12-01 01:19:15 +07:00
|
|
|
macb_writel(lp, RBQP, q->rx_ring_dma);
|
2015-03-07 13:23:32 +07:00
|
|
|
|
|
|
|
/* Enable Receive and Transmit */
|
|
|
|
ctl = macb_readl(lp, NCR);
|
|
|
|
macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE));
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Open the ethernet interface */
|
|
|
|
static int at91ether_open(struct net_device *dev)
|
|
|
|
{
|
|
|
|
struct macb *lp = netdev_priv(dev);
|
|
|
|
u32 ctl;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
/* Clear internal statistics */
|
|
|
|
ctl = macb_readl(lp, NCR);
|
|
|
|
macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT));
|
|
|
|
|
|
|
|
macb_set_hwaddr(lp);
|
|
|
|
|
|
|
|
ret = at91ether_start(dev);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
/* Enable MAC interrupts */
|
|
|
|
macb_writel(lp, IER, MACB_BIT(RCOMP) |
|
|
|
|
MACB_BIT(RXUBR) |
|
|
|
|
MACB_BIT(ISR_TUND) |
|
|
|
|
MACB_BIT(ISR_RLE) |
|
|
|
|
MACB_BIT(TCOMP) |
|
|
|
|
MACB_BIT(ISR_ROVR) |
|
|
|
|
MACB_BIT(HRESP));
|
|
|
|
|
|
|
|
/* schedule a link state check */
|
2016-06-22 05:32:35 +07:00
|
|
|
phy_start(dev->phydev);
|
2015-03-07 13:23:32 +07:00
|
|
|
|
|
|
|
netif_start_queue(dev);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Close the interface */
|
|
|
|
static int at91ether_close(struct net_device *dev)
|
|
|
|
{
|
|
|
|
struct macb *lp = netdev_priv(dev);
|
2017-12-01 01:19:15 +07:00
|
|
|
struct macb_queue *q = &lp->queues[0];
|
2015-03-07 13:23:32 +07:00
|
|
|
u32 ctl;
|
|
|
|
|
|
|
|
/* Disable Receiver and Transmitter */
|
|
|
|
ctl = macb_readl(lp, NCR);
|
|
|
|
macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));
|
|
|
|
|
|
|
|
/* Disable MAC interrupts */
|
|
|
|
macb_writel(lp, IDR, MACB_BIT(RCOMP) |
|
|
|
|
MACB_BIT(RXUBR) |
|
|
|
|
MACB_BIT(ISR_TUND) |
|
|
|
|
MACB_BIT(ISR_RLE) |
|
|
|
|
MACB_BIT(TCOMP) |
|
|
|
|
MACB_BIT(ISR_ROVR) |
|
|
|
|
MACB_BIT(HRESP));
|
|
|
|
|
|
|
|
netif_stop_queue(dev);
|
|
|
|
|
|
|
|
dma_free_coherent(&lp->pdev->dev,
|
|
|
|
AT91ETHER_MAX_RX_DESCR *
|
2017-01-27 22:08:20 +07:00
|
|
|
macb_dma_desc_get_size(lp),
|
2017-12-01 01:19:15 +07:00
|
|
|
q->rx_ring, q->rx_ring_dma);
|
|
|
|
q->rx_ring = NULL;
|
2015-03-07 13:23:32 +07:00
|
|
|
|
|
|
|
dma_free_coherent(&lp->pdev->dev,
|
|
|
|
AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ,
|
2017-12-01 01:19:15 +07:00
|
|
|
q->rx_buffers, q->rx_buffers_dma);
|
|
|
|
q->rx_buffers = NULL;
|
2015-03-07 13:23:32 +07:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Transmit packet */
|
2018-08-07 16:25:12 +07:00
|
|
|
static netdev_tx_t at91ether_start_xmit(struct sk_buff *skb,
|
|
|
|
struct net_device *dev)
|
2015-03-07 13:23:32 +07:00
|
|
|
{
|
|
|
|
struct macb *lp = netdev_priv(dev);
|
|
|
|
|
|
|
|
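	/* The EMAC holds a single frame in flight: TSR.BNQ set means the
	 * transmit buffer can accept a new frame.
	 */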
if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) {
|
|
|
|
netif_stop_queue(dev);
|
|
|
|
|
|
|
|
/* Store packet information (to free when Tx completed) */
|
|
|
|
lp->skb = skb;
|
|
|
|
lp->skb_length = skb->len;
|
|
|
|
lp->skb_physaddr = dma_map_single(&lp->pdev->dev, skb->data, skb->len,
|
|
|
|
DMA_TO_DEVICE);
|
2016-11-19 05:40:10 +07:00
|
|
|
if (dma_mapping_error(&lp->pdev->dev, lp->skb_physaddr)) {
|
|
|
|
dev_kfree_skb_any(skb);
|
|
|
|
dev->stats.tx_dropped++;
|
|
|
|
netdev_err(dev, "%s: DMA mapping error\n", __func__);
|
|
|
|
return NETDEV_TX_OK;
|
|
|
|
}
|
2015-03-07 13:23:32 +07:00
|
|
|
|
|
|
|
/* Set address of the data in the Transmit Address register */
|
|
|
|
macb_writel(lp, TAR, lp->skb_physaddr);
|
|
|
|
/* Set length of the packet in the Transmit Control register */
|
|
|
|
macb_writel(lp, TCR, skb->len);
|
2006-11-09 20:51:17 +07:00
|
|
|
|
2015-03-07 13:23:32 +07:00
|
|
|
} else {
|
|
|
|
netdev_err(dev, "%s called, but device is busy!\n", __func__);
|
|
|
|
return NETDEV_TX_BUSY;
|
|
|
|
}
|
|
|
|
|
|
|
|
return NETDEV_TX_OK;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Extract received frames from the buffer descriptors and send them to the upper layers.
|
|
|
|
* (Called from interrupt context)
|
|
|
|
*/
|
|
|
|
static void at91ether_rx(struct net_device *dev)
|
|
|
|
{
|
|
|
|
struct macb *lp = netdev_priv(dev);
|
2017-12-01 01:19:15 +07:00
|
|
|
struct macb_queue *q = &lp->queues[0];
|
2017-01-27 22:08:20 +07:00
|
|
|
struct macb_dma_desc *desc;
|
2015-03-07 13:23:32 +07:00
|
|
|
unsigned char *p_recv;
|
|
|
|
struct sk_buff *skb;
|
|
|
|
unsigned int pktlen;
|
|
|
|
|
2017-12-01 01:19:15 +07:00
|
|
|
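	/* Process every descriptor the controller has marked as used */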
desc = macb_rx_desc(q, q->rx_tail);
|
2017-01-27 22:08:20 +07:00
|
|
|
while (desc->addr & MACB_BIT(RX_USED)) {
|
2017-12-01 01:19:15 +07:00
|
|
|
p_recv = q->rx_buffers + q->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
|
2017-01-27 22:08:20 +07:00
|
|
|
pktlen = MACB_BF(RX_FRMLEN, desc->ctrl);
|
2015-03-07 13:23:32 +07:00
|
|
|
skb = netdev_alloc_skb(dev, pktlen + 2);
|
|
|
|
if (skb) {
|
|
|
|
skb_reserve(skb, 2);
|
2017-06-16 19:29:20 +07:00
|
|
|
skb_put_data(skb, p_recv, pktlen);
|
2015-03-07 13:23:32 +07:00
|
|
|
|
|
|
|
skb->protocol = eth_type_trans(skb, dev);
|
2017-04-07 15:17:30 +07:00
|
|
|
dev->stats.rx_packets++;
|
|
|
|
dev->stats.rx_bytes += pktlen;
|
2015-03-07 13:23:32 +07:00
|
|
|
netif_rx(skb);
|
|
|
|
} else {
|
2017-04-07 15:17:30 +07:00
|
|
|
dev->stats.rx_dropped++;
|
2015-03-07 13:23:32 +07:00
|
|
|
}
|
|
|
|
|
2017-01-27 22:08:20 +07:00
|
|
|
if (desc->ctrl & MACB_BIT(RX_MHASH_MATCH))
|
2017-04-07 15:17:30 +07:00
|
|
|
dev->stats.multicast++;
|
2015-03-07 13:23:32 +07:00
|
|
|
|
|
|
|
/* reset ownership bit */
|
2017-01-27 22:08:20 +07:00
|
|
|
desc->addr &= ~MACB_BIT(RX_USED);
|
2015-03-07 13:23:32 +07:00
|
|
|
|
|
|
|
/* wrap after last buffer */
|
2017-12-01 01:19:15 +07:00
|
|
|
if (q->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
|
|
|
|
q->rx_tail = 0;
|
2015-03-07 13:23:32 +07:00
|
|
|
else
|
2017-12-01 01:19:15 +07:00
|
|
|
q->rx_tail++;
|
2017-01-27 22:08:20 +07:00
|
|
|
|
2017-12-01 01:19:15 +07:00
|
|
|
desc = macb_rx_desc(q, q->rx_tail);
|
2015-03-07 13:23:32 +07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* MAC interrupt handler */
|
|
|
|
static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
|
|
|
|
{
|
|
|
|
struct net_device *dev = dev_id;
|
|
|
|
struct macb *lp = netdev_priv(dev);
|
|
|
|
u32 intstatus, ctl;
|
|
|
|
|
|
|
|
/* MAC Interrupt Status register indicates what interrupts are pending.
|
|
|
|
* It is automatically cleared once read.
|
|
|
|
*/
|
|
|
|
intstatus = macb_readl(lp, ISR);
|
|
|
|
|
|
|
|
/* Receive complete */
|
|
|
|
if (intstatus & MACB_BIT(RCOMP))
|
|
|
|
at91ether_rx(dev);
|
|
|
|
|
|
|
|
/* Transmit complete */
|
|
|
|
if (intstatus & MACB_BIT(TCOMP)) {
|
|
|
|
/* The TCOMP bit is set even if the transmission failed */
|
|
|
|
if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE)))
|
2017-04-07 15:17:30 +07:00
|
|
|
dev->stats.tx_errors++;
|
2015-03-07 13:23:32 +07:00
|
|
|
|
|
|
|
if (lp->skb) {
|
|
|
|
dev_kfree_skb_irq(lp->skb);
|
|
|
|
lp->skb = NULL;
|
|
|
|
dma_unmap_single(&lp->pdev->dev, lp->skb_physaddr,
|
|
|
|
lp->skb_length, DMA_TO_DEVICE);
|
2017-04-07 15:17:30 +07:00
|
|
|
dev->stats.tx_packets++;
|
|
|
|
dev->stats.tx_bytes += lp->skb_length;
|
2015-03-07 13:23:32 +07:00
|
|
|
}
|
|
|
|
netif_wake_queue(dev);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Work-around for EMAC Errata section 41.3.1 */
|
|
|
|
if (intstatus & MACB_BIT(RXUBR)) {
|
|
|
|
ctl = macb_readl(lp, NCR);
|
|
|
|
macb_writel(lp, NCR, ctl & ~MACB_BIT(RE));
|
2016-11-28 20:55:00 +07:00
|
|
|
wmb();
|
2015-03-07 13:23:32 +07:00
|
|
|
macb_writel(lp, NCR, ctl | MACB_BIT(RE));
|
|
|
|
}
|
|
|
|
|
|
|
|
if (intstatus & MACB_BIT(ISR_ROVR))
|
|
|
|
netdev_err(dev, "ROVR error\n");
|
|
|
|
|
|
|
|
return IRQ_HANDLED;
|
|
|
|
}
|
|
|
|
|
|
|
|
#ifdef CONFIG_NET_POLL_CONTROLLER
|
|
|
|
static void at91ether_poll_controller(struct net_device *dev)
|
|
|
|
{
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
local_irq_save(flags);
|
|
|
|
at91ether_interrupt(dev->irq, dev);
|
|
|
|
local_irq_restore(flags);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
static const struct net_device_ops at91ether_netdev_ops = {
|
|
|
|
.ndo_open = at91ether_open,
|
|
|
|
.ndo_stop = at91ether_close,
|
|
|
|
.ndo_start_xmit = at91ether_start_xmit,
|
|
|
|
.ndo_get_stats = macb_get_stats,
|
|
|
|
.ndo_set_rx_mode = macb_set_rx_mode,
|
|
|
|
.ndo_set_mac_address = eth_mac_addr,
|
|
|
|
.ndo_do_ioctl = macb_ioctl,
|
|
|
|
.ndo_validate_addr = eth_validate_addr,
|
|
|
|
#ifdef CONFIG_NET_POLL_CONTROLLER
|
|
|
|
.ndo_poll_controller = at91ether_poll_controller,
|
|
|
|
#endif
|
|
|
|
};
|
|
|
|
|
2015-03-31 20:02:03 +07:00
|
|
|
static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk,
|
2016-08-16 11:44:50 +07:00
|
|
|
struct clk **hclk, struct clk **tx_clk,
|
|
|
|
struct clk **rx_clk)
|
2015-03-07 13:23:32 +07:00
|
|
|
{
|
|
|
|
int err;
|
|
|
|
|
2015-03-31 20:02:03 +07:00
|
|
|
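	/* The EMAC only uses the peripheral clock, named "ether_clk" here */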
*hclk = NULL;
|
|
|
|
*tx_clk = NULL;
|
2016-08-16 11:44:50 +07:00
|
|
|
*rx_clk = NULL;
|
2015-03-31 20:02:03 +07:00
|
|
|
|
|
|
|
*pclk = devm_clk_get(&pdev->dev, "ether_clk");
|
|
|
|
if (IS_ERR(*pclk))
|
|
|
|
return PTR_ERR(*pclk);
|
2015-03-07 13:23:32 +07:00
|
|
|
|
2015-03-31 20:02:03 +07:00
|
|
|
err = clk_prepare_enable(*pclk);
|
2015-03-07 13:23:32 +07:00
|
|
|
if (err) {
|
|
|
|
dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2015-03-31 20:02:03 +07:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int at91ether_init(struct platform_device *pdev)
|
|
|
|
{
|
|
|
|
struct net_device *dev = platform_get_drvdata(pdev);
|
|
|
|
struct macb *bp = netdev_priv(dev);
|
|
|
|
int err;
|
|
|
|
u32 reg;
|
|
|
|
|
2018-06-26 15:44:01 +07:00
|
|
|
bp->queues[0].bp = bp;
|
|
|
|
|
2015-03-07 13:23:32 +07:00
|
|
|
dev->netdev_ops = &at91ether_netdev_ops;
|
|
|
|
dev->ethtool_ops = &macb_ethtool_ops;
|
|
|
|
|
|
|
|
err = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt,
|
|
|
|
0, dev->name, dev);
|
|
|
|
if (err)
|
2015-03-31 20:02:03 +07:00
|
|
|
return err;
|
2015-03-07 13:23:32 +07:00
|
|
|
|
|
|
|
macb_writel(bp, NCR, 0);
|
|
|
|
|
|
|
|
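	/* MDC = MCK/32; BIG enables reception of 1536 byte frames */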
reg = MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG);
|
|
|
|
if (bp->phy_interface == PHY_INTERFACE_MODE_RMII)
|
|
|
|
reg |= MACB_BIT(RM9200_RMII);
|
|
|
|
|
|
|
|
macb_writel(bp, NCFGR, reg);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2015-03-10 10:38:02 +07:00
|
|
|
static const struct macb_config at91sam9260_config = {
|
2016-03-10 22:44:32 +07:00
|
|
|
.caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
|
2015-03-31 20:02:03 +07:00
|
|
|
.clk_init = macb_clk_init,
|
2015-03-07 13:23:32 +07:00
|
|
|
.init = macb_init,
|
|
|
|
};
|
|
|
|
|
2018-09-14 22:48:10 +07:00
|
|
|
static const struct macb_config sama5d3macb_config = {
|
|
|
|
.caps = MACB_CAPS_SG_DISABLED
|
|
|
|
| MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
|
|
|
|
.clk_init = macb_clk_init,
|
|
|
|
.init = macb_init,
|
|
|
|
};
|
|
|
|
|
2015-03-10 10:38:02 +07:00
|
|
|
static const struct macb_config pc302gem_config = {
|
2015-03-07 13:23:32 +07:00
|
|
|
.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
|
|
|
|
.dma_burst_length = 16,
|
2015-03-31 20:02:03 +07:00
|
|
|
.clk_init = macb_clk_init,
|
2015-03-07 13:23:32 +07:00
|
|
|
.init = macb_init,
|
|
|
|
};
|
|
|
|
|
2015-06-18 21:27:23 +07:00
|
|
|
static const struct macb_config sama5d2_config = {
|
2016-03-10 22:44:32 +07:00
|
|
|
.caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
|
2015-06-18 21:27:23 +07:00
|
|
|
.dma_burst_length = 16,
|
|
|
|
.clk_init = macb_clk_init,
|
|
|
|
.init = macb_init,
|
|
|
|
};
|
|
|
|
|
2015-03-10 10:38:02 +07:00
|
|
|
static const struct macb_config sama5d3_config = {
|
2016-03-10 22:44:32 +07:00
|
|
|
.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE
|
2017-07-05 22:36:16 +07:00
|
|
|
| MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_JUMBO,
|
2015-03-07 13:23:32 +07:00
|
|
|
.dma_burst_length = 16,
|
2015-03-31 20:02:03 +07:00
|
|
|
.clk_init = macb_clk_init,
|
2015-03-07 13:23:32 +07:00
|
|
|
.init = macb_init,
|
2017-07-05 22:36:16 +07:00
|
|
|
.jumbo_max_len = 10240,
|
2015-03-07 13:23:32 +07:00
|
|
|
};
|
|
|
|
|
2015-03-10 10:38:02 +07:00
|
|
|
static const struct macb_config sama5d4_config = {
|
2016-03-10 22:44:32 +07:00
|
|
|
.caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
|
2015-03-07 13:23:32 +07:00
|
|
|
.dma_burst_length = 4,
|
2015-03-31 20:02:03 +07:00
|
|
|
.clk_init = macb_clk_init,
|
2015-03-07 13:23:32 +07:00
|
|
|
.init = macb_init,
|
|
|
|
};
|
|
|
|
|
2015-03-10 10:38:02 +07:00
|
|
|
static const struct macb_config emac_config = {
|
2015-03-31 20:02:03 +07:00
|
|
|
.clk_init = at91ether_clk_init,
|
2015-03-07 13:23:32 +07:00
|
|
|
.init = at91ether_init,
|
|
|
|
};
|
|
|
|
|
2016-01-05 20:39:17 +07:00
|
|
|
static const struct macb_config np4_config = {
|
|
|
|
.caps = MACB_CAPS_USRIO_DISABLED,
|
|
|
|
.clk_init = macb_clk_init,
|
|
|
|
.init = macb_init,
|
|
|
|
};
|
2015-05-23 12:22:35 +07:00
|
|
|
|
2015-05-06 23:57:16 +07:00
|
|
|
static const struct macb_config zynqmp_config = {
|
2017-06-29 13:14:16 +07:00
|
|
|
.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
|
|
|
|
MACB_CAPS_JUMBO |
|
2018-07-06 13:48:58 +07:00
|
|
|
MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH,
|
2015-05-06 23:57:16 +07:00
|
|
|
.dma_burst_length = 16,
|
|
|
|
.clk_init = macb_clk_init,
|
|
|
|
.init = macb_init,
|
2015-05-06 23:57:17 +07:00
|
|
|
.jumbo_max_len = 10240,
|
2015-05-06 23:57:16 +07:00
|
|
|
};
|
|
|
|
|
2015-05-22 21:22:10 +07:00
|
|
|
static const struct macb_config zynq_config = {
|
2015-07-06 11:32:53 +07:00
|
|
|
.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF,
|
2015-05-22 21:22:10 +07:00
|
|
|
.dma_burst_length = 16,
|
|
|
|
.clk_init = macb_clk_init,
|
|
|
|
.init = macb_init,
|
|
|
|
};
|
|
|
|
|
2015-03-07 13:23:32 +07:00
|
|
|
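/* Example of a device tree node matched by this table. This is only a
 * sketch: the unit address, interrupt specifier and clock phandles below
 * are illustrative and depend on the SoC.
 *
 *	ethernet@f0028000 {
 *		compatible = "atmel,sama5d3-gem";
 *		reg = <0xf0028000 0x100>;
 *		interrupts = <34 IRQ_TYPE_LEVEL_HIGH 3>;
 *		clocks = <&mck>, <&mck>, <&mck>;
 *		clock-names = "hclk", "pclk", "tx_clk";
 *		phy-mode = "rgmii";
 *	};
 */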
static const struct of_device_id macb_dt_ids[] = {
|
|
|
|
{ .compatible = "cdns,at32ap7000-macb" },
|
|
|
|
{ .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config },
|
|
|
|
{ .compatible = "cdns,macb" },
|
2016-01-05 20:39:17 +07:00
|
|
|
{ .compatible = "cdns,np4-macb", .data = &np4_config },
|
2015-03-07 13:23:32 +07:00
|
|
|
{ .compatible = "cdns,pc302-gem", .data = &pc302gem_config },
|
|
|
|
{ .compatible = "cdns,gem", .data = &pc302gem_config },
|
2015-06-18 21:27:23 +07:00
|
|
|
{ .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config },
|
2015-03-07 13:23:32 +07:00
|
|
|
{ .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
|
2018-09-14 22:48:10 +07:00
|
|
|
{ .compatible = "atmel,sama5d3-macb", .data = &sama5d3macb_config },
|
2015-03-07 13:23:32 +07:00
|
|
|
{ .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
|
|
|
|
{ .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
|
|
|
|
{ .compatible = "cdns,emac", .data = &emac_config },
|
2015-05-06 23:57:16 +07:00
|
|
|
{ .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config},
|
2015-05-22 21:22:10 +07:00
|
|
|
{ .compatible = "cdns,zynq-gem", .data = &zynq_config },
|
2015-03-07 13:23:32 +07:00
|
|
|
{ /* sentinel */ }
|
|
|
|
};
|
|
|
|
MODULE_DEVICE_TABLE(of, macb_dt_ids);
|
|
|
|
#endif /* CONFIG_OF */
|
|
|
|
|
2016-12-14 13:39:15 +07:00
|
|
|
static const struct macb_config default_gem_config = {
|
2017-06-29 13:14:16 +07:00
|
|
|
.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
|
|
|
|
MACB_CAPS_JUMBO |
|
|
|
|
MACB_CAPS_GEM_HAS_PTP,
|
2016-12-14 13:39:15 +07:00
|
|
|
.dma_burst_length = 16,
|
|
|
|
.clk_init = macb_clk_init,
|
|
|
|
.init = macb_init,
|
|
|
|
.jumbo_max_len = 10240,
|
|
|
|
};
|
|
|
|
|
2015-03-07 13:23:32 +07:00
|
|
|
static int macb_probe(struct platform_device *pdev)
|
|
|
|
{
|
2016-12-14 13:39:15 +07:00
|
|
|
const struct macb_config *macb_config = &default_gem_config;
|
2015-03-31 20:02:03 +07:00
|
|
|
int (*clk_init)(struct platform_device *, struct clk **,
|
2016-08-16 11:44:50 +07:00
|
|
|
struct clk **, struct clk **, struct clk **)
|
2016-12-14 13:39:15 +07:00
|
|
|
= macb_config->clk_init;
|
|
|
|
int (*init)(struct platform_device *) = macb_config->init;
|
2015-03-07 13:23:32 +07:00
|
|
|
struct device_node *np = pdev->dev.of_node;
|
2016-08-16 11:44:50 +07:00
|
|
|
struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL;
|
2015-03-07 13:23:32 +07:00
|
|
|
unsigned int queue_mask, num_queues;
|
|
|
|
struct macb_platform_data *pdata;
|
2015-07-25 01:23:59 +07:00
|
|
|
bool native_io;
|
2015-03-07 13:23:32 +07:00
|
|
|
struct phy_device *phydev;
|
|
|
|
struct net_device *dev;
|
|
|
|
struct resource *regs;
|
|
|
|
void __iomem *mem;
|
|
|
|
const char *mac;
|
|
|
|
struct macb *bp;
|
2018-07-06 13:48:58 +07:00
|
|
|
int err, val;
|
2015-03-07 13:23:32 +07:00
|
|
|
|
2015-07-25 01:23:59 +07:00
|
|
|
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
|
|
|
mem = devm_ioremap_resource(&pdev->dev, regs);
|
|
|
|
if (IS_ERR(mem))
|
|
|
|
return PTR_ERR(mem);
|
|
|
|
|
2015-03-31 20:02:03 +07:00
|
|
|
if (np) {
|
|
|
|
const struct of_device_id *match;
|
|
|
|
|
|
|
|
match = of_match_node(macb_dt_ids, np);
|
|
|
|
if (match && match->data) {
|
|
|
|
macb_config = match->data;
|
|
|
|
clk_init = macb_config->clk_init;
|
|
|
|
init = macb_config->init;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-08-16 11:44:50 +07:00
|
|
|
err = clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk);
|
2015-03-31 20:02:03 +07:00
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
|
2015-07-25 01:23:59 +07:00
|
|
|
native_io = hw_is_native_io(mem);
|
2015-03-07 13:23:32 +07:00
|
|
|
|
2015-07-25 01:23:59 +07:00
|
|
|
macb_probe_queues(mem, native_io, &queue_mask, &num_queues);
|
2015-03-07 13:23:32 +07:00
|
|
|
dev = alloc_etherdev_mq(sizeof(*bp), num_queues);
|
2015-03-31 20:02:03 +07:00
|
|
|
if (!dev) {
|
|
|
|
err = -ENOMEM;
|
|
|
|
goto err_disable_clocks;
|
|
|
|
}
|
2015-03-07 13:23:32 +07:00
|
|
|
|
|
|
|
dev->base_addr = regs->start;
|
|
|
|
|
|
|
|
SET_NETDEV_DEV(dev, &pdev->dev);
|
|
|
|
|
|
|
|
bp = netdev_priv(dev);
|
|
|
|
bp->pdev = pdev;
|
|
|
|
bp->dev = dev;
|
|
|
|
bp->regs = mem;
|
2015-07-25 01:23:59 +07:00
|
|
|
bp->native_io = native_io;
|
|
|
|
if (native_io) {
|
2015-07-28 04:24:48 +07:00
|
|
|
bp->macb_reg_readl = hw_readl_native;
|
|
|
|
bp->macb_reg_writel = hw_writel_native;
|
2015-07-25 01:23:59 +07:00
|
|
|
} else {
|
2015-07-28 04:24:48 +07:00
|
|
|
bp->macb_reg_readl = hw_readl;
|
|
|
|
bp->macb_reg_writel = hw_writel;
|
2015-07-25 01:23:59 +07:00
|
|
|
}
|
2015-03-07 13:23:32 +07:00
|
|
|
bp->num_queues = num_queues;
|
2015-03-31 20:01:59 +07:00
|
|
|
bp->queue_mask = queue_mask;
|
2015-03-31 20:02:03 +07:00
|
|
|
if (macb_config)
|
|
|
|
bp->dma_burst_length = macb_config->dma_burst_length;
|
|
|
|
bp->pclk = pclk;
|
|
|
|
bp->hclk = hclk;
|
|
|
|
bp->tx_clk = tx_clk;
|
2016-08-16 11:44:50 +07:00
|
|
|
bp->rx_clk = rx_clk;
|
2015-07-25 01:24:00 +07:00
|
|
|
if (macb_config)
|
2015-05-06 23:57:17 +07:00
|
|
|
bp->jumbo_max_len = macb_config->jumbo_max_len;
|
|
|
|
|
2016-02-09 21:07:16 +07:00
|
|
|
bp->wol = 0;
|
2016-02-17 06:10:45 +07:00
|
|
|
if (of_get_property(np, "magic-packet", NULL))
|
2016-02-09 21:07:16 +07:00
|
|
|
bp->wol |= MACB_WOL_HAS_MAGIC_PACKET;
|
|
|
|
device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
|
|
|
|
|
2015-03-07 13:23:32 +07:00
|
|
|
spin_lock_init(&bp->lock);
|
|
|
|
|
2015-03-31 20:02:02 +07:00
|
|
|
/* setup capabilities */
|
2015-03-31 20:02:01 +07:00
|
|
|
macb_configure_caps(bp, macb_config);
|
|
|
|
|
2017-06-29 13:12:51 +07:00
|
|
|
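/* DAW64 in DCFG6 means the DMA can address more than 32 bits: use a
 * 44-bit mask and the extended (64-bit) descriptor format.
 */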
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
|
|
|
|
if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
|
|
|
|
dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
|
|
|
|
bp->hw_dma_cap |= HW_DMA_CAP_64B;
|
|
|
|
}
|
|
|
|
#endif
|
2015-03-07 13:23:32 +07:00
|
|
|
platform_set_drvdata(pdev, dev);
|
|
|
|
|
|
|
|
dev->irq = platform_get_irq(pdev, 0);
|
2015-03-31 20:02:03 +07:00
|
|
|
if (dev->irq < 0) {
|
|
|
|
err = dev->irq;
|
2016-08-12 22:43:54 +07:00
|
|
|
goto err_out_free_netdev;
|
2015-03-31 20:02:03 +07:00
|
|
|
}
|
2015-03-07 13:23:32 +07:00
|
|
|
|
2016-10-18 02:54:17 +07:00
|
|
|
/* MTU range: 68 - 1500 or 10240 */
|
|
|
|
dev->min_mtu = GEM_MTU_MIN_SIZE;
|
|
|
|
if (bp->caps & MACB_CAPS_JUMBO)
|
|
|
|
dev->max_mtu = gem_readl(bp, JML) - ETH_HLEN - ETH_FCS_LEN;
|
|
|
|
else
|
|
|
|
dev->max_mtu = ETH_DATA_LEN;
|
|
|
|
|
2018-07-06 13:48:58 +07:00
|
|
|
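	/* The controller may prefetch descriptors past the end of a ring;
	 * record how much extra space each ring needs (DCFG10).
	 */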
if (bp->caps & MACB_CAPS_BD_RD_PREFETCH) {
|
|
|
|
val = GEM_BFEXT(RXBD_RDBUFF, gem_readl(bp, DCFG10));
|
|
|
|
if (val)
|
|
|
|
bp->rx_bd_rd_prefetch = (2 << (val - 1)) *
|
|
|
|
macb_dma_desc_get_size(bp);
|
|
|
|
|
|
|
|
val = GEM_BFEXT(TXBD_RDBUFF, gem_readl(bp, DCFG10));
|
|
|
|
if (val)
|
|
|
|
bp->tx_bd_rd_prefetch = (2 << (val - 1)) *
|
|
|
|
macb_dma_desc_get_size(bp);
|
|
|
|
}
|
|
|
|
|
2015-03-07 13:23:32 +07:00
|
|
|
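	/* MAC address: try the DT property, then an nvmem cell, then fall
	 * back to whatever the hardware registers contain.
	 */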
mac = of_get_mac_address(np);
|
2018-03-29 12:29:49 +07:00
|
|
|
if (mac) {
|
2016-03-30 09:11:14 +07:00
|
|
|
ether_addr_copy(bp->dev->dev_addr, mac);
|
2018-03-29 12:29:49 +07:00
|
|
|
} else {
|
2018-11-30 15:20:58 +07:00
|
|
|
err = nvmem_get_mac_address(&pdev->dev, bp->dev->dev_addr);
|
2018-03-29 12:29:49 +07:00
|
|
|
if (err) {
|
|
|
|
if (err == -EPROBE_DEFER)
|
|
|
|
goto err_out_free_netdev;
|
|
|
|
macb_get_hwaddr(bp);
|
|
|
|
}
|
|
|
|
}
|
2011-11-18 21:29:25 +07:00
|
|
|
|
2015-03-07 13:23:32 +07:00
|
|
|
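	/* Without a DT phy-mode, fall back to platform data to pick RMII or MII */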
err = of_get_phy_mode(np);
|
2011-11-18 21:29:25 +07:00
|
|
|
if (err < 0) {
|
2013-08-30 12:12:21 +07:00
|
|
|
pdata = dev_get_platdata(&pdev->dev);
|
2011-11-18 21:29:25 +07:00
|
|
|
if (pdata && pdata->is_rmii)
|
|
|
|
bp->phy_interface = PHY_INTERFACE_MODE_RMII;
|
|
|
|
else
|
|
|
|
bp->phy_interface = PHY_INTERFACE_MODE_MII;
|
|
|
|
} else {
|
|
|
|
bp->phy_interface = err;
|
|
|
|
}
|
2007-07-13 00:07:24 +07:00
|
|
|
|
2015-03-07 13:23:32 +07:00
|
|
|
/* IP specific init */
|
|
|
|
err = init(pdev);
|
|
|
|
if (err)
|
|
|
|
goto err_out_free_netdev;
|
2006-11-09 20:51:17 +07:00
|
|
|
|
2016-05-03 08:38:45 +07:00
|
|
|
err = macb_mii_init(bp);
|
|
|
|
if (err)
|
|
|
|
goto err_out_free_netdev;
|
|
|
|
|
2016-06-22 05:32:35 +07:00
|
|
|
phydev = dev->phydev;
|
2016-05-03 08:38:45 +07:00
|
|
|
|
|
|
|
netif_carrier_off(dev);
|
|
|
|
|
2006-11-09 20:51:17 +07:00
|
|
|
err = register_netdev(dev);
|
|
|
|
if (err) {
|
|
|
|
dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
|
2016-05-03 08:38:45 +07:00
|
|
|
goto err_out_unregister_mdio;
|
2006-11-09 20:51:17 +07:00
|
|
|
}
|
|
|
|
|
2018-01-27 13:39:01 +07:00
|
|
|
tasklet_init(&bp->hresp_err_tasklet, macb_hresp_error_task,
|
|
|
|
(unsigned long)bp);
|
|
|
|
|
2016-05-03 08:38:45 +07:00
|
|
|
phy_attached_info(phydev);
|
2012-07-04 06:14:13 +07:00
|
|
|
|
2014-09-13 06:57:49 +07:00
|
|
|
netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
|
|
|
|
macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
|
|
|
|
dev->base_addr, dev->irq, dev->dev_addr);
|
2006-11-09 20:51:17 +07:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
2016-05-03 08:38:45 +07:00
|
|
|
err_out_unregister_mdio:
|
2016-06-22 05:32:35 +07:00
|
|
|
phy_disconnect(dev->phydev);
|
2016-05-03 08:38:45 +07:00
|
|
|
mdiobus_unregister(bp->mii_bus);
|
2017-11-08 15:56:35 +07:00
|
|
|
of_node_put(bp->phy_node);
|
2017-11-08 15:56:34 +07:00
|
|
|
if (np && of_phy_is_fixed_link(np))
|
|
|
|
of_phy_deregister_fixed_link(np);
|
2016-05-03 08:38:45 +07:00
|
|
|
mdiobus_free(bp->mii_bus);
|
|
|
|
|
2014-12-15 21:13:32 +07:00
|
|
|
err_out_free_netdev:
|
2014-12-12 19:26:44 +07:00
|
|
|
free_netdev(dev);
|
2015-03-07 13:23:32 +07:00
|
|
|
|
2015-03-31 20:02:03 +07:00
|
|
|
err_disable_clocks:
|
|
|
|
clk_disable_unprepare(tx_clk);
|
|
|
|
clk_disable_unprepare(hclk);
|
|
|
|
clk_disable_unprepare(pclk);
|
2016-08-16 11:44:50 +07:00
|
|
|
clk_disable_unprepare(rx_clk);
|
2015-03-31 20:02:03 +07:00
|
|
|
|
2006-11-09 20:51:17 +07:00
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2015-01-23 00:31:05 +07:00
|
|
|
static int macb_remove(struct platform_device *pdev)
|
2006-11-09 20:51:17 +07:00
|
|
|
{
|
|
|
|
struct net_device *dev;
|
|
|
|
struct macb *bp;
|
2017-11-08 15:56:34 +07:00
|
|
|
struct device_node *np = pdev->dev.of_node;
|
2006-11-09 20:51:17 +07:00
|
|
|
|
|
|
|
dev = platform_get_drvdata(pdev);
|
|
|
|
|
|
|
|
if (dev) {
|
|
|
|
bp = netdev_priv(dev);
|
2016-06-22 05:32:35 +07:00
|
|
|
if (dev->phydev)
|
|
|
|
phy_disconnect(dev->phydev);
|
2008-10-09 06:29:57 +07:00
|
|
|
mdiobus_unregister(bp->mii_bus);
|
2017-11-08 15:56:34 +07:00
|
|
|
if (np && of_phy_is_fixed_link(np))
|
|
|
|
of_phy_deregister_fixed_link(np);
|
2016-10-07 22:13:22 +07:00
|
|
|
dev->phydev = NULL;
|
2008-10-09 06:29:57 +07:00
|
|
|
mdiobus_free(bp->mii_bus);
|
2015-12-11 17:34:53 +07:00
|
|
|
|
2006-11-09 20:51:17 +07:00
|
|
|
unregister_netdev(dev);
|
2015-03-07 13:23:31 +07:00
|
|
|
clk_disable_unprepare(bp->tx_clk);
|
2013-03-28 06:07:07 +07:00
|
|
|
clk_disable_unprepare(bp->hclk);
|
|
|
|
clk_disable_unprepare(bp->pclk);
|
2016-08-16 11:44:50 +07:00
|
|
|
clk_disable_unprepare(bp->rx_clk);
|
2017-06-23 21:54:10 +07:00
|
|
|
of_node_put(bp->phy_node);
|
2014-12-15 21:13:31 +07:00
|
|
|
free_netdev(dev);
|
2006-11-09 20:51:17 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2015-01-23 15:36:03 +07:00
|
|
|
static int __maybe_unused macb_suspend(struct device *dev)
|
2008-03-04 19:39:29 +07:00
|
|
|
{
|
2018-10-22 03:00:14 +07:00
|
|
|
struct net_device *netdev = dev_get_drvdata(dev);
|
2008-03-04 19:39:29 +07:00
|
|
|
struct macb *bp = netdev_priv(netdev);
|
|
|
|
|
2012-07-04 06:14:13 +07:00
|
|
|
netif_carrier_off(netdev);
|
2008-03-04 19:39:29 +07:00
|
|
|
netif_device_detach(netdev);
|
|
|
|
|
2016-02-09 21:07:16 +07:00
|
|
|
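	/* With WoL enabled, arm the magic-packet interrupt instead of
	 * gating the clocks.
	 */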
if (bp->wol & MACB_WOL_ENABLED) {
|
|
|
|
macb_writel(bp, IER, MACB_BIT(WOL));
|
|
|
|
macb_writel(bp, WOL, MACB_BIT(MAG));
|
|
|
|
enable_irq_wake(bp->queues[0].irq);
|
|
|
|
} else {
|
|
|
|
clk_disable_unprepare(bp->tx_clk);
|
|
|
|
clk_disable_unprepare(bp->hclk);
|
|
|
|
clk_disable_unprepare(bp->pclk);
|
2016-08-16 11:44:50 +07:00
|
|
|
clk_disable_unprepare(bp->rx_clk);
|
2016-02-09 21:07:16 +07:00
|
|
|
}
|
2008-03-04 19:39:29 +07:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2015-01-23 15:36:03 +07:00
|
|
|
static int __maybe_unused macb_resume(struct device *dev)
|
2008-03-04 19:39:29 +07:00
|
|
|
{
|
2018-10-22 03:00:14 +07:00
|
|
|
struct net_device *netdev = dev_get_drvdata(dev);
|
2008-03-04 19:39:29 +07:00
|
|
|
struct macb *bp = netdev_priv(netdev);
|
|
|
|
|
2016-02-09 21:07:16 +07:00
|
|
|
if (bp->wol & MACB_WOL_ENABLED) {
|
|
|
|
macb_writel(bp, IDR, MACB_BIT(WOL));
|
|
|
|
macb_writel(bp, WOL, 0);
|
|
|
|
disable_irq_wake(bp->queues[0].irq);
|
|
|
|
} else {
|
|
|
|
clk_prepare_enable(bp->pclk);
|
|
|
|
clk_prepare_enable(bp->hclk);
|
|
|
|
clk_prepare_enable(bp->tx_clk);
|
2016-08-16 11:44:50 +07:00
|
|
|
clk_prepare_enable(bp->rx_clk);
|
2016-02-09 21:07:16 +07:00
|
|
|
}
|
2008-03-04 19:39:29 +07:00
|
|
|
|
|
|
|
netif_device_attach(netdev);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2013-12-11 07:07:19 +07:00
|
|
|
static SIMPLE_DEV_PM_OPS(macb_pm_ops, macb_suspend, macb_resume);
|
|
|
|
|
2006-11-09 20:51:17 +07:00
|
|
|
static struct platform_driver macb_driver = {
|
2015-01-23 00:31:05 +07:00
|
|
|
.probe = macb_probe,
|
|
|
|
.remove = macb_remove,
|
2006-11-09 20:51:17 +07:00
|
|
|
.driver = {
|
|
|
|
.name = "macb",
|
2011-11-18 21:29:25 +07:00
|
|
|
.of_match_table = of_match_ptr(macb_dt_ids),
|
2013-12-11 07:07:19 +07:00
|
|
|
.pm = &macb_pm_ops,
|
2006-11-09 20:51:17 +07:00
|
|
|
},
|
|
|
|
};
|
|
|
|
|
2015-01-23 00:31:05 +07:00
|
|
|
module_platform_driver(macb_driver);
|
2006-11-09 20:51:17 +07:00
|
|
|
|
|
|
|
MODULE_LICENSE("GPL");
|
2011-11-08 17:12:32 +07:00
|
|
|
MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
|
2011-05-18 21:49:24 +07:00
|
|
|
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
|
2008-04-19 03:50:44 +07:00
|
|
|
MODULE_ALIAS("platform:macb");
|