/*
 * Copyright (C) 2006-2007 PA Semi, Inc
 *
 * Driver for the PA Semi PWRficient onchip 1G/10G Ethernet MACs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/of_mdio.h>
#include <linux/etherdevice.h>
#include <asm/dma-mapping.h>
#include <linux/in.h>
#include <linux/skbuff.h>

#include <linux/ip.h>
#include <net/checksum.h>
#include <linux/prefetch.h>

#include <asm/irq.h>
#include <asm/firmware.h>
#include <asm/pasemi_dma.h>

#include "pasemi_mac.h"

/* We have our own alignment, since ppc64 in general has it at 0 because
 * of design flaws in some of the server bridge chips. However, for
 * PWRficient, doing the unaligned copies is more expensive than doing
 * unaligned DMA, so make sure the data is aligned instead.
 */
#define LOCAL_SKB_ALIGN 2
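
/* With a 2-byte reserve in front of the 14-byte Ethernet header, the IP
 * header lands on a 4-byte boundary (2 + 14 = 16) for a naturally aligned
 * buffer; this is the same idea as the generic NET_IP_ALIGN, which ppc64
 * sets to 0.
 */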
|
2007-02-01 10:43:54 +07:00
|
|
|
|
|
|
|
/* TODO list
|
|
|
|
*
|
|
|
|
* - Multicast support
|
|
|
|
* - Large MTU support
|
2007-10-03 04:27:28 +07:00
|
|
|
* - Multiqueue RX/TX
|
2007-02-01 10:43:54 +07:00
|
|
|
*/
|
|
|
|
|

#define PE_MIN_MTU      (ETH_ZLEN + ETH_HLEN)
#define PE_MAX_MTU      9000
#define PE_DEF_MTU      ETH_DATA_LEN

#define DEFAULT_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Olof Johansson <olof@lixom.net>");
MODULE_DESCRIPTION("PA Semi PWRficient Ethernet driver");

static int debug = -1;  /* -1 == use DEFAULT_MSG_ENABLE as value */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "PA Semi MAC bitmapped debugging message enable value");
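
/* With the standard NETIF_MSG_* bit values the default mask above works out
 * to 0x00ff, so e.g. loading with debug=0x0007 restricts output to driver,
 * probe and link messages.
 */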

extern const struct ethtool_ops pasemi_mac_ethtool_ops;

static int translation_enabled(void)
{
#if defined(CONFIG_PPC_PASEMI_IOMMU_DMA_FORCE)
        return 1;
#else
        return firmware_has_feature(FW_FEATURE_LPAR);
#endif
}

static void write_iob_reg(unsigned int reg, unsigned int val)
{
        pasemi_write_iob_reg(reg, val);
}

static unsigned int read_mac_reg(const struct pasemi_mac *mac, unsigned int reg)
{
        return pasemi_read_mac_reg(mac->dma_if, reg);
}

static void write_mac_reg(const struct pasemi_mac *mac, unsigned int reg,
                          unsigned int val)
{
        pasemi_write_mac_reg(mac->dma_if, reg, val);
}

static unsigned int read_dma_reg(unsigned int reg)
{
        return pasemi_read_dma_reg(reg);
}

static void write_dma_reg(unsigned int reg, unsigned int val)
{
        pasemi_write_dma_reg(reg, val);
}

static struct pasemi_mac_rxring *rx_ring(const struct pasemi_mac *mac)
{
        return mac->rx;
}

static struct pasemi_mac_txring *tx_ring(const struct pasemi_mac *mac)
{
        return mac->tx;
}

static inline void prefetch_skb(const struct sk_buff *skb)
{
        const void *d = skb;

        prefetch(d);
        prefetch(d+64);
        prefetch(d+128);
        prefetch(d+192);
}

static int mac_to_intf(struct pasemi_mac *mac)
{
        struct pci_dev *pdev = mac->pdev;
        u32 tmp;
        int nintf, off, i, j;
        int devfn = pdev->devfn;

        tmp = read_dma_reg(PAS_DMA_CAP_IFI);
        nintf = (tmp & PAS_DMA_CAP_IFI_NIN_M) >> PAS_DMA_CAP_IFI_NIN_S;
        off = (tmp & PAS_DMA_CAP_IFI_IOFF_M) >> PAS_DMA_CAP_IFI_IOFF_S;

        /* IOFF contains the offset to the registers containing the
         * DMA interface-to-MAC-pci-id mappings, and NIN contains the
         * total number of interfaces. Each register contains 4 devfns.
         * Just do a linear search until we find the devfn of the MAC
         * we're trying to look up.
         */
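        /* For example, if NIN reads back as 6, the loop below scans two
         * 32-bit registers (off and off+4); byte j of register i holds the
         * devfn of DMA interface i*4 + j.
         */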

        for (i = 0; i < (nintf+3)/4; i++) {
                tmp = read_dma_reg(off+4*i);
                for (j = 0; j < 4; j++) {
                        if (((tmp >> (8*j)) & 0xff) == devfn)
                                return i*4 + j;
                }
        }
        return -1;
}

static void pasemi_mac_intf_disable(struct pasemi_mac *mac)
{
        unsigned int flags;

        flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG);
        flags &= ~PAS_MAC_CFG_PCFG_PE;
        write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags);
}

static void pasemi_mac_intf_enable(struct pasemi_mac *mac)
{
        unsigned int flags;

        flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG);
        flags |= PAS_MAC_CFG_PCFG_PE;
        write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags);
}

static int pasemi_get_mac_addr(struct pasemi_mac *mac)
{
        struct pci_dev *pdev = mac->pdev;
        struct device_node *dn = pci_device_to_OF_node(pdev);
        int len;
        const u8 *maddr;
        u8 addr[ETH_ALEN];

        if (!dn) {
                dev_dbg(&pdev->dev,
                        "No device node for mac, not configuring\n");
                return -ENOENT;
        }

        maddr = of_get_property(dn, "local-mac-address", &len);

        if (maddr && len == ETH_ALEN) {
                memcpy(mac->mac_addr, maddr, ETH_ALEN);
                return 0;
        }

        /* Some old versions of firmware mistakenly use mac-address
         * (and as a string) instead of a byte array in local-mac-address.
         */

        if (maddr == NULL)
                maddr = of_get_property(dn, "mac-address", NULL);

        if (maddr == NULL) {
                dev_warn(&pdev->dev,
                         "no mac address in device tree, not configuring\n");
                return -ENOENT;
        }

        if (!mac_pton(maddr, addr)) {
                dev_warn(&pdev->dev,
                         "can't parse mac address, not configuring\n");
                return -EINVAL;
        }

        memcpy(mac->mac_addr, addr, ETH_ALEN);

        return 0;
}

static int pasemi_mac_set_mac_addr(struct net_device *dev, void *p)
{
        struct pasemi_mac *mac = netdev_priv(dev);
        struct sockaddr *addr = p;
        unsigned int adr0, adr1;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
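
        /* The address is split across two registers: for 00:11:22:33:44:55
         * the code below packs ADR0 = 0x22334455 and the low 16 bits of
         * ADR1 = 0x0011.
         */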
        adr0 = dev->dev_addr[2] << 24 |
               dev->dev_addr[3] << 16 |
               dev->dev_addr[4] << 8 |
               dev->dev_addr[5];
        adr1 = read_mac_reg(mac, PAS_MAC_CFG_ADR1);
        adr1 &= ~0xffff;
        adr1 |= dev->dev_addr[0] << 8 | dev->dev_addr[1];

        pasemi_mac_intf_disable(mac);
        write_mac_reg(mac, PAS_MAC_CFG_ADR0, adr0);
        write_mac_reg(mac, PAS_MAC_CFG_ADR1, adr1);
        pasemi_mac_intf_enable(mac);

        return 0;
}

static int pasemi_mac_unmap_tx_skb(struct pasemi_mac *mac,
                                   const int nfrags,
                                   struct sk_buff *skb,
                                   const dma_addr_t *dmas)
{
        int f;
        struct pci_dev *pdev = mac->dma_pdev;

        pci_unmap_single(pdev, dmas[0], skb_headlen(skb), PCI_DMA_TODEVICE);

        for (f = 0; f < nfrags; f++) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];

                pci_unmap_page(pdev, dmas[f+1], skb_frag_size(frag), PCI_DMA_TODEVICE);
        }
        dev_kfree_skb_irq(skb);

        /* Freed descriptor slot + main SKB ptr + nfrags additional ptrs,
         * rounded up to an even number of ring entries.
         */
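        /* e.g. a linear skb (nfrags == 0) frees 2 ring entries, a 1- or
         * 2-fragment skb frees 4, and a 3-fragment skb frees 6; the TX ring
         * is always filled with an even number of entries, so the count is
         * rounded up to match.
         */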
        return (nfrags + 3) & ~1;
}

static struct pasemi_mac_csring *pasemi_mac_setup_csring(struct pasemi_mac *mac)
{
        struct pasemi_mac_csring *ring;
        u32 val;
        unsigned int cfg;
        int chno;

        ring = pasemi_dma_alloc_chan(TXCHAN, sizeof(struct pasemi_mac_csring),
                                     offsetof(struct pasemi_mac_csring, chan));

        if (!ring) {
                dev_err(&mac->pdev->dev, "Can't allocate checksum channel\n");
                goto out_chan;
        }

        chno = ring->chan.chno;

        ring->size = CS_RING_SIZE;
        ring->next_to_fill = 0;

        /* Allocate descriptors */
        if (pasemi_dma_alloc_ring(&ring->chan, CS_RING_SIZE))
                goto out_ring_desc;

        write_dma_reg(PAS_DMA_TXCHAN_BASEL(chno),
                      PAS_DMA_TXCHAN_BASEL_BRBL(ring->chan.ring_dma));
        val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->chan.ring_dma >> 32);
        val |= PAS_DMA_TXCHAN_BASEU_SIZ(CS_RING_SIZE >> 3);

        write_dma_reg(PAS_DMA_TXCHAN_BASEU(chno), val);

        ring->events[0] = pasemi_dma_alloc_flag();
        ring->events[1] = pasemi_dma_alloc_flag();
        if (ring->events[0] < 0 || ring->events[1] < 0)
                goto out_flags;

        pasemi_dma_clear_flag(ring->events[0]);
        pasemi_dma_clear_flag(ring->events[1]);

        ring->fun = pasemi_dma_alloc_fun();
        if (ring->fun < 0)
                goto out_fun;

        cfg = PAS_DMA_TXCHAN_CFG_TY_FUNC | PAS_DMA_TXCHAN_CFG_UP |
              PAS_DMA_TXCHAN_CFG_TATTR(ring->fun) |
              PAS_DMA_TXCHAN_CFG_LPSQ | PAS_DMA_TXCHAN_CFG_LPDQ;

        if (translation_enabled())
                cfg |= PAS_DMA_TXCHAN_CFG_TRD | PAS_DMA_TXCHAN_CFG_TRR;

        write_dma_reg(PAS_DMA_TXCHAN_CFG(chno), cfg);

        /* enable channel */
        pasemi_dma_start_chan(&ring->chan, PAS_DMA_TXCHAN_TCMDSTA_SZ |
                              PAS_DMA_TXCHAN_TCMDSTA_DB |
                              PAS_DMA_TXCHAN_TCMDSTA_DE |
                              PAS_DMA_TXCHAN_TCMDSTA_DA);

        return ring;

out_fun:
out_flags:
        if (ring->events[0] >= 0)
                pasemi_dma_free_flag(ring->events[0]);
        if (ring->events[1] >= 0)
                pasemi_dma_free_flag(ring->events[1]);
        pasemi_dma_free_ring(&ring->chan);
out_ring_desc:
        pasemi_dma_free_chan(&ring->chan);
out_chan:

        return NULL;
}

static void pasemi_mac_setup_csrings(struct pasemi_mac *mac)
{
        int i;

        mac->cs[0] = pasemi_mac_setup_csring(mac);
        if (mac->type == MAC_TYPE_XAUI)
                mac->cs[1] = pasemi_mac_setup_csring(mac);
        else
                mac->cs[1] = 0;

        for (i = 0; i < MAX_CS; i++)
                if (mac->cs[i])
                        mac->num_cs++;
}

static void pasemi_mac_free_csring(struct pasemi_mac_csring *csring)
{
        pasemi_dma_stop_chan(&csring->chan);
        pasemi_dma_free_flag(csring->events[0]);
        pasemi_dma_free_flag(csring->events[1]);
        pasemi_dma_free_ring(&csring->chan);
        pasemi_dma_free_chan(&csring->chan);
        pasemi_dma_free_fun(csring->fun);
}

static int pasemi_mac_setup_rx_resources(const struct net_device *dev)
{
        struct pasemi_mac_rxring *ring;
        struct pasemi_mac *mac = netdev_priv(dev);
        int chno;
        unsigned int cfg;

        ring = pasemi_dma_alloc_chan(RXCHAN, sizeof(struct pasemi_mac_rxring),
                                     offsetof(struct pasemi_mac_rxring, chan));

        if (!ring) {
                dev_err(&mac->pdev->dev, "Can't allocate RX channel\n");
                goto out_chan;
        }
        chno = ring->chan.chno;

        spin_lock_init(&ring->lock);

        ring->size = RX_RING_SIZE;
        ring->ring_info = kcalloc(RX_RING_SIZE,
                                  sizeof(struct pasemi_mac_buffer),
                                  GFP_KERNEL);

        if (!ring->ring_info)
                goto out_ring_info;

        /* Allocate descriptors */
        if (pasemi_dma_alloc_ring(&ring->chan, RX_RING_SIZE))
                goto out_ring_desc;

        ring->buffers = dma_zalloc_coherent(&mac->dma_pdev->dev,
                                            RX_RING_SIZE * sizeof(u64),
                                            &ring->buf_dma, GFP_KERNEL);
        if (!ring->buffers)
                goto out_ring_desc;

        write_dma_reg(PAS_DMA_RXCHAN_BASEL(chno),
                      PAS_DMA_RXCHAN_BASEL_BRBL(ring->chan.ring_dma));

        write_dma_reg(PAS_DMA_RXCHAN_BASEU(chno),
                      PAS_DMA_RXCHAN_BASEU_BRBH(ring->chan.ring_dma >> 32) |
                      PAS_DMA_RXCHAN_BASEU_SIZ(RX_RING_SIZE >> 3));

        cfg = PAS_DMA_RXCHAN_CFG_HBU(2);

        if (translation_enabled())
                cfg |= PAS_DMA_RXCHAN_CFG_CTR;

        write_dma_reg(PAS_DMA_RXCHAN_CFG(chno), cfg);

        write_dma_reg(PAS_DMA_RXINT_BASEL(mac->dma_if),
                      PAS_DMA_RXINT_BASEL_BRBL(ring->buf_dma));

        write_dma_reg(PAS_DMA_RXINT_BASEU(mac->dma_if),
                      PAS_DMA_RXINT_BASEU_BRBH(ring->buf_dma >> 32) |
                      PAS_DMA_RXINT_BASEU_SIZ(RX_RING_SIZE >> 3));

        cfg = PAS_DMA_RXINT_CFG_DHL(2) | PAS_DMA_RXINT_CFG_L2 |
              PAS_DMA_RXINT_CFG_LW | PAS_DMA_RXINT_CFG_RBP |
              PAS_DMA_RXINT_CFG_HEN;

        if (translation_enabled())
                cfg |= PAS_DMA_RXINT_CFG_ITRR | PAS_DMA_RXINT_CFG_ITR;

        write_dma_reg(PAS_DMA_RXINT_CFG(mac->dma_if), cfg);

        ring->next_to_fill = 0;
        ring->next_to_clean = 0;
        ring->mac = mac;
        mac->rx = ring;

        return 0;

out_ring_desc:
        kfree(ring->ring_info);
out_ring_info:
        pasemi_dma_free_chan(&ring->chan);
out_chan:
        return -ENOMEM;
}

static struct pasemi_mac_txring *
pasemi_mac_setup_tx_resources(const struct net_device *dev)
{
        struct pasemi_mac *mac = netdev_priv(dev);
        u32 val;
        struct pasemi_mac_txring *ring;
        unsigned int cfg;
        int chno;

        ring = pasemi_dma_alloc_chan(TXCHAN, sizeof(struct pasemi_mac_txring),
                                     offsetof(struct pasemi_mac_txring, chan));

        if (!ring) {
                dev_err(&mac->pdev->dev, "Can't allocate TX channel\n");
                goto out_chan;
        }

        chno = ring->chan.chno;

        spin_lock_init(&ring->lock);

        ring->size = TX_RING_SIZE;
        ring->ring_info = kcalloc(TX_RING_SIZE,
                                  sizeof(struct pasemi_mac_buffer),
                                  GFP_KERNEL);
        if (!ring->ring_info)
                goto out_ring_info;

        /* Allocate descriptors */
        if (pasemi_dma_alloc_ring(&ring->chan, TX_RING_SIZE))
                goto out_ring_desc;

        write_dma_reg(PAS_DMA_TXCHAN_BASEL(chno),
                      PAS_DMA_TXCHAN_BASEL_BRBL(ring->chan.ring_dma));
        val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->chan.ring_dma >> 32);
        val |= PAS_DMA_TXCHAN_BASEU_SIZ(TX_RING_SIZE >> 3);

        write_dma_reg(PAS_DMA_TXCHAN_BASEU(chno), val);

        cfg = PAS_DMA_TXCHAN_CFG_TY_IFACE |
              PAS_DMA_TXCHAN_CFG_TATTR(mac->dma_if) |
              PAS_DMA_TXCHAN_CFG_UP |
              PAS_DMA_TXCHAN_CFG_WT(4);

        if (translation_enabled())
                cfg |= PAS_DMA_TXCHAN_CFG_TRD | PAS_DMA_TXCHAN_CFG_TRR;

        write_dma_reg(PAS_DMA_TXCHAN_CFG(chno), cfg);

        ring->next_to_fill = 0;
        ring->next_to_clean = 0;
        ring->mac = mac;

        return ring;

out_ring_desc:
        kfree(ring->ring_info);
out_ring_info:
        pasemi_dma_free_chan(&ring->chan);
out_chan:
        return NULL;
}

static void pasemi_mac_free_tx_resources(struct pasemi_mac *mac)
{
        struct pasemi_mac_txring *txring = tx_ring(mac);
        unsigned int i, j;
        struct pasemi_mac_buffer *info;
        dma_addr_t dmas[MAX_SKB_FRAGS+1];
        int freed, nfrags;
        int start, limit;

        start = txring->next_to_clean;
        limit = txring->next_to_fill;

        /* Compensate for when fill has wrapped and clean has not */
        if (start > limit)
                limit += TX_RING_SIZE;

        for (i = start; i < limit; i += freed) {
                info = &txring->ring_info[(i+1) & (TX_RING_SIZE-1)];
                if (info->dma && info->skb) {
                        nfrags = skb_shinfo(info->skb)->nr_frags;
                        for (j = 0; j <= nfrags; j++)
                                dmas[j] = txring->ring_info[(i+1+j) &
                                                (TX_RING_SIZE-1)].dma;
                        freed = pasemi_mac_unmap_tx_skb(mac, nfrags,
                                                        info->skb, dmas);
                } else {
                        freed = 2;
                }
        }

        kfree(txring->ring_info);
        pasemi_dma_free_chan(&txring->chan);
}

static void pasemi_mac_free_rx_buffers(struct pasemi_mac *mac)
{
        struct pasemi_mac_rxring *rx = rx_ring(mac);
        unsigned int i;
        struct pasemi_mac_buffer *info;

        for (i = 0; i < RX_RING_SIZE; i++) {
                info = &RX_DESC_INFO(rx, i);
                if (info->skb && info->dma) {
                        pci_unmap_single(mac->dma_pdev,
                                         info->dma,
                                         info->skb->len,
                                         PCI_DMA_FROMDEVICE);
                        dev_kfree_skb_any(info->skb);
                }
                info->dma = 0;
                info->skb = NULL;
        }

        for (i = 0; i < RX_RING_SIZE; i++)
                RX_BUFF(rx, i) = 0;
}

static void pasemi_mac_free_rx_resources(struct pasemi_mac *mac)
{
        pasemi_mac_free_rx_buffers(mac);

        dma_free_coherent(&mac->dma_pdev->dev, RX_RING_SIZE * sizeof(u64),
                          rx_ring(mac)->buffers, rx_ring(mac)->buf_dma);

        kfree(rx_ring(mac)->ring_info);
        pasemi_dma_free_chan(&rx_ring(mac)->chan);
        mac->rx = NULL;
}

static void pasemi_mac_replenish_rx_ring(struct net_device *dev,
                                         const int limit)
{
        const struct pasemi_mac *mac = netdev_priv(dev);
        struct pasemi_mac_rxring *rx = rx_ring(mac);
        int fill, count;

        if (limit <= 0)
                return;

        fill = rx_ring(mac)->next_to_fill;
        for (count = 0; count < limit; count++) {
                struct pasemi_mac_buffer *info = &RX_DESC_INFO(rx, fill);
                u64 *buff = &RX_BUFF(rx, fill);
                struct sk_buff *skb;
                dma_addr_t dma;

                /* Entry in use? */
                WARN_ON(*buff);

                skb = netdev_alloc_skb(dev, mac->bufsz);
                if (unlikely(!skb))
                        break;

                skb_reserve(skb, LOCAL_SKB_ALIGN);

                dma = pci_map_single(mac->dma_pdev, skb->data,
                                     mac->bufsz - LOCAL_SKB_ALIGN,
                                     PCI_DMA_FROMDEVICE);

                if (unlikely(pci_dma_mapping_error(mac->dma_pdev, dma))) {
                        /* Free the skb we just allocated; it was never
                         * attached to the ring.
                         */
                        dev_kfree_skb_irq(skb);
                        break;
                }

                info->skb = skb;
                info->dma = dma;
                *buff = XCT_RXB_LEN(mac->bufsz) | XCT_RXB_ADDR(dma);
                fill++;
        }

        wmb();

        write_dma_reg(PAS_DMA_RXINT_INCR(mac->dma_if), count);

        rx_ring(mac)->next_to_fill = (rx_ring(mac)->next_to_fill + count) &
                                (RX_RING_SIZE - 1);
}

static void pasemi_mac_restart_rx_intr(const struct pasemi_mac *mac)
{
        struct pasemi_mac_rxring *rx = rx_ring(mac);
        unsigned int reg, pcnt;

        /* Re-enable packet count interrupts: finally
         * ack the packet count interrupt we got in rx_intr.
         */

        pcnt = *rx->chan.status & PAS_STATUS_PCNT_M;

        reg = PAS_IOB_DMA_RXCH_RESET_PCNT(pcnt) | PAS_IOB_DMA_RXCH_RESET_PINTC;

        if (*rx->chan.status & PAS_STATUS_TIMER)
                reg |= PAS_IOB_DMA_RXCH_RESET_TINTC;

        write_iob_reg(PAS_IOB_DMA_RXCH_RESET(mac->rx->chan.chno), reg);
}

static void pasemi_mac_restart_tx_intr(const struct pasemi_mac *mac)
{
        unsigned int reg, pcnt;

        /* Re-enable packet count interrupts */
        pcnt = *tx_ring(mac)->chan.status & PAS_STATUS_PCNT_M;

        reg = PAS_IOB_DMA_TXCH_RESET_PCNT(pcnt) | PAS_IOB_DMA_TXCH_RESET_PINTC;

        write_iob_reg(PAS_IOB_DMA_TXCH_RESET(tx_ring(mac)->chan.chno), reg);
}

static inline void pasemi_mac_rx_error(const struct pasemi_mac *mac,
                                       const u64 macrx)
{
        unsigned int rcmdsta, ccmdsta;
        struct pasemi_dmachan *chan = &rx_ring(mac)->chan;

        if (!netif_msg_rx_err(mac))
                return;

        rcmdsta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if));
        ccmdsta = read_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(chan->chno));

        printk(KERN_ERR "pasemi_mac: rx error. macrx %016llx, rx status %llx\n",
               macrx, *chan->status);

        printk(KERN_ERR "pasemi_mac: rcmdsta %08x ccmdsta %08x\n",
               rcmdsta, ccmdsta);
}

static inline void pasemi_mac_tx_error(const struct pasemi_mac *mac,
                                       const u64 mactx)
{
        unsigned int cmdsta;
        struct pasemi_dmachan *chan = &tx_ring(mac)->chan;

        if (!netif_msg_tx_err(mac))
                return;

        cmdsta = read_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(chan->chno));

        printk(KERN_ERR "pasemi_mac: tx error. mactx 0x%016llx, "
               "tx status 0x%016llx\n", mactx, *chan->status);

        printk(KERN_ERR "pasemi_mac: tcmdsta 0x%08x\n", cmdsta);
}

static int pasemi_mac_clean_rx(struct pasemi_mac_rxring *rx,
                               const int limit)
{
        const struct pasemi_dmachan *chan = &rx->chan;
        struct pasemi_mac *mac = rx->mac;
        struct pci_dev *pdev = mac->dma_pdev;
        unsigned int n;
        int count, buf_index, tot_bytes, packets;
        struct pasemi_mac_buffer *info;
        struct sk_buff *skb;
        unsigned int len;
        u64 macrx, eval;
        dma_addr_t dma;

        tot_bytes = 0;
        packets = 0;

        spin_lock(&rx->lock);

        n = rx->next_to_clean;

        prefetch(&RX_DESC(rx, n));

        for (count = 0; count < limit; count++) {
                macrx = RX_DESC(rx, n);
                prefetch(&RX_DESC(rx, n+4));

                if ((macrx & XCT_MACRX_E) ||
                    (*chan->status & PAS_STATUS_ERROR))
                        pasemi_mac_rx_error(mac, macrx);

                if (!(macrx & XCT_MACRX_O))
                        break;

                info = NULL;

                BUG_ON(!(macrx & XCT_MACRX_RR_8BRES));

                eval = (RX_DESC(rx, n+1) & XCT_RXRES_8B_EVAL_M) >>
                        XCT_RXRES_8B_EVAL_S;
                buf_index = eval-1;

                dma = (RX_DESC(rx, n+2) & XCT_PTR_ADDR_M);
                info = &RX_DESC_INFO(rx, buf_index);

                skb = info->skb;

                prefetch_skb(skb);

                len = (macrx & XCT_MACRX_LLEN_M) >> XCT_MACRX_LLEN_S;

                pci_unmap_single(pdev, dma, mac->bufsz - LOCAL_SKB_ALIGN,
                                 PCI_DMA_FROMDEVICE);

                if (macrx & XCT_MACRX_CRC) {
                        /* CRC error flagged */
                        mac->netdev->stats.rx_errors++;
                        mac->netdev->stats.rx_crc_errors++;
                        /* No need to free skb, it'll be reused */
                        goto next;
                }

                info->skb = NULL;
                info->dma = 0;

                if (likely((macrx & XCT_MACRX_HTY_M) == XCT_MACRX_HTY_IPV4_OK)) {
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                        skb->csum = (macrx & XCT_MACRX_CSUM_M) >>
                                XCT_MACRX_CSUM_S;
                } else {
                        skb_checksum_none_assert(skb);
                }

                packets++;
                tot_bytes += len;

                /* Don't include CRC */
                skb_put(skb, len-4);

                skb->protocol = eth_type_trans(skb, mac->netdev);
                napi_gro_receive(&mac->napi, skb);

next:
                RX_DESC(rx, n) = 0;
                RX_DESC(rx, n+1) = 0;

                /* Need to zero it out since hardware doesn't; the replenish
                 * loop uses it to tell when a slot is free.
                 */
                RX_BUFF(rx, buf_index) = 0;

                n += 4;
        }

        if (n > RX_RING_SIZE) {
                /* Errata 5971 workaround: L2 target of headers */
                write_iob_reg(PAS_IOB_COM_PKTHDRCNT, 0);
                n &= (RX_RING_SIZE-1);
        }

        rx_ring(mac)->next_to_clean = n;

        /* Increase is in number of 16-byte entries, and since each descriptor
         * with an 8BRES takes up 3x8 bytes (padded to 4x8), increase with
         * count*2.
         */
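        /* Each completed packet consumed four 8-byte ring words above
         * (n += 4), i.e. two 16-byte entries, hence count << 1 below.
         */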
        write_dma_reg(PAS_DMA_RXCHAN_INCR(mac->rx->chan.chno), count << 1);

        pasemi_mac_replenish_rx_ring(mac->netdev, count);

        mac->netdev->stats.rx_bytes += tot_bytes;
        mac->netdev->stats.rx_packets += packets;

        spin_unlock(&rx_ring(mac)->lock);

        return count;
}

/* Can't make this too large or we blow the kernel stack limits */
#define TX_CLEAN_BATCHSIZE (128/MAX_SKB_FRAGS)
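
/* Rough sizing: with 4 KiB pages MAX_SKB_FRAGS is typically 17, so a batch is
 * 128/17 = 7 skbs and the on-stack dmas[][] array in pasemi_mac_clean_tx is
 * about 7 * 18 * sizeof(dma_addr_t), i.e. roughly 1 KiB.
 */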

static int pasemi_mac_clean_tx(struct pasemi_mac_txring *txring)
{
        struct pasemi_dmachan *chan = &txring->chan;
        struct pasemi_mac *mac = txring->mac;
        int i, j;
        unsigned int start, descr_count, buf_count, batch_limit;
        unsigned int ring_limit;
        unsigned int total_count;
        unsigned long flags;
        struct sk_buff *skbs[TX_CLEAN_BATCHSIZE];
        dma_addr_t dmas[TX_CLEAN_BATCHSIZE][MAX_SKB_FRAGS+1];
        int nf[TX_CLEAN_BATCHSIZE];
        int nr_frags;

        total_count = 0;
        batch_limit = TX_CLEAN_BATCHSIZE;
restart:
        spin_lock_irqsave(&txring->lock, flags);

        start = txring->next_to_clean;
        ring_limit = txring->next_to_fill;

        prefetch(&TX_DESC_INFO(txring, start+1).skb);

        /* Compensate for when fill has wrapped but clean has not */
        if (start > ring_limit)
                ring_limit += TX_RING_SIZE;

        buf_count = 0;
        descr_count = 0;

        for (i = start;
             descr_count < batch_limit && i < ring_limit;
             i += buf_count) {
                u64 mactx = TX_DESC(txring, i);
                struct sk_buff *skb;

                if ((mactx & XCT_MACTX_E) ||
                    (*chan->status & PAS_STATUS_ERROR))
                        pasemi_mac_tx_error(mac, mactx);

                /* Skip over control descriptors */
                if (!(mactx & XCT_MACTX_LLEN_M)) {
                        TX_DESC(txring, i) = 0;
                        TX_DESC(txring, i+1) = 0;
                        buf_count = 2;
                        continue;
                }

                skb = TX_DESC_INFO(txring, i+1).skb;
                nr_frags = TX_DESC_INFO(txring, i).dma;

                if (unlikely(mactx & XCT_MACTX_O))
                        /* Not yet transmitted */
                        break;

                buf_count = 2 + nr_frags;
                /* Since we always fill with an even number of entries, make
                 * sure we skip any unused one at the end as well.
                 */
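                /* e.g. nr_frags == 1 gives buf_count 3, bumped to 4 below so
                 * the unused padding slot left by the fill side is skipped
                 * as well.
                 */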
|
|
|
|
if (buf_count & 1)
|
|
|
|
buf_count++;
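		/* Editor's example: an skb with 3 fragments takes 2 + 3 = 5
		 * ring entries and is padded to 6 here, matching the even
		 * fill policy noted above.
		 */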

		for (j = 0; j <= nr_frags; j++)
			dmas[descr_count][j] = TX_DESC_INFO(txring, i+1+j).dma;

		skbs[descr_count] = skb;
		nf[descr_count] = nr_frags;

		TX_DESC(txring, i) = 0;
		TX_DESC(txring, i+1) = 0;

		descr_count++;
	}
	txring->next_to_clean = i & (TX_RING_SIZE-1);

	spin_unlock_irqrestore(&txring->lock, flags);
	netif_wake_queue(mac->netdev);

	for (i = 0; i < descr_count; i++)
		pasemi_mac_unmap_tx_skb(mac, nf[i], skbs[i], dmas[i]);

	total_count += descr_count;

	/* If the batch was full, try to clean more */
	if (descr_count == batch_limit)
		goto restart;

	return total_count;
}

static irqreturn_t pasemi_mac_rx_intr(int irq, void *data)
{
	const struct pasemi_mac_rxring *rxring = data;
	struct pasemi_mac *mac = rxring->mac;
	const struct pasemi_dmachan *chan = &rxring->chan;
	unsigned int reg;

	if (!(*chan->status & PAS_STATUS_CAUSE_M))
		return IRQ_NONE;

	/* Don't reset packet count so it won't fire again but clear
	 * all others.
	 */

	reg = 0;
	if (*chan->status & PAS_STATUS_SOFT)
		reg |= PAS_IOB_DMA_RXCH_RESET_SINTC;
	if (*chan->status & PAS_STATUS_ERROR)
		reg |= PAS_IOB_DMA_RXCH_RESET_DINTC;

	napi_schedule(&mac->napi);

	write_iob_reg(PAS_IOB_DMA_RXCH_RESET(chan->chno), reg);

	return IRQ_HANDLED;
}

#define TX_CLEAN_INTERVAL HZ
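/* Editor's note: the clean timer below re-arms itself every
 * TX_CLEAN_INTERVAL (roughly one second), while pasemi_mac_tx_intr()
 * pushes the next run out to twice that whenever interrupt-driven
 * cleaning is already keeping up.
 */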

static void pasemi_mac_tx_timer(struct timer_list *t)
{
	struct pasemi_mac_txring *txring = from_timer(txring, t, clean_timer);
	struct pasemi_mac *mac = txring->mac;

	pasemi_mac_clean_tx(txring);

	mod_timer(&txring->clean_timer, jiffies + TX_CLEAN_INTERVAL);

	pasemi_mac_restart_tx_intr(mac);
}

static irqreturn_t pasemi_mac_tx_intr(int irq, void *data)
{
	struct pasemi_mac_txring *txring = data;
	const struct pasemi_dmachan *chan = &txring->chan;
	struct pasemi_mac *mac = txring->mac;
	unsigned int reg;

	if (!(*chan->status & PAS_STATUS_CAUSE_M))
		return IRQ_NONE;

	reg = 0;

	if (*chan->status & PAS_STATUS_SOFT)
		reg |= PAS_IOB_DMA_TXCH_RESET_SINTC;
	if (*chan->status & PAS_STATUS_ERROR)
		reg |= PAS_IOB_DMA_TXCH_RESET_DINTC;

	mod_timer(&txring->clean_timer, jiffies + (TX_CLEAN_INTERVAL)*2);

	napi_schedule(&mac->napi);

	if (reg)
		write_iob_reg(PAS_IOB_DMA_TXCH_RESET(chan->chno), reg);

	return IRQ_HANDLED;
}

static void pasemi_adjust_link(struct net_device *dev)
{
	struct pasemi_mac *mac = netdev_priv(dev);
	int msg;
	unsigned int flags;
	unsigned int new_flags;

	if (!dev->phydev->link) {
		/* If no link, MAC speed settings don't matter. Just report
		 * link down and return.
		 */
		if (mac->link && netif_msg_link(mac))
			printk(KERN_INFO "%s: Link is down.\n", dev->name);

		netif_carrier_off(dev);
		pasemi_mac_intf_disable(mac);
		mac->link = 0;

		return;
	} else {
		pasemi_mac_intf_enable(mac);
		netif_carrier_on(dev);
	}

	flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG);
	new_flags = flags & ~(PAS_MAC_CFG_PCFG_HD | PAS_MAC_CFG_PCFG_SPD_M |
			      PAS_MAC_CFG_PCFG_TSR_M);

	if (!dev->phydev->duplex)
		new_flags |= PAS_MAC_CFG_PCFG_HD;

	switch (dev->phydev->speed) {
	case 1000:
		new_flags |= PAS_MAC_CFG_PCFG_SPD_1G |
			     PAS_MAC_CFG_PCFG_TSR_1G;
		break;
	case 100:
		new_flags |= PAS_MAC_CFG_PCFG_SPD_100M |
			     PAS_MAC_CFG_PCFG_TSR_100M;
		break;
	case 10:
		new_flags |= PAS_MAC_CFG_PCFG_SPD_10M |
			     PAS_MAC_CFG_PCFG_TSR_10M;
		break;
	default:
		printk("Unsupported speed %d\n", dev->phydev->speed);
	}

	/* Print on link or speed/duplex change */
	msg = mac->link != dev->phydev->link || flags != new_flags;

	mac->duplex = dev->phydev->duplex;
	mac->speed = dev->phydev->speed;
	mac->link = dev->phydev->link;

	if (new_flags != flags)
		write_mac_reg(mac, PAS_MAC_CFG_PCFG, new_flags);

	if (msg && netif_msg_link(mac))
		printk(KERN_INFO "%s: Link is up at %d Mbps, %s duplex.\n",
		       dev->name, mac->speed, mac->duplex ? "full" : "half");
}

static int pasemi_mac_phy_init(struct net_device *dev)
{
	struct pasemi_mac *mac = netdev_priv(dev);
	struct device_node *dn, *phy_dn;
	struct phy_device *phydev;

	dn = pci_device_to_OF_node(mac->pdev);
	phy_dn = of_parse_phandle(dn, "phy-handle", 0);
	of_node_put(phy_dn);

	mac->link = 0;
	mac->speed = 0;
	mac->duplex = -1;

	phydev = of_phy_connect(dev, phy_dn, &pasemi_adjust_link, 0,
				PHY_INTERFACE_MODE_SGMII);

	if (!phydev) {
		printk(KERN_ERR "%s: Could not attach to phy\n", dev->name);
		return -ENODEV;
	}

	return 0;
}

static int pasemi_mac_open(struct net_device *dev)
{
	struct pasemi_mac *mac = netdev_priv(dev);
	unsigned int flags;
	int i, ret;

	flags = PAS_MAC_CFG_TXP_FCE | PAS_MAC_CFG_TXP_FPC(3) |
		PAS_MAC_CFG_TXP_SL(3) | PAS_MAC_CFG_TXP_COB(0xf) |
		PAS_MAC_CFG_TXP_TIFT(8) | PAS_MAC_CFG_TXP_TIFG(12);

	write_mac_reg(mac, PAS_MAC_CFG_TXP, flags);

	ret = pasemi_mac_setup_rx_resources(dev);
	if (ret)
		goto out_rx_resources;

	mac->tx = pasemi_mac_setup_tx_resources(dev);

	if (!mac->tx)
		goto out_tx_ring;

	/* We might already have allocated rings in case mtu was changed
	 * before interface was brought up.
	 */
	if (dev->mtu > 1500 && !mac->num_cs) {
		pasemi_mac_setup_csrings(mac);
		if (!mac->num_cs)
			goto out_tx_ring;
	}

	/* Zero out rmon counters */
	for (i = 0; i < 32; i++)
		write_mac_reg(mac, PAS_MAC_RMON(i), 0);

	/* 0x3ff with 33MHz clock is about 31us */
	write_iob_reg(PAS_IOB_DMA_COM_TIMEOUTCFG,
		      PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(0x3ff));
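	/* Editor's check: 0x3ff is 1023 ticks, and 1023 / 33,000,000 s is
	 * roughly 31 us, which is where the figure in the comment above
	 * comes from.
	 */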

	write_iob_reg(PAS_IOB_DMA_RXCH_CFG(mac->rx->chan.chno),
		      PAS_IOB_DMA_RXCH_CFG_CNTTH(256));

	write_iob_reg(PAS_IOB_DMA_TXCH_CFG(mac->tx->chan.chno),
		      PAS_IOB_DMA_TXCH_CFG_CNTTH(32));

	write_mac_reg(mac, PAS_MAC_IPC_CHNL,
		      PAS_MAC_IPC_CHNL_DCHNO(mac->rx->chan.chno) |
		      PAS_MAC_IPC_CHNL_BCH(mac->rx->chan.chno));

	/* enable rx if */
	write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
		      PAS_DMA_RXINT_RCMDSTA_EN |
		      PAS_DMA_RXINT_RCMDSTA_DROPS_M |
		      PAS_DMA_RXINT_RCMDSTA_BP |
		      PAS_DMA_RXINT_RCMDSTA_OO |
		      PAS_DMA_RXINT_RCMDSTA_BT);

	/* enable rx channel */
	pasemi_dma_start_chan(&rx_ring(mac)->chan, PAS_DMA_RXCHAN_CCMDSTA_DU |
						   PAS_DMA_RXCHAN_CCMDSTA_OD |
						   PAS_DMA_RXCHAN_CCMDSTA_FD |
						   PAS_DMA_RXCHAN_CCMDSTA_DT);

	/* enable tx channel */
	pasemi_dma_start_chan(&tx_ring(mac)->chan, PAS_DMA_TXCHAN_TCMDSTA_SZ |
						   PAS_DMA_TXCHAN_TCMDSTA_DB |
						   PAS_DMA_TXCHAN_TCMDSTA_DE |
						   PAS_DMA_TXCHAN_TCMDSTA_DA);

	pasemi_mac_replenish_rx_ring(dev, RX_RING_SIZE);

	write_dma_reg(PAS_DMA_RXCHAN_INCR(rx_ring(mac)->chan.chno),
		      RX_RING_SIZE>>1);

	/* Clear out any residual packet count state from firmware */
	pasemi_mac_restart_rx_intr(mac);
	pasemi_mac_restart_tx_intr(mac);

	flags = PAS_MAC_CFG_PCFG_S1 | PAS_MAC_CFG_PCFG_PR | PAS_MAC_CFG_PCFG_CE;

	if (mac->type == MAC_TYPE_GMAC)
		flags |= PAS_MAC_CFG_PCFG_TSR_1G | PAS_MAC_CFG_PCFG_SPD_1G;
	else
		flags |= PAS_MAC_CFG_PCFG_TSR_10G | PAS_MAC_CFG_PCFG_SPD_10G;

	/* Enable interface in MAC */
	write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags);

	ret = pasemi_mac_phy_init(dev);
	if (ret) {
		/* Since we won't get link notification, just enable RX */
		pasemi_mac_intf_enable(mac);
		if (mac->type == MAC_TYPE_GMAC) {
			/* Warn for missing PHY on SGMII (1Gig) ports */
			dev_warn(&mac->pdev->dev,
				 "PHY init failed: %d.\n", ret);
			dev_warn(&mac->pdev->dev,
				 "Defaulting to 1Gbit full duplex\n");
		}
	}

	netif_start_queue(dev);
	napi_enable(&mac->napi);

	snprintf(mac->tx_irq_name, sizeof(mac->tx_irq_name), "%s tx",
		 dev->name);

	ret = request_irq(mac->tx->chan.irq, pasemi_mac_tx_intr, 0,
			  mac->tx_irq_name, mac->tx);
	if (ret) {
		dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n",
			mac->tx->chan.irq, ret);
		goto out_tx_int;
	}

	snprintf(mac->rx_irq_name, sizeof(mac->rx_irq_name), "%s rx",
		 dev->name);

	ret = request_irq(mac->rx->chan.irq, pasemi_mac_rx_intr, 0,
			  mac->rx_irq_name, mac->rx);
	if (ret) {
		dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n",
			mac->rx->chan.irq, ret);
		goto out_rx_int;
	}

	if (dev->phydev)
		phy_start(dev->phydev);

	timer_setup(&mac->tx->clean_timer, pasemi_mac_tx_timer, 0);
	mod_timer(&mac->tx->clean_timer, jiffies + HZ);

	return 0;

out_rx_int:
	free_irq(mac->tx->chan.irq, mac->tx);
out_tx_int:
	napi_disable(&mac->napi);
	netif_stop_queue(dev);
out_tx_ring:
	if (mac->tx)
		pasemi_mac_free_tx_resources(mac);
	pasemi_mac_free_rx_resources(mac);
out_rx_resources:

	return ret;
}

#define MAX_RETRIES 5000
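/* Editor's note: the pause helpers below poll the channel/interface status
 * up to MAX_RETRIES times with cond_resched() between reads, so the wait is
 * bounded by iteration count rather than by wall-clock time.
 */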

static void pasemi_mac_pause_txchan(struct pasemi_mac *mac)
{
	unsigned int sta, retries;
	int txch = tx_ring(mac)->chan.chno;

	write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch),
		      PAS_DMA_TXCHAN_TCMDSTA_ST);

	for (retries = 0; retries < MAX_RETRIES; retries++) {
		sta = read_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch));
		if (!(sta & PAS_DMA_TXCHAN_TCMDSTA_ACT))
			break;
		cond_resched();
	}

	if (sta & PAS_DMA_TXCHAN_TCMDSTA_ACT)
		dev_err(&mac->dma_pdev->dev,
			"Failed to stop tx channel, tcmdsta %08x\n", sta);

	write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch), 0);
}

static void pasemi_mac_pause_rxchan(struct pasemi_mac *mac)
{
	unsigned int sta, retries;
	int rxch = rx_ring(mac)->chan.chno;

	write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch),
		      PAS_DMA_RXCHAN_CCMDSTA_ST);
	for (retries = 0; retries < MAX_RETRIES; retries++) {
		sta = read_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch));
		if (!(sta & PAS_DMA_RXCHAN_CCMDSTA_ACT))
			break;
		cond_resched();
	}

	if (sta & PAS_DMA_RXCHAN_CCMDSTA_ACT)
		dev_err(&mac->dma_pdev->dev,
"Failed to stop rx channel, ccmdsta 08%x\n", sta);
	write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch), 0);
}

static void pasemi_mac_pause_rxint(struct pasemi_mac *mac)
{
	unsigned int sta, retries;

	write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
		      PAS_DMA_RXINT_RCMDSTA_ST);
	for (retries = 0; retries < MAX_RETRIES; retries++) {
		sta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if));
		if (!(sta & PAS_DMA_RXINT_RCMDSTA_ACT))
			break;
		cond_resched();
	}

	if (sta & PAS_DMA_RXINT_RCMDSTA_ACT)
		dev_err(&mac->dma_pdev->dev,
			"Failed to stop rx interface, rcmdsta %08x\n", sta);
	write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if), 0);
}

static int pasemi_mac_close(struct net_device *dev)
{
	struct pasemi_mac *mac = netdev_priv(dev);
	unsigned int sta;
	int rxch, txch, i;

	rxch = rx_ring(mac)->chan.chno;
	txch = tx_ring(mac)->chan.chno;

	if (dev->phydev) {
		phy_stop(dev->phydev);
		phy_disconnect(dev->phydev);
	}

	del_timer_sync(&mac->tx->clean_timer);

	netif_stop_queue(dev);
	napi_disable(&mac->napi);

	sta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if));
	if (sta & (PAS_DMA_RXINT_RCMDSTA_BP |
		   PAS_DMA_RXINT_RCMDSTA_OO |
		   PAS_DMA_RXINT_RCMDSTA_BT))
		printk(KERN_DEBUG "pasemi_mac: rcmdsta error: 0x%08x\n", sta);

	sta = read_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch));
	if (sta & (PAS_DMA_RXCHAN_CCMDSTA_DU |
		   PAS_DMA_RXCHAN_CCMDSTA_OD |
		   PAS_DMA_RXCHAN_CCMDSTA_FD |
		   PAS_DMA_RXCHAN_CCMDSTA_DT))
		printk(KERN_DEBUG "pasemi_mac: ccmdsta error: 0x%08x\n", sta);

	sta = read_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch));
	if (sta & (PAS_DMA_TXCHAN_TCMDSTA_SZ | PAS_DMA_TXCHAN_TCMDSTA_DB |
		   PAS_DMA_TXCHAN_TCMDSTA_DE | PAS_DMA_TXCHAN_TCMDSTA_DA))
		printk(KERN_DEBUG "pasemi_mac: tcmdsta error: 0x%08x\n", sta);

	/* Clean out any pending buffers */
	pasemi_mac_clean_tx(tx_ring(mac));
	pasemi_mac_clean_rx(rx_ring(mac), RX_RING_SIZE);

	pasemi_mac_pause_txchan(mac);
	pasemi_mac_pause_rxint(mac);
	pasemi_mac_pause_rxchan(mac);
	pasemi_mac_intf_disable(mac);

	free_irq(mac->tx->chan.irq, mac->tx);
	free_irq(mac->rx->chan.irq, mac->rx);

	for (i = 0; i < mac->num_cs; i++) {
		pasemi_mac_free_csring(mac->cs[i]);
		mac->cs[i] = NULL;
	}

	mac->num_cs = 0;

	/* Free resources */
	pasemi_mac_free_rx_resources(mac);
	pasemi_mac_free_tx_resources(mac);

	return 0;
}
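
/*
 * Build a checksum-offload descriptor chain on a function channel for frames
 * too large for the MAC's inline checksum engine. The chain computes the TCP
 * or UDP checksum over the mapped packet, copies the 2-byte result back into
 * the packet's checksum field, and uses SET/CLR event descriptors to order
 * itself against the MAC TX channel so the frame is not transmitted before
 * the checksum has been written.
 */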
static void pasemi_mac_queue_csdesc(const struct sk_buff *skb,
                                    const dma_addr_t *map,
                                    const unsigned int *map_size,
                                    struct pasemi_mac_txring *txring,
                                    struct pasemi_mac_csring *csring)
{
        u64 fund;
        dma_addr_t cs_dest;
        const int nh_off = skb_network_offset(skb);
        const int nh_len = skb_network_header_len(skb);
        const int nfrags = skb_shinfo(skb)->nr_frags;
        int cs_size, i, fill, hdr, cpyhdr, evt;
        dma_addr_t csdma;

        fund = XCT_FUN_ST | XCT_FUN_RR_8BRES |
               XCT_FUN_O | XCT_FUN_FUN(csring->fun) |
               XCT_FUN_CRM_SIG | XCT_FUN_LLEN(skb->len - nh_off) |
               XCT_FUN_SHL(nh_len >> 2) | XCT_FUN_SE;

        switch (ip_hdr(skb)->protocol) {
        case IPPROTO_TCP:
                fund |= XCT_FUN_SIG_TCP4;
                /* TCP checksum is 16 bytes into the header */
                cs_dest = map[0] + skb_transport_offset(skb) + 16;
                break;
        case IPPROTO_UDP:
                fund |= XCT_FUN_SIG_UDP4;
                /* UDP checksum is 6 bytes into the header */
                cs_dest = map[0] + skb_transport_offset(skb) + 6;
                break;
        default:
                BUG();
        }

        /* Do the checksum offloaded */
        fill = csring->next_to_fill;
        hdr = fill;

        CS_DESC(csring, fill++) = fund;
        /* Room for 8BRES. Checksum result is really 2 bytes into it */
        csdma = csring->chan.ring_dma + (fill & (CS_RING_SIZE-1)) * 8 + 2;
        CS_DESC(csring, fill++) = 0;

        CS_DESC(csring, fill) = XCT_PTR_LEN(map_size[0]-nh_off) | XCT_PTR_ADDR(map[0]+nh_off);
        for (i = 1; i <= nfrags; i++)
                CS_DESC(csring, fill+i) = XCT_PTR_LEN(map_size[i]) | XCT_PTR_ADDR(map[i]);

        fill += i;
        if (fill & 1)
                fill++;

        /* Copy the result into the TCP packet */
        cpyhdr = fill;
        CS_DESC(csring, fill++) = XCT_FUN_O | XCT_FUN_FUN(csring->fun) |
                                  XCT_FUN_LLEN(2) | XCT_FUN_SE;
        CS_DESC(csring, fill++) = XCT_PTR_LEN(2) | XCT_PTR_ADDR(cs_dest) | XCT_PTR_T;
        CS_DESC(csring, fill++) = XCT_PTR_LEN(2) | XCT_PTR_ADDR(csdma);
        fill++;

        evt = !csring->last_event;
        csring->last_event = evt;

        /* Event handshaking with MAC TX */
        CS_DESC(csring, fill++) = CTRL_CMD_T | CTRL_CMD_META_EVT | CTRL_CMD_O |
                                  CTRL_CMD_ETYPE_SET | CTRL_CMD_REG(csring->events[evt]);
        CS_DESC(csring, fill++) = 0;
        CS_DESC(csring, fill++) = CTRL_CMD_T | CTRL_CMD_META_EVT | CTRL_CMD_O |
                                  CTRL_CMD_ETYPE_WCLR | CTRL_CMD_REG(csring->events[!evt]);
        CS_DESC(csring, fill++) = 0;
        csring->next_to_fill = fill & (CS_RING_SIZE-1);

        cs_size = fill - hdr;
        write_dma_reg(PAS_DMA_TXCHAN_INCR(csring->chan.chno), (cs_size) >> 1);

        /* TX-side event handshaking */
        fill = txring->next_to_fill;
        TX_DESC(txring, fill++) = CTRL_CMD_T | CTRL_CMD_META_EVT | CTRL_CMD_O |
                                  CTRL_CMD_ETYPE_WSET | CTRL_CMD_REG(csring->events[evt]);
        TX_DESC(txring, fill++) = 0;
        TX_DESC(txring, fill++) = CTRL_CMD_T | CTRL_CMD_META_EVT | CTRL_CMD_O |
                                  CTRL_CMD_ETYPE_CLR | CTRL_CMD_REG(csring->events[!evt]);
        TX_DESC(txring, fill++) = 0;
        txring->next_to_fill = fill;

        write_dma_reg(PAS_DMA_TXCHAN_INCR(txring->chan.chno), 2);
}
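
/*
 * ndo_start_xmit: DMA-map the skb head and fragments, use the MAC's inline
 * TCP/UDP checksum for short frames or queue descriptors on a checksum ring
 * for long ones, then post one mactx descriptor plus one pointer descriptor
 * per mapping and kick the TX channel. The driver advertises NETIF_F_LLTX,
 * so the ring lock is taken here rather than by the core.
 */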
static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
{
        struct pasemi_mac * const mac = netdev_priv(dev);
        struct pasemi_mac_txring * const txring = tx_ring(mac);
        struct pasemi_mac_csring *csring;
        u64 dflags = 0;
        u64 mactx;
        dma_addr_t map[MAX_SKB_FRAGS+1];
        unsigned int map_size[MAX_SKB_FRAGS+1];
        unsigned long flags;
        int i, nfrags;
        int fill;
        const int nh_off = skb_network_offset(skb);
        const int nh_len = skb_network_header_len(skb);

        prefetch(&txring->ring_info);

        dflags = XCT_MACTX_O | XCT_MACTX_ST | XCT_MACTX_CRC_PAD;

        nfrags = skb_shinfo(skb)->nr_frags;

        map[0] = pci_map_single(mac->dma_pdev, skb->data, skb_headlen(skb),
                                PCI_DMA_TODEVICE);
        map_size[0] = skb_headlen(skb);
        if (pci_dma_mapping_error(mac->dma_pdev, map[0]))
                goto out_err_nolock;

        for (i = 0; i < nfrags; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                map[i + 1] = skb_frag_dma_map(&mac->dma_pdev->dev, frag, 0,
                                              skb_frag_size(frag), DMA_TO_DEVICE);
                map_size[i+1] = skb_frag_size(frag);
                if (dma_mapping_error(&mac->dma_pdev->dev, map[i + 1])) {
                        nfrags = i;
                        goto out_err_nolock;
                }
        }

        if (skb->ip_summed == CHECKSUM_PARTIAL && skb->len <= 1540) {
                switch (ip_hdr(skb)->protocol) {
                case IPPROTO_TCP:
                        dflags |= XCT_MACTX_CSUM_TCP;
                        dflags |= XCT_MACTX_IPH(nh_len >> 2);
                        dflags |= XCT_MACTX_IPO(nh_off);
                        break;
                case IPPROTO_UDP:
                        dflags |= XCT_MACTX_CSUM_UDP;
                        dflags |= XCT_MACTX_IPH(nh_len >> 2);
                        dflags |= XCT_MACTX_IPO(nh_off);
                        break;
                default:
                        WARN_ON(1);
                }
        }

        mactx = dflags | XCT_MACTX_LLEN(skb->len);

        spin_lock_irqsave(&txring->lock, flags);

        /* Avoid stepping on the same cache line that the DMA controller
         * is currently about to send, so leave at least 8 words available.
         * Total free space needed is mactx + fragments + 8
         */
        if (RING_AVAIL(txring) < nfrags + 14) {
                /* no room -- stop the queue and wait for tx intr */
                netif_stop_queue(dev);
                goto out_err;
        }

        /* Queue up checksum + event descriptors, if needed */
        if (mac->num_cs && skb->ip_summed == CHECKSUM_PARTIAL && skb->len > 1540) {
                csring = mac->cs[mac->last_cs];
                mac->last_cs = (mac->last_cs + 1) % mac->num_cs;

                pasemi_mac_queue_csdesc(skb, map, map_size, txring, csring);
        }

        fill = txring->next_to_fill;
        TX_DESC(txring, fill) = mactx;
        TX_DESC_INFO(txring, fill).dma = nfrags;
        fill++;
        TX_DESC_INFO(txring, fill).skb = skb;
        for (i = 0; i <= nfrags; i++) {
                TX_DESC(txring, fill+i) =
                        XCT_PTR_LEN(map_size[i]) | XCT_PTR_ADDR(map[i]);
                TX_DESC_INFO(txring, fill+i).dma = map[i];
        }

        /* We have to add an even number of 8-byte entries to the ring
         * even if the last one is unused. That means always an odd number
         * of pointers + one mactx descriptor.
         */
        if (nfrags & 1)
                nfrags++;

        txring->next_to_fill = (fill + nfrags + 1) & (TX_RING_SIZE-1);

        dev->stats.tx_packets++;
        dev->stats.tx_bytes += skb->len;

        spin_unlock_irqrestore(&txring->lock, flags);

        write_dma_reg(PAS_DMA_TXCHAN_INCR(txring->chan.chno), (nfrags+2) >> 1);

        return NETDEV_TX_OK;

out_err:
        spin_unlock_irqrestore(&txring->lock, flags);
out_err_nolock:
        while (nfrags--)
                pci_unmap_single(mac->dma_pdev, map[nfrags], map_size[nfrags],
                                 PCI_DMA_TODEVICE);

        return NETDEV_TX_BUSY;
}
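
/* RX filter update: only the promiscuous bit is driven from dev->flags. */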
static void pasemi_mac_set_rx_mode(struct net_device *dev)
{
        const struct pasemi_mac *mac = netdev_priv(dev);
        unsigned int flags;

        flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG);

        /* Set promiscuous */
        if (dev->flags & IFF_PROMISC)
                flags |= PAS_MAC_CFG_PCFG_PR;
        else
                flags &= ~PAS_MAC_CFG_PCFG_PR;

        write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags);
}
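
/*
 * NAPI poll: reap completed TX descriptors, then process up to @budget RX
 * packets. If less than the budget was used, polling is complete and the
 * RX/TX interrupts are re-armed.
 */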
static int pasemi_mac_poll(struct napi_struct *napi, int budget)
{
        struct pasemi_mac *mac = container_of(napi, struct pasemi_mac, napi);
        int pkts;

        pasemi_mac_clean_tx(tx_ring(mac));
        pkts = pasemi_mac_clean_rx(rx_ring(mac), budget);
        if (pkts < budget) {
                /* all done, no more packets present */
                napi_complete_done(napi, pkts);

                pasemi_mac_restart_rx_intr(mac);
                pasemi_mac_restart_tx_intr(mac);
        }

        return pkts;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void pasemi_mac_netpoll(struct net_device *dev)
{
        const struct pasemi_mac *mac = netdev_priv(dev);

        disable_irq(mac->tx->chan.irq);
        pasemi_mac_tx_intr(mac->tx->chan.irq, mac->tx);
        enable_irq(mac->tx->chan.irq);

        disable_irq(mac->rx->chan.irq);
        pasemi_mac_rx_intr(mac->rx->chan.irq, mac->rx);
        enable_irq(mac->rx->chan.irq);
}
#endif
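
/*
 * Changing the MTU may mean tearing the RX side down so its ring can be
 * refilled with buffers of the new size, and, for jumbo MTUs, allocating
 * the checksum rings used by the TX offload path.
 */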
static int pasemi_mac_change_mtu(struct net_device *dev, int new_mtu)
{
        struct pasemi_mac *mac = netdev_priv(dev);
        unsigned int reg;
        unsigned int rcmdsta = 0;
        int running;
        int ret = 0;

        running = netif_running(dev);

        if (running) {
                /* Need to stop the interface, clean out all already
                 * received buffers, free all unused buffers on the RX
                 * interface ring, then finally re-fill the rx ring with
                 * the new-size buffers and restart.
                 */

                napi_disable(&mac->napi);
                netif_tx_disable(dev);
                pasemi_mac_intf_disable(mac);

                rcmdsta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if));
                pasemi_mac_pause_rxint(mac);
                pasemi_mac_clean_rx(rx_ring(mac), RX_RING_SIZE);
                pasemi_mac_free_rx_buffers(mac);
        }

        /* Setup checksum channels if large MTU and none already allocated */
        if (new_mtu > PE_DEF_MTU && !mac->num_cs) {
                pasemi_mac_setup_csrings(mac);
                if (!mac->num_cs) {
                        ret = -ENOMEM;
                        goto out;
                }
        }

        /* Change maxf, i.e. what size frames are accepted.
         * Need room for ethernet header and CRC word
         */
        reg = read_mac_reg(mac, PAS_MAC_CFG_MACCFG);
        reg &= ~PAS_MAC_CFG_MACCFG_MAXF_M;
        reg |= PAS_MAC_CFG_MACCFG_MAXF(new_mtu + ETH_HLEN + 4);
        write_mac_reg(mac, PAS_MAC_CFG_MACCFG, reg);

        dev->mtu = new_mtu;
        /* MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */
        mac->bufsz = new_mtu + ETH_HLEN + ETH_FCS_LEN + LOCAL_SKB_ALIGN + 128;

out:
        if (running) {
                write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
                              rcmdsta | PAS_DMA_RXINT_RCMDSTA_EN);

                rx_ring(mac)->next_to_fill = 0;
                pasemi_mac_replenish_rx_ring(dev, RX_RING_SIZE-1);

                napi_enable(&mac->napi);
                netif_start_queue(dev);
                pasemi_mac_intf_enable(mac);
        }

        return ret;
}
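
/* net_device callbacks shared by the GMAC and XAUI flavors of the MAC. */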
static const struct net_device_ops pasemi_netdev_ops = {
        .ndo_open               = pasemi_mac_open,
        .ndo_stop               = pasemi_mac_close,
        .ndo_start_xmit         = pasemi_mac_start_tx,
        .ndo_set_rx_mode        = pasemi_mac_set_rx_mode,
        .ndo_set_mac_address    = pasemi_mac_set_mac_addr,
        .ndo_change_mtu         = pasemi_mac_change_mtu,
        .ndo_validate_addr      = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = pasemi_mac_netpoll,
#endif
};
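
/*
 * Probe one MAC PCI function: allocate the netdev, look up the companion
 * DMA controller and I/O bridge devices, read the MAC address from the
 * device tree, map the MAC to its DMA interface and register the netdev.
 */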
static int
pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        struct net_device *dev;
        struct pasemi_mac *mac;
        int err, ret;

        err = pci_enable_device(pdev);
        if (err)
                return err;

        dev = alloc_etherdev(sizeof(struct pasemi_mac));
        if (dev == NULL) {
                err = -ENOMEM;
                goto out_disable_device;
        }

        pci_set_drvdata(pdev, dev);
        SET_NETDEV_DEV(dev, &pdev->dev);

        mac = netdev_priv(dev);

        mac->pdev = pdev;
        mac->netdev = dev;

        netif_napi_add(dev, &mac->napi, pasemi_mac_poll, 64);

        dev->features = NETIF_F_IP_CSUM | NETIF_F_LLTX | NETIF_F_SG |
                        NETIF_F_HIGHDMA | NETIF_F_GSO;

        mac->dma_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa007, NULL);
        if (!mac->dma_pdev) {
                dev_err(&mac->pdev->dev, "Can't find DMA Controller\n");
                err = -ENODEV;
                goto out;
        }

        mac->iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL);
        if (!mac->iob_pdev) {
                dev_err(&mac->pdev->dev, "Can't find I/O Bridge\n");
                err = -ENODEV;
                goto out;
        }

        /* get mac addr from device tree */
        if (pasemi_get_mac_addr(mac) || !is_valid_ether_addr(mac->mac_addr)) {
                err = -ENODEV;
                goto out;
        }
        memcpy(dev->dev_addr, mac->mac_addr, sizeof(mac->mac_addr));

        ret = mac_to_intf(mac);
        if (ret < 0) {
                dev_err(&mac->pdev->dev, "Can't map DMA interface\n");
                err = -ENODEV;
                goto out;
        }
        mac->dma_if = ret;

        switch (pdev->device) {
        case 0xa005:
                mac->type = MAC_TYPE_GMAC;
                break;
        case 0xa006:
                mac->type = MAC_TYPE_XAUI;
                break;
        default:
                err = -ENODEV;
                goto out;
        }

        dev->netdev_ops = &pasemi_netdev_ops;
        dev->mtu = PE_DEF_MTU;

        /* MTU range: 64 - 9000 */
        dev->min_mtu = PE_MIN_MTU;
        dev->max_mtu = PE_MAX_MTU;

        /* 1500 MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */
        mac->bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + LOCAL_SKB_ALIGN + 128;

        dev->ethtool_ops = &pasemi_mac_ethtool_ops;

        if (err)
                goto out;

        mac->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

        /* Enable most messages by default */
        mac->msg_enable = (NETIF_MSG_IFUP << 1) - 1;

        err = register_netdev(dev);

        if (err) {
                dev_err(&mac->pdev->dev, "register_netdev failed with error %d\n",
                        err);
                goto out;
        } else if (netif_msg_probe(mac)) {
                printk(KERN_INFO "%s: PA Semi %s: intf %d, hw addr %pM\n",
                       dev->name, mac->type == MAC_TYPE_GMAC ? "GMAC" : "XAUI",
                       mac->dma_if, dev->dev_addr);
        }

        return err;

out:
        pci_dev_put(mac->iob_pdev);
        pci_dev_put(mac->dma_pdev);

        free_netdev(dev);
out_disable_device:
        pci_disable_device(pdev);
        return err;
}
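
/*
 * Teardown mirrors probe: unregister the netdev, release the companion
 * devices, free the DMA channels and finally the netdev itself.
 */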
static void pasemi_mac_remove(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct pasemi_mac *mac;

        if (!netdev)
                return;

        mac = netdev_priv(netdev);

        unregister_netdev(netdev);

        pci_disable_device(pdev);
        pci_dev_put(mac->dma_pdev);
        pci_dev_put(mac->iob_pdev);

        pasemi_dma_free_chan(&mac->tx->chan);
        pasemi_dma_free_chan(&mac->rx->chan);

        free_netdev(netdev);
}
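
/* Device IDs matched here: 0xa005 (GMAC) and 0xa006 (XAUI), as decoded in probe. */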
static const struct pci_device_id pasemi_mac_pci_tbl[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa005) },
        { PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa006) },
        { },
};

MODULE_DEVICE_TABLE(pci, pasemi_mac_pci_tbl);

static struct pci_driver pasemi_mac_driver = {
        .name           = "pasemi_mac",
        .id_table       = pasemi_mac_pci_tbl,
        .probe          = pasemi_mac_probe,
        .remove         = pasemi_mac_remove,
};

static void __exit pasemi_mac_cleanup_module(void)
{
        pci_unregister_driver(&pasemi_mac_driver);
}
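
/*
 * The shared pasemi DMA library is brought up first so it is ready before
 * any MAC is probed by the PCI core.
 */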
static int __init pasemi_mac_init_module(void)
{
        int err;

        err = pasemi_dma_init();
        if (err)
                return err;

        return pci_register_driver(&pasemi_mac_driver);
}

module_init(pasemi_mac_init_module);
module_exit(pasemi_mac_cleanup_module);