/*
 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/* This file should not be included directly.  Include common.h instead. */

#ifndef __T3_ADAPTER_H__
#define __T3_ADAPTER_H__

#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/cache.h>
#include <linux/mutex.h>
#include <linux/bitops.h>
#include <linux/inet_lro.h>
#include "t3cdev.h"
#include <asm/io.h>

struct vlan_group;
struct adapter;
struct sge_qset;

enum {                          /* rx_offload flags */
        T3_RX_CSUM = 1 << 0,
        T3_LRO     = 1 << 1,
};

struct port_info {
        struct adapter *adapter;
        struct vlan_group *vlan_grp;
        struct sge_qset *qs;
        u8 port_id;
        u8 rx_offload;
        u8 nqsets;
        u8 first_qset;
        struct cphy phy;
        struct cmac mac;
        struct link_config link_config;
        struct net_device_stats netstats;
        int activity;
        __be32 iscsi_ipv4addr;
};
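
/*
 * Illustrative sketch only: rx_offload above is a per-port bitmask of the
 * rx_offload flags.  The helper below is hypothetical (not part of the
 * driver API); it merely shows how such a flag is meant to be tested.
 */
static inline int t3_port_lro_enabled(const struct port_info *pi)
{
        return (pi->rx_offload & T3_LRO) != 0;
}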

enum {                          /* adapter flags */
        FULL_INIT_DONE = (1 << 0),
        USING_MSI = (1 << 1),
        USING_MSIX = (1 << 2),
        QUEUES_BOUND = (1 << 3),
        TP_PARITY_INIT = (1 << 4),
        NAPI_INIT = (1 << 5),
};

struct fl_pg_chunk {
        struct page *page;
        void *va;
        unsigned int offset;
};

struct rx_desc;
struct rx_sw_desc;

struct sge_fl {                     /* SGE per free-buffer list state */
        unsigned int buf_size;      /* size of each Rx buffer */
        unsigned int credits;       /* # of available Rx buffers */
        unsigned int size;          /* capacity of free list */
        unsigned int cidx;          /* consumer index */
        unsigned int pidx;          /* producer index */
        unsigned int gen;           /* free list generation */
        struct fl_pg_chunk pg_chunk;/* page chunk cache */
        unsigned int use_pages;     /* whether FL uses pages or sk_buffs */
        unsigned int order;         /* order of page allocations */
        struct rx_desc *desc;       /* address of HW Rx descriptor ring */
        struct rx_sw_desc *sdesc;   /* address of SW Rx descriptor ring */
        dma_addr_t   phys_addr;     /* physical address of HW ring start */
        unsigned int cntxt_id;      /* SGE context id for the free list */
        unsigned long empty;        /* # of times queue ran out of buffers */
        unsigned long alloc_failed; /* # of times buffer allocation failed */
};
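
/*
 * Illustrative sketch only (hypothetical helper, not part of the driver):
 * since 'credits' counts populated Rx buffers and 'size' is the ring
 * capacity, the number of free-list slots that still need (re)filling is
 * simply their difference.
 */
static inline unsigned int fl_refill_needed(const struct sge_fl *fl)
{
        return fl->size - fl->credits;
}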

/*
 * Bundle size for grouping offload RX packets for delivery to the stack.
 * Don't make this too big as we do prefetch on each packet in a bundle.
 */
# define RX_BUNDLE_SIZE 8

struct rsp_desc;

struct sge_rspq {               /* state for an SGE response queue */
        unsigned int credits;   /* # of pending response credits */
        unsigned int size;      /* capacity of response queue */
        unsigned int cidx;      /* consumer index */
        unsigned int gen;       /* current generation bit */
        unsigned int polling;   /* is the queue serviced through NAPI? */
        unsigned int holdoff_tmr;       /* interrupt holdoff timer in 100ns */
        unsigned int next_holdoff;      /* holdoff time for next interrupt */
        unsigned int rx_recycle_buf;    /* whether recycling occurred
                                           within current sop-eop */
        struct rsp_desc *desc;  /* address of HW response ring */
        dma_addr_t phys_addr;   /* physical address of the ring */
        unsigned int cntxt_id;  /* SGE context id for the response q */
        spinlock_t lock;        /* guards response processing */
        struct sk_buff_head rx_queue;   /* offload packet receive queue */
        struct sk_buff *pg_skb; /* used to build frag list in napi handler */

        unsigned long offload_pkts;
        unsigned long offload_bundles;
        unsigned long eth_pkts;         /* # of ethernet packets */
        unsigned long pure_rsps;        /* # of pure (non-data) responses */
        unsigned long imm_data;         /* responses with immediate data */
        unsigned long rx_drops;         /* # of packets dropped due to no mem */
        unsigned long async_notif;      /* # of asynchronous notification events */
        unsigned long empty;            /* # of times queue ran out of credits */
        unsigned long nomem;            /* # of responses deferred due to no mem */
        unsigned long unhandled_irqs;   /* # of spurious intrs */
        unsigned long starved;
        unsigned long restarted;
};

struct tx_desc;
struct tx_sw_desc;

struct sge_txq {                /* state for an SGE Tx queue */
        unsigned long flags;    /* HW DMA fetch status */
        unsigned int in_use;    /* # of in-use Tx descriptors */
        unsigned int size;      /* # of descriptors */
        unsigned int processed; /* total # of descs HW has processed */
        unsigned int cleaned;   /* total # of descs SW has reclaimed */
        unsigned int stop_thres;        /* SW TX queue suspend threshold */
        unsigned int cidx;      /* consumer index */
        unsigned int pidx;      /* producer index */
        unsigned int gen;       /* current value of generation bit */
        unsigned int unacked;   /* Tx descriptors used since last COMPL */
        struct tx_desc *desc;   /* address of HW Tx descriptor ring */
        struct tx_sw_desc *sdesc;       /* address of SW Tx descriptor ring */
        spinlock_t lock;        /* guards enqueueing of new packets */
        unsigned int token;     /* WR token */
        dma_addr_t phys_addr;   /* physical address of the ring */
        struct sk_buff_head sendq;      /* List of backpressured offload packets */
        struct tasklet_struct qresume_tsk;      /* restarts the queue */
        unsigned int cntxt_id;  /* SGE context id for the Tx q */
        unsigned long stops;    /* # of times q has been stopped */
        unsigned long restarts; /* # of queue restarts */
};
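
/*
 * Illustrative sketch only (hypothetical helper, not part of the driver):
 * 'size' is the Tx ring capacity and 'in_use' the number of occupied
 * descriptors, so the descriptors still available for new work requests
 * are the difference of the two.
 */
static inline unsigned int txq_avail(const struct sge_txq *q)
{
        return q->size - q->in_use;
}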

enum {                          /* per port SGE statistics */
        SGE_PSTAT_TSO,          /* # of TSO requests */
        SGE_PSTAT_RX_CSUM_GOOD, /* # of successful RX csum offloads */
        SGE_PSTAT_TX_CSUM,      /* # of TX checksum offloads */
        SGE_PSTAT_VLANEX,       /* # of VLAN tag extractions */
        SGE_PSTAT_VLANINS,      /* # of VLAN tag insertions */
        SGE_PSTAT_LRO_AGGR,     /* # of page chunks added to LRO sessions */
        SGE_PSTAT_LRO_FLUSHED,  /* # of flushed LRO sessions */
        SGE_PSTAT_LRO_NO_DESC,  /* # of overflown LRO sessions */

        SGE_PSTAT_MAX           /* must be last */
};

#define T3_MAX_LRO_SES 8
#define T3_MAX_LRO_MAX_PKTS 64

struct sge_qset {               /* an SGE queue set */
        struct adapter *adap;
        struct napi_struct napi;
        struct sge_rspq rspq;
        struct sge_fl fl[SGE_RXQ_PER_SET];
        struct sge_txq txq[SGE_TXQ_PER_SET];
        struct net_lro_mgr lro_mgr;
        struct net_lro_desc lro_desc[T3_MAX_LRO_SES];
        struct skb_frag_struct *lro_frag_tbl;
        int lro_nfrags;
        int lro_enabled;
        int lro_frag_len;
        void *lro_va;
        struct net_device *netdev;
        struct netdev_queue *tx_q;      /* associated netdev TX queue */
        unsigned long txq_stopped;      /* which Tx queues are stopped */
        struct timer_list tx_reclaim_timer;     /* reclaims TX buffers */
        unsigned long port_stats[SGE_PSTAT_MAX];
} ____cacheline_aligned;

struct sge {
        struct sge_qset qs[SGE_QSETS];
        spinlock_t reg_lock;    /* guards non-atomic SGE registers (eg context) */
};

struct adapter {
        struct t3cdev tdev;
        struct list_head adapter_list;
        void __iomem *regs;
        struct pci_dev *pdev;
        unsigned long registered_device_map;
        unsigned long open_device_map;
        unsigned long flags;

        const char *name;
        int msg_enable;
        unsigned int mmio_len;

        struct adapter_params params;
        unsigned int slow_intr_mask;
        unsigned long irq_stats[IRQ_NUM_STATS];

        struct {
                unsigned short vec;
                char desc[22];
        } msix_info[SGE_QSETS + 1];

        /* T3 modules */
        struct sge sge;
        struct mc7 pmrx;
        struct mc7 pmtx;
        struct mc7 cm;
        struct mc5 mc5;

        struct net_device *port[MAX_NPORTS];
        unsigned int check_task_cnt;
        struct delayed_work adap_check_task;
        struct work_struct ext_intr_handler_task;
        struct work_struct fatal_error_handler_task;

        struct dentry *debugfs_root;

        struct mutex mdio_lock;
        spinlock_t stats_lock;
        spinlock_t work_lock;
};
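
/*
 * Illustrative sketch only (hypothetical helper, not part of the driver):
 * the "adapter flags" enum earlier in this file is consumed as a bitmask
 * held in adapter->flags, e.g. to tell MSI-X apart from MSI/INTx.
 */
static inline int t3_using_msix(const struct adapter *adap)
{
        return (adap->flags & USING_MSIX) != 0;
}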

static inline u32 t3_read_reg(struct adapter *adapter, u32 reg_addr)
{
        u32 val = readl(adapter->regs + reg_addr);

        CH_DBG(adapter, MMIO, "read register 0x%x value 0x%x\n", reg_addr, val);
        return val;
}

static inline void t3_write_reg(struct adapter *adapter, u32 reg_addr, u32 val)
{
        CH_DBG(adapter, MMIO, "setting register 0x%x to 0x%x\n", reg_addr, val);
        writel(val, adapter->regs + reg_addr);
}
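
/*
 * Illustrative sketch only (hypothetical helper, not part of the driver):
 * a read-modify-write built from the two accessors above, e.g. to set bits
 * in an adapter register without disturbing the remaining bits.
 */
static inline void t3_set_reg_bits(struct adapter *adapter, u32 reg_addr,
                                   u32 bits)
{
        t3_write_reg(adapter, reg_addr, t3_read_reg(adapter, reg_addr) | bits);
}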

static inline struct port_info *adap2pinfo(struct adapter *adap, int idx)
{
        return netdev_priv(adap->port[idx]);
}
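
/*
 * Illustrative sketch only (hypothetical helper, not part of the driver):
 * adap2pinfo() pairs naturally with a loop over adap->port[] when a
 * per-port setting has to be applied across the whole adapter.
 */
static inline void t3_set_all_ports_offload(struct adapter *adap, u8 mask)
{
        int i;

        for (i = 0; i < MAX_NPORTS; i++)
                if (adap->port[i])
                        adap2pinfo(adap, i)->rx_offload |= mask;
}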

#define OFFLOAD_DEVMAP_BIT 15

#define tdev2adap(d) container_of(d, struct adapter, tdev)

static inline int offload_running(struct adapter *adapter)
{
        return test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
}
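
/*
 * Illustrative sketch only (hypothetical helper, not part of the driver):
 * open_device_map is assumed here to carry one bit per open port plus
 * OFFLOAD_DEVMAP_BIT for the offload pseudo-device, so masking that bit
 * out answers "is any Ethernet port up?".
 */
static inline int any_port_running(const struct adapter *adapter)
{
        return (adapter->open_device_map & ~(1UL << OFFLOAD_DEVMAP_BIT)) != 0;
}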

int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb);

void t3_os_ext_intr_handler(struct adapter *adapter);
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_status,
                        int speed, int duplex, int fc);
void t3_os_phymod_changed(struct adapter *adap, int port_id);

void t3_sge_start(struct adapter *adap);
void t3_sge_stop(struct adapter *adap);
void t3_stop_sge_timers(struct adapter *adap);
void t3_free_sge_resources(struct adapter *adap);
void t3_sge_err_intr_handler(struct adapter *adapter);
irq_handler_t t3_intr_handler(struct adapter *adap, int polling);
int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev);
int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p);
int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
                      int irq_vec_idx, const struct qset_params *p,
                      int ntxq, struct net_device *dev,
                      struct netdev_queue *netdevq);
int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
                unsigned char *data);
irqreturn_t t3_sge_intr_msix(int irq, void *cookie);

#endif                          /* __T3_ADAPTER_H__ */