Merge branch 's390-qeth-next'

Julian Wiedmann says:

====================
s390/qeth: updates 2018-09-17

please apply the following patchset to net-next. This brings more
restructuring of qeth's transmit code (eliminating its last usage of
skb_realloc_headroom()), and the usual mix of minor improvements &
cleanups.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

commit ce5b127b17
drivers/s390/net/qeth_core.h:

@@ -26,6 +26,7 @@
#include <net/ipv6.h>
#include <net/if_inet6.h>
#include <net/addrconf.h>
#include <net/tcp.h>

#include <asm/debug.h>
#include <asm/qdio.h>

@@ -638,7 +639,6 @@ struct qeth_reply {
    atomic_t received;
    int rc;
    void *param;
    struct qeth_card *card;
    refcount_t refcnt;
};

@@ -892,11 +892,6 @@ static inline void qeth_tx_csum(struct sk_buff *skb, u8 *flags, int ipv)
    if ((ipv == 4 && ip_hdr(skb)->protocol == IPPROTO_UDP) ||
        (ipv == 6 && ipv6_hdr(skb)->nexthdr == IPPROTO_UDP))
        *flags |= QETH_HDR_EXT_UDP;
    if (ipv == 4) {
        /* some HW requires combined L3+L4 csum offload: */
        *flags |= QETH_HDR_EXT_CSUM_HDR_REQ;
        ip_hdr(skb)->check = 0;
    }
}

static inline void qeth_put_buffer_pool_entry(struct qeth_card *card,

@@ -1007,9 +1002,7 @@ int qeth_query_switch_attributes(struct qeth_card *card,
int qeth_send_control_data(struct qeth_card *, int, struct qeth_cmd_buffer *,
    int (*reply_cb)(struct qeth_card *, struct qeth_reply*, unsigned long),
    void *reply_param);
int qeth_get_elements_no(struct qeth_card *card, struct sk_buff *skb,
                         int extra_elems, int data_offset);
int qeth_get_elements_for_frags(struct sk_buff *);
unsigned int qeth_count_elements(struct sk_buff *skb, unsigned int data_offset);
int qeth_do_send_packet_fast(struct qeth_qdio_out_q *queue, struct sk_buff *skb,
                             struct qeth_hdr *hdr, unsigned int offset,
                             unsigned int hd_len);

@@ -1027,7 +1020,6 @@ void qeth_dbf_longtext(debug_info_t *id, int level, char *text, ...);
int qeth_core_ethtool_get_link_ksettings(struct net_device *netdev,
                                         struct ethtool_link_ksettings *cmd);
int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback);
int qeth_hdr_chk_and_bounce(struct sk_buff *, struct qeth_hdr **, int);
int qeth_configure_cq(struct qeth_card *, enum qeth_cq);
int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action);
void qeth_trace_features(struct qeth_card *);

@@ -1052,6 +1044,11 @@ int qeth_vm_request_mac(struct qeth_card *card);
int qeth_add_hw_header(struct qeth_card *card, struct sk_buff *skb,
                       struct qeth_hdr **hdr, unsigned int hdr_len,
                       unsigned int proto_len, unsigned int *elements);
int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
              struct qeth_qdio_out_q *queue, int ipv, int cast_type,
              void (*fill_header)(struct qeth_card *card, struct qeth_hdr *hdr,
                                  struct sk_buff *skb, int ipv, int cast_type,
                                  unsigned int data_len));

/* exports for OSN */
int qeth_osn_assist(struct net_device *, void *, int);
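The new qeth_xmit() prototype above hands the discipline-specific work to a
fill_header callback. A minimal sketch of a conforming callback (illustrative
only; the real implementations are qeth_l2_fill_header() and
qeth_l3_fill_header(), both visible further down in this diff):

/* Sketch of a fill_header callback matching the qeth_xmit() prototype.
 * qeth_xmit() has already zeroed the header, so the callback only sets
 * the fields it cares about. Field names follow the L2 variant below.
 */
static void example_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
                                struct sk_buff *skb, int ipv, int cast_type,
                                unsigned int data_len)
{
    hdr->hdr.l2.id = QETH_HEADER_TYPE_LAYER2;
    hdr->hdr.l2.pkt_length = data_len;
}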
drivers/s390/net/qeth_core_main.c:

@@ -16,6 +16,7 @@
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/mii.h>

@@ -591,7 +592,6 @@ static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card)
    if (reply) {
        refcount_set(&reply->refcnt, 1);
        atomic_set(&reply->received, 0);
        reply->card = card;
    }
    return reply;
}
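qeth_alloc_reply() hands out the object with its reference count initialized
to one; lifetime then follows the usual get/put pattern. The matching helpers
live elsewhere in qeth_core_main.c and are not part of this hunk; they look
roughly like this sketch:

static void qeth_get_reply(struct qeth_reply *reply)
{
    refcount_inc(&reply->refcnt);
}

static void qeth_put_reply(struct qeth_reply *reply)
{
    if (refcount_dec_and_test(&reply->refcnt))
        kfree(reply);
}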
@@ -780,7 +780,6 @@ void qeth_release_buffer(struct qeth_channel *channel,

    QETH_CARD_TEXT(CARD_FROM_CDEV(channel->ccwdev), 6, "relbuff");
    spin_lock_irqsave(&channel->iob_lock, flags);
    memset(iob->data, 0, QETH_BUFSIZE);
    iob->state = BUF_STATE_FREE;
    iob->callback = qeth_send_control_data_cb;
    iob->rc = 0;

@@ -900,44 +899,6 @@ static void qeth_send_control_data_cb(struct qeth_channel *channel,
    qeth_release_buffer(channel, iob);
}

static int qeth_setup_channel(struct qeth_channel *channel, bool alloc_buffers)
{
    int cnt;

    QETH_DBF_TEXT(SETUP, 2, "setupch");

    channel->ccw = kmalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
    if (!channel->ccw)
        return -ENOMEM;
    channel->state = CH_STATE_DOWN;
    atomic_set(&channel->irq_pending, 0);
    init_waitqueue_head(&channel->wait_q);

    if (!alloc_buffers)
        return 0;

    for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) {
        channel->iob[cnt].data =
            kzalloc(QETH_BUFSIZE, GFP_DMA|GFP_KERNEL);
        if (channel->iob[cnt].data == NULL)
            break;
        channel->iob[cnt].state = BUF_STATE_FREE;
        channel->iob[cnt].channel = channel;
        channel->iob[cnt].callback = qeth_send_control_data_cb;
        channel->iob[cnt].rc = 0;
    }
    if (cnt < QETH_CMD_BUFFER_NO) {
        kfree(channel->ccw);
        while (cnt-- > 0)
            kfree(channel->iob[cnt].data);
        return -ENOMEM;
    }
    channel->io_buf_no = 0;
    spin_lock_init(&channel->iob_lock);

    return 0;
}

static int qeth_set_thread_start_bit(struct qeth_card *card,
                                     unsigned long thread)
{
@@ -1336,14 +1297,61 @@ static void qeth_free_buffer_pool(struct qeth_card *card)

static void qeth_clean_channel(struct qeth_channel *channel)
{
    struct ccw_device *cdev = channel->ccwdev;
    int cnt;

    QETH_DBF_TEXT(SETUP, 2, "freech");

    spin_lock_irq(get_ccwdev_lock(cdev));
    cdev->handler = NULL;
    spin_unlock_irq(get_ccwdev_lock(cdev));

    for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++)
        kfree(channel->iob[cnt].data);
    kfree(channel->ccw);
}

static int qeth_setup_channel(struct qeth_channel *channel, bool alloc_buffers)
{
    struct ccw_device *cdev = channel->ccwdev;
    int cnt;

    QETH_DBF_TEXT(SETUP, 2, "setupch");

    channel->ccw = kmalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
    if (!channel->ccw)
        return -ENOMEM;
    channel->state = CH_STATE_DOWN;
    atomic_set(&channel->irq_pending, 0);
    init_waitqueue_head(&channel->wait_q);

    spin_lock_irq(get_ccwdev_lock(cdev));
    cdev->handler = qeth_irq;
    spin_unlock_irq(get_ccwdev_lock(cdev));

    if (!alloc_buffers)
        return 0;

    for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) {
        channel->iob[cnt].data = kmalloc(QETH_BUFSIZE,
                                         GFP_KERNEL | GFP_DMA);
        if (channel->iob[cnt].data == NULL)
            break;
        channel->iob[cnt].state = BUF_STATE_FREE;
        channel->iob[cnt].channel = channel;
        channel->iob[cnt].callback = qeth_send_control_data_cb;
        channel->iob[cnt].rc = 0;
    }
    if (cnt < QETH_CMD_BUFFER_NO) {
        qeth_clean_channel(channel);
        return -ENOMEM;
    }
    channel->io_buf_no = 0;
    spin_lock_init(&channel->iob_lock);

    return 0;
}

static void qeth_set_single_write_queues(struct qeth_card *card)
{
    if ((atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED) &&
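qeth_setup_channel() now also registers the CCW interrupt handler, and
qeth_clean_channel() unregisters it; both do so under the ccwdev lock so the
change cannot race against interrupt delivery for that subchannel. The pattern
in isolation (a sketch; get_ccwdev_lock() and qeth_irq are real symbols from
this diff, the wrapper function is hypothetical):

static void example_set_irq_handler(struct ccw_device *cdev, bool attach)
{
    spin_lock_irq(get_ccwdev_lock(cdev));
    cdev->handler = attach ? qeth_irq : NULL;  /* serialized against IRQ path */
    spin_unlock_irq(get_ccwdev_lock(cdev));
}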
@@ -1494,7 +1502,7 @@ static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr)
               CARD_BUS_ID(card), card->info.mcl_level);
}

static struct qeth_card *qeth_alloc_card(void)
static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev)
{
    struct qeth_card *card;

@@ -1503,6 +1511,11 @@ static struct qeth_card *qeth_alloc_card(void)
    if (!card)
        goto out;
    QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));

    card->gdev = gdev;
    CARD_RDEV(card) = gdev->cdev[0];
    CARD_WDEV(card) = gdev->cdev[1];
    CARD_DDEV(card) = gdev->cdev[2];
    if (qeth_setup_channel(&card->read, true))
        goto out_ip;
    if (qeth_setup_channel(&card->write, true))
@@ -1526,15 +1539,14 @@ static struct qeth_card *qeth_alloc_card(void)

static int qeth_clear_channel(struct qeth_channel *channel)
{
    unsigned long flags;
    struct qeth_card *card;
    int rc;

    card = CARD_FROM_CDEV(channel->ccwdev);
    QETH_CARD_TEXT(card, 3, "clearch");
    spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
    spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
    rc = ccw_device_clear(channel->ccwdev, QETH_CLEAR_CHANNEL_PARM);
    spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
    spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));

    if (rc)
        return rc;

@@ -1550,15 +1562,14 @@ static int qeth_clear_channel(struct qeth_channel *channel)

static int qeth_halt_channel(struct qeth_channel *channel)
{
    unsigned long flags;
    struct qeth_card *card;
    int rc;

    card = CARD_FROM_CDEV(channel->ccwdev);
    QETH_CARD_TEXT(card, 3, "haltch");
    spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
    spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
    rc = ccw_device_halt(channel->ccwdev, QETH_HALT_CHANNEL_PARM);
    spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
    spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));

    if (rc)
        return rc;
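A recurring theme of this series: spin_lock_irqsave() is replaced by
spin_lock_irq() in paths that are only ever entered from process context. The
_irqsave variant saves and restores the caller's interrupt state, which is only
needed when the caller might already run with interrupts disabled; when
interrupts are known to be enabled, plain _irq is sufficient and slightly
cheaper. A generic sketch (not driver code):

static void example_locked_update(spinlock_t *lock, int *counter)
{
    /* process context, IRQs known to be enabled */
    spin_lock_irq(lock);        /* disables IRQs on this CPU */
    (*counter)++;
    spin_unlock_irq(lock);      /* unconditionally re-enables IRQs */
}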
@@ -1652,7 +1663,6 @@ static int qeth_read_conf_data(struct qeth_card *card, void **buffer,
    char *rcd_buf;
    int ret;
    struct qeth_channel *channel = &card->data;
    unsigned long flags;

    /*
     * scan for RCD command in extended SenseID data

@@ -1666,11 +1676,11 @@ static int qeth_read_conf_data(struct qeth_card *card, void **buffer,

    qeth_setup_ccw(channel->ccw, ciw->cmd, ciw->count, rcd_buf);
    channel->state = CH_STATE_RCD;
    spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
    spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
    ret = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
                                   QETH_RCD_PARM, LPM_ANYPATH, 0,
                                   QETH_RCD_TIMEOUT);
    spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
    spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
    if (!ret)
        wait_event(card->wait_q,
                   (channel->state == CH_STATE_RCD_DONE ||

@@ -1828,7 +1838,6 @@ static int qeth_idx_activate_get_answer(struct qeth_channel *channel,
                                        struct qeth_cmd_buffer *))
{
    struct qeth_cmd_buffer *iob;
    unsigned long flags;
    int rc;
    struct qeth_card *card;

@@ -1843,10 +1852,10 @@ static int qeth_idx_activate_get_answer(struct qeth_channel *channel,
    wait_event(card->wait_q,
               atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
    QETH_DBF_TEXT(SETUP, 6, "noirqpnd");
    spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
    spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
    rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
                                  (addr_t) iob, 0, 0, QETH_TIMEOUT);
    spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
    spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));

    if (rc) {
        QETH_DBF_MESSAGE(2, "Error2 in activating channel rc=%d\n", rc);
@@ -1873,7 +1882,6 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel,
{
    struct qeth_card *card;
    struct qeth_cmd_buffer *iob;
    unsigned long flags;
    __u16 temp;
    __u8 tmp;
    int rc;

@@ -1913,10 +1921,10 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel,
    wait_event(card->wait_q,
               atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
    QETH_DBF_TEXT(SETUP, 6, "noirqpnd");
    spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
    spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
    rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
                                  (addr_t) iob, 0, 0, QETH_TIMEOUT);
    spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
    spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));

    if (rc) {
        QETH_DBF_MESSAGE(2, "Error1 in activating channel. rc=%d\n",

@@ -2097,7 +2105,6 @@ int qeth_send_control_data(struct qeth_card *card, int len,
{
    struct qeth_channel *channel = iob->channel;
    int rc;
    unsigned long flags;
    struct qeth_reply *reply = NULL;
    unsigned long timeout, event_timeout;
    struct qeth_ipa_cmd *cmd = NULL;

@@ -2130,26 +2137,26 @@ int qeth_send_control_data(struct qeth_card *card, int len,
    }
    qeth_prepare_control_data(card, len, iob);

    spin_lock_irqsave(&card->lock, flags);
    spin_lock_irq(&card->lock);
    list_add_tail(&reply->list, &card->cmd_waiter_list);
    spin_unlock_irqrestore(&card->lock, flags);
    spin_unlock_irq(&card->lock);

    timeout = jiffies + event_timeout;

    QETH_CARD_TEXT(card, 6, "noirqpnd");
    spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
    spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
    rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
                                  (addr_t) iob, 0, 0, event_timeout);
    spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
    spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
    if (rc) {
        QETH_DBF_MESSAGE(2, "%s qeth_send_control_data: "
                         "ccw_device_start rc = %i\n",
                         dev_name(&channel->ccwdev->dev), rc);
        QETH_CARD_TEXT_(card, 2, " err%d", rc);
        spin_lock_irqsave(&card->lock, flags);
        spin_lock_irq(&card->lock);
        list_del_init(&reply->list);
        qeth_put_reply(reply);
        spin_unlock_irqrestore(&card->lock, flags);
        spin_unlock_irq(&card->lock);
        qeth_release_buffer(channel, iob);
        atomic_set(&channel->irq_pending, 0);
        wake_up(&card->wait_q);

@@ -2177,9 +2184,9 @@ int qeth_send_control_data(struct qeth_card *card, int len,

time_err:
    reply->rc = -ETIME;
    spin_lock_irqsave(&reply->card->lock, flags);
    spin_lock_irq(&card->lock);
    list_del_init(&reply->list);
    spin_unlock_irqrestore(&reply->card->lock, flags);
    spin_unlock_irq(&card->lock);
    atomic_inc(&reply->received);
    rc = reply->rc;
    qeth_put_reply(reply);
@@ -2880,10 +2887,10 @@ static __u8 qeth_get_ipa_adp_type(enum qeth_link_types link_type)
}

static void qeth_fill_ipacmd_header(struct qeth_card *card,
                                    struct qeth_ipa_cmd *cmd, __u8 command,
                                    enum qeth_prot_versions prot)
                                    struct qeth_ipa_cmd *cmd,
                                    enum qeth_ipa_cmds command,
                                    enum qeth_prot_versions prot)
{
    memset(cmd, 0, sizeof(struct qeth_ipa_cmd));
    cmd->hdr.command = command;
    cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST;
    /* cmd->hdr.seqno is set by qeth_send_control_data() */

@@ -2895,8 +2902,6 @@ static void qeth_fill_ipacmd_header(struct qeth_card *card,
    cmd->hdr.prim_version_no = 1;
    cmd->hdr.param_count = 1;
    cmd->hdr.prot_version = prot;
    cmd->hdr.ipa_supported = 0;
    cmd->hdr.ipa_enabled = 0;
}

struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *card,
@@ -3043,7 +3048,7 @@ static int qeth_query_ipassists_cb(struct qeth_card *card,
        QETH_DBF_TEXT(SETUP, 2, "ipaunsup");
        card->options.ipa4.supported_funcs |= IPA_SETADAPTERPARMS;
        card->options.ipa6.supported_funcs |= IPA_SETADAPTERPARMS;
        return -0;
        return 0;
    default:
        if (cmd->hdr.return_code) {
            QETH_DBF_MESSAGE(1, "%s IPA_CMD_QIPASSIST: Unhandled "
@@ -3787,7 +3792,7 @@ EXPORT_SYMBOL_GPL(qeth_get_priority_queue);
 * Returns the number of pages, and thus QDIO buffer elements, needed to cover
 * fragmented part of the SKB. Returns zero for linear SKB.
 */
int qeth_get_elements_for_frags(struct sk_buff *skb)
static int qeth_get_elements_for_frags(struct sk_buff *skb)
{
    int cnt, elements = 0;

@@ -3800,9 +3805,17 @@ int qeth_get_elements_for_frags(struct sk_buff *skb)
    }
    return elements;
}
EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags);

static unsigned int qeth_count_elements(struct sk_buff *skb, int data_offset)
/**
 * qeth_count_elements() - Counts the number of QDIO buffer elements needed
 * to transmit an skb.
 * @skb: the skb to operate on.
 * @data_offset: skip this part of the skb's linear data
 *
 * Returns the number of pages, and thus QDIO buffer elements, needed to map the
 * skb's data (both its linear part and paged fragments).
 */
unsigned int qeth_count_elements(struct sk_buff *skb, unsigned int data_offset)
{
    unsigned int elements = qeth_get_elements_for_frags(skb);
    addr_t end = (addr_t)skb->data + skb_headlen(skb);

@@ -3812,54 +3825,10 @@ static unsigned int qeth_count_elements(struct sk_buff *skb, int data_offset)
    elements += qeth_get_elements_for_range(start, end);
    return elements;
}
EXPORT_SYMBOL_GPL(qeth_count_elements);
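A QDIO buffer element maps one contiguous range within a page, so counting
elements for a memory range means counting the pages it touches. A minimal
sketch of that arithmetic (the driver's actual helper is
qeth_get_elements_for_range() in qeth_core.h; this illustration assumes
4 KiB pages):

/* pages touched by the half-open range [start, end), with end > start */
static unsigned long pages_touched(unsigned long start, unsigned long end)
{
    return ((end - 1) >> 12) - (start >> 12) + 1;
}
/* e.g. start = 0x0fff, end = 0x1001 -> 2 pages, hence 2 buffer elements */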
/**
 * qeth_get_elements_no() - find number of SBALEs for skb data, inc. frags.
 * @card: qeth card structure, to check max. elems.
 * @skb: SKB address
 * @extra_elems: extra elems needed, to check against max.
 * @data_offset: range starts at skb->data + data_offset
 *
 * Returns the number of pages, and thus QDIO buffer elements, needed to cover
 * skb data, including linear part and fragments. Checks if the result plus
 * extra_elems fits under the limit for the card. Returns 0 if it does not.
 * Note: extra_elems is not included in the returned result.
 */
int qeth_get_elements_no(struct qeth_card *card,
                         struct sk_buff *skb, int extra_elems, int data_offset)
{
    int elements = qeth_count_elements(skb, data_offset);

    if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
        QETH_DBF_MESSAGE(2, "Invalid size of IP packet "
                         "(Number=%d / Length=%d). Discarded.\n",
                         elements + extra_elems, skb->len);
        return 0;
    }
    return elements;
}
EXPORT_SYMBOL_GPL(qeth_get_elements_no);

int qeth_hdr_chk_and_bounce(struct sk_buff *skb, struct qeth_hdr **hdr, int len)
{
    int hroom, inpage, rest;

    if (((unsigned long)skb->data & PAGE_MASK) !=
        (((unsigned long)skb->data + len - 1) & PAGE_MASK)) {
        hroom = skb_headroom(skb);
        inpage = PAGE_SIZE - ((unsigned long) skb->data % PAGE_SIZE);
        rest = len - inpage;
        if (rest > hroom)
            return 1;
        memmove(skb->data - rest, skb->data, skb_headlen(skb));
        skb->data -= rest;
        skb->tail -= rest;
        *hdr = (struct qeth_hdr *)skb->data;
        QETH_DBF_MESSAGE(2, "skb bounce len: %d rest: %d\n", len, rest);
    }
    return 0;
}
EXPORT_SYMBOL_GPL(qeth_hdr_chk_and_bounce);

#define QETH_HDR_CACHE_OBJ_SIZE (sizeof(struct qeth_hdr_tso) + \
                                 MAX_TCP_HEADER)

/**
 * qeth_add_hw_header() - add a HW header to an skb.

@@ -3894,7 +3863,11 @@ int qeth_add_hw_header(struct qeth_card *card, struct sk_buff *skb,
    if (qeth_get_elements_for_range(start, end + contiguous) == 1) {
        /* Push HW header into same page as first protocol header. */
        push_ok = true;
        __elements = qeth_count_elements(skb, 0);
        /* ... but TSO always needs a separate element for headers: */
        if (skb_is_gso(skb))
            __elements = 1 + qeth_count_elements(skb, proto_len);
        else
            __elements = qeth_count_elements(skb, 0);
    } else if (!proto_len && qeth_get_elements_for_range(start, end) == 1) {
        /* Push HW header into a new page. */
        push_ok = true;

@@ -3935,6 +3908,8 @@ int qeth_add_hw_header(struct qeth_card *card, struct sk_buff *skb,
        return hdr_len;
    }
    /* fall back */
    if (hdr_len + proto_len > QETH_HDR_CACHE_OBJ_SIZE)
        return -E2BIG;
    *hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
    if (!*hdr)
        return -ENOMEM;
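The push-vs-allocate decision in qeth_add_hw_header() boils down to whether
prepending hdr_len bytes in front of skb->data still leaves the HW header and
the protocol headers within a single page. A simplified sketch of that test
(illustrative, assuming 4 KiB pages; the driver expresses it via
qeth_get_elements_for_range()):

static bool can_push_hw_header(unsigned long data, unsigned long proto_end,
                               unsigned long hdr_len)
{
    unsigned long start = data - hdr_len;

    /* single page <=> first and last byte share a page frame */
    return (start >> 12) == ((proto_end - 1) >> 12);
}

If neither push variant applies, the function takes the "/* fall back */" path
above: the header comes from qeth_core_header_cache instead and occupies its
own buffer element.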
@@ -4176,6 +4151,66 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
}
EXPORT_SYMBOL_GPL(qeth_do_send_packet);

int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
              struct qeth_qdio_out_q *queue, int ipv, int cast_type,
              void (*fill_header)(struct qeth_card *card, struct qeth_hdr *hdr,
                                  struct sk_buff *skb, int ipv, int cast_type,
                                  unsigned int data_len))
{
    const unsigned int proto_len = IS_IQD(card) ? ETH_HLEN : 0;
    const unsigned int hw_hdr_len = sizeof(struct qeth_hdr);
    unsigned int frame_len = skb->len;
    unsigned int data_offset = 0;
    struct qeth_hdr *hdr = NULL;
    unsigned int hd_len = 0;
    unsigned int elements;
    int push_len, rc;
    bool is_sg;

    rc = skb_cow_head(skb, hw_hdr_len);
    if (rc)
        return rc;

    push_len = qeth_add_hw_header(card, skb, &hdr, hw_hdr_len, proto_len,
                                  &elements);
    if (push_len < 0)
        return push_len;
    if (!push_len) {
        /* HW header needs its own buffer element. */
        hd_len = hw_hdr_len + proto_len;
        data_offset = proto_len;
    }
    memset(hdr, 0, hw_hdr_len);
    fill_header(card, hdr, skb, ipv, cast_type, frame_len);

    is_sg = skb_is_nonlinear(skb);
    if (IS_IQD(card)) {
        rc = qeth_do_send_packet_fast(queue, skb, hdr, data_offset,
                                      hd_len);
    } else {
        /* TODO: drop skb_orphan() once TX completion is fast enough */
        skb_orphan(skb);
        rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset,
                                 hd_len, elements);
    }

    if (!rc) {
        if (card->options.performance_stats) {
            card->perf_stats.buf_elements_sent += elements;
            if (is_sg)
                card->perf_stats.sg_skbs_sent++;
        }
    } else {
        if (!push_len)
            kmem_cache_free(qeth_core_header_cache, hdr);
        if (rc == -EBUSY)
            /* roll back to ETH header */
            skb_pull(skb, push_len);
    }
    return rc;
}
EXPORT_SYMBOL_GPL(qeth_xmit);
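From a discipline's perspective the whole transmit path now collapses into one
call; this is exactly how the L2 hunk further down uses it:

rc = qeth_xmit(card, skb, queue, ipv, cast_type, qeth_l2_fill_header);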
static int qeth_setadp_promisc_mode_cb(struct qeth_card *card,
                                       struct qeth_reply *reply, unsigned long data)
{

@@ -5456,8 +5491,6 @@ struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
        cmd->data.setassparms.hdr.assist_no = ipa_func;
        cmd->data.setassparms.hdr.length = 8 + len;
        cmd->data.setassparms.hdr.command_code = cmd_code;
        cmd->data.setassparms.hdr.return_code = 0;
        cmd->data.setassparms.hdr.seq_no = 0;
    }

    return iob;
@@ -5731,7 +5764,6 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
    struct device *dev;
    int rc;
    enum qeth_discipline_id enforced_disc;
    unsigned long flags;
    char dbf_name[DBF_NAME_LEN];

    QETH_DBF_TEXT(SETUP, 2, "probedev");

@@ -5742,7 +5774,7 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)

    QETH_DBF_TEXT_(SETUP, 2, "%s", dev_name(&gdev->dev));

    card = qeth_alloc_card();
    card = qeth_alloc_card(gdev);
    if (!card) {
        QETH_DBF_TEXT_(SETUP, 2, "1err%d", -ENOMEM);
        rc = -ENOMEM;

@@ -5758,15 +5790,7 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
        goto err_card;
    }

    card->read.ccwdev = gdev->cdev[0];
    card->write.ccwdev = gdev->cdev[1];
    card->data.ccwdev = gdev->cdev[2];
    dev_set_drvdata(&gdev->dev, card);
    card->gdev = gdev;
    gdev->cdev[0]->handler = qeth_irq;
    gdev->cdev[1]->handler = qeth_irq;
    gdev->cdev[2]->handler = qeth_irq;

    qeth_setup_card(card);
    qeth_update_from_chp_desc(card);

@@ -5797,9 +5821,9 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
        break;
    }

    write_lock_irqsave(&qeth_core_card_list.rwlock, flags);
    write_lock_irq(&qeth_core_card_list.rwlock);
    list_add_tail(&card->list, &qeth_core_card_list.list);
    write_unlock_irqrestore(&qeth_core_card_list.rwlock, flags);
    write_unlock_irq(&qeth_core_card_list.rwlock);
    return 0;

err_disc:

@@ -5815,7 +5839,6 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)

static void qeth_core_remove_device(struct ccwgroup_device *gdev)
{
    unsigned long flags;
    struct qeth_card *card = dev_get_drvdata(&gdev->dev);

    QETH_DBF_TEXT(SETUP, 2, "removedv");

@@ -5825,9 +5848,9 @@ static void qeth_core_remove_device(struct ccwgroup_device *gdev)
        qeth_core_free_discipline(card);
    }

    write_lock_irqsave(&qeth_core_card_list.rwlock, flags);
    write_lock_irq(&qeth_core_card_list.rwlock);
    list_del(&card->list);
    write_unlock_irqrestore(&qeth_core_card_list.rwlock, flags);
    write_unlock_irq(&qeth_core_card_list.rwlock);
    free_netdev(card->dev);
    qeth_core_free_card(card);
    dev_set_drvdata(&gdev->dev, NULL);

@@ -6619,8 +6642,10 @@ static int __init qeth_core_init(void)
    rc = PTR_ERR_OR_ZERO(qeth_core_root_dev);
    if (rc)
        goto register_err;
    qeth_core_header_cache = kmem_cache_create("qeth_hdr",
        sizeof(struct qeth_hdr) + ETH_HLEN, 64, 0, NULL);
    qeth_core_header_cache =
        kmem_cache_create("qeth_hdr", QETH_HDR_CACHE_OBJ_SIZE,
                          roundup_pow_of_two(QETH_HDR_CACHE_OBJ_SIZE),
                          0, NULL);
    if (!qeth_core_header_cache) {
        rc = -ENOMEM;
        goto slab_err;
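The header cache now aligns its objects to
roundup_pow_of_two(QETH_HDR_CACHE_OBJ_SIZE), which is why linux/log2.h is
included above. An object aligned to a power-of-two boundary at least as large
as itself cannot straddle a page boundary, so every cached HW header is
guaranteed to be mappable as a single QDIO buffer element. Sketch of the
invariant (illustrative; assumes 4 KiB pages, align >= size, and align a
power of two that divides the page size):

static bool stays_in_one_page(unsigned long addr, unsigned long size,
                              unsigned long align)
{
    if (addr % align)
        return false;           /* the allocator guarantees this alignment */
    return (addr >> 12) == ((addr + size - 1) >> 12);
}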
drivers/s390/net/qeth_l2_main.c:

@@ -193,15 +193,21 @@ static int qeth_l2_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
    return RTN_UNICAST;
}

static void qeth_l2_fill_header(struct qeth_hdr *hdr, struct sk_buff *skb,
                                int cast_type, unsigned int data_len)
static void qeth_l2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
                                struct sk_buff *skb, int ipv, int cast_type,
                                unsigned int data_len)
{
    struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb_mac_header(skb);

    memset(hdr, 0, sizeof(struct qeth_hdr));
    hdr->hdr.l2.id = QETH_HEADER_TYPE_LAYER2;
    hdr->hdr.l2.pkt_length = data_len;

    if (skb->ip_summed == CHECKSUM_PARTIAL) {
        qeth_tx_csum(skb, &hdr->hdr.l2.flags[1], ipv);
        if (card->options.performance_stats)
            card->perf_stats.tx_csum++;
    }

    /* set byte byte 3 to casting flags */
    if (cast_type == RTN_MULTICAST)
        hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_MULTICAST;
@@ -641,82 +647,41 @@ static void qeth_l2_set_rx_mode(struct net_device *dev)
    qeth_promisc_to_bridge(card);
}

static int qeth_l2_xmit(struct qeth_card *card, struct sk_buff *skb,
                        struct qeth_qdio_out_q *queue, int cast_type, int ipv)
{
    const unsigned int proto_len = IS_IQD(card) ? ETH_HLEN : 0;
    const unsigned int hw_hdr_len = sizeof(struct qeth_hdr);
    unsigned int frame_len = skb->len;
    unsigned int data_offset = 0;
    struct qeth_hdr *hdr = NULL;
    unsigned int hd_len = 0;
    unsigned int elements;
    int push_len, rc;
    bool is_sg;

    rc = skb_cow_head(skb, hw_hdr_len);
    if (rc)
        return rc;

    push_len = qeth_add_hw_header(card, skb, &hdr, hw_hdr_len, proto_len,
                                  &elements);
    if (push_len < 0)
        return push_len;
    if (!push_len) {
        /* HW header needs its own buffer element. */
        hd_len = hw_hdr_len + proto_len;
        data_offset = proto_len;
    }
    qeth_l2_fill_header(hdr, skb, cast_type, frame_len);
    if (skb->ip_summed == CHECKSUM_PARTIAL) {
        qeth_tx_csum(skb, &hdr->hdr.l2.flags[1], ipv);
        if (card->options.performance_stats)
            card->perf_stats.tx_csum++;
    }

    is_sg = skb_is_nonlinear(skb);
    if (IS_IQD(card)) {
        rc = qeth_do_send_packet_fast(queue, skb, hdr, data_offset,
                                      hd_len);
    } else {
        /* TODO: drop skb_orphan() once TX completion is fast enough */
        skb_orphan(skb);
        rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset,
                                 hd_len, elements);
    }

    if (!rc) {
        if (card->options.performance_stats) {
            card->perf_stats.buf_elements_sent += elements;
            if (is_sg)
                card->perf_stats.sg_skbs_sent++;
        }
    } else {
        if (!push_len)
            kmem_cache_free(qeth_core_header_cache, hdr);
        if (rc == -EBUSY)
            /* roll back to ETH header */
            skb_pull(skb, push_len);
    }
    return rc;
}

static int qeth_l2_xmit_osn(struct qeth_card *card, struct sk_buff *skb,
                            struct qeth_qdio_out_q *queue)
{
    unsigned int elements;
    struct qeth_hdr *hdr;
    struct qeth_hdr *hdr = (struct qeth_hdr *)skb->data;
    addr_t end = (addr_t)(skb->data + sizeof(*hdr));
    addr_t start = (addr_t)skb->data;
    unsigned int elements = 0;
    unsigned int hd_len = 0;
    int rc;

    if (skb->protocol == htons(ETH_P_IPV6))
        return -EPROTONOSUPPORT;

    hdr = (struct qeth_hdr *)skb->data;
    elements = qeth_get_elements_no(card, skb, 0, 0);
    if (!elements)
        return -E2BIG;
    if (qeth_hdr_chk_and_bounce(skb, &hdr, sizeof(*hdr)))
        return -EINVAL;
    return qeth_do_send_packet(card, queue, skb, hdr, 0, 0, elements);
    if (qeth_get_elements_for_range(start, end) > 1) {
        /* Misaligned HW header, move it to its own buffer element. */
        hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
        if (!hdr)
            return -ENOMEM;
        hd_len = sizeof(*hdr);
        skb_copy_from_linear_data(skb, (char *)hdr, hd_len);
        elements++;
    }

    elements += qeth_count_elements(skb, hd_len);
    if (elements > QETH_MAX_BUFFER_ELEMENTS(card)) {
        rc = -E2BIG;
        goto out;
    }

    rc = qeth_do_send_packet(card, queue, skb, hdr, hd_len, hd_len,
                             elements);
out:
    if (rc && hd_len)
        kmem_cache_free(qeth_core_header_cache, hdr);
    return rc;
}

static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
@@ -745,7 +710,8 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
    if (IS_OSN(card))
        rc = qeth_l2_xmit_osn(card, skb, queue);
    else
        rc = qeth_l2_xmit(card, skb, queue, cast_type, ipv);
        rc = qeth_xmit(card, skb, queue, ipv, cast_type,
                       qeth_l2_fill_header);

    if (!rc) {
        card->stats.tx_packets++;

@@ -789,7 +755,10 @@ static int __qeth_l2_open(struct net_device *dev)

    if (qdio_stop_irq(card->data.ccwdev, 0) >= 0) {
        napi_enable(&card->napi);
        local_bh_disable();
        napi_schedule(&card->napi);
        /* kick-start the NAPI softirq: */
        local_bh_enable();
    } else
        rc = -EIO;
    return rc;
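The local_bh_disable()/local_bh_enable() bracket around napi_schedule() matters
because this runs in process context: napi_schedule() only raises
NET_RX_SOFTIRQ, and the closing local_bh_enable() then processes the pending
softirq immediately instead of leaving the kick-start to the next interrupt or
to ksoftirqd. (The same change is applied to __qeth_l3_open() later in this
diff.) The pattern in isolation:

/* process context */
local_bh_disable();
napi_schedule(&card->napi);  /* raises NET_RX_SOFTIRQ */
local_bh_enable();           /* runs the pending softirq right here */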
@@ -1240,7 +1209,6 @@ static int qeth_osn_send_control_data(struct qeth_card *card, int len,
                                      struct qeth_cmd_buffer *iob)
{
    struct qeth_channel *channel = iob->channel;
    unsigned long flags;
    int rc = 0;

    QETH_CARD_TEXT(card, 5, "osndctrd");

@@ -1249,10 +1217,10 @@ static int qeth_osn_send_control_data(struct qeth_card *card, int len,
               atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
    qeth_prepare_control_data(card, len, iob);
    QETH_CARD_TEXT(card, 6, "osnoirqp");
    spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
    spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
    rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
                                  (addr_t) iob, 0, 0, QETH_IPA_TIMEOUT);
    spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
    spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
    if (rc) {
        QETH_DBF_MESSAGE(2, "qeth_osn_send_control_data: "
                         "ccw_device_start rc = %i\n", rc);
drivers/s390/net/qeth_l3_main.c:

@@ -33,7 +33,6 @@
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/ip6_fib.h>
#include <net/ip6_checksum.h>
#include <net/iucv/af_iucv.h>
#include <linux/hashtable.h>

@@ -1983,21 +1982,23 @@ static int qeth_l3_get_cast_type(struct sk_buff *skb)
    rcu_read_unlock();

    /* no neighbour (eg AF_PACKET), fall back to target's IP address ... */
    if (be16_to_cpu(skb->protocol) == ETH_P_IPV6)
        return ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) ?
               RTN_MULTICAST : RTN_UNICAST;
    else if (be16_to_cpu(skb->protocol) == ETH_P_IP)
    switch (qeth_get_ip_version(skb)) {
    case 4:
        return ipv4_is_multicast(ip_hdr(skb)->daddr) ?
               RTN_MULTICAST : RTN_UNICAST;

    /* ... and MAC address */
    if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest, skb->dev->broadcast))
        return RTN_BROADCAST;
    if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
        return RTN_MULTICAST;

    /* default to unicast */
    return RTN_UNICAST;
    case 6:
        return ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) ?
               RTN_MULTICAST : RTN_UNICAST;
    default:
        /* ... and MAC address */
        if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest,
                                    skb->dev->broadcast))
            return RTN_BROADCAST;
        if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
            return RTN_MULTICAST;
        /* default to unicast */
        return RTN_UNICAST;
    }
}
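qeth_l3_get_cast_type() now switches on qeth_get_ip_version() instead of
reading skb->protocol twice. That helper lives in qeth_core.h and is not part
of this diff; it classifies by ethertype, looking through a VLAN tag, roughly
like this sketch (name changed to flag it as an illustration):

static inline int example_get_ip_version(struct sk_buff *skb)
{
    struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
    __be16 prot = veth->h_vlan_proto;

    if (prot == htons(ETH_P_8021Q))
        prot = veth->h_vlan_encapsulated_proto;

    switch (prot) {
    case htons(ETH_P_IP):
        return 4;
    case htons(ETH_P_IPV6):
        return 6;
    default:
        return 0;
    }
}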

static void qeth_l3_fill_af_iucv_hdr(struct qeth_hdr *hdr, struct sk_buff *skb,

@@ -2006,7 +2007,6 @@ static void qeth_l3_fill_af_iucv_hdr(struct qeth_hdr *hdr, struct sk_buff *skb,
    char daddr[16];
    struct af_iucv_trans_hdr *iucv_hdr;

    memset(hdr, 0, sizeof(struct qeth_hdr));
    hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
    hdr->hdr.l3.length = data_len;
    hdr->hdr.l3.flags = QETH_HDR_IPV6 | QETH_CAST_UNICAST;

@@ -2034,26 +2034,33 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
                                struct sk_buff *skb, int ipv, int cast_type,
                                unsigned int data_len)
{
    memset(hdr, 0, sizeof(struct qeth_hdr));
    hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
    struct vlan_ethhdr *veth = vlan_eth_hdr(skb);

    hdr->hdr.l3.length = data_len;

    /*
     * before we're going to overwrite this location with next hop ip.
     * v6 uses passthrough, v4 sets the tag in the QDIO header.
     */
    if (skb_vlan_tag_present(skb)) {
        if ((ipv == 4) || (card->info.type == QETH_CARD_TYPE_IQD))
            hdr->hdr.l3.ext_flags = QETH_HDR_EXT_VLAN_FRAME;
        else
            hdr->hdr.l3.ext_flags = QETH_HDR_EXT_INCLUDE_VLAN_TAG;
        hdr->hdr.l3.vlan_id = skb_vlan_tag_get(skb);
    if (skb_is_gso(skb)) {
        hdr->hdr.l3.id = QETH_HEADER_TYPE_TSO;
    } else {
        hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
            qeth_tx_csum(skb, &hdr->hdr.l3.ext_flags, ipv);
            /* some HW requires combined L3+L4 csum offload: */
            if (ipv == 4)
                hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_CSUM_HDR_REQ;
            if (card->options.performance_stats)
                card->perf_stats.tx_csum++;
        }
    }

    if (!skb_is_gso(skb) && skb->ip_summed == CHECKSUM_PARTIAL) {
        qeth_tx_csum(skb, &hdr->hdr.l3.ext_flags, ipv);
        if (card->options.performance_stats)
            card->perf_stats.tx_csum++;
    if (ipv == 4 || IS_IQD(card)) {
        /* NETIF_F_HW_VLAN_CTAG_TX */
        if (skb_vlan_tag_present(skb)) {
            hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_VLAN_FRAME;
            hdr->hdr.l3.vlan_id = skb_vlan_tag_get(skb);
        }
    } else if (veth->h_vlan_proto == htons(ETH_P_8021Q)) {
        hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_INCLUDE_VLAN_TAG;
        hdr->hdr.l3.vlan_id = ntohs(veth->h_vlan_TCI);
    }

    /* OSA only: */
@@ -2094,85 +2101,57 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
    rcu_read_unlock();
}

static void qeth_tso_fill_header(struct qeth_card *card,
                                 struct qeth_hdr *qhdr, struct sk_buff *skb)
static void qeth_l3_fill_tso_ext(struct qeth_hdr_tso *hdr,
                                 unsigned int payload_len, struct sk_buff *skb,
                                 unsigned int proto_len)
{
    struct qeth_hdr_ext_tso *ext = &hdr->ext;

    ext->hdr_tot_len = sizeof(*ext);
    ext->imb_hdr_no = 1;
    ext->hdr_type = 1;
    ext->hdr_version = 1;
    ext->hdr_len = 28;
    ext->payload_len = payload_len;
    ext->mss = skb_shinfo(skb)->gso_size;
    ext->dg_hdr_len = proto_len;
}

static void qeth_l3_fixup_headers(struct sk_buff *skb)
{
    struct qeth_hdr_tso *hdr = (struct qeth_hdr_tso *)qhdr;
    struct tcphdr *tcph = tcp_hdr(skb);
    struct iphdr *iph = ip_hdr(skb);
    struct ipv6hdr *ip6h = ipv6_hdr(skb);

    /*fix header to TSO values ...*/
    hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO;
    /*set values which are fix for the first approach ...*/
    hdr->ext.hdr_tot_len = (__u16) sizeof(struct qeth_hdr_ext_tso);
    hdr->ext.imb_hdr_no = 1;
    hdr->ext.hdr_type = 1;
    hdr->ext.hdr_version = 1;
    hdr->ext.hdr_len = 28;
    /*insert non-fix values */
    hdr->ext.mss = skb_shinfo(skb)->gso_size;
    hdr->ext.dg_hdr_len = (__u16)(ip_hdrlen(skb) + tcp_hdrlen(skb));
    hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len -
                                   sizeof(struct qeth_hdr_tso));
    tcph->check = 0;
    if (be16_to_cpu(skb->protocol) == ETH_P_IPV6) {
        ip6h->payload_len = 0;
        tcph->check = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
                                       0, IPPROTO_TCP, 0);
    } else {
        /*OSA want us to set these values ...*/
        tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
                                         0, IPPROTO_TCP, 0);
        iph->tot_len = 0;
    /* this is safe, IPv6 traffic takes a different path */
    if (skb->ip_summed == CHECKSUM_PARTIAL)
        iph->check = 0;
    if (skb_is_gso(skb)) {
        iph->tot_len = 0;
        tcp_hdr(skb)->check = ~tcp_v4_check(0, iph->saddr,
                                            iph->daddr, 0);
    }
}
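qeth_l3_fixup_headers() seeds tcph->check with the pseudo-header sum
(tcp_v4_check() over a zero length) because with TSO the adapter recomputes
the length-dependent fields per segment; only the IPv4 case is handled here
since IPv6 traffic takes the qeth_xmit() path. For the companion
qeth_l3_fill_tso_ext(), a worked example with hypothetical sizes:

/* Hypothetical IPv4+TCP frame, no options: before the ETH header is pulled,
 * proto_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN
 *           = (14 + 20) + 20 - 14 = 40 bytes of L3+L4 headers.
 * For frame_len = 1480 after the pull, the caller passes
 * payload_len = 1480 - 40 = 1440, so ext->dg_hdr_len = 40,
 * ext->payload_len = 1440, and ext->mss comes from gso_size.
 */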
/**
 * qeth_l3_get_elements_no_tso() - find number of SBALEs for skb data for tso
 * @card: qeth card structure, to check max. elems.
 * @skb: SKB address
 * @extra_elems: extra elems needed, to check against max.
 *
 * Returns the number of pages, and thus QDIO buffer elements, needed to cover
 * skb data, including linear part and fragments, but excluding TCP header.
 * (Exclusion of TCP header distinguishes it from qeth_get_elements_no().)
 * Checks if the result plus extra_elems fits under the limit for the card.
 * Returns 0 if it does not.
 * Note: extra_elems is not included in the returned result.
 */
static int qeth_l3_get_elements_no_tso(struct qeth_card *card,
                                       struct sk_buff *skb, int extra_elems)
static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb,
                        struct qeth_qdio_out_q *queue, int ipv, int cast_type)
{
    addr_t start = (addr_t)tcp_hdr(skb) + tcp_hdrlen(skb);
    addr_t end = (addr_t)skb->data + skb_headlen(skb);
    int elements = qeth_get_elements_for_frags(skb);

    if (start != end)
        elements += qeth_get_elements_for_range(start, end);

    if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
        QETH_DBF_MESSAGE(2,
            "Invalid size of TSO IP packet (Number=%d / Length=%d). Discarded.\n",
            elements + extra_elems, skb->len);
        return 0;
    }
    return elements;
}

static int qeth_l3_xmit_offload(struct qeth_card *card, struct sk_buff *skb,
                                struct qeth_qdio_out_q *queue, int ipv,
                                int cast_type)
{
    const unsigned int hw_hdr_len = sizeof(struct qeth_hdr);
    unsigned int frame_len, elements;
    unsigned int hw_hdr_len, proto_len, frame_len, elements;
    unsigned char eth_hdr[ETH_HLEN];
    bool is_tso = skb_is_gso(skb);
    unsigned int data_offset = 0;
    struct qeth_hdr *hdr = NULL;
    unsigned int hd_len = 0;
    int push_len, rc;
    bool is_sg;

    if (is_tso) {
        hw_hdr_len = sizeof(struct qeth_hdr_tso);
        proto_len = skb_transport_offset(skb) + tcp_hdrlen(skb) -
                    ETH_HLEN;
    } else {
        hw_hdr_len = sizeof(struct qeth_hdr);
        proto_len = 0;
    }

    /* re-use the L2 header area for the HW header: */
    rc = skb_cow_head(skb, hw_hdr_len - ETH_HLEN);
    if (rc)
@@ -2181,28 +2160,37 @@ static int qeth_l3_xmit_offload(struct qeth_card *card, struct sk_buff *skb,
    skb_pull(skb, ETH_HLEN);
    frame_len = skb->len;

    push_len = qeth_add_hw_header(card, skb, &hdr, hw_hdr_len, 0,
    qeth_l3_fixup_headers(skb);
    push_len = qeth_add_hw_header(card, skb, &hdr, hw_hdr_len, proto_len,
                                  &elements);
    if (push_len < 0)
        return push_len;
    if (!push_len) {
        /* hdr was added discontiguous from skb->data */
        hd_len = hw_hdr_len;
    if (is_tso || !push_len) {
        /* HW header needs its own buffer element. */
        hd_len = hw_hdr_len + proto_len;
        data_offset = push_len + proto_len;
    }
    memset(hdr, 0, hw_hdr_len);

    if (skb->protocol == htons(ETH_P_AF_IUCV))
    if (skb->protocol == htons(ETH_P_AF_IUCV)) {
        qeth_l3_fill_af_iucv_hdr(hdr, skb, frame_len);
    else
    } else {
        qeth_l3_fill_header(card, hdr, skb, ipv, cast_type, frame_len);
        if (is_tso)
            qeth_l3_fill_tso_ext((struct qeth_hdr_tso *) hdr,
                                 frame_len - proto_len, skb,
                                 proto_len);
    }

    is_sg = skb_is_nonlinear(skb);
    if (IS_IQD(card)) {
        rc = qeth_do_send_packet_fast(queue, skb, hdr, 0, hd_len);
        rc = qeth_do_send_packet_fast(queue, skb, hdr, data_offset,
                                      hd_len);
    } else {
        /* TODO: drop skb_orphan() once TX completion is fast enough */
        skb_orphan(skb);
        rc = qeth_do_send_packet(card, queue, skb, hdr, 0, hd_len,
                                 elements);
        rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset,
                                 hd_len, elements);
    }

    if (!rc) {
@@ -2210,6 +2198,10 @@ static int qeth_l3_xmit_offload(struct qeth_card *card, struct sk_buff *skb,
            card->perf_stats.buf_elements_sent += elements;
            if (is_sg)
                card->perf_stats.sg_skbs_sent++;
            if (is_tso) {
                card->perf_stats.large_send_bytes += frame_len;
                card->perf_stats.large_send_cnt++;
            }
        }
    } else {
        if (!push_len)
@@ -2224,118 +2216,6 @@ static int qeth_l3_xmit_offload(struct qeth_card *card, struct sk_buff *skb,
    return rc;
}

static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb,
                        struct qeth_qdio_out_q *queue, int ipv, int cast_type)
{
    int elements, len, rc;
    __be16 *tag;
    struct qeth_hdr *hdr = NULL;
    int hdr_elements = 0;
    struct sk_buff *new_skb = NULL;
    int tx_bytes = skb->len;
    unsigned int hd_len;
    bool use_tso, is_sg;

    /* Ignore segment size from skb_is_gso(), 1 page is always used. */
    use_tso = skb_is_gso(skb) &&
              (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4);

    /* create a clone with writeable headroom */
    new_skb = skb_realloc_headroom(skb, sizeof(struct qeth_hdr_tso) +
                                   VLAN_HLEN);
    if (!new_skb)
        return -ENOMEM;

    if (ipv == 4) {
        skb_pull(new_skb, ETH_HLEN);
    } else if (skb_vlan_tag_present(new_skb)) {
        skb_push(new_skb, VLAN_HLEN);
        skb_copy_to_linear_data(new_skb, new_skb->data + 4, 4);
        skb_copy_to_linear_data_offset(new_skb, 4,
                                       new_skb->data + 8, 4);
        skb_copy_to_linear_data_offset(new_skb, 8,
                                       new_skb->data + 12, 4);
        tag = (__be16 *)(new_skb->data + 12);
        *tag = cpu_to_be16(ETH_P_8021Q);
        *(tag + 1) = cpu_to_be16(skb_vlan_tag_get(new_skb));
    }

    /* fix hardware limitation: as long as we do not have sbal
     * chaining we can not send long frag lists
     */
    if ((use_tso && !qeth_l3_get_elements_no_tso(card, new_skb, 1)) ||
        (!use_tso && !qeth_get_elements_no(card, new_skb, 0, 0))) {
        rc = skb_linearize(new_skb);

        if (card->options.performance_stats) {
            if (rc)
                card->perf_stats.tx_linfail++;
            else
                card->perf_stats.tx_lin++;
        }
        if (rc)
            goto out;
    }

    if (use_tso) {
        hdr = skb_push(new_skb, sizeof(struct qeth_hdr_tso));
        memset(hdr, 0, sizeof(struct qeth_hdr_tso));
        qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type,
                            new_skb->len - sizeof(struct qeth_hdr_tso));
        qeth_tso_fill_header(card, hdr, new_skb);
        hdr_elements++;
    } else {
        hdr = skb_push(new_skb, sizeof(struct qeth_hdr));
        qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type,
                            new_skb->len - sizeof(struct qeth_hdr));
    }

    elements = use_tso ?
        qeth_l3_get_elements_no_tso(card, new_skb, hdr_elements) :
        qeth_get_elements_no(card, new_skb, hdr_elements, 0);
    if (!elements) {
        rc = -E2BIG;
        goto out;
    }
    elements += hdr_elements;

    if (use_tso) {
        hd_len = sizeof(struct qeth_hdr_tso) +
                 ip_hdrlen(new_skb) + tcp_hdrlen(new_skb);
        len = hd_len;
    } else {
        hd_len = 0;
        len = sizeof(struct qeth_hdr_layer3);
    }

    if (qeth_hdr_chk_and_bounce(new_skb, &hdr, len)) {
        rc = -EINVAL;
        goto out;
    }

    is_sg = skb_is_nonlinear(new_skb);
    rc = qeth_do_send_packet(card, queue, new_skb, hdr, hd_len, hd_len,
                             elements);
out:
    if (!rc) {
        if (new_skb != skb)
            dev_kfree_skb_any(skb);
        if (card->options.performance_stats) {
            card->perf_stats.buf_elements_sent += elements;
            if (is_sg)
                card->perf_stats.sg_skbs_sent++;
            if (use_tso) {
                card->perf_stats.large_send_bytes += tx_bytes;
                card->perf_stats.large_send_cnt++;
            }
        }
    } else {
        if (new_skb != skb)
            dev_kfree_skb_any(new_skb);
    }
    return rc;
}

static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
                                           struct net_device *dev)
{
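The function deleted above was qeth's last user of skb_realloc_headroom(),
the stated goal of this series: it duplicated every skb up front. Its
replacements only ensure writable headroom on demand. The contrast in one
sketch:

/* old path: unconditional copy of the whole header area */
new_skb = skb_realloc_headroom(skb, sizeof(struct qeth_hdr_tso) + VLAN_HLEN);

/* new paths (qeth_xmit / reworked qeth_l3_xmit): copy only if the skb is
 * cloned or lacks headroom; a plain no-op in the common case */
rc = skb_cow_head(skb, hw_hdr_len);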
@@ -2371,10 +2251,11 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
    }
    netif_stop_queue(dev);

    if (IS_IQD(card) || (!skb_is_gso(skb) && ipv == 4))
        rc = qeth_l3_xmit_offload(card, skb, queue, ipv, cast_type);
    else
    if (ipv == 4 || IS_IQD(card))
        rc = qeth_l3_xmit(card, skb, queue, ipv, cast_type);
    else
        rc = qeth_xmit(card, skb, queue, ipv, cast_type,
                       qeth_l3_fill_header);

    if (!rc) {
        card->stats.tx_packets++;
@@ -2412,7 +2293,10 @@ static int __qeth_l3_open(struct net_device *dev)

    if (qdio_stop_irq(card->data.ccwdev, 0) >= 0) {
        napi_enable(&card->napi);
        local_bh_disable();
        napi_schedule(&card->napi);
        /* kick-start the NAPI softirq: */
        local_bh_enable();
    } else
        rc = -EIO;
    return rc;
@@ -2476,6 +2360,15 @@ qeth_l3_neigh_setup(struct net_device *dev, struct neigh_parms *np)
    return 0;
}

static netdev_features_t qeth_l3_osa_features_check(struct sk_buff *skb,
                                                    struct net_device *dev,
                                                    netdev_features_t features)
{
    if (qeth_get_ip_version(skb) != 4)
        features &= ~NETIF_F_HW_VLAN_CTAG_TX;
    return qeth_features_check(skb, dev, features);
}

static const struct net_device_ops qeth_l3_netdev_ops = {
    .ndo_open = qeth_l3_open,
    .ndo_stop = qeth_l3_stop,
@@ -2496,7 +2389,7 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {
    .ndo_stop = qeth_l3_stop,
    .ndo_get_stats = qeth_get_stats,
    .ndo_start_xmit = qeth_l3_hard_start_xmit,
    .ndo_features_check = qeth_features_check,
    .ndo_features_check = qeth_l3_osa_features_check,
    .ndo_validate_addr = eth_validate_addr,
    .ndo_set_rx_mode = qeth_l3_set_rx_mode,
    .ndo_do_ioctl = qeth_do_ioctl,
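Why the new .ndo_features_check: qeth_l3_fill_header() can only place the VLAN
tag into the HW header for IPv4 (or IQD) traffic, so
qeth_l3_osa_features_check() clears NETIF_F_HW_VLAN_CTAG_TX for everything
else. The core then inserts the tag in software before ndo_start_xmit, which
is also why qeth_l3_setup_netdev() below reserves VLAN_HLEN of extra headroom.
Sketch of the resulting behaviour (illustrative; the stack effectively does
this for a tagged skb whose offload bit was cleared):

if (skb_vlan_tag_present(skb) &&
    !(features & NETIF_F_HW_VLAN_CTAG_TX))
    skb = __vlan_hwaccel_push_inside(skb);  /* consumes VLAN_HLEN headroom */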
@@ -2524,6 +2417,11 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
    }

    card->dev->netdev_ops = &qeth_l3_osa_netdev_ops;
    card->dev->needed_headroom = sizeof(struct qeth_hdr);
    /* allow for de-acceleration of NETIF_F_HW_VLAN_CTAG_TX: */
    card->dev->needed_headroom += VLAN_HLEN;
    if (qeth_is_supported(card, IPA_OUTBOUND_TSO))
        card->dev->needed_headroom = sizeof(struct qeth_hdr_tso);

    /*IPv6 address autoconfiguration stuff*/
    qeth_l3_get_unique_id(card);

@@ -2545,6 +2443,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
    } else if (card->info.type == QETH_CARD_TYPE_IQD) {
        card->dev->flags |= IFF_NOARP;
        card->dev->netdev_ops = &qeth_l3_netdev_ops;
        card->dev->needed_headroom = sizeof(struct qeth_hdr) - ETH_HLEN;

        rc = qeth_l3_iqd_read_initial_mac(card);
        if (rc)

@@ -2556,7 +2455,6 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
        return -ENODEV;

    card->dev->ethtool_ops = &qeth_l3_ethtool_ops;
    card->dev->needed_headroom = sizeof(struct qeth_hdr) - ETH_HLEN;
    card->dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
                           NETIF_F_HW_VLAN_CTAG_RX |
                           NETIF_F_HW_VLAN_CTAG_FILTER;