Merge branch 'qed-iscsi'

Manish Rangankar says:

====================
Add QLogic FastLinQ iSCSI (qedi) driver.

This series introduces a hardware-offload iSCSI initiator driver for the
41000 Series Converged Network Adapters (579xx chip) by QLogic. The overall
driver design includes a common module ('qed') and protocol-specific
dependent modules ('qedi' for iSCSI).

This is an open-iscsi driver; modifications to the open-iscsi user-space
components ('iscsid', 'iscsiuio', etc.) are required for the solution to
work. The user-space changes are also in the process of being submitted.

    https://groups.google.com/forum/#!forum/open-iscsi

The 'qed' common module, under drivers/net/ethernet/qlogic/qed/, is
enhanced with the functionality required for iSCSI support. This series
is based on:

    net tree base: Merge of net and net-next as of 11/29/2016
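
As an illustration of the intended qed/qedi split, below is a minimal,
non-authoritative sketch of how a protocol driver such as qedi could bind
to the common module through the qed_iscsi_ops interface added by this
series. The qedi-side names (qedi_bind_qed, qedi_cb_ops, drv_ctx) are
hypothetical and only show the expected call order; the real qedi patches
are submitted separately.

    #include <linux/qed/qed_iscsi_if.h>

    /* Hypothetical qedi-side callback table; qedi fills the common ops. */
    static struct qed_iscsi_cb_ops qedi_cb_ops;

    static int qedi_bind_qed(struct qed_dev *cdev, void *drv_ctx,
                             struct qed_iscsi_tid *tasks,
                             iscsi_event_cb_t event_cb)
    {
            const struct qed_iscsi_ops *ops;
            struct qed_dev_iscsi_info info;
            int rc;

            /* Resolve the ops table exported by the qed common module. */
            ops = qed_get_iscsi_ops();
            if (!ops)
                    return -ENODEV;

            rc = ops->fill_dev_info(cdev, &info);
            if (rc)
                    goto put;

            /* Register qedi's callbacks and private cookie with qed. */
            ops->register_ops(cdev, &qedi_cb_ops, drv_ctx);

            /* Start the iSCSI PF; qed fills in the per-task memory layout. */
            rc = ops->start(cdev, tasks, drv_ctx, event_cb);
            if (rc)
                    goto put;

            return 0;

    put:
            qed_put_iscsi_ops();
            return rc;
    }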

Changes from RFC v2:

  1. qedi patches are squashed into a single patch to prevent kbuild
     robot warnings.
  2. Fixed 'hw_p_cpuq' incompatible pointer type.
  3. Fixed sparse incompatible types in comparison expression.
  4. Misc fixes with latest 'checkpatch --strict' option.
  5. Removed the int_mode option from MODULE_PARAM.
  6. Prefixed all MODULE_PARAM parameters with qedi_*.
  7. Used CONFIG_QED_ISCSI instead of CONFIG_QEDI.
  8. Added a fix for bad task memory access.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by David S. Miller on 2016-12-02 12:44:38 -05:00
commit 7df5358d47
15 changed files with 2833 additions and 29 deletions


@ -110,4 +110,7 @@ config QEDE
config QED_RDMA
bool
config QED_ISCSI
bool
endif # NET_VENDOR_QLOGIC


@ -6,3 +6,4 @@ qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \
qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o
qed-$(CONFIG_QED_LL2) += qed_ll2.o
qed-$(CONFIG_QED_RDMA) += qed_roce.o
qed-$(CONFIG_QED_ISCSI) += qed_iscsi.o qed_ooo.o


@ -35,6 +35,7 @@ extern const struct qed_common_ops qed_common_ops_pass;
#define QED_WFQ_UNIT 100
#define ISCSI_BDQ_ID(_port_id) (_port_id)
#define QED_WID_SIZE (1024)
#define QED_PF_DEMS_SIZE (4)
@ -382,7 +383,9 @@ struct qed_hwfn {
/* Protocol related */
bool using_ll2;
struct qed_ll2_info *p_ll2_info;
struct qed_ooo_info *p_ooo_info;
struct qed_rdma_info *p_rdma_info;
struct qed_iscsi_info *p_iscsi_info;
struct qed_pf_params pf_params;
bool b_rdma_enabled_in_prs;
@ -581,6 +584,8 @@ struct qed_dev {
/* Linux specific here */
struct qede_dev *edev;
struct pci_dev *pdev;
u32 flags;
#define QED_FLAG_STORAGE_STARTED (BIT(0))
int msg_enable;
struct pci_params pci_params;
@ -594,6 +599,7 @@ struct qed_dev {
union {
struct qed_common_cb_ops *common;
struct qed_eth_cb_ops *eth;
struct qed_iscsi_cb_ops *iscsi;
} protocol_ops;
void *ops_cookie;
@ -603,7 +609,7 @@ struct qed_dev {
struct qed_cb_ll2_info *ll2;
u8 ll2_mac_address[ETH_ALEN];
#endif
DECLARE_HASHTABLE(connections, 10);
const struct firmware *firmware;
u32 rdma_max_sge;


@ -29,8 +29,10 @@
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
#include "qed_iscsi.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_ooo.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
@ -146,6 +148,10 @@ void qed_resc_free(struct qed_dev *cdev)
#ifdef CONFIG_QED_LL2
qed_ll2_free(p_hwfn, p_hwfn->p_ll2_info);
#endif
if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
qed_iscsi_free(p_hwfn, p_hwfn->p_iscsi_info);
qed_ooo_free(p_hwfn, p_hwfn->p_ooo_info);
}
qed_iov_free(p_hwfn);
qed_dmae_info_free(p_hwfn);
qed_dcbx_info_free(p_hwfn, p_hwfn->p_dcbx_info);
@ -402,6 +408,8 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
int qed_resc_alloc(struct qed_dev *cdev)
{
struct qed_iscsi_info *p_iscsi_info;
struct qed_ooo_info *p_ooo_info;
#ifdef CONFIG_QED_LL2
struct qed_ll2_info *p_ll2_info;
#endif
@ -507,6 +515,16 @@ int qed_resc_alloc(struct qed_dev *cdev)
p_hwfn->p_ll2_info = p_ll2_info;
}
#endif
if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
p_iscsi_info = qed_iscsi_alloc(p_hwfn);
if (!p_iscsi_info)
goto alloc_no_mem;
p_hwfn->p_iscsi_info = p_iscsi_info;
p_ooo_info = qed_ooo_alloc(p_hwfn);
if (!p_ooo_info)
goto alloc_no_mem;
p_hwfn->p_ooo_info = p_ooo_info;
}
/* DMA info initialization */
rc = qed_dmae_info_alloc(p_hwfn);
@ -560,6 +578,10 @@ void qed_resc_setup(struct qed_dev *cdev)
if (p_hwfn->using_ll2)
qed_ll2_setup(p_hwfn, p_hwfn->p_ll2_info);
#endif
if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
qed_iscsi_setup(p_hwfn, p_hwfn->p_iscsi_info);
qed_ooo_setup(p_hwfn, p_hwfn->p_ooo_info);
}
}
}

File diff suppressed because it is too large.


@ -0,0 +1,52 @@
/* QLogic qed NIC Driver
* Copyright (c) 2015 QLogic Corporation
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
#ifndef _QED_ISCSI_H
#define _QED_ISCSI_H
#include <linux/types.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/qed/tcp_common.h>
#include <linux/qed/qed_iscsi_if.h>
#include <linux/qed/qed_chain.h>
#include "qed.h"
#include "qed_hsi.h"
#include "qed_mcp.h"
#include "qed_sp.h"
struct qed_iscsi_info {
spinlock_t lock; /* Connection resources. */
struct list_head free_list;
u16 max_num_outstanding_tasks;
void *event_context;
iscsi_event_cb_t event_cb;
};
#ifdef CONFIG_QED_LL2
extern const struct qed_ll2_ops qed_ll2_ops_pass;
#endif
#if IS_ENABLED(CONFIG_QED_ISCSI)
struct qed_iscsi_info *qed_iscsi_alloc(struct qed_hwfn *p_hwfn);
void qed_iscsi_setup(struct qed_hwfn *p_hwfn,
struct qed_iscsi_info *p_iscsi_info);
void qed_iscsi_free(struct qed_hwfn *p_hwfn,
struct qed_iscsi_info *p_iscsi_info);
#else /* IS_ENABLED(CONFIG_QED_ISCSI) */
static inline struct qed_iscsi_info *qed_iscsi_alloc(
struct qed_hwfn *p_hwfn) { return NULL; }
static inline void qed_iscsi_setup(struct qed_hwfn *p_hwfn,
struct qed_iscsi_info *p_iscsi_info) {}
static inline void qed_iscsi_free(struct qed_hwfn *p_hwfn,
struct qed_iscsi_info *p_iscsi_info) {}
#endif /* IS_ENABLED(CONFIG_QED_ISCSI) */
#endif


@ -36,6 +36,7 @@
#include "qed_int.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_ooo.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_roce.h"
@ -296,25 +297,34 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
list_del(&p_pkt->list_entry);
b_last_packet = list_empty(&p_tx->active_descq);
list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
p_tx->cur_completing_packet = *p_pkt;
p_tx->cur_completing_bd_idx = 1;
b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used;
tx_frag = p_pkt->bds_set[0].tx_frag;
if (p_ll2_conn->gsi_enable)
qed_ll2b_release_tx_gsi_packet(p_hwfn,
p_ll2_conn->my_id,
p_pkt->cookie,
tx_frag,
b_last_frag,
b_last_packet);
else
qed_ll2b_complete_tx_packet(p_hwfn,
p_ll2_conn->my_id,
p_pkt->cookie,
tx_frag,
b_last_frag,
b_last_packet);
if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO) {
struct qed_ooo_buffer *p_buffer;
p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
p_buffer);
} else {
p_tx->cur_completing_packet = *p_pkt;
p_tx->cur_completing_bd_idx = 1;
b_last_frag =
p_tx->cur_completing_bd_idx == p_pkt->bd_used;
tx_frag = p_pkt->bds_set[0].tx_frag;
if (p_ll2_conn->gsi_enable)
qed_ll2b_release_tx_gsi_packet(p_hwfn,
p_ll2_conn->my_id,
p_pkt->cookie,
tx_frag,
b_last_frag,
b_last_packet);
else
qed_ll2b_complete_tx_packet(p_hwfn,
p_ll2_conn->my_id,
p_pkt->cookie,
tx_frag,
b_last_frag,
b_last_packet);
}
}
}
@ -540,13 +550,458 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);
rx_buf_addr = p_pkt->rx_buf_addr;
cookie = p_pkt->cookie;
if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO) {
struct qed_ooo_buffer *p_buffer;
b_last = list_empty(&p_rx->active_descq);
p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
p_buffer);
} else {
rx_buf_addr = p_pkt->rx_buf_addr;
cookie = p_pkt->cookie;
b_last = list_empty(&p_rx->active_descq);
}
}
}
#if IS_ENABLED(CONFIG_QED_ISCSI)
static u8 qed_ll2_convert_rx_parse_to_tx_flags(u16 parse_flags)
{
u8 bd_flags = 0;
if (GET_FIELD(parse_flags, PARSING_AND_ERR_FLAGS_TAG8021QEXIST))
SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_VLAN_INSERTION, 1);
return bd_flags;
}
static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn)
{
struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
u16 packet_length = 0, parse_flags = 0, vlan = 0;
struct qed_ll2_rx_packet *p_pkt = NULL;
u32 num_ooo_add_to_peninsula = 0, cid;
union core_rx_cqe_union *cqe = NULL;
u16 cq_new_idx = 0, cq_old_idx = 0;
struct qed_ooo_buffer *p_buffer;
struct ooo_opaque *iscsi_ooo;
u8 placement_offset = 0;
u8 cqe_type;
cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
if (cq_new_idx == cq_old_idx)
return 0;
while (cq_new_idx != cq_old_idx) {
struct core_rx_fast_path_cqe *p_cqe_fp;
cqe = qed_chain_consume(&p_rx->rcq_chain);
cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
cqe_type = cqe->rx_cqe_sp.type;
if (cqe_type != CORE_RX_CQE_TYPE_REGULAR) {
DP_NOTICE(p_hwfn,
"Got a non-regular LB LL2 completion [type 0x%02x]\n",
cqe_type);
return -EINVAL;
}
p_cqe_fp = &cqe->rx_cqe_fp;
placement_offset = p_cqe_fp->placement_offset;
parse_flags = le16_to_cpu(p_cqe_fp->parse_flags.flags);
packet_length = le16_to_cpu(p_cqe_fp->packet_length);
vlan = le16_to_cpu(p_cqe_fp->vlan);
iscsi_ooo = (struct ooo_opaque *)&p_cqe_fp->opaque_data;
qed_ooo_save_history_entry(p_hwfn, p_hwfn->p_ooo_info,
iscsi_ooo);
cid = le32_to_cpu(iscsi_ooo->cid);
/* Process delete isle first */
if (iscsi_ooo->drop_size)
qed_ooo_delete_isles(p_hwfn, p_hwfn->p_ooo_info, cid,
iscsi_ooo->drop_isle,
iscsi_ooo->drop_size);
if (iscsi_ooo->ooo_opcode == TCP_EVENT_NOP)
continue;
/* Now process create/add/join isles */
if (list_empty(&p_rx->active_descq)) {
DP_NOTICE(p_hwfn,
"LL2 OOO RX chain has no submitted buffers\n"
);
return -EIO;
}
p_pkt = list_first_entry(&p_rx->active_descq,
struct qed_ll2_rx_packet, list_entry);
if ((iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_NEW_ISLE) ||
(iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_RIGHT) ||
(iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_LEFT) ||
(iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_PEN) ||
(iscsi_ooo->ooo_opcode == TCP_EVENT_JOIN)) {
if (!p_pkt) {
DP_NOTICE(p_hwfn,
"LL2 OOO RX packet is not valid\n");
return -EIO;
}
list_del(&p_pkt->list_entry);
p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
p_buffer->packet_length = packet_length;
p_buffer->parse_flags = parse_flags;
p_buffer->vlan = vlan;
p_buffer->placement_offset = placement_offset;
qed_chain_consume(&p_rx->rxq_chain);
list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
switch (iscsi_ooo->ooo_opcode) {
case TCP_EVENT_ADD_NEW_ISLE:
qed_ooo_add_new_isle(p_hwfn,
p_hwfn->p_ooo_info,
cid,
iscsi_ooo->ooo_isle,
p_buffer);
break;
case TCP_EVENT_ADD_ISLE_RIGHT:
qed_ooo_add_new_buffer(p_hwfn,
p_hwfn->p_ooo_info,
cid,
iscsi_ooo->ooo_isle,
p_buffer,
QED_OOO_RIGHT_BUF);
break;
case TCP_EVENT_ADD_ISLE_LEFT:
qed_ooo_add_new_buffer(p_hwfn,
p_hwfn->p_ooo_info,
cid,
iscsi_ooo->ooo_isle,
p_buffer,
QED_OOO_LEFT_BUF);
break;
case TCP_EVENT_JOIN:
qed_ooo_add_new_buffer(p_hwfn,
p_hwfn->p_ooo_info,
cid,
iscsi_ooo->ooo_isle + 1,
p_buffer,
QED_OOO_LEFT_BUF);
qed_ooo_join_isles(p_hwfn,
p_hwfn->p_ooo_info,
cid, iscsi_ooo->ooo_isle);
break;
case TCP_EVENT_ADD_PEN:
num_ooo_add_to_peninsula++;
qed_ooo_put_ready_buffer(p_hwfn,
p_hwfn->p_ooo_info,
p_buffer, true);
break;
}
} else {
DP_NOTICE(p_hwfn,
"Unexpected event (%d) TX OOO completion\n",
iscsi_ooo->ooo_opcode);
}
}
return 0;
}
static void
qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn)
{
struct qed_ooo_buffer *p_buffer;
int rc;
u16 l4_hdr_offset_w;
dma_addr_t first_frag;
u16 parse_flags;
u8 bd_flags;
/* Submit Tx buffers here */
while ((p_buffer = qed_ooo_get_ready_buffer(p_hwfn,
p_hwfn->p_ooo_info))) {
l4_hdr_offset_w = 0;
bd_flags = 0;
first_frag = p_buffer->rx_buffer_phys_addr +
p_buffer->placement_offset;
parse_flags = p_buffer->parse_flags;
bd_flags = qed_ll2_convert_rx_parse_to_tx_flags(parse_flags);
SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_FORCE_VLAN_MODE, 1);
SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_L4_PROTOCOL, 1);
rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id, 1,
p_buffer->vlan, bd_flags,
l4_hdr_offset_w,
p_ll2_conn->tx_dest, 0,
first_frag,
p_buffer->packet_length,
p_buffer, true);
if (rc) {
qed_ooo_put_ready_buffer(p_hwfn, p_hwfn->p_ooo_info,
p_buffer, false);
break;
}
}
}
static void
qed_ooo_submit_rx_buffers(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn)
{
struct qed_ooo_buffer *p_buffer;
int rc;
while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
p_hwfn->p_ooo_info))) {
rc = qed_ll2_post_rx_buffer(p_hwfn,
p_ll2_conn->my_id,
p_buffer->rx_buffer_phys_addr,
0, p_buffer, true);
if (rc) {
qed_ooo_put_free_buffer(p_hwfn,
p_hwfn->p_ooo_info, p_buffer);
break;
}
}
}
static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
{
struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
int rc;
rc = qed_ll2_lb_rxq_handler(p_hwfn, p_ll2_conn);
if (rc)
return rc;
qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn);
return 0;
}
static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
{
struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
struct qed_ll2_tx_packet *p_pkt = NULL;
struct qed_ooo_buffer *p_buffer;
bool b_dont_submit_rx = false;
u16 new_idx = 0, num_bds = 0;
int rc;
new_idx = le16_to_cpu(*p_tx->p_fw_cons);
num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
if (!num_bds)
return 0;
while (num_bds) {
if (list_empty(&p_tx->active_descq))
return -EINVAL;
p_pkt = list_first_entry(&p_tx->active_descq,
struct qed_ll2_tx_packet, list_entry);
if (!p_pkt)
return -EINVAL;
if (p_pkt->bd_used != 1) {
DP_NOTICE(p_hwfn,
"Unexpectedly many BDs(%d) in TX OOO completion\n",
p_pkt->bd_used);
return -EINVAL;
}
list_del(&p_pkt->list_entry);
num_bds--;
p_tx->bds_idx++;
qed_chain_consume(&p_tx->txq_chain);
p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
if (b_dont_submit_rx) {
qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
p_buffer);
continue;
}
rc = qed_ll2_post_rx_buffer(p_hwfn, p_ll2_conn->my_id,
p_buffer->rx_buffer_phys_addr, 0,
p_buffer, true);
if (rc != 0) {
qed_ooo_put_free_buffer(p_hwfn,
p_hwfn->p_ooo_info, p_buffer);
b_dont_submit_rx = true;
}
}
qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn);
return 0;
}
static int
qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_info,
u16 rx_num_ooo_buffers, u16 mtu)
{
struct qed_ooo_buffer *p_buf = NULL;
void *p_virt;
u16 buf_idx;
int rc = 0;
if (p_ll2_info->conn_type != QED_LL2_TYPE_ISCSI_OOO)
return rc;
if (!rx_num_ooo_buffers)
return -EINVAL;
for (buf_idx = 0; buf_idx < rx_num_ooo_buffers; buf_idx++) {
p_buf = kzalloc(sizeof(*p_buf), GFP_KERNEL);
if (!p_buf) {
rc = -ENOMEM;
goto out;
}
p_buf->rx_buffer_size = mtu + 26 + ETH_CACHE_LINE_SIZE;
p_buf->rx_buffer_size = (p_buf->rx_buffer_size +
ETH_CACHE_LINE_SIZE - 1) &
~(ETH_CACHE_LINE_SIZE - 1);
p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
p_buf->rx_buffer_size,
&p_buf->rx_buffer_phys_addr,
GFP_KERNEL);
if (!p_virt) {
kfree(p_buf);
rc = -ENOMEM;
goto out;
}
p_buf->rx_buffer_virt_addr = p_virt;
qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, p_buf);
}
DP_VERBOSE(p_hwfn, QED_MSG_LL2,
"Allocated [%04x] LL2 OOO buffers [each of size 0x%08x]\n",
rx_num_ooo_buffers, p_buf->rx_buffer_size);
out:
return rc;
}
static void
qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn)
{
if (p_ll2_conn->conn_type != QED_LL2_TYPE_ISCSI_OOO)
return;
qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
}
static void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn)
{
struct qed_ooo_buffer *p_buffer;
if (p_ll2_conn->conn_type != QED_LL2_TYPE_ISCSI_OOO)
return;
qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
p_hwfn->p_ooo_info))) {
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
p_buffer->rx_buffer_size,
p_buffer->rx_buffer_virt_addr,
p_buffer->rx_buffer_phys_addr);
kfree(p_buffer);
}
}
static void qed_ll2_stop_ooo(struct qed_dev *cdev)
{
struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
DP_VERBOSE(cdev, QED_MSG_STORAGE, "Stopping LL2 OOO queue [%02x]\n",
*handle);
qed_ll2_terminate_connection(hwfn, *handle);
qed_ll2_release_connection(hwfn, *handle);
*handle = QED_LL2_UNUSED_HANDLE;
}
static int qed_ll2_start_ooo(struct qed_dev *cdev,
struct qed_ll2_params *params)
{
struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
struct qed_ll2_info *ll2_info;
int rc;
ll2_info = kzalloc(sizeof(*ll2_info), GFP_KERNEL);
if (!ll2_info)
return -ENOMEM;
ll2_info->conn_type = QED_LL2_TYPE_ISCSI_OOO;
ll2_info->mtu = params->mtu;
ll2_info->rx_drop_ttl0_flg = params->drop_ttl0_packets;
ll2_info->rx_vlan_removal_en = params->rx_vlan_stripping;
ll2_info->tx_tc = OOO_LB_TC;
ll2_info->tx_dest = CORE_TX_DEST_LB;
rc = qed_ll2_acquire_connection(hwfn, ll2_info,
QED_LL2_RX_SIZE, QED_LL2_TX_SIZE,
handle);
kfree(ll2_info);
if (rc) {
DP_INFO(cdev, "Failed to acquire LL2 OOO connection\n");
goto out;
}
rc = qed_ll2_establish_connection(hwfn, *handle);
if (rc) {
DP_INFO(cdev, "Failed to establist LL2 OOO connection\n");
goto fail;
}
return 0;
fail:
qed_ll2_release_connection(hwfn, *handle);
out:
*handle = QED_LL2_UNUSED_HANDLE;
return rc;
}
#else /* IS_ENABLED(CONFIG_QED_ISCSI) */
static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn,
void *p_cookie) { return -EINVAL; }
static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn,
void *p_cookie) { return -EINVAL; }
static inline int
qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_info,
u16 rx_num_ooo_buffers, u16 mtu) { return 0; }
static inline void
qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn) { return; }
static inline void
qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn) { return; }
static inline void qed_ll2_stop_ooo(struct qed_dev *cdev) { return; }
static inline int qed_ll2_start_ooo(struct qed_dev *cdev,
struct qed_ll2_params *params)
{ return -EINVAL; }
#endif /* IS_ENABLED(CONFIG_QED_ISCSI) */
static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn,
u8 action_on_error)
@ -588,7 +1043,8 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
p_ramrod->drop_ttl0_flg = p_ll2_conn->rx_drop_ttl0_flg;
p_ramrod->inner_vlan_removal_en = p_ll2_conn->rx_vlan_removal_en;
p_ramrod->queue_id = p_ll2_conn->queue_id;
p_ramrod->main_func_queue = 1;
p_ramrod->main_func_queue = (conn_type == QED_LL2_TYPE_ISCSI_OOO) ? 0
: 1;
if ((IS_MF_DEFAULT(p_hwfn) || IS_MF_SI(p_hwfn)) &&
p_ramrod->main_func_queue && (conn_type != QED_LL2_TYPE_ROCE)) {
@ -619,6 +1075,11 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
return 0;
if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO)
p_ll2_conn->tx_stats_en = 0;
else
p_ll2_conn->tx_stats_en = 1;
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
init_data.cid = p_ll2_conn->cid;
@ -636,7 +1097,6 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
p_ramrod->sb_index = p_tx->tx_sb_index;
p_ramrod->mtu = cpu_to_le16(p_ll2_conn->mtu);
p_ll2_conn->tx_stats_en = 1;
p_ramrod->stats_en = p_ll2_conn->tx_stats_en;
p_ramrod->stats_id = p_ll2_conn->tx_stats_id;
@ -860,9 +1320,19 @@ int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
if (rc)
goto q_allocate_fail;
rc = qed_ll2_acquire_connection_ooo(p_hwfn, p_ll2_info,
rx_num_desc * 2, p_params->mtu);
if (rc)
goto q_allocate_fail;
/* Register callbacks for the Rx/Tx queues */
comp_rx_cb = qed_ll2_rxq_completion;
comp_tx_cb = qed_ll2_txq_completion;
if (p_params->conn_type == QED_LL2_TYPE_ISCSI_OOO) {
comp_rx_cb = qed_ll2_lb_rxq_completion;
comp_tx_cb = qed_ll2_lb_txq_completion;
} else {
comp_rx_cb = qed_ll2_rxq_completion;
comp_tx_cb = qed_ll2_txq_completion;
}
if (rx_num_desc) {
qed_int_register_cb(p_hwfn, comp_rx_cb,
@ -975,6 +1445,8 @@ int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
if (p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
qed_wr(p_hwfn, p_hwfn->p_main_ptt, PRS_REG_USE_LIGHT_L2, 1);
qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn);
return rc;
}
@ -1213,6 +1685,7 @@ int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
u16 vlan,
u8 bd_flags,
u16 l4_hdr_offset_w,
enum qed_ll2_tx_dest e_tx_dest,
enum qed_ll2_roce_flavor_type qed_roce_flavor,
dma_addr_t first_frag,
u16 first_frag_len, void *cookie, u8 notify_fw)
@ -1222,6 +1695,7 @@ int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
enum core_roce_flavor_type roce_flavor;
struct qed_ll2_tx_queue *p_tx;
struct qed_chain *p_tx_chain;
enum core_tx_dest tx_dest;
unsigned long flags;
int rc = 0;
@ -1252,6 +1726,8 @@ int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
goto out;
}
tx_dest = e_tx_dest == QED_LL2_TX_DEST_NW ? CORE_TX_DEST_NW :
CORE_TX_DEST_LB;
if (qed_roce_flavor == QED_LL2_ROCE) {
roce_flavor = CORE_ROCE;
} else if (qed_roce_flavor == QED_LL2_RROCE) {
@ -1266,7 +1742,7 @@ int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
num_of_bds, first_frag,
first_frag_len, cookie, notify_fw);
qed_ll2_prepare_tx_packet_set_bd(p_hwfn, p_ll2_conn, p_curp,
num_of_bds, CORE_TX_DEST_NW,
num_of_bds, tx_dest,
vlan, bd_flags, l4_hdr_offset_w,
roce_flavor,
first_frag, first_frag_len);
@ -1341,6 +1817,9 @@ int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
qed_ll2_rxq_flush(p_hwfn, connection_handle);
}
if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO)
qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
return rc;
}
@ -1371,6 +1850,8 @@ void qed_ll2_release_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
qed_cxt_release_cid(p_hwfn, p_ll2_conn->cid);
qed_ll2_release_connection_ooo(p_hwfn, p_ll2_conn);
mutex_lock(&p_ll2_conn->mutex);
p_ll2_conn->b_active = false;
mutex_unlock(&p_ll2_conn->mutex);
@ -1517,6 +1998,7 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
enum qed_ll2_conn_type conn_type;
struct qed_ptt *p_ptt;
int rc, i;
u8 gsi_enable = 1;
/* Initialize LL2 locks & lists */
INIT_LIST_HEAD(&cdev->ll2->list);
@ -1548,6 +2030,7 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
switch (QED_LEADING_HWFN(cdev)->hw_info.personality) {
case QED_PCI_ISCSI:
conn_type = QED_LL2_TYPE_ISCSI;
gsi_enable = 0;
break;
case QED_PCI_ETH_ROCE:
conn_type = QED_LL2_TYPE_ROCE;
@ -1564,7 +2047,7 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
ll2_info.rx_vlan_removal_en = params->rx_vlan_stripping;
ll2_info.tx_tc = 0;
ll2_info.tx_dest = CORE_TX_DEST_NW;
ll2_info.gsi_enable = 1;
ll2_info.gsi_enable = gsi_enable;
rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &ll2_info,
QED_LL2_RX_SIZE, QED_LL2_TX_SIZE,
@ -1611,6 +2094,17 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
goto release_terminate;
}
if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI &&
cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable) {
DP_VERBOSE(cdev, QED_MSG_STORAGE, "Starting OOO LL2 queue\n");
rc = qed_ll2_start_ooo(cdev, params);
if (rc) {
DP_INFO(cdev,
"Failed to initialize the OOO LL2 queue\n");
goto release_terminate;
}
}
p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
if (!p_ptt) {
DP_INFO(cdev, "Failed to acquire PTT\n");
@ -1660,6 +2154,10 @@ static int qed_ll2_stop(struct qed_dev *cdev)
qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
eth_zero_addr(cdev->ll2_mac_address);
if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI &&
cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable)
qed_ll2_stop_ooo(cdev);
rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev),
cdev->ll2->handle);
if (rc)
@ -1714,7 +2212,8 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb)
rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev),
cdev->ll2->handle,
1 + skb_shinfo(skb)->nr_frags,
vlan, flags, 0, 0 /* RoCE FLAVOR */,
vlan, flags, 0, QED_LL2_TX_DEST_NW,
0 /* RoCE FLAVOR */,
mapping, skb->len, skb, 1);
if (rc)
goto err;


@ -41,6 +41,12 @@ enum qed_ll2_conn_type {
MAX_QED_LL2_RX_CONN_TYPE
};
enum qed_ll2_tx_dest {
QED_LL2_TX_DEST_NW, /* Light L2 TX Destination to the Network */
QED_LL2_TX_DEST_LB, /* Light L2 TX Destination to the Loopback */
QED_LL2_TX_DEST_MAX
};
struct qed_ll2_rx_packet {
struct list_head list_entry;
struct core_rx_bd_with_buff_len *rxq_bd;
@ -192,6 +198,8 @@ int qed_ll2_post_rx_buffer(struct qed_hwfn *p_hwfn,
* @param l4_hdr_offset_w L4 Header Offset from start of packet
* (in words). This is needed if both l4_csum
* and ipv6_ext are set
* @param e_tx_dest indicates if the packet is to be transmitted via
* loopback or to the network
* @param first_frag
* @param first_frag_len
* @param cookie
@ -206,6 +214,7 @@ int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
u16 vlan,
u8 bd_flags,
u16 l4_hdr_offset_w,
enum qed_ll2_tx_dest e_tx_dest,
enum qed_ll2_roce_flavor_type qed_roce_flavor,
dma_addr_t first_frag,
u16 first_frag_len, void *cookie, u8 notify_fw);


@ -0,0 +1,501 @@
/* QLogic qed NIC Driver
* Copyright (c) 2015 QLogic Corporation
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_iscsi.h"
#include "qed_ll2.h"
#include "qed_ooo.h"
static struct qed_ooo_archipelago
*qed_ooo_seek_archipelago(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
u32 cid)
{
struct qed_ooo_archipelago *p_archipelago = NULL;
list_for_each_entry(p_archipelago,
&p_ooo_info->archipelagos_list, list_entry) {
if (p_archipelago->cid == cid)
return p_archipelago;
}
return NULL;
}
static struct qed_ooo_isle *qed_ooo_seek_isle(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
u32 cid, u8 isle)
{
struct qed_ooo_archipelago *p_archipelago = NULL;
struct qed_ooo_isle *p_isle = NULL;
u8 the_num_of_isle = 1;
p_archipelago = qed_ooo_seek_archipelago(p_hwfn, p_ooo_info, cid);
if (!p_archipelago) {
DP_NOTICE(p_hwfn,
"Connection %d is not found in OOO list\n", cid);
return NULL;
}
list_for_each_entry(p_isle, &p_archipelago->isles_list, list_entry) {
if (the_num_of_isle == isle)
return p_isle;
the_num_of_isle++;
}
return NULL;
}
void qed_ooo_save_history_entry(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
struct ooo_opaque *p_cqe)
{
struct qed_ooo_history *p_history = &p_ooo_info->ooo_history;
if (p_history->head_idx == p_history->num_of_cqes)
p_history->head_idx = 0;
p_history->p_cqes[p_history->head_idx] = *p_cqe;
p_history->head_idx++;
}
struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn)
{
struct qed_ooo_info *p_ooo_info;
u16 max_num_archipelagos = 0;
u16 max_num_isles = 0;
u32 i;
if (p_hwfn->hw_info.personality != QED_PCI_ISCSI) {
DP_NOTICE(p_hwfn,
"Failed to allocate qed_ooo_info: unknown personality\n");
return NULL;
}
max_num_archipelagos = p_hwfn->pf_params.iscsi_pf_params.num_cons;
max_num_isles = QED_MAX_NUM_ISLES + max_num_archipelagos;
if (!max_num_archipelagos) {
DP_NOTICE(p_hwfn,
"Failed to allocate qed_ooo_info: unknown amount of connections\n");
return NULL;
}
p_ooo_info = kzalloc(sizeof(*p_ooo_info), GFP_KERNEL);
if (!p_ooo_info)
return NULL;
INIT_LIST_HEAD(&p_ooo_info->free_buffers_list);
INIT_LIST_HEAD(&p_ooo_info->ready_buffers_list);
INIT_LIST_HEAD(&p_ooo_info->free_isles_list);
INIT_LIST_HEAD(&p_ooo_info->free_archipelagos_list);
INIT_LIST_HEAD(&p_ooo_info->archipelagos_list);
p_ooo_info->p_isles_mem = kcalloc(max_num_isles,
sizeof(struct qed_ooo_isle),
GFP_KERNEL);
if (!p_ooo_info->p_isles_mem)
goto no_isles_mem;
for (i = 0; i < max_num_isles; i++) {
INIT_LIST_HEAD(&p_ooo_info->p_isles_mem[i].buffers_list);
list_add_tail(&p_ooo_info->p_isles_mem[i].list_entry,
&p_ooo_info->free_isles_list);
}
p_ooo_info->p_archipelagos_mem =
kcalloc(max_num_archipelagos,
sizeof(struct qed_ooo_archipelago),
GFP_KERNEL);
if (!p_ooo_info->p_archipelagos_mem)
goto no_archipelagos_mem;
for (i = 0; i < max_num_archipelagos; i++) {
INIT_LIST_HEAD(&p_ooo_info->p_archipelagos_mem[i].isles_list);
list_add_tail(&p_ooo_info->p_archipelagos_mem[i].list_entry,
&p_ooo_info->free_archipelagos_list);
}
p_ooo_info->ooo_history.p_cqes =
kcalloc(QED_MAX_NUM_OOO_HISTORY_ENTRIES,
sizeof(struct ooo_opaque),
GFP_KERNEL);
if (!p_ooo_info->ooo_history.p_cqes)
goto no_history_mem;
return p_ooo_info;
no_history_mem:
kfree(p_ooo_info->p_archipelagos_mem);
no_archipelagos_mem:
kfree(p_ooo_info->p_isles_mem);
no_isles_mem:
kfree(p_ooo_info);
return NULL;
}
void qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info, u32 cid)
{
struct qed_ooo_archipelago *p_archipelago;
struct qed_ooo_buffer *p_buffer;
struct qed_ooo_isle *p_isle;
bool b_found = false;
if (list_empty(&p_ooo_info->archipelagos_list))
return;
list_for_each_entry(p_archipelago,
&p_ooo_info->archipelagos_list, list_entry) {
if (p_archipelago->cid == cid) {
list_del(&p_archipelago->list_entry);
b_found = true;
break;
}
}
if (!b_found)
return;
while (!list_empty(&p_archipelago->isles_list)) {
p_isle = list_first_entry(&p_archipelago->isles_list,
struct qed_ooo_isle, list_entry);
list_del(&p_isle->list_entry);
while (!list_empty(&p_isle->buffers_list)) {
p_buffer = list_first_entry(&p_isle->buffers_list,
struct qed_ooo_buffer,
list_entry);
if (!p_buffer)
break;
list_del(&p_buffer->list_entry);
list_add_tail(&p_buffer->list_entry,
&p_ooo_info->free_buffers_list);
}
list_add_tail(&p_isle->list_entry,
&p_ooo_info->free_isles_list);
}
list_add_tail(&p_archipelago->list_entry,
&p_ooo_info->free_archipelagos_list);
}
void qed_ooo_release_all_isles(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info)
{
struct qed_ooo_archipelago *p_arch;
struct qed_ooo_buffer *p_buffer;
struct qed_ooo_isle *p_isle;
while (!list_empty(&p_ooo_info->archipelagos_list)) {
p_arch = list_first_entry(&p_ooo_info->archipelagos_list,
struct qed_ooo_archipelago,
list_entry);
list_del(&p_arch->list_entry);
while (!list_empty(&p_arch->isles_list)) {
p_isle = list_first_entry(&p_arch->isles_list,
struct qed_ooo_isle,
list_entry);
list_del(&p_isle->list_entry);
while (!list_empty(&p_isle->buffers_list)) {
p_buffer =
list_first_entry(&p_isle->buffers_list,
struct qed_ooo_buffer,
list_entry);
if (!p_buffer)
break;
list_del(&p_buffer->list_entry);
list_add_tail(&p_buffer->list_entry,
&p_ooo_info->free_buffers_list);
}
list_add_tail(&p_isle->list_entry,
&p_ooo_info->free_isles_list);
}
list_add_tail(&p_arch->list_entry,
&p_ooo_info->free_archipelagos_list);
}
if (!list_empty(&p_ooo_info->ready_buffers_list))
list_splice_tail_init(&p_ooo_info->ready_buffers_list,
&p_ooo_info->free_buffers_list);
}
void qed_ooo_setup(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info)
{
qed_ooo_release_all_isles(p_hwfn, p_ooo_info);
memset(p_ooo_info->ooo_history.p_cqes, 0,
p_ooo_info->ooo_history.num_of_cqes *
sizeof(struct ooo_opaque));
p_ooo_info->ooo_history.head_idx = 0;
}
void qed_ooo_free(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info)
{
struct qed_ooo_buffer *p_buffer;
qed_ooo_release_all_isles(p_hwfn, p_ooo_info);
while (!list_empty(&p_ooo_info->free_buffers_list)) {
p_buffer = list_first_entry(&p_ooo_info->free_buffers_list,
struct qed_ooo_buffer, list_entry);
if (!p_buffer)
break;
list_del(&p_buffer->list_entry);
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
p_buffer->rx_buffer_size,
p_buffer->rx_buffer_virt_addr,
p_buffer->rx_buffer_phys_addr);
kfree(p_buffer);
}
kfree(p_ooo_info->p_isles_mem);
kfree(p_ooo_info->p_archipelagos_mem);
kfree(p_ooo_info->ooo_history.p_cqes);
kfree(p_ooo_info);
}
void qed_ooo_put_free_buffer(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
struct qed_ooo_buffer *p_buffer)
{
list_add_tail(&p_buffer->list_entry, &p_ooo_info->free_buffers_list);
}
struct qed_ooo_buffer *qed_ooo_get_free_buffer(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info)
{
struct qed_ooo_buffer *p_buffer = NULL;
if (!list_empty(&p_ooo_info->free_buffers_list)) {
p_buffer = list_first_entry(&p_ooo_info->free_buffers_list,
struct qed_ooo_buffer, list_entry);
list_del(&p_buffer->list_entry);
}
return p_buffer;
}
void qed_ooo_put_ready_buffer(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
struct qed_ooo_buffer *p_buffer, u8 on_tail)
{
if (on_tail)
list_add_tail(&p_buffer->list_entry,
&p_ooo_info->ready_buffers_list);
else
list_add(&p_buffer->list_entry,
&p_ooo_info->ready_buffers_list);
}
struct qed_ooo_buffer *qed_ooo_get_ready_buffer(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info)
{
struct qed_ooo_buffer *p_buffer = NULL;
if (!list_empty(&p_ooo_info->ready_buffers_list)) {
p_buffer = list_first_entry(&p_ooo_info->ready_buffers_list,
struct qed_ooo_buffer, list_entry);
list_del(&p_buffer->list_entry);
}
return p_buffer;
}
void qed_ooo_delete_isles(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
u32 cid, u8 drop_isle, u8 drop_size)
{
struct qed_ooo_archipelago *p_archipelago = NULL;
struct qed_ooo_isle *p_isle = NULL;
u8 isle_idx;
p_archipelago = qed_ooo_seek_archipelago(p_hwfn, p_ooo_info, cid);
for (isle_idx = 0; isle_idx < drop_size; isle_idx++) {
p_isle = qed_ooo_seek_isle(p_hwfn, p_ooo_info, cid, drop_isle);
if (!p_isle) {
DP_NOTICE(p_hwfn,
"Isle %d is not found(cid %d)\n",
drop_isle, cid);
return;
}
if (list_empty(&p_isle->buffers_list))
DP_NOTICE(p_hwfn,
"Isle %d is empty(cid %d)\n", drop_isle, cid);
else
list_splice_tail_init(&p_isle->buffers_list,
&p_ooo_info->free_buffers_list);
list_del(&p_isle->list_entry);
p_ooo_info->cur_isles_number--;
list_add(&p_isle->list_entry, &p_ooo_info->free_isles_list);
}
if (list_empty(&p_archipelago->isles_list)) {
list_del(&p_archipelago->list_entry);
list_add(&p_archipelago->list_entry,
&p_ooo_info->free_archipelagos_list);
}
}
void qed_ooo_add_new_isle(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
u32 cid, u8 ooo_isle,
struct qed_ooo_buffer *p_buffer)
{
struct qed_ooo_archipelago *p_archipelago = NULL;
struct qed_ooo_isle *p_prev_isle = NULL;
struct qed_ooo_isle *p_isle = NULL;
if (ooo_isle > 1) {
p_prev_isle = qed_ooo_seek_isle(p_hwfn,
p_ooo_info, cid, ooo_isle - 1);
if (!p_prev_isle) {
DP_NOTICE(p_hwfn,
"Isle %d is not found(cid %d)\n",
ooo_isle - 1, cid);
return;
}
}
p_archipelago = qed_ooo_seek_archipelago(p_hwfn, p_ooo_info, cid);
if (!p_archipelago && (ooo_isle != 1)) {
DP_NOTICE(p_hwfn,
"Connection %d is not found in OOO list\n", cid);
return;
}
if (!list_empty(&p_ooo_info->free_isles_list)) {
p_isle = list_first_entry(&p_ooo_info->free_isles_list,
struct qed_ooo_isle, list_entry);
list_del(&p_isle->list_entry);
if (!list_empty(&p_isle->buffers_list)) {
DP_NOTICE(p_hwfn, "Free isle is not empty\n");
INIT_LIST_HEAD(&p_isle->buffers_list);
}
} else {
DP_NOTICE(p_hwfn, "No more free isles\n");
return;
}
if (!p_archipelago &&
!list_empty(&p_ooo_info->free_archipelagos_list)) {
p_archipelago =
list_first_entry(&p_ooo_info->free_archipelagos_list,
struct qed_ooo_archipelago, list_entry);
list_del(&p_archipelago->list_entry);
if (!list_empty(&p_archipelago->isles_list)) {
DP_NOTICE(p_hwfn,
"Free OOO connection is not empty\n");
INIT_LIST_HEAD(&p_archipelago->isles_list);
}
p_archipelago->cid = cid;
list_add(&p_archipelago->list_entry,
&p_ooo_info->archipelagos_list);
} else if (!p_archipelago) {
DP_NOTICE(p_hwfn, "No more free OOO connections\n");
list_add(&p_isle->list_entry,
&p_ooo_info->free_isles_list);
list_add(&p_buffer->list_entry,
&p_ooo_info->free_buffers_list);
return;
}
list_add(&p_buffer->list_entry, &p_isle->buffers_list);
p_ooo_info->cur_isles_number++;
p_ooo_info->gen_isles_number++;
if (p_ooo_info->cur_isles_number > p_ooo_info->max_isles_number)
p_ooo_info->max_isles_number = p_ooo_info->cur_isles_number;
if (!p_prev_isle)
list_add(&p_isle->list_entry, &p_archipelago->isles_list);
else
list_add(&p_isle->list_entry, &p_prev_isle->list_entry);
}
void qed_ooo_add_new_buffer(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
u32 cid,
u8 ooo_isle,
struct qed_ooo_buffer *p_buffer, u8 buffer_side)
{
struct qed_ooo_isle *p_isle = NULL;
p_isle = qed_ooo_seek_isle(p_hwfn, p_ooo_info, cid, ooo_isle);
if (!p_isle) {
DP_NOTICE(p_hwfn,
"Isle %d is not found(cid %d)\n", ooo_isle, cid);
return;
}
if (buffer_side == QED_OOO_LEFT_BUF)
list_add(&p_buffer->list_entry, &p_isle->buffers_list);
else
list_add_tail(&p_buffer->list_entry, &p_isle->buffers_list);
}
void qed_ooo_join_isles(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info, u32 cid, u8 left_isle)
{
struct qed_ooo_archipelago *p_archipelago = NULL;
struct qed_ooo_isle *p_right_isle = NULL;
struct qed_ooo_isle *p_left_isle = NULL;
p_right_isle = qed_ooo_seek_isle(p_hwfn, p_ooo_info, cid,
left_isle + 1);
if (!p_right_isle) {
DP_NOTICE(p_hwfn,
"Right isle %d is not found(cid %d)\n",
left_isle + 1, cid);
return;
}
p_archipelago = qed_ooo_seek_archipelago(p_hwfn, p_ooo_info, cid);
list_del(&p_right_isle->list_entry);
p_ooo_info->cur_isles_number--;
if (left_isle) {
p_left_isle = qed_ooo_seek_isle(p_hwfn, p_ooo_info, cid,
left_isle);
if (!p_left_isle) {
DP_NOTICE(p_hwfn,
"Left isle %d is not found(cid %d)\n",
left_isle, cid);
return;
}
list_splice_tail_init(&p_right_isle->buffers_list,
&p_left_isle->buffers_list);
} else {
list_splice_tail_init(&p_right_isle->buffers_list,
&p_ooo_info->ready_buffers_list);
if (list_empty(&p_archipelago->isles_list)) {
list_del(&p_archipelago->list_entry);
list_add(&p_archipelago->list_entry,
&p_ooo_info->free_archipelagos_list);
}
}
list_add_tail(&p_right_isle->list_entry, &p_ooo_info->free_isles_list);
}


@ -0,0 +1,176 @@
/* QLogic qed NIC Driver
* Copyright (c) 2015 QLogic Corporation
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
#ifndef _QED_OOO_H
#define _QED_OOO_H
#include <linux/types.h>
#include <linux/list.h>
#include <linux/slab.h>
#include "qed.h"
#define QED_MAX_NUM_ISLES 256
#define QED_MAX_NUM_OOO_HISTORY_ENTRIES 512
#define QED_OOO_LEFT_BUF 0
#define QED_OOO_RIGHT_BUF 1
struct qed_ooo_buffer {
struct list_head list_entry;
void *rx_buffer_virt_addr;
dma_addr_t rx_buffer_phys_addr;
u32 rx_buffer_size;
u16 packet_length;
u16 parse_flags;
u16 vlan;
u8 placement_offset;
};
struct qed_ooo_isle {
struct list_head list_entry;
struct list_head buffers_list;
};
struct qed_ooo_archipelago {
struct list_head list_entry;
struct list_head isles_list;
u32 cid;
};
struct qed_ooo_history {
struct ooo_opaque *p_cqes;
u32 head_idx;
u32 num_of_cqes;
};
struct qed_ooo_info {
struct list_head free_buffers_list;
struct list_head ready_buffers_list;
struct list_head free_isles_list;
struct list_head free_archipelagos_list;
struct list_head archipelagos_list;
struct qed_ooo_archipelago *p_archipelagos_mem;
struct qed_ooo_isle *p_isles_mem;
struct qed_ooo_history ooo_history;
u32 cur_isles_number;
u32 max_isles_number;
u32 gen_isles_number;
};
#if IS_ENABLED(CONFIG_QED_ISCSI)
void qed_ooo_save_history_entry(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
struct ooo_opaque *p_cqe);
struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn);
void qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
u32 cid);
void qed_ooo_release_all_isles(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info);
void qed_ooo_setup(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info);
void qed_ooo_free(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info);
void qed_ooo_put_free_buffer(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
struct qed_ooo_buffer *p_buffer);
struct qed_ooo_buffer *
qed_ooo_get_free_buffer(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info);
void qed_ooo_put_ready_buffer(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
struct qed_ooo_buffer *p_buffer, u8 on_tail);
struct qed_ooo_buffer *
qed_ooo_get_ready_buffer(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info);
void qed_ooo_delete_isles(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
u32 cid, u8 drop_isle, u8 drop_size);
void qed_ooo_add_new_isle(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
u32 cid,
u8 ooo_isle, struct qed_ooo_buffer *p_buffer);
void qed_ooo_add_new_buffer(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
u32 cid,
u8 ooo_isle,
struct qed_ooo_buffer *p_buffer, u8 buffer_side);
void qed_ooo_join_isles(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info, u32 cid,
u8 left_isle);
#else /* IS_ENABLED(CONFIG_QED_ISCSI) */
static inline void qed_ooo_save_history_entry(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
struct ooo_opaque *p_cqe) {}
static inline struct qed_ooo_info *qed_ooo_alloc(
struct qed_hwfn *p_hwfn) { return NULL; }
static inline void
qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
u32 cid) {}
static inline void qed_ooo_release_all_isles(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info)
{}
static inline void qed_ooo_setup(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info) {}
static inline void qed_ooo_free(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info) {}
static inline void qed_ooo_put_free_buffer(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
struct qed_ooo_buffer *p_buffer) {}
static inline struct qed_ooo_buffer *
qed_ooo_get_free_buffer(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info) { return NULL; }
static inline void qed_ooo_put_ready_buffer(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
struct qed_ooo_buffer *p_buffer,
u8 on_tail) {}
static inline struct qed_ooo_buffer *
qed_ooo_get_ready_buffer(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info) { return NULL; }
static inline void qed_ooo_delete_isles(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
u32 cid, u8 drop_isle, u8 drop_size) {}
static inline void qed_ooo_add_new_isle(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
u32 cid, u8 ooo_isle,
struct qed_ooo_buffer *p_buffer) {}
static inline void qed_ooo_add_new_buffer(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
u32 cid, u8 ooo_isle,
struct qed_ooo_buffer *p_buffer,
u8 buffer_side) {}
static inline void qed_ooo_join_isles(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info, u32 cid,
u8 left_isle) {}
#endif /* IS_ENABLED(CONFIG_QED_ISCSI) */
#endif


@ -82,6 +82,8 @@
0x1c80000UL
#define BAR0_MAP_REG_XSDM_RAM \
0x1e00000UL
#define BAR0_MAP_REG_YSDM_RAM \
0x1e80000UL
#define NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF \
0x5011f4UL
#define PRS_REG_SEARCH_TCP \


@ -2771,6 +2771,7 @@ static int qed_roce_ll2_tx(struct qed_dev *cdev,
/* Tx header */
rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev), roce_ll2->handle,
1 + pkt->n_seg, 0, flags, 0,
QED_LL2_TX_DEST_NW,
qed_roce_flavor, pkt->header.baddr,
pkt->header.len, pkt, 1);
if (rc) {


@ -24,7 +24,9 @@
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_iscsi.h"
#include "qed_mcp.h"
#include "qed_ooo.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
@ -277,6 +279,28 @@ qed_async_event_completion(struct qed_hwfn *p_hwfn,
return qed_sriov_eqe_event(p_hwfn,
p_eqe->opcode,
p_eqe->echo, &p_eqe->data);
case PROTOCOLID_ISCSI:
if (!IS_ENABLED(CONFIG_QED_ISCSI))
return -EINVAL;
if (p_eqe->opcode == ISCSI_EVENT_TYPE_ASYN_DELETE_OOO_ISLES) {
u32 cid = le32_to_cpu(p_eqe->data.iscsi_info.cid);
qed_ooo_release_connection_isles(p_hwfn,
p_hwfn->p_ooo_info,
cid);
return 0;
}
if (p_hwfn->p_iscsi_info->event_cb) {
struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info;
return p_iscsi->event_cb(p_iscsi->event_context,
p_eqe->opcode, &p_eqe->data);
} else {
DP_NOTICE(p_hwfn,
"iSCSI async completion is not set\n");
return -EINVAL;
}
default:
DP_NOTICE(p_hwfn,
"Unknown Async completion for protocol: %d\n",


@ -166,6 +166,7 @@ struct qed_iscsi_pf_params {
u32 max_cwnd;
u16 cq_num_entries;
u16 cmdq_num_entries;
u32 two_msl_timer;
u16 dup_ack_threshold;
u16 tx_sws_timer;
u16 min_rto;
@ -275,6 +276,7 @@ struct qed_dev_info {
enum qed_sb_type {
QED_SB_TYPE_L2_QUEUE,
QED_SB_TYPE_CNQ,
QED_SB_TYPE_STORAGE,
};
enum qed_protocol {


@ -0,0 +1,229 @@
/* QLogic qed NIC Driver
* Copyright (c) 2015 QLogic Corporation
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
#ifndef _QED_ISCSI_IF_H
#define _QED_ISCSI_IF_H
#include <linux/types.h>
#include <linux/qed/qed_if.h>
typedef int (*iscsi_event_cb_t) (void *context,
u8 fw_event_code, void *fw_handle);
struct qed_iscsi_stats {
u64 iscsi_rx_bytes_cnt;
u64 iscsi_rx_packet_cnt;
u64 iscsi_rx_new_ooo_isle_events_cnt;
u32 iscsi_cmdq_threshold_cnt;
u32 iscsi_rq_threshold_cnt;
u32 iscsi_immq_threshold_cnt;
u64 iscsi_rx_dropped_pdus_task_not_valid;
u64 iscsi_rx_data_pdu_cnt;
u64 iscsi_rx_r2t_pdu_cnt;
u64 iscsi_rx_total_pdu_cnt;
u64 iscsi_tx_go_to_slow_start_event_cnt;
u64 iscsi_tx_fast_retransmit_event_cnt;
u64 iscsi_tx_data_pdu_cnt;
u64 iscsi_tx_r2t_pdu_cnt;
u64 iscsi_tx_total_pdu_cnt;
u64 iscsi_tx_bytes_cnt;
u64 iscsi_tx_packet_cnt;
};
struct qed_dev_iscsi_info {
struct qed_dev_info common;
void __iomem *primary_dbq_rq_addr;
void __iomem *secondary_bdq_rq_addr;
};
struct qed_iscsi_id_params {
u8 mac[ETH_ALEN];
u32 ip[4];
u16 port;
};
struct qed_iscsi_params_offload {
u8 layer_code;
dma_addr_t sq_pbl_addr;
u32 initial_ack;
struct qed_iscsi_id_params src;
struct qed_iscsi_id_params dst;
u16 vlan_id;
u8 tcp_flags;
u8 ip_version;
u8 default_cq;
u8 ka_max_probe_cnt;
u8 dup_ack_theshold;
u32 rcv_next;
u32 snd_una;
u32 snd_next;
u32 snd_max;
u32 snd_wnd;
u32 rcv_wnd;
u32 snd_wl1;
u32 cwnd;
u32 ss_thresh;
u16 srtt;
u16 rtt_var;
u32 ts_time;
u32 ts_recent;
u32 ts_recent_age;
u32 total_rt;
u32 ka_timeout_delta;
u32 rt_timeout_delta;
u8 dup_ack_cnt;
u8 snd_wnd_probe_cnt;
u8 ka_probe_cnt;
u8 rt_cnt;
u32 flow_label;
u32 ka_timeout;
u32 ka_interval;
u32 max_rt_time;
u32 initial_rcv_wnd;
u8 ttl;
u8 tos_or_tc;
u16 remote_port;
u16 local_port;
u16 mss;
u8 snd_wnd_scale;
u8 rcv_wnd_scale;
u32 ts_ticks_per_second;
u16 da_timeout_value;
u8 ack_frequency;
};
struct qed_iscsi_params_update {
u8 update_flag;
#define QED_ISCSI_CONN_HD_EN BIT(0)
#define QED_ISCSI_CONN_DD_EN BIT(1)
#define QED_ISCSI_CONN_INITIAL_R2T BIT(2)
#define QED_ISCSI_CONN_IMMEDIATE_DATA BIT(3)
u32 max_seq_size;
u32 max_recv_pdu_length;
u32 max_send_pdu_length;
u32 first_seq_length;
u32 exp_stat_sn;
};
#define MAX_TID_BLOCKS_ISCSI (512)
struct qed_iscsi_tid {
u32 size; /* In bytes per task */
u32 num_tids_per_block;
u8 *blocks[MAX_TID_BLOCKS_ISCSI];
};
struct qed_iscsi_cb_ops {
struct qed_common_cb_ops common;
};
/**
* struct qed_iscsi_ops - qed iSCSI operations.
* @common: common operations pointer
* @ll2: light L2 operations pointer
* @fill_dev_info: fills iSCSI specific information
* @param cdev
* @param info
* @return 0 on success, otherwise error value.
* @register_ops: register iscsi operations
* @param cdev
* @param ops - specified using qed_iscsi_cb_ops
* @param cookie - driver private
* @start: iscsi in FW
* @param cdev
* @param tasks - qed will fill information about tasks
* return 0 on success, otherwise error value.
* @stop: iscsi in FW
* @param cdev
* return 0 on success, otherwise error value.
* @acquire_conn: acquire a new iscsi connection
* @param cdev
* @param handle - qed will fill handle that should be
* used henceforth as identifier of the
* connection.
* @param p_doorbell - qed will fill the address of the
* doorbell.
* @return 0 on success, otherwise error value.
* @release_conn: release a previously acquired iscsi connection
* @param cdev
* @param handle - the connection handle.
* @return 0 on success, otherwise error value.
* @offload_conn: configures an offloaded connection
* @param cdev
* @param handle - the connection handle.
* @param conn_info - the configuration to use for the
* offload.
* @return 0 on success, otherwise error value.
* @update_conn: updates an offloaded connection
* @param cdev
* @param handle - the connection handle.
* @param conn_info - the configuration to use for the
* offload.
* @return 0 on success, otherwise error value.
* @destroy_conn: stops an offloaded connection
* @param cdev
* @param handle - the connection handle.
* @return 0 on success, otherwise error value.
* @clear_sq: clear all task in sq
* @param cdev
* @param handle - the connection handle.
* @return 0 on success, otherwise error value.
* @get_stats: iSCSI related statistics
* @param cdev
* @param stats - pointer to struct that would be filled
* with stats
* @return 0 on success, error otherwise.
*/
struct qed_iscsi_ops {
const struct qed_common_ops *common;
const struct qed_ll2_ops *ll2;
int (*fill_dev_info)(struct qed_dev *cdev,
struct qed_dev_iscsi_info *info);
void (*register_ops)(struct qed_dev *cdev,
struct qed_iscsi_cb_ops *ops, void *cookie);
int (*start)(struct qed_dev *cdev,
struct qed_iscsi_tid *tasks,
void *event_context, iscsi_event_cb_t async_event_cb);
int (*stop)(struct qed_dev *cdev);
int (*acquire_conn)(struct qed_dev *cdev,
u32 *handle,
u32 *fw_cid, void __iomem **p_doorbell);
int (*release_conn)(struct qed_dev *cdev, u32 handle);
int (*offload_conn)(struct qed_dev *cdev,
u32 handle,
struct qed_iscsi_params_offload *conn_info);
int (*update_conn)(struct qed_dev *cdev,
u32 handle,
struct qed_iscsi_params_update *conn_info);
int (*destroy_conn)(struct qed_dev *cdev, u32 handle, u8 abrt_conn);
int (*clear_sq)(struct qed_dev *cdev, u32 handle);
int (*get_stats)(struct qed_dev *cdev,
struct qed_iscsi_stats *stats);
};
const struct qed_iscsi_ops *qed_get_iscsi_ops(void);
void qed_put_iscsi_ops(void);
#endif
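
For context, the following is a hedged sketch (not part of this series) of
how a protocol driver might drive the per-connection path declared in
qed_iscsi_ops above. The helper name qedi_offload_one_conn and the
abbreviated error handling are illustrative assumptions only.

    #include <linux/qed/qed_iscsi_if.h>

    /* Illustrative only: acquire a firmware connection, offload it, and
     * release it on failure, using the qed_iscsi_ops callbacks above. */
    static int qedi_offload_one_conn(const struct qed_iscsi_ops *ops,
                                     struct qed_dev *cdev,
                                     struct qed_iscsi_params_offload *conn_info)
    {
            void __iomem *p_doorbell;
            u32 handle, fw_cid;
            int rc;

            /* qed returns a driver handle, the firmware CID and the doorbell
             * address used to ring the connection's send queue. */
            rc = ops->acquire_conn(cdev, &handle, &fw_cid, &p_doorbell);
            if (rc)
                    return rc;

            /* conn_info carries the TCP/iSCSI state (addresses, sequence
             * numbers, window and timer values) the firmware needs in order
             * to own the connection. */
            rc = ops->offload_conn(cdev, handle, conn_info);
            if (rc)
                    ops->release_conn(cdev, handle);

            return rc;
    }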