octeontx2-af: NPA AQ instruction enqueue support

Add support for an RVU PF/VF to submit instructions to the NPA AQ
via mbox. Instructions can init, write, or read Aura/Pool/Qint
contexts. In case of a read, the context is returned as part of the
response to the received mbox msg.

Signed-off-by: Sunil Goutham <sgoutham@marvell.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
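
For context, the consumer side of this interface looks as follows: a PF/VF driver builds an npa_aq_enq_req and sends it to the AF over the mailbox. The sketch below reads back an aura's HW context; alloc_npa_aq_enq_req(), send_mbox_msg() and struct otx2_dev are hypothetical stand-ins, since the requester-side plumbing is not part of this patch.

/* Minimal requester-side sketch, not part of this patch. */
static int read_aura_ctx(struct otx2_dev *dev, u32 aura_id,
			 struct npa_aura_s *out)
{
	struct npa_aq_enq_req *req;
	struct npa_aq_enq_rsp *rsp;

	req = alloc_npa_aq_enq_req(dev);	/* hypothetical helper */
	if (!req)
		return -ENOMEM;

	req->aura_id = aura_id;
	req->ctype = NPA_AQ_CTYPE_AURA;
	req->op = NPA_AQ_INSTOP_READ;

	rsp = send_mbox_msg(dev, &req->hdr);	/* hypothetical helper */
	if (IS_ERR(rsp))
		return PTR_ERR(rsp);

	/* For READ ops the AF copies the HW context into the response */
	*out = rsp->aura;
	return 0;
}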

drivers/net/ethernet/marvell/octeontx2/af/common.h

@@ -118,4 +118,17 @@ enum npa_aura_sz {
#define NPA_AURA_COUNT(x) (1ULL << ((x) + 6))
/* NPA AQ result structure for init/read/write of aura HW contexts */
struct npa_aq_aura_res {
struct npa_aq_res_s res;
struct npa_aura_s aura_ctx;
struct npa_aura_s ctx_mask;
};
/* NPA AQ result structure for init/read/write of pool HW contexts */
struct npa_aq_pool_res {
struct npa_aq_res_s res;
struct npa_pool_s pool_ctx;
struct npa_pool_s ctx_mask;
};
#endif /* COMMON_H */
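
These result structures are overlaid on DMA memory that the hardware fills, so their layout must match the HW word-for-word. A compile-time sanity check along the following lines (a sketch, not part of the patch) would catch accidental padding; the sizes follow from the W0..Wn word counts in rvu_struct.h.

/* Sketch: layout checks for the HW context structures. */
static inline void npa_ctx_layout_checks(void)
{
	BUILD_BUG_ON(sizeof(struct npa_aq_res_s) != 16);	/* 2 words */
	BUILD_BUG_ON(sizeof(struct npa_aura_s) != 64);		/* 8 words */
	BUILD_BUG_ON(sizeof(struct npa_pool_s) != 128);		/* 16 words */
}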

drivers/net/ethernet/marvell/octeontx2/af/mbox.h

@@ -141,6 +141,7 @@ M(CGX_INTLBK_DISABLE, 0x20B, msg_req, msg_rsp) \
/* NPA mbox IDs (range 0x400 - 0x5FF) */ \
M(NPA_LF_ALLOC, 0x400, npa_lf_alloc_req, npa_lf_alloc_rsp) \
M(NPA_LF_FREE, 0x401, msg_req, msg_rsp) \
M(NPA_AQ_ENQ, 0x402, npa_aq_enq_req, npa_aq_enq_rsp) \
/* SSO/SSOW mbox IDs (range 0x600 - 0x7FF) */ \
/* TIM mbox IDs (range 0x800 - 0x9FF) */ \
/* CPT mbox IDs (range 0xA00 - 0xBFF) */ \
@@ -290,4 +291,38 @@ struct npa_lf_alloc_rsp {
u16 qints; /* NPA_AF_CONST::QINTS */
};
/* NPA AQ enqueue msg */
struct npa_aq_enq_req {
struct mbox_msghdr hdr;
u32 aura_id;
u8 ctype;
u8 op;
union {
/* Valid when op == WRITE/INIT and ctype == AURA.
* LF fills the pool_id in aura.pool_addr. AF will translate
* the pool_id to pool context pointer.
*/
struct npa_aura_s aura;
/* Valid when op == WRITE/INIT and ctype == POOL */
struct npa_pool_s pool;
};
/* Mask data when op == WRITE (1=write, 0=don't write) */
union {
/* Valid when op == WRITE and ctype == AURA */
struct npa_aura_s aura_mask;
/* Valid when op == WRITE and ctype == POOL */
struct npa_pool_s pool_mask;
};
};
struct npa_aq_enq_rsp {
struct mbox_msghdr hdr;
union {
/* Valid when op == READ and ctype == AURA */
struct npa_aura_s aura;
/* Valid when op == READ and ctype == POOL */
struct npa_pool_s pool;
};
};
#endif /* MBOX_H */
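
The per-field mask is what makes partial context updates possible: the requester fills the new values in aura/pool and sets the same bits to all-ones in aura_mask/pool_mask, and the AQ leaves unmasked bits of the HW context untouched. A minimal sketch (request construction only, assuming the mbox allocator returned a zeroed message):

/* Sketch: update only an aura's drop threshold via NPA_AQ_ENQ. */
static void fill_aura_drop_update(struct npa_aq_enq_req *req,
				  u32 aura_id, u8 drop)
{
	req->aura_id = aura_id;
	req->ctype = NPA_AQ_CTYPE_AURA;
	req->op = NPA_AQ_INSTOP_WRITE;

	req->aura.aura_drop = drop;		/* new value */
	req->aura.aura_drop_ena = 1;
	req->aura_mask.aura_drop = 0xFF;	/* write all 8 bits */
	req->aura_mask.aura_drop_ena = 1;	/* write the enable bit */
}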

drivers/net/ethernet/marvell/octeontx2/af/rvu.h

@@ -213,6 +213,9 @@ int rvu_mbox_handler_CGX_INTLBK_DISABLE(struct rvu *rvu, struct msg_req *req,
/* NPA APIs */
int rvu_npa_init(struct rvu *rvu);
void rvu_npa_freemem(struct rvu *rvu);
int rvu_mbox_handler_NPA_AQ_ENQ(struct rvu *rvu,
struct npa_aq_enq_req *req,
struct npa_aq_enq_rsp *rsp);
int rvu_mbox_handler_NPA_LF_ALLOC(struct rvu *rvu,
struct npa_lf_alloc_req *req,
struct npa_lf_alloc_rsp *rsp);
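
The rvu_mbox_handler_<NAME> naming is what ties these declarations to the M() table in mbox.h: the dispatcher re-expands the same X-macro list so each message ID resolves to its handler. A sketch of that pattern is below; the actual dispatch code lives in rvu.c and is assumed here, not shown in this patch.

/* Sketch: X-macro dispatch from the MBOX_MESSAGES table. */
static int dispatch_mbox_msg(struct rvu *rvu, struct mbox_msghdr *msg,
			     struct mbox_msghdr *rsp)
{
	int err = -ENODEV;

	switch (msg->id) {
#define M(_name, _id, _req_type, _rsp_type)				\
	case _id:							\
		err = rvu_mbox_handler_ ## _name(rvu,			\
				(struct _req_type *)msg,		\
				(struct _rsp_type *)rsp);		\
		break;
MBOX_MESSAGES
#undef M
	}
	return err;
}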

drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c

@@ -15,6 +15,164 @@
#include "rvu_reg.h"
#include "rvu.h"
static int npa_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
struct npa_aq_inst_s *inst)
{
struct admin_queue *aq = block->aq;
struct npa_aq_res_s *result;
int timeout = 1000;
u64 reg, head;
result = (struct npa_aq_res_s *)aq->res->base;
/* Get current head pointer where to append this instruction */
reg = rvu_read64(rvu, block->addr, NPA_AF_AQ_STATUS);
head = (reg >> 4) & AQ_PTR_MASK;
memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
(void *)inst, aq->inst->entry_sz);
memset(result, 0, sizeof(*result));
/* sync into memory */
wmb();
/* Ring the doorbell and wait for result */
rvu_write64(rvu, block->addr, NPA_AF_AQ_DOOR, 1);
while (result->compcode == NPA_AQ_COMP_NOTDONE) {
cpu_relax();
udelay(1);
timeout--;
if (!timeout)
return -EBUSY;
}
if (result->compcode != NPA_AQ_COMP_GOOD)
/* TODO: Replace this with some error code */
return -EBUSY;
return 0;
}
static int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req,
struct npa_aq_enq_rsp *rsp)
{
struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
int blkaddr, npalf, rc = 0;
struct npa_aq_inst_s inst;
struct rvu_block *block;
struct admin_queue *aq;
struct rvu_pfvf *pfvf;
void *ctx, *mask;
pfvf = rvu_get_pfvf(rvu, pcifunc);
if (!pfvf->aura_ctx || req->aura_id >= pfvf->aura_ctx->qsize)
return NPA_AF_ERR_AQ_ENQUEUE;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
if (!pfvf->npalf || blkaddr < 0)
return NPA_AF_ERR_AF_LF_INVALID;
block = &hw->block[blkaddr];
aq = block->aq;
if (!aq) {
dev_warn(rvu->dev, "%s: NPA AQ not initialized\n", __func__);
return NPA_AF_ERR_AQ_ENQUEUE;
}
npalf = rvu_get_lf(rvu, block, pcifunc, 0);
if (npalf < 0)
return NPA_AF_ERR_AF_LF_INVALID;
memset(&inst, 0, sizeof(struct npa_aq_inst_s));
inst.cindex = req->aura_id;
inst.lf = npalf;
inst.ctype = req->ctype;
inst.op = req->op;
/* Currently we are not supporting enqueuing multiple instructions,
* so always choose first entry in result memory.
*/
inst.res_addr = (u64)aq->res->iova;
/* Clean result + context memory */
memset(aq->res->base, 0, aq->res->entry_sz);
/* Context needs to be written at RES_ADDR + 128 */
ctx = aq->res->base + 128;
/* Mask needs to be written at RES_ADDR + 256 */
mask = aq->res->base + 256;
switch (req->op) {
case NPA_AQ_INSTOP_WRITE:
/* Copy context and write mask */
if (req->ctype == NPA_AQ_CTYPE_AURA) {
memcpy(mask, &req->aura_mask,
sizeof(struct npa_aura_s));
memcpy(ctx, &req->aura, sizeof(struct npa_aura_s));
} else {
memcpy(mask, &req->pool_mask,
sizeof(struct npa_pool_s));
memcpy(ctx, &req->pool, sizeof(struct npa_pool_s));
}
break;
case NPA_AQ_INSTOP_INIT:
if (req->ctype == NPA_AQ_CTYPE_AURA) {
if (req->aura.pool_addr >= pfvf->pool_ctx->qsize) {
rc = NPA_AF_ERR_AQ_FULL;
break;
}
/* Set pool's context address */
req->aura.pool_addr = pfvf->pool_ctx->iova +
(req->aura.pool_addr * pfvf->pool_ctx->entry_sz);
memcpy(ctx, &req->aura, sizeof(struct npa_aura_s));
} else { /* POOL's context */
memcpy(ctx, &req->pool, sizeof(struct npa_pool_s));
}
break;
case NPA_AQ_INSTOP_NOP:
case NPA_AQ_INSTOP_READ:
case NPA_AQ_INSTOP_LOCK:
case NPA_AQ_INSTOP_UNLOCK:
break;
default:
rc = NPA_AF_ERR_AQ_FULL;
break;
}
if (rc)
return rc;
spin_lock(&aq->lock);
/* Submit the instruction to AQ */
rc = npa_aq_enqueue_wait(rvu, block, &inst);
if (rc) {
spin_unlock(&aq->lock);
return rc;
}
spin_unlock(&aq->lock);
if (rsp) {
/* Copy read context into mailbox */
if (req->op == NPA_AQ_INSTOP_READ) {
if (req->ctype == NPA_AQ_CTYPE_AURA)
memcpy(&rsp->aura, ctx,
sizeof(struct npa_aura_s));
else
memcpy(&rsp->pool, ctx,
sizeof(struct npa_pool_s));
}
}
return 0;
}
int rvu_mbox_handler_NPA_AQ_ENQ(struct rvu *rvu,
struct npa_aq_enq_req *req,
struct npa_aq_enq_rsp *rsp)
{
return rvu_npa_aq_enq_inst(rvu, req, rsp);
}
static void npa_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
{
qmem_free(rvu->dev, pfvf->aura_ctx);

drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h

@@ -136,4 +136,222 @@ struct npa_aq_res_s {
#endif
u64 reserved_64_127; /* W1 */
};
struct npa_aura_s {
u64 pool_addr; /* W0 */
#if defined(__BIG_ENDIAN_BITFIELD) /* W1 */
u64 avg_level : 8;
u64 reserved_118_119 : 2;
u64 shift : 6;
u64 aura_drop : 8;
u64 reserved_98_103 : 6;
u64 bp_ena : 2;
u64 aura_drop_ena : 1;
u64 pool_drop_ena : 1;
u64 reserved_93 : 1;
u64 avg_con : 9;
u64 pool_way_mask : 16;
u64 pool_caching : 1;
u64 reserved_65_66 : 2;
u64 ena : 1;
#else
u64 ena : 1;
u64 reserved_65_66 : 2;
u64 pool_caching : 1;
u64 pool_way_mask : 16;
u64 avg_con : 9;
u64 reserved_93 : 1;
u64 pool_drop_ena : 1;
u64 aura_drop_ena : 1;
u64 bp_ena : 2;
u64 reserved_98_103 : 6;
u64 aura_drop : 8;
u64 shift : 6;
u64 reserved_118_119 : 2;
u64 avg_level : 8;
#endif
#if defined(__BIG_ENDIAN_BITFIELD) /* W2 */
u64 reserved_189_191 : 3;
u64 nix1_bpid : 9;
u64 reserved_177_179 : 3;
u64 nix0_bpid : 9;
u64 reserved_164_167 : 4;
u64 count : 36;
#else
u64 count : 36;
u64 reserved_164_167 : 4;
u64 nix0_bpid : 9;
u64 reserved_177_179 : 3;
u64 nix1_bpid : 9;
u64 reserved_189_191 : 3;
#endif
#if defined(__BIG_ENDIAN_BITFIELD) /* W3 */
u64 reserved_252_255 : 4;
u64 fc_hyst_bits : 4;
u64 fc_stype : 2;
u64 fc_up_crossing : 1;
u64 fc_ena : 1;
u64 reserved_240_243 : 4;
u64 bp : 8;
u64 reserved_228_231 : 4;
u64 limit : 36;
#else
u64 limit : 36;
u64 reserved_228_231 : 4;
u64 bp : 8;
u64 reserved_240_243 : 4;
u64 fc_ena : 1;
u64 fc_up_crossing : 1;
u64 fc_stype : 2;
u64 fc_hyst_bits : 4;
u64 reserved_252_255 : 4;
#endif
u64 fc_addr; /* W4 */
#if defined(__BIG_ENDIAN_BITFIELD) /* W5 */
u64 reserved_379_383 : 5;
u64 err_qint_idx : 7;
u64 reserved_371 : 1;
u64 thresh_qint_idx : 7;
u64 reserved_363 : 1;
u64 thresh_up : 1;
u64 thresh_int_ena : 1;
u64 thresh_int : 1;
u64 err_int_ena : 8;
u64 err_int : 8;
u64 update_time : 16;
u64 pool_drop : 8;
#else
u64 pool_drop : 8;
u64 update_time : 16;
u64 err_int : 8;
u64 err_int_ena : 8;
u64 thresh_int : 1;
u64 thresh_int_ena : 1;
u64 thresh_up : 1;
u64 reserved_363 : 1;
u64 thresh_qint_idx : 7;
u64 reserved_371 : 1;
u64 err_qint_idx : 7;
u64 reserved_379_383 : 5;
#endif
#if defined(__BIG_ENDIAN_BITFIELD) /* W6 */
u64 reserved_420_447 : 28;
u64 thresh : 36;
#else
u64 thresh : 36;
u64 reserved_420_447 : 28;
#endif
u64 reserved_448_511; /* W7 */
};
struct npa_pool_s {
u64 stack_base; /* W0 */
#if defined(__BIG_ENDIAN_BITFIELD) /* W1 */
u64 reserved_115_127 : 13;
u64 buf_size : 11;
u64 reserved_100_103 : 4;
u64 buf_offset : 12;
u64 stack_way_mask : 16;
u64 reserved_69_71 : 3;
u64 stack_caching : 1;
u64 reserved_66_67 : 2;
u64 nat_align : 1;
u64 ena : 1;
#else
u64 ena : 1;
u64 nat_align : 1;
u64 reserved_66_67 : 2;
u64 stack_caching : 1;
u64 reserved_69_71 : 3;
u64 stack_way_mask : 16;
u64 buf_offset : 12;
u64 reserved_100_103 : 4;
u64 buf_size : 11;
u64 reserved_115_127 : 13;
#endif
#if defined(__BIG_ENDIAN_BITFIELD) /* W2 */
u64 stack_pages : 32;
u64 stack_max_pages : 32;
#else
u64 stack_max_pages : 32;
u64 stack_pages : 32;
#endif
#if defined(__BIG_ENDIAN_BITFIELD) /* W3 */
u64 reserved_240_255 : 16;
u64 op_pc : 48;
#else
u64 op_pc : 48;
u64 reserved_240_255 : 16;
#endif
#if defined(__BIG_ENDIAN_BITFIELD) /* W4 */
u64 reserved_316_319 : 4;
u64 update_time : 16;
u64 reserved_297_299 : 3;
u64 fc_up_crossing : 1;
u64 fc_hyst_bits : 4;
u64 fc_stype : 2;
u64 fc_ena : 1;
u64 avg_con : 9;
u64 avg_level : 8;
u64 reserved_270_271 : 2;
u64 shift : 6;
u64 reserved_260_263 : 4;
u64 stack_offset : 4;
#else
u64 stack_offset : 4;
u64 reserved_260_263 : 4;
u64 shift : 6;
u64 reserved_270_271 : 2;
u64 avg_level : 8;
u64 avg_con : 9;
u64 fc_ena : 1;
u64 fc_stype : 2;
u64 fc_hyst_bits : 4;
u64 fc_up_crossing : 1;
u64 reserved_297_299 : 3;
u64 update_time : 16;
u64 reserved_316_319 : 4;
#endif
u64 fc_addr; /* W5 */
u64 ptr_start; /* W6 */
u64 ptr_end; /* W7 */
#if defined(__BIG_ENDIAN_BITFIELD) /* W8 */
u64 reserved_571_575 : 5;
u64 err_qint_idx : 7;
u64 reserved_563 : 1;
u64 thresh_qint_idx : 7;
u64 reserved_555 : 1;
u64 thresh_up : 1;
u64 thresh_int_ena : 1;
u64 thresh_int : 1;
u64 err_int_ena : 8;
u64 err_int : 8;
u64 reserved_512_535 : 24;
#else
u64 reserved_512_535 : 24;
u64 err_int : 8;
u64 err_int_ena : 8;
u64 thresh_int : 1;
u64 thresh_int_ena : 1;
u64 thresh_up : 1;
u64 reserved_555 : 1;
u64 thresh_qint_idx : 7;
u64 reserved_563 : 1;
u64 err_qint_idx : 7;
u64 reserved_571_575 : 5;
#endif
#if defined(__BIG_ENDIAN_BITFIELD) /* W9 */
u64 reserved_612_639 : 28;
u64 thresh : 36;
#else
u64 thresh : 36;
u64 reserved_612_639 : 28;
#endif
u64 reserved_640_703; /* W10 */
u64 reserved_704_767; /* W11 */
u64 reserved_768_831; /* W12 */
u64 reserved_832_895; /* W13 */
u64 reserved_896_959; /* W14 */
u64 reserved_960_1023; /* W15 */
};
#endif /* RVU_STRUCT_H */
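
A note on the twin #if blocks above: they implement the standard kernel idiom for hardware-defined bitfields. Little-endian compilers allocate bitfields from bit 0 upward and big-endian compilers from the most significant bit downward, so mirroring the declaration order keeps every named field at the same absolute bit position either way. A stripped-down illustration (not from the patch):

/* 'ena' occupies the word's lowest bit under both bitfield orders. */
struct example_word {
#if defined(__BIG_ENDIAN_BITFIELD)
	u64 hi_bits : 60;
	u64 mid     : 3;
	u64 ena     : 1;
#else
	u64 ena     : 1;
	u64 mid     : 3;
	u64 hi_bits : 60;
#endif
};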