mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-14 14:56:43 +07:00
net/mlx5e: Allow ICO SQ to be used by multiple RQs
Prepare for the creation of the XSK RQ, which will require posting UMRs, too.
The same ICO SQ will be used for both RQs and also to trigger interrupts
by posting NOPs. UMR WQEs can no longer be reused, so the
optimization introduced in commit ab966d7e4f
("net/mlx5e: RX, Recycle buffer of
UMR WQEs") is reverted.
Signed-off-by: Maxim Mikityanskiy <maximmi@mellanox.com>
Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
Acked-by: Saeed Mahameed <saeedm@mellanox.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
This commit is contained in:
parent
a069e977d6
commit
ed084fb604
@ -348,6 +348,13 @@ enum {
|
||||
|
||||
struct mlx5e_sq_wqe_info {
|
||||
u8 opcode;
|
||||
|
||||
/* Auxiliary data for different opcodes. */
|
||||
union {
|
||||
struct {
|
||||
struct mlx5e_rq *rq;
|
||||
} umr;
|
||||
};
|
||||
};
|
||||
|
||||
struct mlx5e_txqsq {
|
||||
@ -571,6 +578,7 @@ struct mlx5e_rq {
|
||||
u8 log_stride_sz;
|
||||
u8 umr_in_progress;
|
||||
u8 umr_last_bulk;
|
||||
u8 umr_completed;
|
||||
} mpwqe;
|
||||
};
|
||||
struct {
|
||||
@ -798,6 +806,7 @@ void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info,
|
||||
void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
|
||||
void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
|
||||
bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
|
||||
void mlx5e_poll_ico_cq(struct mlx5e_cq *cq);
|
||||
bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq);
|
||||
void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
|
||||
void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
|
||||
|
@ -427,11 +427,6 @@ static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq, u8 n)
|
||||
mlx5_wq_ll_update_db_record(wq);
|
||||
}
|
||||
|
||||
static inline u16 mlx5e_icosq_wrap_cnt(struct mlx5e_icosq *sq)
|
||||
{
|
||||
return mlx5_wq_cyc_get_ctr_wrap_cnt(&sq->wq, sq->pc);
|
||||
}
|
||||
|
||||
static inline void mlx5e_fill_icosq_frag_edge(struct mlx5e_icosq *sq,
|
||||
struct mlx5_wq_cyc *wq,
|
||||
u16 pi, u16 nnops)
|
||||
@ -467,9 +462,7 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
|
||||
}
|
||||
|
||||
umr_wqe = mlx5_wq_cyc_get_wqe(wq, pi);
|
||||
if (unlikely(mlx5e_icosq_wrap_cnt(sq) < 2))
|
||||
memcpy(umr_wqe, &rq->mpwqe.umr_wqe,
|
||||
offsetof(struct mlx5e_umr_wqe, inline_mtts));
|
||||
memcpy(umr_wqe, &rq->mpwqe.umr_wqe, offsetof(struct mlx5e_umr_wqe, inline_mtts));
|
||||
|
||||
for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++, dma_info++) {
|
||||
err = mlx5e_page_alloc_mapped(rq, dma_info);
|
||||
@ -487,6 +480,7 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
|
||||
umr_wqe->uctrl.xlt_offset = cpu_to_be16(xlt_offset);
|
||||
|
||||
sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_UMR;
|
||||
sq->db.ico_wqe[pi].umr.rq = rq;
|
||||
sq->pc += MLX5E_UMR_WQEBBS;
|
||||
|
||||
sq->doorbell_cseg = &umr_wqe->ctrl;
|
||||
@ -544,11 +538,10 @@ bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
|
||||
return !!err;
|
||||
}
|
||||
|
||||
static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
|
||||
void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
|
||||
{
|
||||
struct mlx5e_icosq *sq = container_of(cq, struct mlx5e_icosq, cq);
|
||||
struct mlx5_cqe64 *cqe;
|
||||
u8 completed_umr = 0;
|
||||
u16 sqcc;
|
||||
int i;
|
||||
|
||||
@ -589,7 +582,7 @@ static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
|
||||
|
||||
if (likely(wi->opcode == MLX5_OPCODE_UMR)) {
|
||||
sqcc += MLX5E_UMR_WQEBBS;
|
||||
completed_umr++;
|
||||
wi->umr.rq->mpwqe.umr_completed++;
|
||||
} else if (likely(wi->opcode == MLX5_OPCODE_NOP)) {
|
||||
sqcc++;
|
||||
} else {
|
||||
@ -605,24 +598,24 @@ static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
|
||||
sq->cc = sqcc;
|
||||
|
||||
mlx5_cqwq_update_db_record(&cq->wq);
|
||||
|
||||
if (likely(completed_umr)) {
|
||||
mlx5e_post_rx_mpwqe(rq, completed_umr);
|
||||
rq->mpwqe.umr_in_progress -= completed_umr;
|
||||
}
|
||||
}
|
||||
|
||||
bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
|
||||
{
|
||||
struct mlx5e_icosq *sq = &rq->channel->icosq;
|
||||
struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
|
||||
u8 umr_completed = rq->mpwqe.umr_completed;
|
||||
u8 missing, i;
|
||||
u16 head;
|
||||
|
||||
if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
|
||||
return false;
|
||||
|
||||
mlx5e_poll_ico_cq(&sq->cq, rq);
|
||||
if (umr_completed) {
|
||||
mlx5e_post_rx_mpwqe(rq, umr_completed);
|
||||
rq->mpwqe.umr_in_progress -= umr_completed;
|
||||
rq->mpwqe.umr_completed = 0;
|
||||
}
|
||||
|
||||
missing = mlx5_wq_ll_missing(wq) - rq->mpwqe.umr_in_progress;
|
||||
|
||||
|
@ -107,7 +107,9 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
|
||||
busy |= work_done == budget;
|
||||
}
|
||||
|
||||
busy |= c->rq.post_wqes(rq);
|
||||
mlx5e_poll_ico_cq(&c->icosq.cq);
|
||||
|
||||
busy |= rq->post_wqes(rq);
|
||||
|
||||
if (busy) {
|
||||
if (likely(mlx5e_channel_no_affinity_change(c)))
|
||||
|
@ -134,11 +134,6 @@ static inline void mlx5_wq_cyc_update_db_record(struct mlx5_wq_cyc *wq)
|
||||
*wq->db = cpu_to_be32(wq->wqe_ctr);
|
||||
}
|
||||
|
||||
static inline u16 mlx5_wq_cyc_get_ctr_wrap_cnt(struct mlx5_wq_cyc *wq, u16 ctr)
|
||||
{
|
||||
return ctr >> wq->fbc.log_sz;
|
||||
}
|
||||
|
||||
static inline u16 mlx5_wq_cyc_ctr2ix(struct mlx5_wq_cyc *wq, u16 ctr)
|
||||
{
|
||||
return ctr & wq->fbc.sz_m1;
|
||||
|
Loading…
Reference in New Issue
Block a user