net/mlx4_core: Add masking for a few queries on HCA caps

Driver reads the query HCA capabilities without the corresponding masks.
Without the correct masks, the base addresses of the queues are
unaligned.  In addition, some reserved bits were wrongly read.  Using
the correct masks ensures alignment of the base addresses and allows
future firmware versions to make safe use of the reserved bits.

Fixes: ab9c17a009 ("mlx4_core: Modify driver initialization flow to accommodate SRIOV for Ethernet")
Fixes: 0ff1fb654b ("{NET, IB}/mlx4: Add device managed flow steering firmware API")
Signed-off-by: Aya Levin <ayal@mellanox.com>
Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
Aya Levin 2019-01-22 15:19:44 +02:00 committed by David S. Miller
parent ecf938fe7d
commit a40ded6043

View File

@@ -2067,9 +2067,11 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
{
struct mlx4_cmd_mailbox *mailbox;
__be32 *outbox;
u64 qword_field;
u32 dword_field;
int err;
u16 word_field;
u8 byte_field;
int err;
static const u8 a0_dmfs_query_hw_steering[] = {
[0] = MLX4_STEERING_DMFS_A0_DEFAULT,
[1] = MLX4_STEERING_DMFS_A0_DYNAMIC,
@@ -2097,19 +2099,32 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
/* QPC/EEC/CQC/EQC/RDMARC attributes */
MLX4_GET(param->qpc_base, outbox, INIT_HCA_QPC_BASE_OFFSET);
MLX4_GET(param->log_num_qps, outbox, INIT_HCA_LOG_QP_OFFSET);
MLX4_GET(param->srqc_base, outbox, INIT_HCA_SRQC_BASE_OFFSET);
MLX4_GET(param->log_num_srqs, outbox, INIT_HCA_LOG_SRQ_OFFSET);
MLX4_GET(param->cqc_base, outbox, INIT_HCA_CQC_BASE_OFFSET);
MLX4_GET(param->log_num_cqs, outbox, INIT_HCA_LOG_CQ_OFFSET);
MLX4_GET(param->altc_base, outbox, INIT_HCA_ALTC_BASE_OFFSET);
MLX4_GET(param->auxc_base, outbox, INIT_HCA_AUXC_BASE_OFFSET);
MLX4_GET(param->eqc_base, outbox, INIT_HCA_EQC_BASE_OFFSET);
MLX4_GET(param->log_num_eqs, outbox, INIT_HCA_LOG_EQ_OFFSET);
MLX4_GET(param->num_sys_eqs, outbox, INIT_HCA_NUM_SYS_EQS_OFFSET);
MLX4_GET(param->rdmarc_base, outbox, INIT_HCA_RDMARC_BASE_OFFSET);
MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET);
MLX4_GET(qword_field, outbox, INIT_HCA_QPC_BASE_OFFSET);
param->qpc_base = qword_field & ~((u64)0x1f);
MLX4_GET(byte_field, outbox, INIT_HCA_LOG_QP_OFFSET);
param->log_num_qps = byte_field & 0x1f;
MLX4_GET(qword_field, outbox, INIT_HCA_SRQC_BASE_OFFSET);
param->srqc_base = qword_field & ~((u64)0x1f);
MLX4_GET(byte_field, outbox, INIT_HCA_LOG_SRQ_OFFSET);
param->log_num_srqs = byte_field & 0x1f;
MLX4_GET(qword_field, outbox, INIT_HCA_CQC_BASE_OFFSET);
param->cqc_base = qword_field & ~((u64)0x1f);
MLX4_GET(byte_field, outbox, INIT_HCA_LOG_CQ_OFFSET);
param->log_num_cqs = byte_field & 0x1f;
MLX4_GET(qword_field, outbox, INIT_HCA_ALTC_BASE_OFFSET);
param->altc_base = qword_field;
MLX4_GET(qword_field, outbox, INIT_HCA_AUXC_BASE_OFFSET);
param->auxc_base = qword_field;
MLX4_GET(qword_field, outbox, INIT_HCA_EQC_BASE_OFFSET);
param->eqc_base = qword_field & ~((u64)0x1f);
MLX4_GET(byte_field, outbox, INIT_HCA_LOG_EQ_OFFSET);
param->log_num_eqs = byte_field & 0x1f;
MLX4_GET(word_field, outbox, INIT_HCA_NUM_SYS_EQS_OFFSET);
param->num_sys_eqs = word_field & 0xfff;
MLX4_GET(qword_field, outbox, INIT_HCA_RDMARC_BASE_OFFSET);
param->rdmarc_base = qword_field & ~((u64)0x1f);
MLX4_GET(byte_field, outbox, INIT_HCA_LOG_RD_OFFSET);
param->log_rd_per_qp = byte_field & 0x7;
MLX4_GET(dword_field, outbox, INIT_HCA_FLAGS_OFFSET);
if (dword_field & (1 << INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN)) {
@@ -2128,22 +2143,21 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
/* steering attributes */
if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET);
MLX4_GET(param->log_mc_entry_sz, outbox,
INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
MLX4_GET(param->log_mc_table_sz, outbox,
INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
MLX4_GET(byte_field, outbox,
INIT_HCA_FS_A0_OFFSET);
MLX4_GET(byte_field, outbox, INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
param->log_mc_entry_sz = byte_field & 0x1f;
MLX4_GET(byte_field, outbox, INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
param->log_mc_table_sz = byte_field & 0x1f;
MLX4_GET(byte_field, outbox, INIT_HCA_FS_A0_OFFSET);
param->dmfs_high_steer_mode =
a0_dmfs_query_hw_steering[(byte_field >> 6) & 3];
} else {
MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET);
MLX4_GET(param->log_mc_entry_sz, outbox,
INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
MLX4_GET(param->log_mc_hash_sz, outbox,
INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
MLX4_GET(param->log_mc_table_sz, outbox,
INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
param->log_mc_entry_sz = byte_field & 0x1f;
MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
param->log_mc_hash_sz = byte_field & 0x1f;
MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
param->log_mc_table_sz = byte_field & 0x1f;
}
/* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
@@ -2167,15 +2181,18 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
/* TPT attributes */
MLX4_GET(param->dmpt_base, outbox, INIT_HCA_DMPT_BASE_OFFSET);
MLX4_GET(param->mw_enabled, outbox, INIT_HCA_TPT_MW_OFFSET);
MLX4_GET(param->log_mpt_sz, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
MLX4_GET(byte_field, outbox, INIT_HCA_TPT_MW_OFFSET);
param->mw_enabled = byte_field >> 7;
MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
param->log_mpt_sz = byte_field & 0x3f;
MLX4_GET(param->mtt_base, outbox, INIT_HCA_MTT_BASE_OFFSET);
MLX4_GET(param->cmpt_base, outbox, INIT_HCA_CMPT_BASE_OFFSET);
/* UAR attributes */
MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET);
MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);
MLX4_GET(byte_field, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);
param->log_uar_sz = byte_field & 0xf;
/* phv_check enable */
MLX4_GET(byte_field, outbox, INIT_HCA_CACHELINE_SZ_OFFSET);