RDMA/mlx5: Add support to get QP resource in RAW format

Add a generic function to use the resource dump mechanism to get the
QP resource data.

Link: https://lore.kernel.org/r/20200623113043.1228482-10-leon@kernel.org
Signed-off-by: Maor Gottlieb <maorg@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
commit 1776dd234a (parent 65959522f8)
Author: Maor Gottlieb, 2020-06-23 14:30:41 +03:00
Committed by: Jason Gunthorpe
3 changed files with 79 additions and 0 deletions

drivers/infiniband/hw/mlx5/main.c

@@ -6599,6 +6599,7 @@ static const struct ib_device_ops mlx5_ib_dev_ops = {
	.drain_sq = mlx5_ib_drain_sq,
	.enable_driver = mlx5_ib_enable_driver,
	.fill_res_mr_entry = mlx5_ib_fill_res_mr_entry,
	.fill_res_qp_entry_raw = mlx5_ib_fill_res_qp_entry_raw,
	.fill_stat_mr_entry = mlx5_ib_fill_stat_mr_entry,
	.get_dev_fw_str = get_dev_fw_str,
	.get_dma_mr = mlx5_ib_get_dma_mr,

drivers/infiniband/hw/mlx5/mlx5_ib.h

@@ -1376,6 +1376,7 @@ struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *dev,
void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *dev,
				  u8 port_num);
int mlx5_ib_fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ib_mr);
int mlx5_ib_fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ibqp);
int mlx5_ib_fill_stat_mr_entry(struct sk_buff *msg, struct ib_mr *ib_mr);

extern const struct uapi_definition mlx5_ib_devx_defs[];

drivers/infiniband/hw/mlx5/restrack.c

@@ -4,10 +4,79 @@
 */

#include <uapi/rdma/rdma_netlink.h>
#include <linux/mlx5/rsc_dump.h>
#include <rdma/ib_umem_odp.h>
#include <rdma/restrack.h>
#include "mlx5_ib.h"

#define MAX_DUMP_SIZE 1024

static int dump_rsc(struct mlx5_core_dev *dev, enum mlx5_sgmt_type type,
		    int index, void *data, int *data_len)
{
	struct mlx5_core_dev *mdev = dev;
	struct mlx5_rsc_dump_cmd *cmd;
	struct mlx5_rsc_key key = {};
	struct page *page;
	int offset = 0;
	int err = 0;
	int cmd_err;
	int size;

	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	key.size = PAGE_SIZE;
	key.rsc = type;
	key.index1 = index;
	key.num_of_obj1 = 1;

	cmd = mlx5_rsc_dump_cmd_create(mdev, &key);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto free_page;
	}

	do {
		cmd_err = mlx5_rsc_dump_next(mdev, cmd, page, &size);
		if (cmd_err < 0 || size + offset > MAX_DUMP_SIZE) {
			err = cmd_err;
			goto destroy_cmd;
		}
		memcpy(data + offset, page_address(page), size);
		offset += size;
	} while (cmd_err > 0);
	*data_len = offset;

destroy_cmd:
	mlx5_rsc_dump_cmd_destroy(cmd);
free_page:
	__free_page(page);
	return err;
}

static int fill_res_raw(struct sk_buff *msg, struct mlx5_ib_dev *dev,
			enum mlx5_sgmt_type type, u32 key)
{
	int len = 0;
	void *data;
	int err;

	data = kzalloc(MAX_DUMP_SIZE, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	err = dump_rsc(dev->mdev, type, key, data, &len);
	if (err)
		goto out;

	err = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, len, data);

out:
	kfree(data);
	return err;
}

int mlx5_ib_fill_stat_mr_entry(struct sk_buff *msg,
			       struct ib_mr *ibmr)
{
@@ -68,3 +137,11 @@ int mlx5_ib_fill_res_mr_entry(struct sk_buff *msg,
	nla_nest_cancel(msg, table_attr);
	return -EMSGSIZE;
}

int mlx5_ib_fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ibqp)
{
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);

	return fill_res_raw(msg, dev, MLX5_SGMT_TYPE_PRM_QUERY_QP,
			    ibqp->qp_num);
}
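
The helper added here is deliberately generic: dump_rsc() only needs a resource-dump segment type and an object index, and fill_res_raw() places whatever the firmware returns into the RDMA_NLDEV_ATTR_RES_RAW netlink attribute as an opaque blob for userspace tooling to decode. As an illustrative sketch only (not part of this commit), a CQ raw-dump entry could reuse the same helper; the function name, the MLX5_SGMT_TYPE_PRM_QUERY_CQ segment type, and the to_mcq()/cq->mcq.cqn index are assumptions here:

/* Illustrative sketch, not part of this commit: reuse the generic
 * fill_res_raw() helper with a CQ segment type, using the CQ number
 * as the dump index (assumed to match what the firmware expects).
 */
int mlx5_ib_fill_res_cq_entry_raw(struct sk_buff *msg, struct ib_cq *ibcq)
{
	struct mlx5_ib_dev *dev = to_mdev(ibcq->device);
	struct mlx5_ib_cq *cq = to_mcq(ibcq);

	return fill_res_raw(msg, dev, MLX5_SGMT_TYPE_PRM_QUERY_CQ, cq->mcq.cqn);
}

Wiring such a variant up would mirror the QP case above: declare it in mlx5_ib.h and register it in mlx5_ib_dev_ops, assuming a corresponding fill_res_cq_entry_raw hook is available in struct ib_device_ops as it is for QPs.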