IB/mlx5: Introduce DEVX

Introduce DEVX to enable direct device commands in downstream patches
from this series.

In this mode of operation the firmware manages the isolation between
processes' resources, so a DEVX user id is created and assigned to the
given user context upon its allocation request.

A capability check verifies that the firmware actually supports this
feature before the DEVX user id is created.
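
For illustration only, here is a minimal user-space sketch (not part of this
patch) of how a provider could opt into DEVX through the v2 alloc-ucontext
request once this flag is exposed. It assumes the kernel uapi header is
installed as <rdma/mlx5-abi.h>; the helper name is hypothetical and all other
request fields are elided.

#include <string.h>
#include <rdma/mlx5-abi.h>	/* struct mlx5_ib_alloc_ucontext_req_v2, MLX5_IB_ALLOC_UCTX_DEVX */

/* Hypothetical helper: prepare the driver-private part of the
 * alloc-ucontext request so that the kernel creates a DEVX uid for
 * this context. The kernel rejects the request if the firmware
 * capability, CAP_NET_RAW or link-layer checks fail. */
static void request_devx_ucontext(struct mlx5_ib_alloc_ucontext_req_v2 *req)
{
	memset(req, 0, sizeof(*req));
	req->total_num_bfregs = 1;	/* placeholder; real providers size this from the device */
	req->flags = MLX5_IB_ALLOC_UCTX_DEVX;
}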

Signed-off-by: Yishai Hadas <yishaih@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
commit a8b92ca1b0 (parent 7dc08dcfc8)
Author: Yishai Hadas
Date: 2018-06-17 12:59:57 +03:00
Committed by: Jason Gunthorpe
5 changed files with 96 additions and 3 deletions

drivers/infiniband/hw/mlx5/Makefile

@@ -3,3 +3,4 @@ obj-$(CONFIG_MLX5_INFINIBAND) += mlx5_ib.o
 mlx5_ib-y := main.o cq.o doorbell.o qp.o mem.o srq.o mr.o ah.o mad.o gsi.o ib_virt.o cmd.o cong.o
 mlx5_ib-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += odp.o
 mlx5_ib-$(CONFIG_MLX5_ESWITCH) += ib_rep.o
+mlx5_ib-$(CONFIG_INFINIBAND_USER_ACCESS) += devx.o

drivers/infiniband/hw/mlx5/devx.c (new file)

@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
+ */
+
+#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/uverbs_types.h>
+#include <rdma/uverbs_ioctl.h>
+#include <rdma/mlx5_user_ioctl_cmds.h>
+#include <rdma/ib_umem.h>
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/fs.h>
+#include "mlx5_ib.h"
+
+int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context)
+{
+	u32 in[MLX5_ST_SZ_DW(create_uctx_in)] = {0};
+	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+	u64 general_obj_types;
+	void *uctx;
+	void *hdr;
+	int err;
+
+	uctx = MLX5_ADDR_OF(create_uctx_in, in, uctx);
+	hdr = MLX5_ADDR_OF(create_uctx_in, in, hdr);
+
+	general_obj_types = MLX5_CAP_GEN_64(dev->mdev, general_obj_types);
+	if (!(general_obj_types & MLX5_GENERAL_OBJ_TYPES_CAP_UCTX) ||
+	    !(general_obj_types & MLX5_GENERAL_OBJ_TYPES_CAP_UMEM))
+		return -EINVAL;
+
+	if (!capable(CAP_NET_RAW))
+		return -EPERM;
+
+	MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+	MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type, MLX5_OBJ_TYPE_UCTX);
+
+	err = mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
+	if (err)
+		return err;
+
+	context->devx_uid = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+	return 0;
+}
+
+void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev,
+			  struct mlx5_ib_ucontext *context)
+{
+	u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {0};
+	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+
+	MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
+	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_OBJ_TYPE_UCTX);
+	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, context->devx_uid);
+
+	mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
+}

drivers/infiniband/hw/mlx5/main.c

@@ -1650,8 +1650,8 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 	if (err)
 		return ERR_PTR(err);
 
-	if (req.flags)
-		return ERR_PTR(-EINVAL);
+	if (req.flags & ~MLX5_IB_ALLOC_UCTX_DEVX)
+		return ERR_PTR(-EOPNOTSUPP);
 
 	if (req.comp_mask || req.reserved0 || req.reserved1 || req.reserved2)
 		return ERR_PTR(-EOPNOTSUPP);
@@ -1735,6 +1735,18 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 		goto out_uars;
 	}
 
+	if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX) {
+		/* Block DEVX on Infiniband as of SELinux */
+		if (mlx5_ib_port_link_layer(ibdev, 1) != IB_LINK_LAYER_ETHERNET) {
+			err = -EPERM;
+			goto out_td;
+		}
+
+		err = mlx5_ib_devx_create(dev, context);
+		if (err)
+			goto out_td;
+	}
+
 	INIT_LIST_HEAD(&context->vma_private_list);
 	mutex_init(&context->vma_private_list_mutex);
 	INIT_LIST_HEAD(&context->db_page_list);
@@ -1795,7 +1807,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 	err = ib_copy_to_udata(udata, &resp, resp.response_length);
 	if (err)
-		goto out_td;
+		goto out_mdev;
 
 	bfregi->ver = ver;
 	bfregi->num_low_latency_bfregs = req.num_low_latency_bfregs;
@@ -1805,6 +1817,9 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 	return &context->ibucontext;
 
+out_mdev:
+	if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX)
+		mlx5_ib_devx_destroy(dev, context);
 out_td:
 	if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
 		mlx5_ib_dealloc_transport_domain(dev, context->tdn);
@@ -1830,6 +1845,9 @@ static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
 	struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
 	struct mlx5_bfreg_info *bfregi;
 
+	if (context->devx_uid)
+		mlx5_ib_devx_destroy(dev, context);
+
 	bfregi = &context->bfregi;
 	if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
 		mlx5_ib_dealloc_transport_domain(dev, context->tdn);

drivers/infiniband/hw/mlx5/mlx5_ib.h

@@ -143,6 +143,7 @@ struct mlx5_ib_ucontext {
 	u64			lib_caps;
 	DECLARE_BITMAP(dm_pages, MLX5_MAX_MEMIC_PAGES);
+	u16			devx_uid;
 };
 
 static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
@@ -1215,6 +1216,18 @@ struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *dev,
 void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *dev,
 				  u8 port_num);
 
+#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
+int mlx5_ib_devx_create(struct mlx5_ib_dev *dev,
+			struct mlx5_ib_ucontext *context);
+void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev,
+			  struct mlx5_ib_ucontext *context);
+#else
+static inline int
+mlx5_ib_devx_create(struct mlx5_ib_dev *dev,
+		    struct mlx5_ib_ucontext *context) { return -EOPNOTSUPP; };
+static inline void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev,
+					struct mlx5_ib_ucontext *context) {}
+#endif
 static inline void init_query_mad(struct ib_smp *mad)
 {
 	mad->base_version = 1;

include/uapi/rdma/mlx5-abi.h

@@ -76,6 +76,9 @@ enum mlx5_lib_caps {
 	MLX5_LIB_CAP_4K_UAR	= (__u64)1 << 0,
 };
 
+enum mlx5_ib_alloc_uctx_v2_flags {
+	MLX5_IB_ALLOC_UCTX_DEVX	= 1 << 0,
+};
 struct mlx5_ib_alloc_ucontext_req_v2 {
 	__u32	total_num_bfregs;
 	__u32	num_low_latency_bfregs;