/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kref.h>
#include <linux/random.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include <rdma/ib_verbs.h>
#include "mlx5_ib.h"

enum {
	MAX_PENDING_REG_MR = 8,
};

#define MLX5_UMR_ALIGN 2048

static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static int mr_cache_max_order(struct mlx5_ib_dev *dev);
static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);

static bool umr_can_modify_entity_size(struct mlx5_ib_dev *dev)
{
	return !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled);
}

static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
{
	return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled);
}

static bool use_umr(struct mlx5_ib_dev *dev, int order)
{
	return order <= mr_cache_max_order(dev) &&
	       umr_can_modify_entity_size(dev);
}

static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	/* Wait until all page fault handlers using the mr complete. */
	synchronize_srcu(&dev->mr_srcu);
#endif

	return err;
}

static int order2idx(struct mlx5_ib_dev *dev, int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;

	if (order < cache->ent[0].order)
		return 0;
	else
		return order - cache->ent[0].order;
}

static bool use_umr_mtt_update(struct mlx5_ib_mr *mr, u64 start, u64 length)
{
	return ((u64)1 << mr->order) * MLX5_ADAPTER_PAGE_SIZE >=
		length + (start & (MLX5_ADAPTER_PAGE_SIZE - 1));
}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
static void update_odp_mr(struct mlx5_ib_mr *mr)
{
	if (mr->umem->is_odp) {
		/*
		 * This barrier prevents the compiler from moving the
		 * setting of umem->odp_data->private to point to our
		 * MR, before reg_umr finished, to ensure that the MR
		 * initialization has finished before starting to
		 * handle invalidations.
		 */
		smp_wmb();
		to_ib_umem_odp(mr->umem)->private = mr;
		/*
		 * Make sure we will see the new
		 * umem->odp_data->private value in the invalidation
		 * routines, before we can get page faults on the
		 * MR. Page faults can happen once we put the MR in
		 * the tree, below this line. Without the barrier,
		 * there can be a fault handling and an invalidation
		 * before umem->odp_data->private == mr is visible to
		 * the invalidation handler.
		 */
		smp_wmb();
	}
}
#endif
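
/*
 * reg_mr_callback() - completion handler for the asynchronous mkey creation
 * issued by add_keys(). On failure the MR is freed and the fill-delay timer
 * is armed; on success the mkey is finalized (variable key byte | returned
 * mkey index), the MR is added to its cache entry and to the device mkey
 * table, and any waiter in mlx5_mr_cache_alloc() is woken.
 */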
static void reg_mr_callback(int status, void *context)
{
	struct mlx5_ib_mr *mr = context;
	struct mlx5_ib_dev *dev = mr->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int c = order2idx(dev, mr->order);
	struct mlx5_cache_ent *ent = &cache->ent[c];
	u8 key;
	unsigned long flags;
	struct mlx5_mkey_table *table = &dev->mdev->priv.mkey_table;
	int err;

	spin_lock_irqsave(&ent->lock, flags);
	ent->pending--;
	spin_unlock_irqrestore(&ent->lock, flags);
	if (status) {
		mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status);
		kfree(mr);
		dev->fill_delay = 1;
		mod_timer(&dev->delay_timer, jiffies + HZ);
		return;
	}

	mr->mmkey.type = MLX5_MKEY_MR;
	spin_lock_irqsave(&dev->mdev->priv.mkey_lock, flags);
	key = dev->mdev->priv.mkey_key++;
	spin_unlock_irqrestore(&dev->mdev->priv.mkey_lock, flags);
	mr->mmkey.key = mlx5_idx_to_mkey(MLX5_GET(create_mkey_out, mr->out, mkey_index)) | key;

	cache->last_add = jiffies;

	spin_lock_irqsave(&ent->lock, flags);
	list_add_tail(&mr->list, &ent->head);
	ent->cur++;
	ent->size++;
	spin_unlock_irqrestore(&ent->lock, flags);

	write_lock_irqsave(&table->lock, flags);
	err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->mmkey.key),
				&mr->mmkey);
	if (err)
		pr_err("Error inserting to mkey tree. 0x%x\n", -err);
	write_unlock_irqrestore(&table->lock, flags);

	if (!completion_done(&ent->compl))
		complete(&ent->compl);
}
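
/*
 * add_keys() - asynchronously create @num mkeys for cache entry @c. The mkeys
 * are created with the "free" and "umr_en" bits set so they can later be
 * reconfigured through UMR; completion is handled in reg_mr_callback().
 * Returns -EAGAIN when MAX_PENDING_REG_MR creations are already in flight.
 */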
static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err = 0;
	int i;

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	for (i = 0; i < num; i++) {
		if (ent->pending >= MAX_PENDING_REG_MR) {
			err = -EAGAIN;
			break;
		}

		mr = kzalloc(sizeof(*mr), GFP_KERNEL);
		if (!mr) {
			err = -ENOMEM;
			break;
		}
		mr->order = ent->order;
		mr->allocated_from_cache = 1;
		mr->dev = dev;

		MLX5_SET(mkc, mkc, free, 1);
		MLX5_SET(mkc, mkc, umr_en, 1);
		MLX5_SET(mkc, mkc, access_mode_1_0, ent->access_mode & 0x3);
		MLX5_SET(mkc, mkc, access_mode_4_2,
			 (ent->access_mode >> 2) & 0x7);

		MLX5_SET(mkc, mkc, qpn, 0xffffff);
		MLX5_SET(mkc, mkc, translations_octword_size, ent->xlt);
		MLX5_SET(mkc, mkc, log_page_size, ent->page);

		spin_lock_irq(&ent->lock);
		ent->pending++;
		spin_unlock_irq(&ent->lock);
		err = mlx5_core_create_mkey_cb(dev->mdev, &mr->mmkey,
					       in, inlen,
					       mr->out, sizeof(mr->out),
					       reg_mr_callback, mr);
		if (err) {
			spin_lock_irq(&ent->lock);
			ent->pending--;
			spin_unlock_irq(&ent->lock);
			mlx5_ib_warn(dev, "create mkey failed %d\n", err);
			kfree(mr);
			break;
		}
	}

	kfree(in);
	return err;
}
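
/*
 * remove_keys() - destroy up to @num mkeys from cache entry @c. The MRs are
 * moved to a private list under the entry lock and destroyed; on ODP-enabled
 * builds they are freed only after synchronize_srcu() guarantees no page-fault
 * handler still references them.
 */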
static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_ib_mr *tmp_mr;
	struct mlx5_ib_mr *mr;
	LIST_HEAD(del_list);
	int i;

	for (i = 0; i < num; i++) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);
			break;
		}
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		list_move(&mr->list, &del_list);
		ent->cur--;
		ent->size--;
		spin_unlock_irq(&ent->lock);
		mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
	}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	synchronize_srcu(&dev->mr_srcu);
#endif

	list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
		list_del(&mr->list);
		kfree(mr);
	}
}
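
/*
 * debugfs interface: per cache entry, "size" resizes the entry by adding or
 * removing mkeys, while "limit" sets the low-water mark the background worker
 * tries to keep available.
 */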
static ssize_t size_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	struct mlx5_ib_dev *dev = ent->dev;
	char lbuf[20] = {0};
	u32 var;
	int err;
	int c;

	count = min(count, sizeof(lbuf) - 1);
	if (copy_from_user(lbuf, buf, count))
		return -EFAULT;

	c = order2idx(dev, ent->order);

	if (sscanf(lbuf, "%u", &var) != 1)
		return -EINVAL;

	if (var < ent->limit)
		return -EINVAL;

	if (var > ent->size) {
		do {
			err = add_keys(dev, c, var - ent->size);
			if (err && err != -EAGAIN)
				return err;

			usleep_range(3000, 5000);
		} while (err);
	} else if (var < ent->size) {
		remove_keys(dev, c, ent->size - var);
	}

	return count;
}

static ssize_t size_read(struct file *filp, char __user *buf, size_t count,
			 loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	char lbuf[20];
	int err;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->size);
	if (err < 0)
		return err;

	return simple_read_from_buffer(buf, count, pos, lbuf, err);
}

static const struct file_operations size_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= size_write,
	.read	= size_read,
};

static ssize_t limit_write(struct file *filp, const char __user *buf,
			   size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	struct mlx5_ib_dev *dev = ent->dev;
	char lbuf[20] = {0};
	u32 var;
	int err;
	int c;

	count = min(count, sizeof(lbuf) - 1);
	if (copy_from_user(lbuf, buf, count))
		return -EFAULT;

	c = order2idx(dev, ent->order);

	if (sscanf(lbuf, "%u", &var) != 1)
		return -EINVAL;

	if (var > ent->size)
		return -EINVAL;

	ent->limit = var;

	if (ent->cur < ent->limit) {
		err = add_keys(dev, c, 2 * ent->limit - ent->cur);
		if (err)
			return err;
	}

	return count;
}

static ssize_t limit_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	char lbuf[20];
	int err;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit);
	if (err < 0)
		return err;

	return simple_read_from_buffer(buf, count, pos, lbuf, err);
}

static const struct file_operations limit_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= limit_write,
	.read	= limit_read,
};

static int someone_adding(struct mlx5_mr_cache *cache)
{
	int i;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		if (cache->ent[i].cur < cache->ent[i].limit)
			return 1;
	}

	return 0;
}
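
/*
 * __cache_work_func() - background maintenance for one cache entry: refill it
 * towards 2 * limit (backing off while fill_delay is set or while add_keys()
 * returns -EAGAIN), and shrink it once it holds more than 2 * limit mkeys and
 * the cache has been idle long enough.
 */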
static void __cache_work_func(struct mlx5_cache_ent *ent)
{
	struct mlx5_ib_dev *dev = ent->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int i = order2idx(dev, ent->order);
	int err;

	if (cache->stopped)
		return;

	ent = &dev->cache.ent[i];
	if (ent->cur < 2 * ent->limit && !dev->fill_delay) {
		err = add_keys(dev, i, 1);
		if (ent->cur < 2 * ent->limit) {
			if (err == -EAGAIN) {
				mlx5_ib_dbg(dev, "returned eagain, order %d\n",
					    i + 2);
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(3));
			} else if (err) {
				mlx5_ib_warn(dev, "command failed order %d, err %d\n",
					     i + 2, err);
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(1000));
			} else {
				queue_work(cache->wq, &ent->work);
			}
		}
	} else if (ent->cur > 2 * ent->limit) {
		/*
		 * The remove_keys() logic is performed as a garbage collection
		 * task. Such a task is intended to be run when no other active
		 * processes are running.
		 *
		 * need_resched() will return TRUE if there are user tasks
		 * to be activated in the near future.
		 *
		 * In such a case, we don't execute remove_keys() and postpone
		 * the garbage collection work to try to run in the next cycle,
		 * in order to free CPU resources to other tasks.
		 */
		if (!need_resched() && !someone_adding(cache) &&
		    time_after(jiffies, cache->last_add + 300 * HZ)) {
			remove_keys(dev, i, 1);
			if (ent->cur > ent->limit)
				queue_work(cache->wq, &ent->work);
		} else {
			queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
		}
	}
}

static void delayed_cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, dwork.work);
	__cache_work_func(ent);
}

static void cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, work);
	__cache_work_func(ent);
}
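
/*
 * mlx5_mr_cache_alloc() - take an MR from a specific cache entry. If the
 * entry is empty, trigger one asynchronous mkey creation and sleep on the
 * entry's completion until reg_mr_callback() signals it.
 */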
struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, int entry)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	struct mlx5_ib_mr *mr;
	int err;

	if (entry < 0 || entry >= MAX_MR_CACHE_ENTRIES) {
		mlx5_ib_err(dev, "cache entry %d is out of range\n", entry);
		return NULL;
	}

	ent = &cache->ent[entry];
	while (1) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);

			err = add_keys(dev, entry, 1);
			if (err && err != -EAGAIN)
				return ERR_PTR(err);

			wait_for_completion(&ent->compl);
		} else {
			mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
					      list);
			list_del(&mr->list);
			ent->cur--;
			spin_unlock_irq(&ent->lock);
			if (ent->cur < ent->limit)
				queue_work(cache->wq, &ent->work);
			return mr;
		}
	}
}
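
/*
 * alloc_cached_mr() - take an MR of at least @order from the cache, scanning
 * from the best-fit entry up to the largest UMR-capable entry and queueing
 * refill work for every entry it visits. Returns NULL (and counts a miss)
 * when nothing suitable is currently cached.
 */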
static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_ib_mr *mr = NULL;
	struct mlx5_cache_ent *ent;
	int last_umr_cache_entry;
	int c;
	int i;

	c = order2idx(dev, order);
	last_umr_cache_entry = order2idx(dev, mr_cache_max_order(dev));
	if (c < 0 || c > last_umr_cache_entry) {
		mlx5_ib_warn(dev, "order %d, cache index %d\n", order, c);
		return NULL;
	}

	for (i = c; i <= last_umr_cache_entry; i++) {
		ent = &cache->ent[i];

		mlx5_ib_dbg(dev, "order %d, cache index %d\n", ent->order, i);

		spin_lock_irq(&ent->lock);
		if (!list_empty(&ent->head)) {
			mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
					      list);
			list_del(&mr->list);
			ent->cur--;
			spin_unlock_irq(&ent->lock);
			if (ent->cur < ent->limit)
				queue_work(cache->wq, &ent->work);
			break;
		}
		spin_unlock_irq(&ent->lock);

		queue_work(cache->wq, &ent->work);
	}

	if (!mr)
		cache->ent[c].miss++;

	return mr;
}
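
/*
 * mlx5_mr_cache_free() - return an MR to its cache entry after invalidating it
 * through UMR; if the entry now holds more than 2 * limit mkeys, kick the
 * worker to shrink it.
 */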
void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int shrink = 0;
	int c;

	c = order2idx(dev, mr->order);
	if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
		mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c);
		return;
	}

	if (unreg_umr(dev, mr))
		return;

	ent = &cache->ent[c];
	spin_lock_irq(&ent->lock);
	list_add_tail(&mr->list, &ent->head);
	ent->cur++;
	if (ent->cur > 2 * ent->limit)
		shrink = 1;
	spin_unlock_irq(&ent->lock);

	if (shrink)
		queue_work(cache->wq, &ent->work);
}
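
/*
 * clean_keys() - destroy and free every mkey of one cache entry; used when the
 * cache is torn down.
 */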
static void clean_keys(struct mlx5_ib_dev *dev, int c)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_ib_mr *tmp_mr;
	struct mlx5_ib_mr *mr;
	LIST_HEAD(del_list);

	cancel_delayed_work(&ent->dwork);
	while (1) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);
			break;
		}
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		list_move(&mr->list, &del_list);
		ent->cur--;
		ent->size--;
		spin_unlock_irq(&ent->lock);
		mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
	}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	synchronize_srcu(&dev->mr_srcu);
#endif

	list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
		list_del(&mr->list);
		kfree(mr);
	}
}

static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
{
	if (!mlx5_debugfs_root || dev->rep)
		return;

	debugfs_remove_recursive(dev->cache.root);
	dev->cache.root = NULL;
}
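
/*
 * mlx5_mr_cache_debugfs_init() - expose per-entry "size", "limit", "cur" and
 * "miss" files under the device's "mr_cache" debugfs directory.
 */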
static int mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int i;

	if (!mlx5_debugfs_root || dev->rep)
		return 0;

	cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root);
	if (!cache->root)
		return -ENOMEM;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		sprintf(ent->name, "%d", ent->order);
		ent->dir = debugfs_create_dir(ent->name, cache->root);
		if (!ent->dir)
			goto err;

		ent->fsize = debugfs_create_file("size", 0600, ent->dir, ent,
						 &size_fops);
		if (!ent->fsize)
			goto err;

		ent->flimit = debugfs_create_file("limit", 0600, ent->dir, ent,
						  &limit_fops);
		if (!ent->flimit)
			goto err;

		ent->fcur = debugfs_create_u32("cur", 0400, ent->dir,
					       &ent->cur);
		if (!ent->fcur)
			goto err;

		ent->fmiss = debugfs_create_u32("miss", 0600, ent->dir,
						&ent->miss);
		if (!ent->fmiss)
			goto err;
	}

	return 0;
err:
	mlx5_mr_cache_debugfs_cleanup(dev);

	return -ENOMEM;
}

static void delay_time_func(struct timer_list *t)
{
	struct mlx5_ib_dev *dev = from_timer(dev, t, delay_timer);

	dev->fill_delay = 0;
}
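
/*
 * mlx5_mr_cache_init() - create the cache workqueue and start all
 * MAX_MR_CACHE_ENTRIES entries; entries above MR_CACHE_LAST_STD_ENTRY are set
 * up for ODP rather than for standard MTT translation.
 */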
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int err;
	int i;

	mutex_init(&dev->slow_path_mutex);
	cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM);
	if (!cache->wq) {
		mlx5_ib_warn(dev, "failed to create work queue\n");
		return -ENOMEM;
	}
|
|
|
|
|
treewide: setup_timer() -> timer_setup()
This converts all remaining cases of the old setup_timer() API into using
timer_setup(), where the callback argument is the structure already
holding the struct timer_list. These should have no behavioral changes,
since they just change which pointer is passed into the callback with
the same available pointers after conversion. It handles the following
examples, in addition to some other variations.
Casting from unsigned long:
void my_callback(unsigned long data)
{
struct something *ptr = (struct something *)data;
...
}
...
setup_timer(&ptr->my_timer, my_callback, ptr);
and forced object casts:
void my_callback(struct something *ptr)
{
...
}
...
setup_timer(&ptr->my_timer, my_callback, (unsigned long)ptr);
become:
void my_callback(struct timer_list *t)
{
struct something *ptr = from_timer(ptr, t, my_timer);
...
}
...
timer_setup(&ptr->my_timer, my_callback, 0);
Direct function assignments:
void my_callback(unsigned long data)
{
struct something *ptr = (struct something *)data;
...
}
...
ptr->my_timer.function = my_callback;
have a temporary cast added, along with converting the args:
void my_callback(struct timer_list *t)
{
struct something *ptr = from_timer(ptr, t, my_timer);
...
}
...
ptr->my_timer.function = (TIMER_FUNC_TYPE)my_callback;
And finally, callbacks without a data assignment:
void my_callback(unsigned long data)
{
...
}
...
setup_timer(&ptr->my_timer, my_callback, 0);
have their argument renamed to verify they're unused during conversion:
void my_callback(struct timer_list *unused)
{
...
}
...
timer_setup(&ptr->my_timer, my_callback, 0);
The conversion is done with the following Coccinelle script:
spatch --very-quiet --all-includes --include-headers \
-I ./arch/x86/include -I ./arch/x86/include/generated \
-I ./include -I ./arch/x86/include/uapi \
-I ./arch/x86/include/generated/uapi -I ./include/uapi \
-I ./include/generated/uapi --include ./include/linux/kconfig.h \
--dir . \
--cocci-file ~/src/data/timer_setup.cocci
@fix_address_of@
expression e;
@@
setup_timer(
-&(e)
+&e
, ...)
// Update any raw setup_timer() usages that have a NULL callback, but
// would otherwise match change_timer_function_usage, since the latter
// will update all function assignments done in the face of a NULL
// function initialization in setup_timer().
@change_timer_function_usage_NULL@
expression _E;
identifier _timer;
type _cast_data;
@@
(
-setup_timer(&_E->_timer, NULL, _E);
+timer_setup(&_E->_timer, NULL, 0);
|
-setup_timer(&_E->_timer, NULL, (_cast_data)_E);
+timer_setup(&_E->_timer, NULL, 0);
|
-setup_timer(&_E._timer, NULL, &_E);
+timer_setup(&_E._timer, NULL, 0);
|
-setup_timer(&_E._timer, NULL, (_cast_data)&_E);
+timer_setup(&_E._timer, NULL, 0);
)
@change_timer_function_usage@
expression _E;
identifier _timer;
struct timer_list _stl;
identifier _callback;
type _cast_func, _cast_data;
@@
(
-setup_timer(&_E->_timer, _callback, _E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, &_callback, _E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, _callback, (_cast_data)_E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, &_callback, (_cast_data)_E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, (_cast_func)_callback, _E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, (_cast_func)&_callback, _E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, (_cast_func)_callback, (_cast_data)_E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, (_cast_func)&_callback, (_cast_data)_E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, (_cast_data)_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, (_cast_data)&_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, &_callback, (_cast_data)_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, &_callback, (_cast_data)&_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, (_cast_func)_callback, (_cast_data)_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, (_cast_func)_callback, (_cast_data)&_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, (_cast_func)&_callback, (_cast_data)_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, (_cast_func)&_callback, (_cast_data)&_E);
+timer_setup(&_E._timer, _callback, 0);
|
_E->_timer@_stl.function = _callback;
|
_E->_timer@_stl.function = &_callback;
|
_E->_timer@_stl.function = (_cast_func)_callback;
|
_E->_timer@_stl.function = (_cast_func)&_callback;
|
_E._timer@_stl.function = _callback;
|
_E._timer@_stl.function = &_callback;
|
_E._timer@_stl.function = (_cast_func)_callback;
|
_E._timer@_stl.function = (_cast_func)&_callback;
)
// callback(unsigned long arg)
@change_callback_handle_cast
depends on change_timer_function_usage@
identifier change_timer_function_usage._callback;
identifier change_timer_function_usage._timer;
type _origtype;
identifier _origarg;
type _handletype;
identifier _handle;
@@
void _callback(
-_origtype _origarg
+struct timer_list *t
)
{
(
... when != _origarg
_handletype *_handle =
-(_handletype *)_origarg;
+from_timer(_handle, t, _timer);
... when != _origarg
|
... when != _origarg
_handletype *_handle =
-(void *)_origarg;
+from_timer(_handle, t, _timer);
... when != _origarg
|
... when != _origarg
_handletype *_handle;
... when != _handle
_handle =
-(_handletype *)_origarg;
+from_timer(_handle, t, _timer);
... when != _origarg
|
... when != _origarg
_handletype *_handle;
... when != _handle
_handle =
-(void *)_origarg;
+from_timer(_handle, t, _timer);
... when != _origarg
)
}
// callback(unsigned long arg) without existing variable
@change_callback_handle_cast_no_arg
depends on change_timer_function_usage &&
!change_callback_handle_cast@
identifier change_timer_function_usage._callback;
identifier change_timer_function_usage._timer;
type _origtype;
identifier _origarg;
type _handletype;
@@
void _callback(
-_origtype _origarg
+struct timer_list *t
)
{
+ _handletype *_origarg = from_timer(_origarg, t, _timer);
+
... when != _origarg
- (_handletype *)_origarg
+ _origarg
... when != _origarg
}
// Avoid already converted callbacks.
@match_callback_converted
depends on change_timer_function_usage &&
!change_callback_handle_cast &&
!change_callback_handle_cast_no_arg@
identifier change_timer_function_usage._callback;
identifier t;
@@
void _callback(struct timer_list *t)
{ ... }
// callback(struct something *handle)
@change_callback_handle_arg
depends on change_timer_function_usage &&
!match_callback_converted &&
!change_callback_handle_cast &&
!change_callback_handle_cast_no_arg@
identifier change_timer_function_usage._callback;
identifier change_timer_function_usage._timer;
type _handletype;
identifier _handle;
@@
void _callback(
-_handletype *_handle
+struct timer_list *t
)
{
+ _handletype *_handle = from_timer(_handle, t, _timer);
...
}
// If change_callback_handle_arg ran on an empty function, remove
// the added handler.
@unchange_callback_handle_arg
depends on change_timer_function_usage &&
change_callback_handle_arg@
identifier change_timer_function_usage._callback;
identifier change_timer_function_usage._timer;
type _handletype;
identifier _handle;
identifier t;
@@
void _callback(struct timer_list *t)
{
- _handletype *_handle = from_timer(_handle, t, _timer);
}
// We only want to refactor the setup_timer() data argument if we've found
// the matching callback. This undoes changes in change_timer_function_usage.
@unchange_timer_function_usage
depends on change_timer_function_usage &&
!change_callback_handle_cast &&
!change_callback_handle_cast_no_arg &&
!change_callback_handle_arg@
expression change_timer_function_usage._E;
identifier change_timer_function_usage._timer;
identifier change_timer_function_usage._callback;
type change_timer_function_usage._cast_data;
@@
(
-timer_setup(&_E->_timer, _callback, 0);
+setup_timer(&_E->_timer, _callback, (_cast_data)_E);
|
-timer_setup(&_E._timer, _callback, 0);
+setup_timer(&_E._timer, _callback, (_cast_data)&_E);
)
// If we fixed a callback from a .function assignment, fix the
// assignment cast now.
@change_timer_function_assignment
depends on change_timer_function_usage &&
(change_callback_handle_cast ||
change_callback_handle_cast_no_arg ||
change_callback_handle_arg)@
expression change_timer_function_usage._E;
identifier change_timer_function_usage._timer;
identifier change_timer_function_usage._callback;
type _cast_func;
typedef TIMER_FUNC_TYPE;
@@
(
_E->_timer.function =
-_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E->_timer.function =
-&_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E->_timer.function =
-(_cast_func)_callback;
+(TIMER_FUNC_TYPE)_callback
;
|
_E->_timer.function =
-(_cast_func)&_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-&_callback;
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-(_cast_func)_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-(_cast_func)&_callback
+(TIMER_FUNC_TYPE)_callback
;
)
// Sometimes timer functions are called directly. Replace matched args.
@change_timer_function_calls
depends on change_timer_function_usage &&
(change_callback_handle_cast ||
change_callback_handle_cast_no_arg ||
change_callback_handle_arg)@
expression _E;
identifier change_timer_function_usage._timer;
identifier change_timer_function_usage._callback;
type _cast_data;
@@
_callback(
(
-(_cast_data)_E
+&_E->_timer
|
-(_cast_data)&_E
+&_E._timer
|
-_E
+&_E->_timer
)
)
// If a timer has been configured without a data argument, it can be
// converted without regard to the callback argument, since it is unused.
@match_timer_function_unused_data@
expression _E;
identifier _timer;
identifier _callback;
@@
(
-setup_timer(&_E->_timer, _callback, 0);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, _callback, 0L);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, _callback, 0UL);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, 0);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, 0L);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, 0UL);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_timer, _callback, 0);
+timer_setup(&_timer, _callback, 0);
|
-setup_timer(&_timer, _callback, 0L);
+timer_setup(&_timer, _callback, 0);
|
-setup_timer(&_timer, _callback, 0UL);
+timer_setup(&_timer, _callback, 0);
|
-setup_timer(_timer, _callback, 0);
+timer_setup(_timer, _callback, 0);
|
-setup_timer(_timer, _callback, 0L);
+timer_setup(_timer, _callback, 0);
|
-setup_timer(_timer, _callback, 0UL);
+timer_setup(_timer, _callback, 0);
)
@change_callback_unused_data
depends on match_timer_function_unused_data@
identifier match_timer_function_unused_data._callback;
type _origtype;
identifier _origarg;
@@
void _callback(
-_origtype _origarg
+struct timer_list *unused
)
{
... when != _origarg
}
Signed-off-by: Kees Cook <keescook@chromium.org>
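For orientation, here is a minimal before/after sketch of the conversion the script above performs; the struct, field, and callback names are hypothetical, and only the setup_timer()/timer_setup()/from_timer() usage mirrors the real kernel API:

/* Before: the callback receives an opaque unsigned long. */
struct foo {
	struct timer_list timer;
};

static void foo_timeout(unsigned long data)
{
	struct foo *foo = (struct foo *)data;
	/* ... handle the expiry ... */
}

	setup_timer(&foo->timer, foo_timeout, (unsigned long)foo);

/* After: the callback receives the timer itself and recovers the
 * containing structure with from_timer(), so the cast goes away. */
static void foo_timeout(struct timer_list *t)
{
	struct foo *foo = from_timer(foo, t, timer);
	/* ... handle the expiry ... */
}

	timer_setup(&foo->timer, foo_timeout, 0);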
2017-10-17 04:43:17 +07:00
|
|
|
timer_setup(&dev->delay_timer, delay_time_func, 0);
|
2013-07-07 21:25:49 +07:00
|
|
|
for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
|
|
|
|
ent = &cache->ent[i];
|
|
|
|
INIT_LIST_HEAD(&ent->head);
|
|
|
|
spin_lock_init(&ent->lock);
|
|
|
|
ent->order = i + 2;
|
|
|
|
ent->dev = dev;
|
2017-01-18 21:58:10 +07:00
|
|
|
ent->limit = 0;
|
2013-07-07 21:25:49 +07:00
|
|
|
|
2017-01-18 21:58:10 +07:00
|
|
|
init_completion(&ent->compl);
|
2013-07-07 21:25:49 +07:00
|
|
|
INIT_WORK(&ent->work, cache_work_func);
|
|
|
|
INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
|
|
|
|
queue_work(cache->wq, &ent->work);
|
2017-01-18 21:58:10 +07:00
|
|
|
|
2017-08-17 19:52:29 +07:00
|
|
|
if (i > MR_CACHE_LAST_STD_ENTRY) {
|
2017-01-18 21:58:11 +07:00
|
|
|
mlx5_odp_init_mr_cache_entry(ent);
|
2017-01-18 21:58:10 +07:00
|
|
|
continue;
|
2017-01-18 21:58:11 +07:00
|
|
|
}
|
2017-01-18 21:58:10 +07:00
|
|
|
|
2017-08-17 19:52:29 +07:00
|
|
|
if (ent->order > mr_cache_max_order(dev))
|
2017-01-18 21:58:10 +07:00
|
|
|
continue;
|
|
|
|
|
|
|
|
ent->page = PAGE_SHIFT;
|
|
|
|
ent->xlt = (1 << ent->order) * sizeof(struct mlx5_mtt) /
|
|
|
|
MLX5_IB_UMR_OCTOWORD;
|
|
|
|
ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
|
|
|
|
if ((dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) &&
|
2018-01-22 22:29:44 +07:00
|
|
|
!dev->rep &&
|
2017-01-18 21:58:10 +07:00
|
|
|
mlx5_core_is_pf(dev->mdev))
|
|
|
|
ent->limit = dev->mdev->profile->mr_cache[i].limit;
|
|
|
|
else
|
|
|
|
ent->limit = 0;
|
2013-07-07 21:25:49 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
err = mlx5_mr_cache_debugfs_init(dev);
|
|
|
|
if (err)
|
|
|
|
mlx5_ib_warn(dev, "cache debugfs failure\n");
|
|
|
|
|
2017-05-30 13:44:48 +07:00
|
|
|
/*
|
|
|
|
* We don't want to fail the driver if debugfs failed to initialize,
|
|
|
|
* so we are not forwarding the error to the user.
|
|
|
|
*/
|
|
|
|
|
2013-07-07 21:25:49 +07:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-10-27 20:36:43 +07:00
|
|
|
static void wait_for_async_commands(struct mlx5_ib_dev *dev)
|
|
|
|
{
|
|
|
|
struct mlx5_mr_cache *cache = &dev->cache;
|
|
|
|
struct mlx5_cache_ent *ent;
|
|
|
|
int total = 0;
|
|
|
|
int i;
|
|
|
|
int j;
|
|
|
|
|
|
|
|
for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
|
|
|
|
ent = &cache->ent[i];
|
|
|
|
for (j = 0 ; j < 1000; j++) {
|
|
|
|
if (!ent->pending)
|
|
|
|
break;
|
|
|
|
msleep(50);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
|
|
|
|
ent = &cache->ent[i];
|
|
|
|
total += ent->pending;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (total)
|
|
|
|
mlx5_ib_warn(dev, "aborted while there are %d pending mr requests\n", total);
|
|
|
|
else
|
|
|
|
mlx5_ib_warn(dev, "done with all pending requests\n");
|
|
|
|
}
|
|
|
|
|
2013-07-07 21:25:49 +07:00
|
|
|
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
2018-03-20 20:45:37 +07:00
|
|
|
if (!dev->cache.wq)
|
|
|
|
return 0;
|
|
|
|
|
2013-07-07 21:25:49 +07:00
|
|
|
dev->cache.stopped = 1;
|
2013-09-11 20:35:23 +07:00
|
|
|
flush_workqueue(dev->cache.wq);
|
2013-07-07 21:25:49 +07:00
|
|
|
|
|
|
|
mlx5_mr_cache_debugfs_cleanup(dev);
|
|
|
|
|
|
|
|
for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
|
|
|
|
clean_keys(dev, i);
|
|
|
|
|
2013-09-11 20:35:23 +07:00
|
|
|
destroy_workqueue(dev->cache.wq);
|
2016-10-27 20:36:43 +07:00
|
|
|
wait_for_async_commands(dev);
|
2013-10-23 13:53:14 +07:00
|
|
|
del_timer_sync(&dev->delay_timer);
|
2013-09-11 20:35:23 +07:00
|
|
|
|
2013-07-07 21:25:49 +07:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
|
|
|
|
{
|
|
|
|
struct mlx5_ib_dev *dev = to_mdev(pd->device);
|
2016-07-16 10:28:36 +07:00
|
|
|
int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
|
2014-07-29 03:30:22 +07:00
|
|
|
struct mlx5_core_dev *mdev = dev->mdev;
|
2013-07-07 21:25:49 +07:00
|
|
|
struct mlx5_ib_mr *mr;
|
2016-07-16 10:28:36 +07:00
|
|
|
void *mkc;
|
|
|
|
u32 *in;
|
2013-07-07 21:25:49 +07:00
|
|
|
int err;
|
|
|
|
|
|
|
|
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
|
|
|
|
if (!mr)
|
|
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
|
2016-07-16 10:28:36 +07:00
|
|
|
in = kzalloc(inlen, GFP_KERNEL);
|
2013-07-07 21:25:49 +07:00
|
|
|
if (!in) {
|
|
|
|
err = -ENOMEM;
|
|
|
|
goto err_free;
|
|
|
|
}
|
|
|
|
|
2016-07-16 10:28:36 +07:00
|
|
|
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
|
|
|
|
|
2018-04-05 22:53:28 +07:00
|
|
|
MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
|
2016-07-16 10:28:36 +07:00
|
|
|
MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
|
|
|
|
MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
|
|
|
|
MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
|
|
|
|
MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
|
|
|
|
MLX5_SET(mkc, mkc, lr, 1);
|
2013-07-07 21:25:49 +07:00
|
|
|
|
2016-07-16 10:28:36 +07:00
|
|
|
MLX5_SET(mkc, mkc, length64, 1);
|
|
|
|
MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
|
|
|
|
MLX5_SET(mkc, mkc, qpn, 0xffffff);
|
|
|
|
MLX5_SET64(mkc, mkc, start_addr, 0);
|
|
|
|
|
|
|
|
err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen);
|
2013-07-07 21:25:49 +07:00
|
|
|
if (err)
|
|
|
|
goto err_in;
|
|
|
|
|
|
|
|
kfree(in);
|
2017-01-02 16:37:48 +07:00
|
|
|
mr->mmkey.type = MLX5_MKEY_MR;
|
2016-02-29 23:05:28 +07:00
|
|
|
mr->ibmr.lkey = mr->mmkey.key;
|
|
|
|
mr->ibmr.rkey = mr->mmkey.key;
|
2013-07-07 21:25:49 +07:00
|
|
|
mr->umem = NULL;
|
|
|
|
|
|
|
|
return &mr->ibmr;
|
|
|
|
|
|
|
|
err_in:
|
|
|
|
kfree(in);
|
|
|
|
|
|
|
|
err_free:
|
|
|
|
kfree(mr);
|
|
|
|
|
|
|
|
return ERR_PTR(err);
|
|
|
|
}
|
|
|
|
|
2017-08-17 19:52:32 +07:00
|
|
|
static int get_octo_len(u64 addr, u64 len, int page_shift)
|
2013-07-07 21:25:49 +07:00
|
|
|
{
|
2017-08-17 19:52:32 +07:00
|
|
|
u64 page_size = 1ULL << page_shift;
|
2013-07-07 21:25:49 +07:00
|
|
|
u64 offset;
|
|
|
|
int npages;
|
|
|
|
|
|
|
|
offset = addr & (page_size - 1);
|
2017-08-17 19:52:32 +07:00
|
|
|
npages = ALIGN(len + offset, page_size) >> page_shift;
|
2013-07-07 21:25:49 +07:00
|
|
|
return (npages + 1) / 2;
|
|
|
|
}
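A quick way to see what get_octo_len() computes: each translation entry is 8 bytes and an octoword is 16 bytes, so the helper returns the number of octowords needed to hold one entry per page of the (possibly unaligned) region. Below is a small standalone userspace sketch of the same arithmetic, given purely for illustration:

#include <stdio.h>
#include <stdint.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

/* Mirror of get_octo_len(): octowords needed for one 8-byte entry per
 * page covering [addr, addr + len). */
static int octo_len(uint64_t addr, uint64_t len, int page_shift)
{
	uint64_t page_size = 1ULL << page_shift;
	uint64_t offset = addr & (page_size - 1);
	int npages = ALIGN_UP(len + offset, page_size) >> page_shift;

	return (npages + 1) / 2;
}

int main(void)
{
	/* 64 KB starting 0x234 into a 4 KB page spans 17 pages,
	 * which rounds up to 9 octowords. */
	printf("%d\n", octo_len(0x1234, 0x10000, 12));
	return 0;
}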
|
|
|
|
|
2017-08-17 19:52:29 +07:00
|
|
|
static int mr_cache_max_order(struct mlx5_ib_dev *dev)
|
2013-07-07 21:25:49 +07:00
|
|
|
{
|
2017-01-02 16:37:44 +07:00
|
|
|
if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
|
2017-08-17 19:52:29 +07:00
|
|
|
return MR_CACHE_LAST_STD_ENTRY + 2;
|
2017-06-12 14:36:15 +07:00
|
|
|
return MLX5_MAX_UMR_SHIFT;
|
|
|
|
}
|
|
|
|
|
2016-10-25 03:48:21 +07:00
|
|
|
static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
|
|
|
|
int access_flags, struct ib_umem **umem,
|
|
|
|
int *npages, int *page_shift, int *ncont,
|
|
|
|
int *order)
|
2016-02-29 21:46:50 +07:00
|
|
|
{
|
|
|
|
struct mlx5_ib_dev *dev = to_mdev(pd->device);
|
2018-04-23 21:01:52 +07:00
|
|
|
struct ib_umem *u;
|
2016-10-25 03:48:21 +07:00
|
|
|
int err;
|
|
|
|
|
2018-04-23 21:01:52 +07:00
|
|
|
*umem = NULL;
|
|
|
|
|
|
|
|
u = ib_umem_get(pd->uobject->context, start, length, access_flags, 0);
|
|
|
|
err = PTR_ERR_OR_ZERO(u);
|
2018-03-13 02:26:37 +07:00
|
|
|
if (err) {
|
2018-04-23 21:01:52 +07:00
|
|
|
mlx5_ib_dbg(dev, "umem get failed (%d)\n", err);
|
2016-10-25 03:48:21 +07:00
|
|
|
return err;
|
2016-02-29 21:46:50 +07:00
|
|
|
}
|
|
|
|
|
2018-04-23 21:01:52 +07:00
|
|
|
mlx5_ib_cont_pages(u, start, MLX5_MKEY_PAGE_SHIFT_MASK, npages,
|
2016-10-27 20:36:47 +07:00
|
|
|
page_shift, ncont, order);
|
2016-02-29 21:46:50 +07:00
|
|
|
if (!*npages) {
|
|
|
|
mlx5_ib_warn(dev, "avoid zero region\n");
|
2018-04-23 21:01:52 +07:00
|
|
|
ib_umem_release(u);
|
2016-10-25 03:48:21 +07:00
|
|
|
return -EINVAL;
|
2016-02-29 21:46:50 +07:00
|
|
|
}
|
|
|
|
|
2018-04-23 21:01:52 +07:00
|
|
|
*umem = u;
|
|
|
|
|
2016-02-29 21:46:50 +07:00
|
|
|
mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n",
|
|
|
|
*npages, *ncont, *order, *page_shift);
|
|
|
|
|
2016-10-25 03:48:21 +07:00
|
|
|
return 0;
|
2016-02-29 21:46:50 +07:00
|
|
|
}
|
|
|
|
|
2016-03-03 15:38:22 +07:00
|
|
|
static void mlx5_ib_umr_done(struct ib_cq *cq, struct ib_wc *wc)
|
2013-07-07 21:25:49 +07:00
|
|
|
{
|
2016-03-03 15:38:22 +07:00
|
|
|
struct mlx5_ib_umr_context *context =
|
|
|
|
container_of(wc->wr_cqe, struct mlx5_ib_umr_context, cqe);
|
2013-07-07 21:25:49 +07:00
|
|
|
|
2016-03-03 15:38:22 +07:00
|
|
|
context->status = wc->status;
|
|
|
|
complete(&context->done);
|
|
|
|
}
|
2013-07-07 21:25:49 +07:00
|
|
|
|
2016-03-03 15:38:22 +07:00
|
|
|
static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context)
|
|
|
|
{
|
|
|
|
context->cqe.done = mlx5_ib_umr_done;
|
|
|
|
context->status = -1;
|
|
|
|
init_completion(&context->done);
|
2013-07-07 21:25:49 +07:00
|
|
|
}
|
|
|
|
|
2017-01-02 16:37:40 +07:00
|
|
|
static int mlx5_ib_post_send_wait(struct mlx5_ib_dev *dev,
|
|
|
|
struct mlx5_umr_wr *umrwr)
|
|
|
|
{
|
|
|
|
struct umr_common *umrc = &dev->umrc;
|
2018-07-18 23:25:32 +07:00
|
|
|
const struct ib_send_wr *bad;
|
2017-01-02 16:37:40 +07:00
|
|
|
int err;
|
|
|
|
struct mlx5_ib_umr_context umr_context;
|
|
|
|
|
|
|
|
mlx5_ib_init_umr_context(&umr_context);
|
|
|
|
umrwr->wr.wr_cqe = &umr_context.cqe;
|
|
|
|
|
|
|
|
down(&umrc->sem);
|
|
|
|
err = ib_post_send(umrc->qp, &umrwr->wr, &bad);
|
|
|
|
if (err) {
|
|
|
|
mlx5_ib_warn(dev, "UMR post send failed, err %d\n", err);
|
|
|
|
} else {
|
|
|
|
wait_for_completion(&umr_context.done);
|
|
|
|
if (umr_context.status != IB_WC_SUCCESS) {
|
|
|
|
mlx5_ib_warn(dev, "reg umr failed (%u)\n",
|
|
|
|
umr_context.status);
|
|
|
|
err = -EFAULT;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
up(&umrc->sem);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2017-08-17 19:52:30 +07:00
|
|
|
static struct mlx5_ib_mr *alloc_mr_from_cache(
|
|
|
|
struct ib_pd *pd, struct ib_umem *umem,
|
2013-07-07 21:25:49 +07:00
|
|
|
u64 virt_addr, u64 len, int npages,
|
|
|
|
int page_shift, int order, int access_flags)
|
|
|
|
{
|
|
|
|
struct mlx5_ib_dev *dev = to_mdev(pd->device);
|
|
|
|
struct mlx5_ib_mr *mr;
|
2014-05-22 18:50:08 +07:00
|
|
|
int err = 0;
|
2013-07-07 21:25:49 +07:00
|
|
|
int i;
|
|
|
|
|
2013-10-23 13:53:14 +07:00
|
|
|
for (i = 0; i < 1; i++) {
|
2013-07-07 21:25:49 +07:00
|
|
|
mr = alloc_cached_mr(dev, order);
|
|
|
|
if (mr)
|
|
|
|
break;
|
|
|
|
|
|
|
|
err = add_keys(dev, order2idx(dev, order), 1);
|
2013-10-23 13:53:14 +07:00
|
|
|
if (err && err != -EAGAIN) {
|
|
|
|
mlx5_ib_warn(dev, "add_keys failed, err %d\n", err);
|
2013-07-07 21:25:49 +07:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!mr)
|
|
|
|
return ERR_PTR(-EAGAIN);
|
|
|
|
|
2017-01-02 16:37:44 +07:00
|
|
|
mr->ibmr.pd = pd;
|
|
|
|
mr->umem = umem;
|
|
|
|
mr->access_flags = access_flags;
|
|
|
|
mr->desc_size = sizeof(struct mlx5_mtt);
|
2016-02-29 23:05:28 +07:00
|
|
|
mr->mmkey.iova = virt_addr;
|
|
|
|
mr->mmkey.size = len;
|
|
|
|
mr->mmkey.pd = to_mpd(pd)->pdn;
|
2014-05-22 18:50:10 +07:00
|
|
|
|
2013-07-07 21:25:49 +07:00
|
|
|
return mr;
|
|
|
|
}
|
|
|
|
|
2017-01-02 16:37:44 +07:00
|
|
|
static inline int populate_xlt(struct mlx5_ib_mr *mr, int idx, int npages,
|
|
|
|
void *xlt, int page_shift, size_t size,
|
|
|
|
int flags)
|
2014-12-11 22:04:22 +07:00
|
|
|
{
|
|
|
|
struct mlx5_ib_dev *dev = mr->dev;
|
|
|
|
struct ib_umem *umem = mr->umem;
|
2018-03-22 20:34:04 +07:00
|
|
|
|
2017-01-18 21:58:11 +07:00
|
|
|
if (flags & MLX5_IB_UPD_XLT_INDIRECT) {
|
2018-03-22 20:34:04 +07:00
|
|
|
if (!umr_can_use_indirect_mkey(dev))
|
|
|
|
return -EPERM;
|
2017-01-18 21:58:11 +07:00
|
|
|
mlx5_odp_populate_klm(xlt, idx, npages, mr, flags);
|
|
|
|
return npages;
|
|
|
|
}
|
2017-01-02 16:37:44 +07:00
|
|
|
|
|
|
|
npages = min_t(size_t, npages, ib_umem_num_pages(umem) - idx);
|
|
|
|
|
|
|
|
if (!(flags & MLX5_IB_UPD_XLT_ZAP)) {
|
|
|
|
__mlx5_ib_populate_pas(dev, umem, page_shift,
|
|
|
|
idx, npages, xlt,
|
|
|
|
MLX5_IB_MTT_PRESENT);
|
|
|
|
/* Clear padding after the pages
|
|
|
|
* brought from the umem.
|
|
|
|
*/
|
|
|
|
memset(xlt + (npages * sizeof(struct mlx5_mtt)), 0,
|
|
|
|
size - npages * sizeof(struct mlx5_mtt));
|
|
|
|
}
|
|
|
|
|
|
|
|
return npages;
|
|
|
|
}
|
|
|
|
|
|
|
|
#define MLX5_MAX_UMR_CHUNK ((1 << (MLX5_MAX_UMR_SHIFT + 4)) - \
|
|
|
|
MLX5_UMR_MTT_ALIGNMENT)
|
|
|
|
#define MLX5_SPARE_UMR_CHUNK 0x10000
|
|
|
|
|
|
|
|
int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
|
|
|
|
int page_shift, int flags)
|
|
|
|
{
|
|
|
|
struct mlx5_ib_dev *dev = mr->dev;
|
2017-01-21 04:04:21 +07:00
|
|
|
struct device *ddev = dev->ib_dev.dev.parent;
|
2014-12-11 22:04:22 +07:00
|
|
|
int size;
|
2017-01-02 16:37:44 +07:00
|
|
|
void *xlt;
|
2014-12-11 22:04:22 +07:00
|
|
|
dma_addr_t dma;
|
2015-10-08 15:16:33 +07:00
|
|
|
struct mlx5_umr_wr wr;
|
2014-12-11 22:04:22 +07:00
|
|
|
struct ib_sge sg;
|
|
|
|
int err = 0;
|
2017-01-18 21:58:11 +07:00
|
|
|
int desc_size = (flags & MLX5_IB_UPD_XLT_INDIRECT)
|
|
|
|
? sizeof(struct mlx5_klm)
|
|
|
|
: sizeof(struct mlx5_mtt);
|
2017-01-02 16:37:44 +07:00
|
|
|
const int page_align = MLX5_UMR_MTT_ALIGNMENT / desc_size;
|
|
|
|
const int page_mask = page_align - 1;
|
2014-12-11 22:04:22 +07:00
|
|
|
size_t pages_mapped = 0;
|
|
|
|
size_t pages_to_map = 0;
|
|
|
|
size_t pages_iter = 0;
|
2017-01-02 16:37:44 +07:00
|
|
|
gfp_t gfp;
|
2018-03-13 20:18:48 +07:00
|
|
|
bool use_emergency_page = false;
|
2014-12-11 22:04:22 +07:00
|
|
|
|
2018-03-22 20:34:04 +07:00
|
|
|
if ((flags & MLX5_IB_UPD_XLT_INDIRECT) &&
|
|
|
|
!umr_can_use_indirect_mkey(dev))
|
|
|
|
return -EPERM;
|
2014-12-11 22:04:22 +07:00
|
|
|
|
|
|
|
/* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes,
|
2017-01-02 16:37:44 +07:00
|
|
|
* so we need to align the offset and length accordingly
|
|
|
|
*/
|
|
|
|
if (idx & page_mask) {
|
|
|
|
npages += idx & page_mask;
|
|
|
|
idx &= ~page_mask;
|
2014-12-11 22:04:22 +07:00
|
|
|
}
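/*
 * Worked example of the adjustment above (assuming the usual 64-byte
 * MLX5_UMR_MTT_ALIGNMENT and 8-byte MTT descriptors, i.e. page_align = 8):
 * a caller asking to update idx = 11, npages = 5 ends up with idx = 8,
 * npages = 8, so the chunk starts on an aligned descriptor boundary and
 * still covers the originally requested range.
 */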
|
|
|
|
|
2017-01-02 16:37:44 +07:00
|
|
|
gfp = flags & MLX5_IB_UPD_XLT_ATOMIC ? GFP_ATOMIC : GFP_KERNEL;
|
|
|
|
gfp |= __GFP_ZERO | __GFP_NOWARN;
|
2014-12-11 22:04:22 +07:00
|
|
|
|
2017-01-02 16:37:44 +07:00
|
|
|
pages_to_map = ALIGN(npages, page_align);
|
|
|
|
size = desc_size * pages_to_map;
|
|
|
|
size = min_t(int, size, MLX5_MAX_UMR_CHUNK);
|
2014-12-11 22:04:22 +07:00
|
|
|
|
2017-01-02 16:37:44 +07:00
|
|
|
xlt = (void *)__get_free_pages(gfp, get_order(size));
|
|
|
|
if (!xlt && size > MLX5_SPARE_UMR_CHUNK) {
|
|
|
|
mlx5_ib_dbg(dev, "Failed to allocate %d bytes of order %d. fallback to spare UMR allocation od %d bytes\n",
|
|
|
|
size, get_order(size), MLX5_SPARE_UMR_CHUNK);
|
|
|
|
|
|
|
|
size = MLX5_SPARE_UMR_CHUNK;
|
|
|
|
xlt = (void *)__get_free_pages(gfp, get_order(size));
|
2014-12-11 22:04:22 +07:00
|
|
|
}
|
2017-01-02 16:37:44 +07:00
|
|
|
|
|
|
|
if (!xlt) {
|
|
|
|
mlx5_ib_warn(dev, "Using XLT emergency buffer\n");
|
2018-03-13 20:18:48 +07:00
|
|
|
xlt = (void *)mlx5_ib_get_xlt_emergency_page();
|
2017-01-02 16:37:44 +07:00
|
|
|
size = PAGE_SIZE;
|
|
|
|
memset(xlt, 0, size);
|
2018-03-13 20:18:48 +07:00
|
|
|
use_emergency_page = true;
|
2017-01-02 16:37:44 +07:00
|
|
|
}
|
|
|
|
pages_iter = size / desc_size;
|
|
|
|
dma = dma_map_single(ddev, xlt, size, DMA_TO_DEVICE);
|
2014-12-11 22:04:22 +07:00
|
|
|
if (dma_mapping_error(ddev, dma)) {
|
2017-01-02 16:37:44 +07:00
|
|
|
mlx5_ib_err(dev, "unable to map DMA during XLT update.\n");
|
2014-12-11 22:04:22 +07:00
|
|
|
err = -ENOMEM;
|
2017-01-02 16:37:44 +07:00
|
|
|
goto free_xlt;
|
2014-12-11 22:04:22 +07:00
|
|
|
}
|
|
|
|
|
2017-01-02 16:37:44 +07:00
|
|
|
sg.addr = dma;
|
|
|
|
sg.lkey = dev->umrc.pd->local_dma_lkey;
|
|
|
|
|
|
|
|
memset(&wr, 0, sizeof(wr));
|
|
|
|
wr.wr.send_flags = MLX5_IB_SEND_UMR_UPDATE_XLT;
|
|
|
|
if (!(flags & MLX5_IB_UPD_XLT_ENABLE))
|
|
|
|
wr.wr.send_flags |= MLX5_IB_SEND_UMR_FAIL_IF_FREE;
|
|
|
|
wr.wr.sg_list = &sg;
|
|
|
|
wr.wr.num_sge = 1;
|
|
|
|
wr.wr.opcode = MLX5_IB_WR_UMR;
|
|
|
|
|
|
|
|
wr.pd = mr->ibmr.pd;
|
|
|
|
wr.mkey = mr->mmkey.key;
|
|
|
|
wr.length = mr->mmkey.size;
|
|
|
|
wr.virt_addr = mr->mmkey.iova;
|
|
|
|
wr.access_flags = mr->access_flags;
|
|
|
|
wr.page_shift = page_shift;
|
|
|
|
|
2014-12-11 22:04:22 +07:00
|
|
|
for (pages_mapped = 0;
|
|
|
|
pages_mapped < pages_to_map && !err;
|
2017-01-02 16:37:44 +07:00
|
|
|
pages_mapped += pages_iter, idx += pages_iter) {
|
2017-04-05 13:23:52 +07:00
|
|
|
npages = min_t(int, pages_iter, pages_to_map - pages_mapped);
|
2014-12-11 22:04:22 +07:00
|
|
|
dma_sync_single_for_cpu(ddev, dma, size, DMA_TO_DEVICE);
|
2017-04-05 13:23:52 +07:00
|
|
|
npages = populate_xlt(mr, idx, npages, xlt,
|
2017-01-02 16:37:44 +07:00
|
|
|
page_shift, size, flags);
|
2014-12-11 22:04:22 +07:00
|
|
|
|
|
|
|
dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE);
|
|
|
|
|
2017-01-02 16:37:44 +07:00
|
|
|
sg.length = ALIGN(npages * desc_size,
|
|
|
|
MLX5_UMR_MTT_ALIGNMENT);
|
|
|
|
|
|
|
|
if (pages_mapped + pages_iter >= pages_to_map) {
|
|
|
|
if (flags & MLX5_IB_UPD_XLT_ENABLE)
|
|
|
|
wr.wr.send_flags |=
|
|
|
|
MLX5_IB_SEND_UMR_ENABLE_MR |
|
|
|
|
MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS |
|
|
|
|
MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
|
|
|
|
if (flags & MLX5_IB_UPD_XLT_PD ||
|
|
|
|
flags & MLX5_IB_UPD_XLT_ACCESS)
|
|
|
|
wr.wr.send_flags |=
|
|
|
|
MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
|
|
|
|
if (flags & MLX5_IB_UPD_XLT_ADDR)
|
|
|
|
wr.wr.send_flags |=
|
|
|
|
MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
|
|
|
|
}
|
2014-12-11 22:04:22 +07:00
|
|
|
|
2017-01-02 16:37:44 +07:00
|
|
|
wr.offset = idx * desc_size;
|
2017-01-02 16:37:42 +07:00
|
|
|
wr.xlt_size = sg.length;
|
2014-12-11 22:04:22 +07:00
|
|
|
|
2017-01-02 16:37:40 +07:00
|
|
|
err = mlx5_ib_post_send_wait(dev, &wr);
|
2014-12-11 22:04:22 +07:00
|
|
|
}
|
|
|
|
dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);
|
|
|
|
|
2017-01-02 16:37:44 +07:00
|
|
|
free_xlt:
|
2018-03-13 20:18:48 +07:00
|
|
|
if (use_emergency_page)
|
|
|
|
mlx5_ib_put_xlt_emergency_page();
|
2014-12-11 22:04:22 +07:00
|
|
|
else
|
2017-01-02 16:37:44 +07:00
|
|
|
free_pages((unsigned long)xlt, get_order(size));
|
2014-12-11 22:04:22 +07:00
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2016-02-29 21:46:50 +07:00
|
|
|
/*
|
|
|
|
* If ibmr is NULL it will be allocated by reg_create.
|
|
|
|
* Else, the given ibmr will be used.
|
|
|
|
*/
|
|
|
|
static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
|
|
|
|
u64 virt_addr, u64 length,
|
|
|
|
struct ib_umem *umem, int npages,
|
2017-08-17 19:52:30 +07:00
|
|
|
int page_shift, int access_flags,
|
|
|
|
bool populate)
|
2013-07-07 21:25:49 +07:00
|
|
|
{
|
|
|
|
struct mlx5_ib_dev *dev = to_mdev(pd->device);
|
|
|
|
struct mlx5_ib_mr *mr;
|
2016-07-16 10:28:36 +07:00
|
|
|
__be64 *pas;
|
|
|
|
void *mkc;
|
2013-07-07 21:25:49 +07:00
|
|
|
int inlen;
|
2016-07-16 10:28:36 +07:00
|
|
|
u32 *in;
|
2013-07-07 21:25:49 +07:00
|
|
|
int err;
|
2015-05-29 02:28:41 +07:00
|
|
|
bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg));
|
2013-07-07 21:25:49 +07:00
|
|
|
|
2016-02-29 21:46:50 +07:00
|
|
|
mr = ibmr ? to_mmr(ibmr) : kzalloc(sizeof(*mr), GFP_KERNEL);
|
2013-07-07 21:25:49 +07:00
|
|
|
if (!mr)
|
|
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
|
2017-08-17 19:52:30 +07:00
|
|
|
mr->ibmr.pd = pd;
|
|
|
|
mr->access_flags = access_flags;
|
|
|
|
|
|
|
|
inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
|
|
|
|
if (populate)
|
|
|
|
inlen += sizeof(*pas) * roundup(npages, 2);
|
2017-05-11 01:32:18 +07:00
|
|
|
in = kvzalloc(inlen, GFP_KERNEL);
|
2013-07-07 21:25:49 +07:00
|
|
|
if (!in) {
|
|
|
|
err = -ENOMEM;
|
|
|
|
goto err_1;
|
|
|
|
}
|
2016-07-16 10:28:36 +07:00
|
|
|
pas = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
|
2017-08-17 19:52:30 +07:00
|
|
|
if (populate && !(access_flags & IB_ACCESS_ON_DEMAND))
|
2017-01-02 16:37:43 +07:00
|
|
|
mlx5_ib_populate_pas(dev, umem, page_shift, pas,
|
|
|
|
pg_cap ? MLX5_IB_MTT_PRESENT : 0);
|
2013-07-07 21:25:49 +07:00
|
|
|
|
2016-07-16 10:28:36 +07:00
|
|
|
/* The pg_access bit allows setting the access flags
|
2014-12-11 22:04:21 +07:00
|
|
|
* in the page list submitted with the command. */
|
2016-07-16 10:28:36 +07:00
|
|
|
MLX5_SET(create_mkey_in, in, pg_access, !!(pg_cap));
|
|
|
|
|
|
|
|
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
|
2017-08-17 19:52:30 +07:00
|
|
|
MLX5_SET(mkc, mkc, free, !populate);
|
2018-04-05 22:53:28 +07:00
|
|
|
MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
|
2016-07-16 10:28:36 +07:00
|
|
|
MLX5_SET(mkc, mkc, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
|
|
|
|
MLX5_SET(mkc, mkc, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
|
|
|
|
MLX5_SET(mkc, mkc, rr, !!(access_flags & IB_ACCESS_REMOTE_READ));
|
|
|
|
MLX5_SET(mkc, mkc, lw, !!(access_flags & IB_ACCESS_LOCAL_WRITE));
|
|
|
|
MLX5_SET(mkc, mkc, lr, 1);
|
2017-08-17 19:52:29 +07:00
|
|
|
MLX5_SET(mkc, mkc, umr_en, 1);
|
2016-07-16 10:28:36 +07:00
|
|
|
|
|
|
|
MLX5_SET64(mkc, mkc, start_addr, virt_addr);
|
|
|
|
MLX5_SET64(mkc, mkc, len, length);
|
|
|
|
MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
|
|
|
|
MLX5_SET(mkc, mkc, bsf_octword_size, 0);
|
|
|
|
MLX5_SET(mkc, mkc, translations_octword_size,
|
2017-08-17 19:52:32 +07:00
|
|
|
get_octo_len(virt_addr, length, page_shift));
|
2016-07-16 10:28:36 +07:00
|
|
|
MLX5_SET(mkc, mkc, log_page_size, page_shift);
|
|
|
|
MLX5_SET(mkc, mkc, qpn, 0xffffff);
|
2017-08-17 19:52:30 +07:00
|
|
|
if (populate) {
|
|
|
|
MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
|
2017-08-17 19:52:32 +07:00
|
|
|
get_octo_len(virt_addr, length, page_shift));
|
2017-08-17 19:52:30 +07:00
|
|
|
}
|
2016-07-16 10:28:36 +07:00
|
|
|
|
|
|
|
err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
|
2013-07-07 21:25:49 +07:00
|
|
|
if (err) {
|
|
|
|
mlx5_ib_warn(dev, "create mkey failed\n");
|
|
|
|
goto err_2;
|
|
|
|
}
|
2017-01-02 16:37:48 +07:00
|
|
|
mr->mmkey.type = MLX5_MKEY_MR;
|
2017-01-18 21:58:10 +07:00
|
|
|
mr->desc_size = sizeof(struct mlx5_mtt);
|
2015-01-06 18:56:01 +07:00
|
|
|
mr->dev = dev;
|
2014-11-20 15:13:57 +07:00
|
|
|
kvfree(in);
|
2013-07-07 21:25:49 +07:00
|
|
|
|
2016-02-29 23:05:28 +07:00
|
|
|
mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key);
|
2013-07-07 21:25:49 +07:00
|
|
|
|
|
|
|
return mr;
|
|
|
|
|
|
|
|
err_2:
|
2014-11-20 15:13:57 +07:00
|
|
|
kvfree(in);
|
2013-07-07 21:25:49 +07:00
|
|
|
|
|
|
|
err_1:
|
2016-02-29 21:46:50 +07:00
|
|
|
if (!ibmr)
|
|
|
|
kfree(mr);
|
2013-07-07 21:25:49 +07:00
|
|
|
|
|
|
|
return ERR_PTR(err);
|
|
|
|
}
|
|
|
|
|
2016-02-29 21:46:50 +07:00
|
|
|
static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
|
|
|
|
int npages, u64 length, int access_flags)
|
|
|
|
{
|
|
|
|
mr->npages = npages;
|
|
|
|
atomic_add(npages, &dev->mdev->priv.reg_pages);
|
2016-02-29 23:05:28 +07:00
|
|
|
mr->ibmr.lkey = mr->mmkey.key;
|
|
|
|
mr->ibmr.rkey = mr->mmkey.key;
|
2016-02-29 21:46:50 +07:00
|
|
|
mr->ibmr.length = length;
|
2016-02-29 21:46:51 +07:00
|
|
|
mr->access_flags = access_flags;
|
2016-02-29 21:46:50 +07:00
|
|
|
}
|
|
|
|
|
2018-04-05 22:53:29 +07:00
|
|
|
static struct ib_mr *mlx5_ib_get_memic_mr(struct ib_pd *pd, u64 memic_addr,
|
|
|
|
u64 length, int acc)
|
|
|
|
{
|
|
|
|
struct mlx5_ib_dev *dev = to_mdev(pd->device);
|
|
|
|
int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
|
|
|
|
struct mlx5_core_dev *mdev = dev->mdev;
|
|
|
|
struct mlx5_ib_mr *mr;
|
|
|
|
void *mkc;
|
|
|
|
u32 *in;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
|
|
|
|
if (!mr)
|
|
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
|
|
|
|
in = kzalloc(inlen, GFP_KERNEL);
|
|
|
|
if (!in) {
|
|
|
|
err = -ENOMEM;
|
|
|
|
goto err_free;
|
|
|
|
}
|
|
|
|
|
|
|
|
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
|
|
|
|
|
|
|
|
MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MEMIC & 0x3);
|
|
|
|
MLX5_SET(mkc, mkc, access_mode_4_2,
|
|
|
|
(MLX5_MKC_ACCESS_MODE_MEMIC >> 2) & 0x7);
|
|
|
|
MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
|
|
|
|
MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
|
|
|
|
MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
|
|
|
|
MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
|
|
|
|
MLX5_SET(mkc, mkc, lr, 1);
|
|
|
|
|
|
|
|
MLX5_SET64(mkc, mkc, len, length);
|
|
|
|
MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
|
|
|
|
MLX5_SET(mkc, mkc, qpn, 0xffffff);
|
|
|
|
MLX5_SET64(mkc, mkc, start_addr,
|
|
|
|
memic_addr - pci_resource_start(dev->mdev->pdev, 0));
|
|
|
|
|
|
|
|
err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen);
|
|
|
|
if (err)
|
|
|
|
goto err_in;
|
|
|
|
|
|
|
|
kfree(in);
|
|
|
|
|
|
|
|
mr->umem = NULL;
|
|
|
|
set_mr_fields(dev, mr, 0, length, acc);
|
|
|
|
|
|
|
|
return &mr->ibmr;
|
|
|
|
|
|
|
|
err_in:
|
|
|
|
kfree(in);
|
|
|
|
|
|
|
|
err_free:
|
|
|
|
kfree(mr);
|
|
|
|
|
|
|
|
return ERR_PTR(err);
|
|
|
|
}
|
|
|
|
|
|
|
|
struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
|
|
|
|
struct ib_dm_mr_attr *attr,
|
|
|
|
struct uverbs_attr_bundle *attrs)
|
|
|
|
{
|
|
|
|
struct mlx5_ib_dm *mdm = to_mdm(dm);
|
|
|
|
u64 memic_addr;
|
|
|
|
|
|
|
|
if (attr->access_flags & ~MLX5_IB_DM_ALLOWED_ACCESS)
|
|
|
|
return ERR_PTR(-EINVAL);
|
|
|
|
|
|
|
|
memic_addr = mdm->dev_addr + attr->offset;
|
|
|
|
|
|
|
|
return mlx5_ib_get_memic_mr(pd, memic_addr, attr->length,
|
|
|
|
attr->access_flags);
|
|
|
|
}
|
|
|
|
|
2013-07-07 21:25:49 +07:00
|
|
|
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
|
|
|
|
u64 virt_addr, int access_flags,
|
|
|
|
struct ib_udata *udata)
|
|
|
|
{
|
|
|
|
struct mlx5_ib_dev *dev = to_mdev(pd->device);
|
|
|
|
struct mlx5_ib_mr *mr = NULL;
|
2018-03-22 20:34:04 +07:00
|
|
|
bool populate_mtts = false;
|
2013-07-07 21:25:49 +07:00
|
|
|
struct ib_umem *umem;
|
|
|
|
int page_shift;
|
|
|
|
int npages;
|
|
|
|
int ncont;
|
|
|
|
int order;
|
|
|
|
int err;
|
|
|
|
|
2017-12-11 18:45:44 +07:00
|
|
|
if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM))
|
2018-03-13 20:29:25 +07:00
|
|
|
return ERR_PTR(-EOPNOTSUPP);
|
2017-12-11 18:45:44 +07:00
|
|
|
|
2014-09-14 20:47:51 +07:00
|
|
|
mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
|
|
|
|
start, virt_addr, length, access_flags);
|
2017-01-18 21:58:11 +07:00
|
|
|
|
|
|
|
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
|
|
|
|
if (!start && length == U64_MAX) {
|
|
|
|
if (!(access_flags & IB_ACCESS_ON_DEMAND) ||
|
|
|
|
!(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
|
|
|
|
return ERR_PTR(-EINVAL);
|
|
|
|
|
|
|
|
mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), access_flags);
|
2018-03-13 20:29:24 +07:00
|
|
|
if (IS_ERR(mr))
|
|
|
|
return ERR_CAST(mr);
|
2017-01-18 21:58:11 +07:00
|
|
|
return &mr->ibmr;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2016-10-25 03:48:21 +07:00
|
|
|
err = mr_umem_get(pd, start, length, access_flags, &umem, &npages,
|
2016-02-29 21:46:50 +07:00
|
|
|
&page_shift, &ncont, &order);
|
2013-07-07 21:25:49 +07:00
|
|
|
|
2017-08-17 19:52:30 +07:00
|
|
|
if (err < 0)
|
2016-10-25 03:48:21 +07:00
|
|
|
return ERR_PTR(err);
|
2013-07-07 21:25:49 +07:00
|
|
|
|
2018-03-22 20:34:04 +07:00
|
|
|
if (use_umr(dev, order)) {
|
2017-08-17 19:52:30 +07:00
|
|
|
mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
|
|
|
|
page_shift, order, access_flags);
|
2013-07-07 21:25:49 +07:00
|
|
|
if (PTR_ERR(mr) == -EAGAIN) {
|
2017-09-26 13:50:01 +07:00
|
|
|
mlx5_ib_dbg(dev, "cache empty for order %d\n", order);
|
2013-07-07 21:25:49 +07:00
|
|
|
mr = NULL;
|
|
|
|
}
|
2018-03-22 20:34:04 +07:00
|
|
|
populate_mtts = false;
|
2017-08-17 19:52:30 +07:00
|
|
|
} else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
|
|
|
|
if (access_flags & IB_ACCESS_ON_DEMAND) {
|
|
|
|
err = -EINVAL;
|
2017-09-26 13:50:01 +07:00
|
|
|
pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB\n");
|
2017-08-17 19:52:30 +07:00
|
|
|
goto error;
|
|
|
|
}
|
2018-03-22 20:34:04 +07:00
|
|
|
populate_mtts = true;
|
2013-07-07 21:25:49 +07:00
|
|
|
}
|
|
|
|
|
2016-10-27 20:36:42 +07:00
|
|
|
if (!mr) {
|
2018-03-22 20:34:04 +07:00
|
|
|
if (!umr_can_modify_entity_size(dev))
|
|
|
|
populate_mtts = true;
|
2016-10-27 20:36:42 +07:00
|
|
|
mutex_lock(&dev->slow_path_mutex);
|
2016-02-29 21:46:50 +07:00
|
|
|
mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
|
2018-03-22 20:34:04 +07:00
|
|
|
page_shift, access_flags, populate_mtts);
|
2016-10-27 20:36:42 +07:00
|
|
|
mutex_unlock(&dev->slow_path_mutex);
|
|
|
|
}
|
2013-07-07 21:25:49 +07:00
|
|
|
|
|
|
|
if (IS_ERR(mr)) {
|
|
|
|
err = PTR_ERR(mr);
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
2016-02-29 23:05:28 +07:00
|
|
|
mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);
|
2013-07-07 21:25:49 +07:00
|
|
|
|
|
|
|
mr->umem = umem;
|
2016-02-29 21:46:50 +07:00
|
|
|
set_mr_fields(dev, mr, npages, length, access_flags);
|
2013-07-07 21:25:49 +07:00
|
|
|
|
2014-12-11 22:04:26 +07:00
|
|
|
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
|
2016-02-29 21:46:50 +07:00
|
|
|
update_odp_mr(mr);
|
2014-12-11 22:04:26 +07:00
|
|
|
#endif
|
|
|
|
|
2018-03-22 20:34:04 +07:00
|
|
|
if (!populate_mtts) {
|
2017-08-17 19:52:30 +07:00
|
|
|
int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE;
|
|
|
|
|
|
|
|
if (access_flags & IB_ACCESS_ON_DEMAND)
|
|
|
|
update_xlt_flags |= MLX5_IB_UPD_XLT_ZAP;
|
2013-07-07 21:25:49 +07:00
|
|
|
|
2017-08-17 19:52:30 +07:00
|
|
|
err = mlx5_ib_update_xlt(mr, 0, ncont, page_shift,
|
|
|
|
update_xlt_flags);
|
2017-09-25 01:46:35 +07:00
|
|
|
|
2017-08-17 19:52:30 +07:00
|
|
|
if (err) {
|
2017-09-25 01:46:35 +07:00
|
|
|
dereg_mr(dev, mr);
|
2017-08-17 19:52:30 +07:00
|
|
|
return ERR_PTR(err);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-03-13 20:29:27 +07:00
|
|
|
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
|
2017-08-17 19:52:30 +07:00
|
|
|
mr->live = 1;
|
2018-03-13 20:29:27 +07:00
|
|
|
#endif
|
2017-08-17 19:52:30 +07:00
|
|
|
return &mr->ibmr;
|
2013-07-07 21:25:49 +07:00
|
|
|
error:
|
|
|
|
ib_umem_release(umem);
|
|
|
|
return ERR_PTR(err);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
|
|
|
|
{
|
2016-06-17 19:01:38 +07:00
|
|
|
struct mlx5_core_dev *mdev = dev->mdev;
|
2016-03-03 23:23:37 +07:00
|
|
|
struct mlx5_umr_wr umrwr = {};
|
2013-07-07 21:25:49 +07:00
|
|
|
|
2016-06-17 19:01:38 +07:00
|
|
|
if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
|
|
|
|
return 0;
|
|
|
|
|
2017-01-02 16:37:44 +07:00
|
|
|
umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR |
|
|
|
|
MLX5_IB_SEND_UMR_FAIL_IF_FREE;
|
|
|
|
umrwr.wr.opcode = MLX5_IB_WR_UMR;
|
|
|
|
umrwr.mkey = mr->mmkey.key;
|
2013-07-07 21:25:49 +07:00
|
|
|
|
2017-01-02 16:37:40 +07:00
|
|
|
return mlx5_ib_post_send_wait(dev, &umrwr);
|
2013-07-07 21:25:49 +07:00
|
|
|
}
|
|
|
|
|
2017-01-02 16:37:44 +07:00
|
|
|
static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr,
|
2016-02-29 21:46:51 +07:00
|
|
|
int access_flags, int flags)
|
|
|
|
{
|
|
|
|
struct mlx5_ib_dev *dev = to_mdev(pd->device);
|
|
|
|
struct mlx5_umr_wr umrwr = {};
|
|
|
|
int err;
|
|
|
|
|
|
|
|
umrwr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE;
|
|
|
|
|
2017-01-02 16:37:44 +07:00
|
|
|
umrwr.wr.opcode = MLX5_IB_WR_UMR;
|
|
|
|
umrwr.mkey = mr->mmkey.key;
|
2016-02-29 21:46:51 +07:00
|
|
|
|
2017-01-02 16:37:42 +07:00
|
|
|
if (flags & IB_MR_REREG_PD || flags & IB_MR_REREG_ACCESS) {
|
2016-02-29 21:46:51 +07:00
|
|
|
umrwr.pd = pd;
|
|
|
|
umrwr.access_flags = access_flags;
|
2017-01-02 16:37:42 +07:00
|
|
|
umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
|
2016-02-29 21:46:51 +07:00
|
|
|
}
|
|
|
|
|
2017-01-02 16:37:40 +07:00
|
|
|
err = mlx5_ib_post_send_wait(dev, &umrwr);
|
2016-02-29 21:46:51 +07:00
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
|
|
|
|
u64 length, u64 virt_addr, int new_access_flags,
|
|
|
|
struct ib_pd *new_pd, struct ib_udata *udata)
|
|
|
|
{
|
|
|
|
struct mlx5_ib_dev *dev = to_mdev(ib_mr->device);
|
|
|
|
struct mlx5_ib_mr *mr = to_mmr(ib_mr);
|
|
|
|
struct ib_pd *pd = (flags & IB_MR_REREG_PD) ? new_pd : ib_mr->pd;
|
|
|
|
int access_flags = flags & IB_MR_REREG_ACCESS ?
|
|
|
|
new_access_flags :
|
|
|
|
mr->access_flags;
|
|
|
|
int page_shift = 0;
|
2017-01-02 16:37:44 +07:00
|
|
|
int upd_flags = 0;
|
2016-02-29 21:46:51 +07:00
|
|
|
int npages = 0;
|
|
|
|
int ncont = 0;
|
|
|
|
int order = 0;
|
2018-04-23 21:01:52 +07:00
|
|
|
u64 addr, len;
|
2016-02-29 21:46:51 +07:00
|
|
|
int err;
|
|
|
|
|
|
|
|
mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
|
|
|
|
start, virt_addr, length, access_flags);
|
|
|
|
|
2017-01-02 16:37:44 +07:00
|
|
|
atomic_sub(mr->npages, &dev->mdev->priv.reg_pages);
|
|
|
|
|
2018-04-23 21:01:52 +07:00
|
|
|
if (!mr->umem)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (flags & IB_MR_REREG_TRANS) {
|
|
|
|
addr = virt_addr;
|
|
|
|
len = length;
|
|
|
|
} else {
|
|
|
|
addr = mr->umem->address;
|
|
|
|
len = mr->umem->length;
|
|
|
|
}
|
|
|
|
|
2016-02-29 21:46:51 +07:00
|
|
|
if (flags != IB_MR_REREG_PD) {
|
|
|
|
/*
|
|
|
|
* Replace umem. This needs to be done whether or not UMR is
|
|
|
|
* used.
|
|
|
|
*/
|
|
|
|
flags |= IB_MR_REREG_TRANS;
|
|
|
|
ib_umem_release(mr->umem);
|
2018-04-23 21:01:52 +07:00
|
|
|
mr->umem = NULL;
|
2016-10-25 03:48:21 +07:00
|
|
|
err = mr_umem_get(pd, addr, len, access_flags, &mr->umem,
|
|
|
|
&npages, &page_shift, &ncont, &order);
|
2018-03-13 20:29:26 +07:00
|
|
|
if (err)
|
|
|
|
goto err;
|
2016-02-29 21:46:51 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
if (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len)) {
|
|
|
|
/*
|
|
|
|
* UMR can't be used - MKey needs to be replaced.
|
|
|
|
*/
|
2018-03-13 20:29:28 +07:00
|
|
|
if (mr->allocated_from_cache)
|
2016-02-29 21:46:51 +07:00
|
|
|
err = unreg_umr(dev, mr);
|
2018-03-13 20:29:28 +07:00
|
|
|
else
|
2016-02-29 21:46:51 +07:00
|
|
|
err = destroy_mkey(dev, mr);
|
|
|
|
if (err)
|
2018-03-13 20:29:26 +07:00
|
|
|
goto err;
|
2016-02-29 21:46:51 +07:00
|
|
|
|
|
|
|
mr = reg_create(ib_mr, pd, addr, len, mr->umem, ncont,
|
2017-08-17 19:52:30 +07:00
|
|
|
page_shift, access_flags, true);
|
2016-02-29 21:46:51 +07:00
|
|
|
|
2018-03-13 20:29:26 +07:00
|
|
|
if (IS_ERR(mr)) {
|
|
|
|
err = PTR_ERR(mr);
|
|
|
|
mr = to_mmr(ib_mr);
|
|
|
|
goto err;
|
|
|
|
}
|
2016-02-29 21:46:51 +07:00
|
|
|
|
2017-08-17 19:52:29 +07:00
|
|
|
mr->allocated_from_cache = 0;
|
2018-03-13 20:29:27 +07:00
|
|
|
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
|
2017-08-17 19:52:30 +07:00
|
|
|
mr->live = 1;
|
2018-03-13 20:29:27 +07:00
|
|
|
#endif
|
2016-02-29 21:46:51 +07:00
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* Send a UMR WQE
|
|
|
|
*/
|
2017-01-02 16:37:44 +07:00
|
|
|
mr->ibmr.pd = pd;
|
|
|
|
mr->access_flags = access_flags;
|
|
|
|
mr->mmkey.iova = addr;
|
|
|
|
mr->mmkey.size = len;
|
|
|
|
mr->mmkey.pd = to_mpd(pd)->pdn;
|
|
|
|
|
|
|
|
if (flags & IB_MR_REREG_TRANS) {
|
|
|
|
upd_flags = MLX5_IB_UPD_XLT_ADDR;
|
|
|
|
if (flags & IB_MR_REREG_PD)
|
|
|
|
upd_flags |= MLX5_IB_UPD_XLT_PD;
|
|
|
|
if (flags & IB_MR_REREG_ACCESS)
|
|
|
|
upd_flags |= MLX5_IB_UPD_XLT_ACCESS;
|
|
|
|
err = mlx5_ib_update_xlt(mr, 0, npages, page_shift,
|
|
|
|
upd_flags);
|
|
|
|
} else {
|
|
|
|
err = rereg_umr(pd, mr, access_flags, flags);
|
|
|
|
}
|
|
|
|
|
2018-03-13 20:29:26 +07:00
|
|
|
if (err)
|
|
|
|
goto err;
|
2016-02-29 21:46:51 +07:00
|
|
|
}
|
|
|
|
|
2017-01-02 16:37:44 +07:00
|
|
|
set_mr_fields(dev, mr, npages, len, access_flags);
|
2016-02-29 21:46:51 +07:00
|
|
|
|
|
|
|
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
|
|
|
|
update_odp_mr(mr);
|
|
|
|
#endif
|
|
|
|
return 0;
|
2018-03-13 20:29:26 +07:00
|
|
|
|
|
|
|
err:
|
|
|
|
if (mr->umem) {
|
|
|
|
ib_umem_release(mr->umem);
|
|
|
|
mr->umem = NULL;
|
|
|
|
}
|
|
|
|
clean_mr(dev, mr);
|
|
|
|
return err;
|
2016-02-29 21:46:51 +07:00
|
|
|
}
|
|
|
|
|
2015-10-13 23:11:26 +07:00
|
|
|
static int
|
|
|
|
mlx5_alloc_priv_descs(struct ib_device *device,
|
|
|
|
struct mlx5_ib_mr *mr,
|
|
|
|
int ndescs,
|
|
|
|
int desc_size)
|
|
|
|
{
|
|
|
|
int size = ndescs * desc_size;
|
|
|
|
int add_size;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
add_size = max_t(int, MLX5_UMR_ALIGN - ARCH_KMALLOC_MINALIGN, 0);
|
|
|
|
|
|
|
|
mr->descs_alloc = kzalloc(size + add_size, GFP_KERNEL);
|
|
|
|
if (!mr->descs_alloc)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN);
|
|
|
|
|
2017-01-21 04:04:21 +07:00
|
|
|
mr->desc_map = dma_map_single(device->dev.parent, mr->descs,
|
2015-10-13 23:11:26 +07:00
|
|
|
size, DMA_TO_DEVICE);
|
2017-01-21 04:04:21 +07:00
|
|
|
if (dma_mapping_error(device->dev.parent, mr->desc_map)) {
|
2015-10-13 23:11:26 +07:00
|
|
|
ret = -ENOMEM;
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
err:
|
|
|
|
kfree(mr->descs_alloc);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
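mlx5_alloc_priv_descs() deliberately over-allocates so the descriptor array can be hand-aligned to MLX5_UMR_ALIGN while the original pointer is kept for freeing. A standalone sketch of that over-allocate-then-align pattern follows (userspace C; the sizes are made up, and the slack is the simpler worst case rather than the kernel's max_t() expression):

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#define UMR_ALIGN 2048	/* mirrors MLX5_UMR_ALIGN, for illustration only */

/* PTR_ALIGN equivalent: round a pointer up to the next 'a' boundary. */
#define PTR_ALIGN_UP(p, a) \
	((void *)(((uintptr_t)(p) + ((a) - 1)) & ~((uintptr_t)(a) - 1)))

int main(void)
{
	size_t size = 64 * 16;			/* e.g. 64 descriptors of 16 bytes */
	size_t add_size = UMR_ALIGN - 1;	/* worst-case slack for alignment */
	void *raw = calloc(1, size + add_size);	/* keep 'raw' for free() */
	void *descs;

	if (!raw)
		return 1;

	descs = PTR_ALIGN_UP(raw, UMR_ALIGN);
	printf("raw=%p aligned=%p\n", raw, descs);

	free(raw);	/* always free the original, unaligned pointer */
	return 0;
}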
|
|
|
|
|
|
|
|
static void
|
|
|
|
mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
|
|
|
|
{
|
|
|
|
if (mr->descs) {
|
|
|
|
struct ib_device *device = mr->ibmr.device;
|
|
|
|
int size = mr->max_descs * mr->desc_size;
|
|
|
|
|
2017-01-21 04:04:21 +07:00
|
|
|
dma_unmap_single(device->dev.parent, mr->desc_map,
|
2015-10-13 23:11:26 +07:00
|
|
|
size, DMA_TO_DEVICE);
|
|
|
|
kfree(mr->descs_alloc);
|
|
|
|
mr->descs = NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-03-13 20:29:28 +07:00
|
|
|
static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
|
2013-07-07 21:25:49 +07:00
|
|
|
{
|
2017-08-17 19:52:29 +07:00
|
|
|
int allocated_from_cache = mr->allocated_from_cache;
|
2013-07-07 21:25:49 +07:00
|
|
|
|
2015-07-30 14:32:34 +07:00
|
|
|
if (mr->sig) {
|
|
|
|
if (mlx5_core_destroy_psv(dev->mdev,
|
|
|
|
mr->sig->psv_memory.psv_idx))
|
|
|
|
mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
|
|
|
|
mr->sig->psv_memory.psv_idx);
|
|
|
|
if (mlx5_core_destroy_psv(dev->mdev,
|
|
|
|
mr->sig->psv_wire.psv_idx))
|
|
|
|
mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
|
|
|
|
mr->sig->psv_wire.psv_idx);
|
|
|
|
kfree(mr->sig);
|
|
|
|
mr->sig = NULL;
|
|
|
|
}
|
|
|
|
|
2015-10-13 23:11:26 +07:00
|
|
|
mlx5_free_priv_descs(mr);
|
|
|
|
|
2018-03-13 20:29:28 +07:00
|
|
|
if (!allocated_from_cache)
|
|
|
|
destroy_mkey(dev, mr);
|
2014-12-11 22:04:23 +07:00
|
|
|
}
|
|
|
|
|
2018-03-13 20:29:28 +07:00
|
|
|
static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
|
2014-12-11 22:04:23 +07:00
|
|
|
{
|
|
|
|
int npages = mr->npages;
|
|
|
|
struct ib_umem *umem = mr->umem;
|
|
|
|
|
|
|
|
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
|
2018-09-17 00:48:06 +07:00
|
|
|
if (umem && umem->is_odp) {
|
|
|
|
struct ib_umem_odp *umem_odp = to_ib_umem_odp(umem);
|
|
|
|
|
2014-12-11 22:04:26 +07:00
|
|
|
/* Prevent new page faults from succeeding */
|
|
|
|
mr->live = 0;
|
2014-12-11 22:04:23 +07:00
|
|
|
/* Wait for all running page-fault handlers to finish. */
|
|
|
|
synchronize_srcu(&dev->mr_srcu);
|
2014-12-11 22:04:26 +07:00
|
|
|
/* Destroy all page mappings */
|
2018-09-17 00:48:06 +07:00
|
|
|
if (umem_odp->page_list)
|
|
|
|
mlx5_ib_invalidate_range(umem_odp, ib_umem_start(umem),
|
2017-01-18 21:58:11 +07:00
|
|
|
ib_umem_end(umem));
|
|
|
|
else
|
|
|
|
mlx5_ib_free_implicit_mr(mr);
|
2014-12-11 22:04:26 +07:00
|
|
|
/*
|
|
|
|
* We kill the umem before the MR for ODP,
|
|
|
|
* so that there will not be any invalidations in
|
|
|
|
* flight that are still looking at the *mr struct.
|
|
|
|
*/
|
|
|
|
ib_umem_release(umem);
|
|
|
|
atomic_sub(npages, &dev->mdev->priv.reg_pages);
|
|
|
|
|
|
|
|
/* Avoid double-freeing the umem. */
|
|
|
|
umem = NULL;
|
|
|
|
}
|
2014-12-11 22:04:23 +07:00
|
|
|
#endif
|
|
|
|
|
2017-09-25 01:46:35 +07:00
|
|
|
clean_mr(dev, mr);
|
2014-12-11 22:04:23 +07:00
|
|
|
|
2013-07-07 21:25:49 +07:00
|
|
|
if (umem) {
|
|
|
|
ib_umem_release(umem);
|
2014-12-11 22:04:23 +07:00
|
|
|
atomic_sub(npages, &dev->mdev->priv.reg_pages);
|
2013-07-07 21:25:49 +07:00
|
|
|
}
|
|
|
|
|
2018-03-13 02:26:37 +07:00
|
|
|
if (!mr->allocated_from_cache)
|
|
|
|
kfree(mr);
|
|
|
|
else
|
|
|
|
mlx5_mr_cache_free(dev, mr);
|
2013-07-07 21:25:49 +07:00
|
|
|
}
|
|
|
|
|
2017-09-25 01:46:35 +07:00
|
|
|
int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
|
|
|
|
{
|
2018-03-13 20:29:28 +07:00
|
|
|
dereg_mr(to_mdev(ibmr->device), to_mmr(ibmr));
|
|
|
|
return 0;
|
2017-09-25 01:46:35 +07:00
|
|
|
}
|
|
|
|
|
2015-07-30 14:32:35 +07:00
|
|
|
struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
|
|
|
|
enum ib_mr_type mr_type,
|
|
|
|
u32 max_num_sg)
|
2014-02-23 19:19:06 +07:00
|
|
|
{
|
|
|
|
struct mlx5_ib_dev *dev = to_mdev(pd->device);
|
2016-07-16 10:28:36 +07:00
|
|
|
int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
|
2016-03-01 00:07:33 +07:00
|
|
|
int ndescs = ALIGN(max_num_sg, 4);
|
2016-07-16 10:28:36 +07:00
|
|
|
struct mlx5_ib_mr *mr;
|
|
|
|
void *mkc;
|
|
|
|
u32 *in;
|
2016-03-01 00:07:33 +07:00
|
|
|
int err;
|
2014-02-23 19:19:06 +07:00
|
|
|
|
|
|
|
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
|
|
|
|
if (!mr)
|
|
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
|
2016-07-16 10:28:36 +07:00
|
|
|
in = kzalloc(inlen, GFP_KERNEL);
|
2014-02-23 19:19:06 +07:00
|
|
|
if (!in) {
|
|
|
|
err = -ENOMEM;
|
|
|
|
goto err_free;
|
|
|
|
}
|
|
|
|
|
2016-07-16 10:28:36 +07:00
|
|
|
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
|
|
|
|
MLX5_SET(mkc, mkc, free, 1);
|
|
|
|
MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
|
|
|
|
MLX5_SET(mkc, mkc, qpn, 0xffffff);
|
|
|
|
MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
|
2014-02-23 19:19:06 +07:00
|
|
|
|
2015-07-30 14:32:35 +07:00
|
|
|
if (mr_type == IB_MR_TYPE_MEM_REG) {
|
2016-07-16 10:28:36 +07:00
|
|
|
mr->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
|
|
|
|
MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);
|
2015-10-13 23:11:26 +07:00
|
|
|
err = mlx5_alloc_priv_descs(pd->device, mr,
|
2017-01-02 16:37:42 +07:00
|
|
|
ndescs, sizeof(struct mlx5_mtt));
|
2015-10-13 23:11:26 +07:00
|
|
|
if (err)
|
|
|
|
goto err_free_in;
|
|
|
|
|
2017-01-02 16:37:42 +07:00
|
|
|
mr->desc_size = sizeof(struct mlx5_mtt);
|
2015-10-13 23:11:26 +07:00
|
|
|
mr->max_descs = ndescs;
|
2016-03-01 00:07:33 +07:00
|
|
|
} else if (mr_type == IB_MR_TYPE_SG_GAPS) {
|
2016-07-16 10:28:36 +07:00
|
|
|
mr->access_mode = MLX5_MKC_ACCESS_MODE_KLMS;
|
2016-03-01 00:07:33 +07:00
|
|
|
|
|
|
|
err = mlx5_alloc_priv_descs(pd->device, mr,
|
|
|
|
ndescs, sizeof(struct mlx5_klm));
|
|
|
|
if (err)
|
|
|
|
goto err_free_in;
|
|
|
|
mr->desc_size = sizeof(struct mlx5_klm);
|
|
|
|
mr->max_descs = ndescs;
|
2015-07-30 14:32:35 +07:00
|
|
|
} else if (mr_type == IB_MR_TYPE_SIGNATURE) {
|
2014-02-23 19:19:06 +07:00
|
|
|
u32 psv_index[2];
|
|
|
|
|
2016-07-16 10:28:36 +07:00
|
|
|
MLX5_SET(mkc, mkc, bsf_en, 1);
|
|
|
|
MLX5_SET(mkc, mkc, bsf_octword_size, MLX5_MKEY_BSF_OCTO_SIZE);
|
2014-02-23 19:19:06 +07:00
|
|
|
mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL);
|
|
|
|
if (!mr->sig) {
|
|
|
|
err = -ENOMEM;
|
|
|
|
goto err_free_in;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* create mem & wire PSVs */
|
2014-07-29 03:30:22 +07:00
|
|
|
err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn,
|
2014-02-23 19:19:06 +07:00
|
|
|
2, psv_index);
|
|
|
|
if (err)
|
|
|
|
goto err_free_sig;
|
|
|
|
|
2016-07-16 10:28:36 +07:00
|
|
|
mr->access_mode = MLX5_MKC_ACCESS_MODE_KLMS;
|
2014-02-23 19:19:06 +07:00
|
|
|
mr->sig->psv_memory.psv_idx = psv_index[0];
|
|
|
|
mr->sig->psv_wire.psv_idx = psv_index[1];
|
2014-02-23 19:19:12 +07:00
|
|
|
|
|
|
|
mr->sig->sig_status_checked = true;
|
|
|
|
mr->sig->sig_err_exists = false;
|
|
|
|
/* Next UMR, Arm SIGERR */
|
|
|
|
++mr->sig->sigerr_count;
|
2015-07-30 14:32:35 +07:00
|
|
|
} else {
|
|
|
|
mlx5_ib_warn(dev, "Invalid mr type %d\n", mr_type);
|
|
|
|
err = -EINVAL;
|
|
|
|
goto err_free_in;
|
2014-02-23 19:19:06 +07:00
|
|
|
}
|
|
|
|
|
2018-04-05 22:53:28 +07:00
|
|
|
MLX5_SET(mkc, mkc, access_mode_1_0, mr->access_mode & 0x3);
|
|
|
|
MLX5_SET(mkc, mkc, access_mode_4_2, (mr->access_mode >> 2) & 0x7);
|
2016-07-16 10:28:36 +07:00
|
|
|
MLX5_SET(mkc, mkc, umr_en, 1);
|
|
|
|
|
2017-12-26 16:20:20 +07:00
|
|
|
mr->ibmr.device = pd->device;
|
2016-07-16 10:28:36 +07:00
|
|
|
err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
|
2014-02-23 19:19:06 +07:00
|
|
|
if (err)
|
|
|
|
goto err_destroy_psv;
|
|
|
|
|
2017-01-02 16:37:48 +07:00
|
|
|
mr->mmkey.type = MLX5_MKEY_MR;
|
2016-02-29 23:05:28 +07:00
|
|
|
mr->ibmr.lkey = mr->mmkey.key;
|
|
|
|
mr->ibmr.rkey = mr->mmkey.key;
|
2014-02-23 19:19:06 +07:00
|
|
|
mr->umem = NULL;
|
|
|
|
kfree(in);
|
|
|
|
|
|
|
|
return &mr->ibmr;
|
|
|
|
|
|
|
|
err_destroy_psv:
|
|
|
|
if (mr->sig) {
|
2014-07-29 03:30:22 +07:00
|
|
|
if (mlx5_core_destroy_psv(dev->mdev,
|
2014-02-23 19:19:06 +07:00
|
|
|
mr->sig->psv_memory.psv_idx))
|
|
|
|
mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
|
|
|
|
mr->sig->psv_memory.psv_idx);
|
2014-07-29 03:30:22 +07:00
|
|
|
if (mlx5_core_destroy_psv(dev->mdev,
|
2014-02-23 19:19:06 +07:00
|
|
|
mr->sig->psv_wire.psv_idx))
|
|
|
|
mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
|
|
|
|
mr->sig->psv_wire.psv_idx);
|
|
|
|
}
|
2015-10-13 23:11:26 +07:00
|
|
|
mlx5_free_priv_descs(mr);
|
2014-02-23 19:19:06 +07:00
|
|
|
err_free_sig:
|
|
|
|
kfree(mr->sig);
|
|
|
|
err_free_in:
|
|
|
|
kfree(in);
|
|
|
|
err_free:
|
|
|
|
kfree(mr);
|
|
|
|
return ERR_PTR(err);
|
|
|
|
}
|
|
|
|
|
2016-02-29 23:05:30 +07:00
|
|
|
struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
|
|
|
|
struct ib_udata *udata)
|
|
|
|
{
|
|
|
|
struct mlx5_ib_dev *dev = to_mdev(pd->device);
|
2016-07-16 10:28:36 +07:00
|
|
|
int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
|
2016-02-29 23:05:30 +07:00
|
|
|
struct mlx5_ib_mw *mw = NULL;
|
2016-07-16 10:28:36 +07:00
|
|
|
u32 *in = NULL;
|
|
|
|
void *mkc;
|
2016-02-29 23:05:30 +07:00
|
|
|
int ndescs;
|
|
|
|
int err;
|
|
|
|
struct mlx5_ib_alloc_mw req = {};
|
|
|
|
struct {
|
|
|
|
__u32 comp_mask;
|
|
|
|
__u32 response_length;
|
|
|
|
} resp = {};
|
|
|
|
|
|
|
|
err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
|
|
|
|
if (err)
|
|
|
|
return ERR_PTR(err);
|
|
|
|
|
|
|
|
if (req.comp_mask || req.reserved1 || req.reserved2)
|
|
|
|
return ERR_PTR(-EOPNOTSUPP);
|
|
|
|
|
|
|
|
if (udata->inlen > sizeof(req) &&
|
|
|
|
!ib_is_udata_cleared(udata, sizeof(req),
|
|
|
|
udata->inlen - sizeof(req)))
|
|
|
|
return ERR_PTR(-EOPNOTSUPP);
|
|
|
|
|
|
|
|
ndescs = req.num_klms ? roundup(req.num_klms, 4) : roundup(1, 4);
|
|
|
|
|
|
|
|
mw = kzalloc(sizeof(*mw), GFP_KERNEL);
|
2016-07-16 10:28:36 +07:00
|
|
|
in = kzalloc(inlen, GFP_KERNEL);
|
2016-02-29 23:05:30 +07:00
|
|
|
if (!mw || !in) {
|
|
|
|
err = -ENOMEM;
|
|
|
|
goto free;
|
|
|
|
}
|
|
|
|
|
2016-07-16 10:28:36 +07:00
|
|
|
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
|
|
|
|
|
|
|
|
MLX5_SET(mkc, mkc, free, 1);
|
|
|
|
MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
|
|
|
|
MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
|
|
|
|
MLX5_SET(mkc, mkc, umr_en, 1);
|
|
|
|
MLX5_SET(mkc, mkc, lr, 1);
|
2018-04-05 22:53:28 +07:00
|
|
|
MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_KLMS);
|
2016-07-16 10:28:36 +07:00
|
|
|
MLX5_SET(mkc, mkc, en_rinval, !!((type == IB_MW_TYPE_2)));
|
|
|
|
MLX5_SET(mkc, mkc, qpn, 0xffffff);
|
|
|
|
|
|
|
|
err = mlx5_core_create_mkey(dev->mdev, &mw->mmkey, in, inlen);
|
2016-02-29 23:05:30 +07:00
|
|
|
if (err)
|
|
|
|
goto free;
|
|
|
|
|
2017-01-02 16:37:48 +07:00
|
|
|
mw->mmkey.type = MLX5_MKEY_MW;
|
2016-02-29 23:05:30 +07:00
|
|
|
mw->ibmw.rkey = mw->mmkey.key;
|
2017-04-05 13:23:59 +07:00
|
|
|
mw->ndescs = ndescs;
|
2016-02-29 23:05:30 +07:00
|
|
|
|
|
|
|
resp.response_length = min(offsetof(typeof(resp), response_length) +
|
|
|
|
sizeof(resp.response_length), udata->outlen);
|
|
|
|
if (resp.response_length) {
|
|
|
|
err = ib_copy_to_udata(udata, &resp, resp.response_length);
|
|
|
|
if (err) {
|
|
|
|
mlx5_core_destroy_mkey(dev->mdev, &mw->mmkey);
|
|
|
|
goto free;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
kfree(in);
|
|
|
|
return &mw->ibmw;
|
|
|
|
|
|
|
|
free:
|
|
|
|
kfree(mw);
|
|
|
|
kfree(in);
|
|
|
|
return ERR_PTR(err);
|
|
|
|
}
|
|
|
|
|
|
|
|
int mlx5_ib_dealloc_mw(struct ib_mw *mw)
|
|
|
|
{
|
|
|
|
struct mlx5_ib_mw *mmw = to_mmw(mw);
|
|
|
|
int err;
|
|
|
|
|
|
|
|
err = mlx5_core_destroy_mkey((to_mdev(mw->device))->mdev,
|
|
|
|
&mmw->mmkey);
|
|
|
|
if (!err)
|
|
|
|
kfree(mmw);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2014-02-23 19:19:12 +07:00
|
|
|
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
|
|
|
|
struct ib_mr_status *mr_status)
|
|
|
|
{
|
|
|
|
struct mlx5_ib_mr *mmr = to_mmr(ibmr);
|
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
if (check_mask & ~IB_MR_CHECK_SIG_STATUS) {
|
|
|
|
pr_err("Invalid status check mask\n");
|
|
|
|
ret = -EINVAL;
|
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
|
|
|
|
mr_status->fail_status = 0;
|
|
|
|
if (check_mask & IB_MR_CHECK_SIG_STATUS) {
|
|
|
|
if (!mmr->sig) {
|
|
|
|
ret = -EINVAL;
|
|
|
|
pr_err("signature status check requested on a non-signature enabled MR\n");
|
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
|
|
|
|
mmr->sig->sig_status_checked = true;
|
|
|
|
if (!mmr->sig->sig_err_exists)
|
|
|
|
goto done;
|
|
|
|
|
|
|
|
if (ibmr->lkey == mmr->sig->err_item.key)
|
|
|
|
memcpy(&mr_status->sig_err, &mmr->sig->err_item,
|
|
|
|
sizeof(mr_status->sig_err));
|
|
|
|
else {
|
|
|
|
mr_status->sig_err.err_type = IB_SIG_BAD_GUARD;
|
|
|
|
mr_status->sig_err.sig_err_offset = 0;
|
|
|
|
mr_status->sig_err.key = mmr->sig->err_item.key;
|
|
|
|
}
|
|
|
|
|
|
|
|
mmr->sig->sig_err_exists = false;
|
|
|
|
mr_status->fail_status |= IB_MR_CHECK_SIG_STATUS;
|
|
|
|
}
|
|
|
|
|
|
|
|
done:
|
|
|
|
return ret;
|
|
|
|
}
|
2015-10-13 23:11:26 +07:00
|
|
|
|
2016-03-01 00:07:33 +07:00
|
|
|
static int
|
|
|
|
mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
|
|
|
|
struct scatterlist *sgl,
|
2016-05-03 23:01:04 +07:00
|
|
|
unsigned short sg_nents,
|
2016-05-13 00:49:15 +07:00
|
|
|
unsigned int *sg_offset_p)
|
2016-03-01 00:07:33 +07:00
|
|
|
{
|
|
|
|
struct scatterlist *sg = sgl;
|
|
|
|
struct mlx5_klm *klms = mr->descs;
|
2016-05-13 00:49:15 +07:00
|
|
|
unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
|
2016-03-01 00:07:33 +07:00
|
|
|
u32 lkey = mr->ibmr.pd->local_dma_lkey;
|
|
|
|
int i;
|
|
|
|
|
2016-05-03 23:01:04 +07:00
|
|
|
mr->ibmr.iova = sg_dma_address(sg) + sg_offset;
|
2016-03-01 00:07:33 +07:00
|
|
|
mr->ibmr.length = 0;
|
|
|
|
|
|
|
|
for_each_sg(sgl, sg, sg_nents, i) {
|
2017-04-25 05:15:28 +07:00
|
|
|
if (unlikely(i >= mr->max_descs))
|
2016-03-01 00:07:33 +07:00
|
|
|
break;
|
2016-05-03 23:01:04 +07:00
|
|
|
klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset);
|
|
|
|
klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset);
|
2016-03-01 00:07:33 +07:00
|
|
|
klms[i].key = cpu_to_be32(lkey);
|
2017-04-23 18:31:42 +07:00
|
|
|
mr->ibmr.length += sg_dma_len(sg) - sg_offset;
|
2016-05-03 23:01:04 +07:00
|
|
|
|
|
|
|
sg_offset = 0;
|
2016-03-01 00:07:33 +07:00
|
|
|
}
|
2018-02-25 18:39:48 +07:00
|
|
|
mr->ndescs = i;
|
2016-03-01 00:07:33 +07:00
|
|
|
|
2016-05-13 00:49:15 +07:00
|
|
|
if (sg_offset_p)
|
|
|
|
*sg_offset_p = sg_offset;
|
|
|
|
|
2016-03-01 00:07:33 +07:00
|
|
|
return i;
|
|
|
|
}
|
|
|
|
|
2015-10-13 23:11:26 +07:00
|
|
|
static int mlx5_set_page(struct ib_mr *ibmr, u64 addr)
|
|
|
|
{
|
|
|
|
struct mlx5_ib_mr *mr = to_mmr(ibmr);
|
|
|
|
__be64 *descs;
|
|
|
|
|
|
|
|
if (unlikely(mr->ndescs == mr->max_descs))
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
descs = mr->descs;
|
|
|
|
descs[mr->ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-05-03 23:01:04 +07:00
|
|
|
int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
|
2016-05-13 00:49:15 +07:00
|
|
|
unsigned int *sg_offset)
|
2015-10-13 23:11:26 +07:00
|
|
|
{
|
|
|
|
struct mlx5_ib_mr *mr = to_mmr(ibmr);
|
|
|
|
int n;
|
|
|
|
|
|
|
|
mr->ndescs = 0;
|
|
|
|
|
|
|
|
ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map,
|
|
|
|
mr->desc_size * mr->max_descs,
|
|
|
|
DMA_TO_DEVICE);
|
|
|
|
|
2016-07-16 10:28:36 +07:00
|
|
|
if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
|
2016-05-03 23:01:04 +07:00
|
|
|
n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset);
|
2016-03-01 00:07:33 +07:00
|
|
|
else
|
2016-05-03 23:01:04 +07:00
|
|
|
n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
|
|
|
|
mlx5_set_page);
|
2015-10-13 23:11:26 +07:00
|
|
|
|
|
|
|
ib_dma_sync_single_for_device(ibmr->device, mr->desc_map,
|
|
|
|
mr->desc_size * mr->max_descs,
|
|
|
|
DMA_TO_DEVICE);
|
|
|
|
|
|
|
|
return n;
|
|
|
|
}
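To place mlx5_ib_map_mr_sg() in context, here is a minimal, hedged sketch of how an upper-layer protocol typically drives this path through the core verbs API: allocate a fast-registration MR, map an already DMA-mapped scatterlist onto it with ib_map_mr_sg() (which lands in the function above for mlx5), and post an IB_WR_REG_MR work request. QP and PD setup, completion handling, and the ULP-side naming are assumptions, not part of this driver.

#include <rdma/ib_verbs.h>

/* Sketch only: pd, qp, and the DMA-mapped sgl/nents belong to the ULP. */
static struct ib_mr *ulp_fast_reg(struct ib_pd *pd, struct ib_qp *qp,
				  struct scatterlist *sgl, int nents,
				  int access)
{
	struct ib_reg_wr reg_wr = {};
	const struct ib_send_wr *bad_wr;
	struct ib_mr *mr;
	int n, err;

	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, nents);
	if (IS_ERR(mr))
		return mr;

	/* Lay the scatterlist out in the MR's descriptor list
	 * (mlx5_ib_map_mr_sg() above for mlx5 devices). */
	n = ib_map_mr_sg(mr, sgl, nents, NULL, PAGE_SIZE);
	if (n != nents) {
		err = n < 0 ? n : -EINVAL;
		goto err_dereg;
	}

	/* Post the registration WQE; the MR is usable once it completes. */
	reg_wr.wr.opcode = IB_WR_REG_MR;
	reg_wr.wr.send_flags = IB_SEND_SIGNALED;
	reg_wr.mr = mr;
	reg_wr.key = mr->rkey;
	reg_wr.access = access;

	err = ib_post_send(qp, &reg_wr.wr, &bad_wr);
	if (err)
		goto err_dereg;

	return mr;

err_dereg:
	ib_dereg_mr(mr);
	return ERR_PTR(err);
}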
|