/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/hardirq.h>
#include <linux/export.h>

#include <linux/mlx4/cmd.h>
#include <linux/mlx4/cq.h>

#include "mlx4.h"
#include "icm.h"

#define MLX4_CQ_STATUS_OK		( 0 << 28)
#define MLX4_CQ_STATUS_OVERFLOW		( 9 << 28)
#define MLX4_CQ_STATUS_WRITE_FAIL	(10 << 28)
#define MLX4_CQ_FLAG_CC			( 1 << 18)
#define MLX4_CQ_FLAG_OI			( 1 << 17)
#define MLX4_CQ_STATE_ARMED		( 9 <<  8)
#define MLX4_CQ_STATE_ARMED_SOL		( 6 <<  8)
#define MLX4_EQ_STATE_FIRED		(10 <<  8)

#define TASKLET_MAX_TIME 2
#define TASKLET_MAX_TIME_JIFFIES msecs_to_jiffies(TASKLET_MAX_TIME)

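/* Completion callbacks are not invoked directly from the EQ interrupt
 * handler: doing potentially unbounded work in hard-IRQ context (e.g. the
 * user-space RDMA stack's uverbs completion handler) can trip the system's
 * hard-lockup watchdog.  Instead, the ISR queues each CQ on a per-EQ list
 * and schedules this tasklet, which runs the callbacks in softirq context.
 * The tasklet yields after TASKLET_MAX_TIME_JIFFIES and reschedules itself
 * if CQs are still pending.
 */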
void mlx4_cq_tasklet_cb(unsigned long data)
{
	unsigned long flags;
	unsigned long end = jiffies + TASKLET_MAX_TIME_JIFFIES;
	struct mlx4_eq_tasklet *ctx = (struct mlx4_eq_tasklet *)data;
	struct mlx4_cq *mcq, *temp;

	spin_lock_irqsave(&ctx->lock, flags);
	list_splice_tail_init(&ctx->list, &ctx->process_list);
	spin_unlock_irqrestore(&ctx->lock, flags);

	list_for_each_entry_safe(mcq, temp, &ctx->process_list, tasklet_ctx.list) {
		list_del_init(&mcq->tasklet_ctx.list);
		mcq->tasklet_ctx.comp(mcq);
		if (refcount_dec_and_test(&mcq->refcount))
			complete(&mcq->free);
		if (time_after(jiffies, end))
			break;
	}

	if (!list_empty(&ctx->process_list))
		tasklet_schedule(&ctx->task);
}

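/* Called from the EQ interrupt handler for each completion event.  Takes a
 * reference on the CQ so it cannot be freed while it sits on the tasklet
 * list, and kicks the tasklet only when the list goes from empty to
 * non-empty (otherwise the tasklet is already scheduled).
 */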
static void mlx4_add_cq_to_tasklet(struct mlx4_cq *cq)
{
	struct mlx4_eq_tasklet *tasklet_ctx = cq->tasklet_ctx.priv;
	unsigned long flags;
	bool kick;

	spin_lock_irqsave(&tasklet_ctx->lock, flags);
	/* When migrating CQs between EQs is implemented, note that this
	 * point must be synchronized: while a CQ is being migrated,
	 * completions could still arrive on the old EQ.
	 */
	if (list_empty_careful(&cq->tasklet_ctx.list)) {
		refcount_inc(&cq->refcount);
		kick = list_empty(&tasklet_ctx->list);
		list_add_tail(&cq->tasklet_ctx.list, &tasklet_ctx->list);
		if (kick)
			tasklet_schedule(&tasklet_ctx->task);
	}
	spin_unlock_irqrestore(&tasklet_ctx->lock, flags);
}

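/* Completion event handler, called from the EQ interrupt handler.  The
 * radix tree lookup must be covered by rcu_read_lock(): another core may
 * free the CQ and run the radix tree's call_rcu() callbacks while the
 * lookup is in flight.
 */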
void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)
{
	struct mlx4_cq *cq;

	rcu_read_lock();
	cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree,
			       cqn & (dev->caps.num_cqs - 1));
	rcu_read_unlock();

	if (!cq) {
		mlx4_dbg(dev, "Completion event for bogus CQ %08x\n", cqn);
		return;
	}

	/* Accessing the CQ outside of rcu_read_lock is safe, because
	 * the CQ is freed only after interrupt handling is completed.
	 */
	++cq->arm_sn;

	cq->comp(cq);
}

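/* Asynchronous (error) event handler for a CQ.  Uses the same RCU-protected
 * lookup as mlx4_cq_completion(); no reference count is taken here because
 * mlx4_cq_free() synchronizes against the async EQ's interrupt before the
 * CQ is freed.
 */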
void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
{
	struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
	struct mlx4_cq *cq;

	rcu_read_lock();
	cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1));
	rcu_read_unlock();

	if (!cq) {
		mlx4_dbg(dev, "Async event for bogus CQ %08x\n", cqn);
		return;
	}

	/* Accessing the CQ outside of rcu_read_lock is safe, because
	 * the CQ is freed only after interrupt handling is completed.
	 */
	cq->event(cq, event_type);
}

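/* Thin wrappers around the firmware commands that hand a CQ context to the
 * HW (SW2HW), modify it (MODIFY), and reclaim it (HW2SW).  All are issued
 * as wrapped commands so they can be proxied on behalf of virtual
 * functions.
 */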
static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 int cq_num)
{
	return mlx4_cmd(dev, mailbox->dma, cq_num, 0,
			MLX4_CMD_SW2HW_CQ, MLX4_CMD_TIME_CLASS_A,
			MLX4_CMD_WRAPPED);
}

static int mlx4_MODIFY_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			  int cq_num, u32 opmod)
{
	return mlx4_cmd(dev, mailbox->dma, cq_num, opmod, MLX4_CMD_MODIFY_CQ,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}

static int mlx4_HW2SW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 int cq_num)
{
	return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0,
			    cq_num, mailbox ? 0 : 1, MLX4_CMD_HW2SW_CQ,
			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}

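/* Tune completion event moderation for a CQ: "count" and "period" bound how
 * many completions, and how much time, may be coalesced before a completion
 * event is raised.  Issued as MODIFY_CQ with opmod 1.
 */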
int mlx4_cq_modify(struct mlx4_dev *dev, struct mlx4_cq *cq,
		   u16 count, u16 period)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_cq_context *cq_context;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	cq_context = mailbox->buf;
	cq_context->cq_max_count = cpu_to_be16(count);
	cq_context->cq_period	 = cpu_to_be16(period);

	err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 1);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_cq_modify);

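/* Resize a CQ: point the firmware at a new MTT (and hence a new buffer)
 * with room for "entries" CQEs.  Issued as MODIFY_CQ with opmod 0.
 */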
int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq,
		   int entries, struct mlx4_mtt *mtt)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_cq_context *cq_context;
	u64 mtt_addr;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	cq_context = mailbox->buf;
	cq_context->logsize_usrpage = cpu_to_be32(ilog2(entries) << 24);
	cq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
	mtt_addr = mlx4_mtt_addr(dev, mtt);
	cq_context->mtt_base_addr_h = mtt_addr >> 32;
	cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);

	err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 0);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_cq_resize);

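/* Native allocation path (PF, or non-SRIOV): reserve a CQN from the bitmap
 * and make sure the ICM pages backing the CQ context and its cMPT entry are
 * mapped.  The bitmap is deliberately not allocated round-robin (MLX4_NO_RR
 * on free), so applications that start and exit frequently keep reusing
 * already-mapped ICM chunks instead of paying the map/unmap cost each time.
 */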
int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cq_table *cq_table = &priv->cq_table;
	int err;

	*cqn = mlx4_bitmap_alloc(&cq_table->bitmap);
	if (*cqn == -1)
		return -ENOMEM;

	err = mlx4_table_get(dev, &cq_table->table, *cqn);
	if (err)
		goto err_out;

	err = mlx4_table_get(dev, &cq_table->cmpt_table, *cqn);
	if (err)
		goto err_put;
	return 0;

err_put:
	mlx4_table_put(dev, &cq_table->table, *cqn);

err_out:
	mlx4_bitmap_free(&cq_table->bitmap, *cqn, MLX4_NO_RR);
	return err;
}

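/* Multi-function aware wrapper: on a VF the allocation is requested from
 * the PF via the ALLOC_RES command, with the resource usage type encoded in
 * the top bits of the input modifier; otherwise allocate directly.
 */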
static int mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn, u8 usage)
{
	u32 in_modifier = RES_CQ | (((u32)usage & 3) << 30);
	u64 out_param;
	int err;

	if (mlx4_is_mfunc(dev)) {
		err = mlx4_cmd_imm(dev, 0, &out_param, in_modifier,
				   RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (err)
			return err;

		*cqn = get_param_l(&out_param);
		return 0;
	}
	return __mlx4_cq_alloc_icm(dev, cqn);
}

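/* Release the ICM references and the CQN, mirroring __mlx4_cq_alloc_icm().
 * As above, the VF variant below goes through the PF (FREE_RES).
 */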
void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cq_table *cq_table = &priv->cq_table;

	mlx4_table_put(dev, &cq_table->cmpt_table, cqn);
	mlx4_table_put(dev, &cq_table->table, cqn);
	mlx4_bitmap_free(&cq_table->bitmap, cqn, MLX4_NO_RR);
}

static void mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
{
	u64 in_param = 0;
	int err;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, cqn);
		err = mlx4_cmd(dev, in_param, RES_CQ, RES_OP_RESERVE_AND_MAP,
			       MLX4_CMD_FREE_RES,
			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (err)
			mlx4_warn(dev, "Failed freeing cq:%d\n", cqn);
	} else
		__mlx4_cq_free_icm(dev, cqn);
}

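/* Create a CQ: reserve a CQN and its ICM, publish the CQ in the radix tree
 * (so the EQ handlers can find it), then build a CQ context in a command
 * mailbox and hand it to the firmware with SW2HW_CQ.  Completions for this
 * CQ are dispatched through the per-EQ tasklet (see mlx4_add_cq_to_tasklet).
 */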
int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
		  struct mlx4_mtt *mtt, struct mlx4_uar *uar, u64 db_rec,
		  struct mlx4_cq *cq, unsigned vector, int collapsed,
		  int timestamp_en)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cq_table *cq_table = &priv->cq_table;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_cq_context *cq_context;
	u64 mtt_addr;
	int err;

	if (vector >= dev->caps.num_comp_vectors)
		return -EINVAL;

	cq->vector = vector;

	err = mlx4_cq_alloc_icm(dev, &cq->cqn, cq->usage);
	if (err)
		return err;

	spin_lock(&cq_table->lock);
	err = radix_tree_insert(&cq_table->tree, cq->cqn, cq);
	spin_unlock(&cq_table->lock);
	if (err)
		goto err_icm;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_radix;
	}

	cq_context = mailbox->buf;
	cq_context->flags = cpu_to_be32(!!collapsed << 18);
	if (timestamp_en)
		cq_context->flags |= cpu_to_be32(1 << 19);

	cq_context->logsize_usrpage =
		cpu_to_be32((ilog2(nent) << 24) |
			    mlx4_to_hw_uar_index(dev, uar->index));
	cq_context->comp_eqn = priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].eqn;
	cq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;

	mtt_addr = mlx4_mtt_addr(dev, mtt);
	cq_context->mtt_base_addr_h = mtt_addr >> 32;
	cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
	cq_context->db_rec_addr = cpu_to_be64(db_rec);

	err = mlx4_SW2HW_CQ(dev, mailbox, cq->cqn);
	mlx4_free_cmd_mailbox(dev, mailbox);
	if (err)
		goto err_radix;

	cq->cons_index = 0;
	cq->arm_sn     = 1;
	cq->uar        = uar;
	refcount_set(&cq->refcount, 1);
	init_completion(&cq->free);
	cq->comp = mlx4_add_cq_to_tasklet;
	cq->tasklet_ctx.priv =
		&priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].tasklet_ctx;
	INIT_LIST_HEAD(&cq->tasklet_ctx.list);

	cq->irq = priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].irq;
	return 0;

err_radix:
	spin_lock(&cq_table->lock);
	radix_tree_delete(&cq_table->tree, cq->cqn);
	spin_unlock(&cq_table->lock);

err_icm:
	mlx4_cq_free_icm(dev, cq->cqn);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_cq_alloc);

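/* Destroy a CQ.  The radix tree entry is deleted before synchronize_irq(),
 * so no subsequent interrupt can look the CQ up; the reference count then
 * drains any completions still queued on the tasklet list before the ICM
 * backing the CQ is released.
 */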
void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cq_table *cq_table = &priv->cq_table;
	int err;

	err = mlx4_HW2SW_CQ(dev, NULL, cq->cqn);
	if (err)
		mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn);

	spin_lock(&cq_table->lock);
	radix_tree_delete(&cq_table->tree, cq->cqn);
	spin_unlock(&cq_table->lock);

	synchronize_irq(priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq);
	if (priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq !=
	    priv->eq_table.eq[MLX4_EQ_ASYNC].irq)
		synchronize_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq);

	if (refcount_dec_and_test(&cq->refcount))
		complete(&cq->free);
	wait_for_completion(&cq->free);

	mlx4_cq_free_icm(dev, cq->cqn);
}
EXPORT_SYMBOL_GPL(mlx4_cq_free);

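/* CQ table init/cleanup.  Slaves (VFs) only need the radix tree; the CQN
 * bitmap is owned by the physical function, which also owns the ICM.
 */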
int mlx4_init_cq_table(struct mlx4_dev *dev)
{
	struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
	int err;

	spin_lock_init(&cq_table->lock);
	INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);
	if (mlx4_is_slave(dev))
		return 0;

	err = mlx4_bitmap_init(&cq_table->bitmap, dev->caps.num_cqs,
			       dev->caps.num_cqs - 1, dev->caps.reserved_cqs, 0);
	if (err)
		return err;

	return 0;
}

void mlx4_cleanup_cq_table(struct mlx4_dev *dev)
{
	if (mlx4_is_slave(dev))
		return;
	/* Nothing to do to clean up radix_tree */
	mlx4_bitmap_cleanup(&mlx4_priv(dev)->cq_table.bitmap);
}