mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-24 09:25:45 +07:00
09dc9cd652
The code produces the following trace: [1750924.419007] general protection fault: 0000 [#3] SMP [1750924.420364] Modules linked in: nfnetlink autofs4 rpcsec_gss_krb5 nfsv4 dcdbas rfcomm bnep bluetooth nfsd auth_rpcgss nfs_acl dm_multipath nfs lockd scsi_dh sunrpc fscache radeon ttm drm_kms_helper drm serio_raw parport_pc ppdev i2c_algo_bit lpc_ich ipmi_si ib_mthca ib_qib dca lp parport ib_ipoib mac_hid ib_cm i3000_edac ib_sa ib_uverbs edac_core ib_umad ib_mad ib_core ib_addr tg3 ptp dm_mirror dm_region_hash dm_log psmouse pps_core [1750924.420364] CPU: 1 PID: 8401 Comm: python Tainted: G D 3.13.0-39-generic #66-Ubuntu [1750924.420364] Hardware name: Dell Computer Corporation PowerEdge 860/0XM089, BIOS A04 07/24/2007 [1750924.420364] task: ffff8800366a9800 ti: ffff88007af1c000 task.ti: ffff88007af1c000 [1750924.420364] RIP: 0010:[<ffffffffa0131d51>] [<ffffffffa0131d51>] qib_mcast_qp_free+0x11/0x50 [ib_qib] [1750924.420364] RSP: 0018:ffff88007af1dd70 EFLAGS: 00010246 [1750924.420364] RAX: 0000000000000001 RBX: ffff88007b822688 RCX: 000000000000000f [1750924.420364] RDX: ffff88007b822688 RSI: ffff8800366c15a0 RDI: 6764697200000000 [1750924.420364] RBP: ffff88007af1dd78 R08: 0000000000000001 R09: 0000000000000000 [1750924.420364] R10: 0000000000000011 R11: 0000000000000246 R12: ffff88007baa1d98 [1750924.420364] R13: ffff88003ecab000 R14: ffff88007b822660 R15: 0000000000000000 [1750924.420364] FS: 00007ffff7fd8740(0000) GS:ffff88007fc80000(0000) knlGS:0000000000000000 [1750924.420364] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [1750924.420364] CR2: 00007ffff597c750 CR3: 000000006860b000 CR4: 00000000000007e0 [1750924.420364] Stack: [1750924.420364] ffff88007b822688 ffff88007af1ddf0 ffffffffa0132429 000000007af1de20 [1750924.420364] ffff88007baa1dc8 ffff88007baa0000 ffff88007af1de70 ffffffffa00cb313 [1750924.420364] 00007fffffffde88 0000000000000000 0000000000000008 ffff88003ecab000 [1750924.420364] Call Trace: [1750924.420364] [<ffffffffa0132429>] 
qib_multicast_detach+0x1e9/0x350 [ib_qib] [1750924.568035] [<ffffffffa00cb313>] ? ib_uverbs_modify_qp+0x323/0x3d0 [ib_uverbs] [1750924.568035] [<ffffffffa0092d61>] ib_detach_mcast+0x31/0x50 [ib_core] [1750924.568035] [<ffffffffa00cc213>] ib_uverbs_detach_mcast+0x93/0x170 [ib_uverbs] [1750924.568035] [<ffffffffa00c61f6>] ib_uverbs_write+0xc6/0x2c0 [ib_uverbs] [1750924.568035] [<ffffffff81312e68>] ? apparmor_file_permission+0x18/0x20 [1750924.568035] [<ffffffff812d4cd3>] ? security_file_permission+0x23/0xa0 [1750924.568035] [<ffffffff811bd214>] vfs_write+0xb4/0x1f0 [1750924.568035] [<ffffffff811bdc49>] SyS_write+0x49/0xa0 [1750924.568035] [<ffffffff8172f7ed>] system_call_fastpath+0x1a/0x1f [1750924.568035] Code: 66 2e 0f 1f 84 00 00 00 00 00 31 c0 5d c3 66 2e 0f 1f 84 00 00 00 00 00 66 90 0f 1f 44 00 00 55 48 89 e5 53 48 89 fb 48 8b 7f 10 <f0> ff 8f 40 01 00 00 74 0e 48 89 df e8 8e f8 06 e1 5b 5d c3 0f [1750924.568035] RIP [<ffffffffa0131d51>] qib_mcast_qp_free+0x11/0x50 [ib_qib] [1750924.568035] RSP <ffff88007af1dd70> [1750924.650439] ---[ end trace 73d5d4b3f8ad4851 ] The fix is to note the qib_mcast_qp that was found. If none is found, then return EINVAL indicating the error. Cc: <stable@vger.kernel.org> Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com> Reported-by: Jason Gunthorpe <jgunthorpe@obsidianresearch.com> Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com> Signed-off-by: Doug Ledford <dledford@redhat.com>
364 lines
8.4 KiB
C
/*
|
|
* Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
|
|
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
|
|
*
|
|
* This software is available to you under a choice of one of two
|
|
* licenses. You may choose to be licensed under the terms of the GNU
|
|
* General Public License (GPL) Version 2, available from the file
|
|
* COPYING in the main directory of this source tree, or the
|
|
* OpenIB.org BSD license below:
|
|
*
|
|
* Redistribution and use in source and binary forms, with or
|
|
* without modification, are permitted provided that the following
|
|
* conditions are met:
|
|
*
|
|
* - Redistributions of source code must retain the above
|
|
* copyright notice, this list of conditions and the following
|
|
* disclaimer.
|
|
*
|
|
* - Redistributions in binary form must reproduce the above
|
|
* copyright notice, this list of conditions and the following
|
|
* disclaimer in the documentation and/or other materials
|
|
* provided with the distribution.
|
|
*
|
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
|
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
|
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
|
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
|
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
|
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
|
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
* SOFTWARE.
|
|
*/
|
|
|
|
#include <linux/rculist.h>
|
|
|
|
#include "qib.h"
|
|
|
|
/**
|
|
* qib_mcast_qp_alloc - alloc a struct to link a QP to mcast GID struct
|
|
* @qp: the QP to link
|
|
*/
|
|
static struct qib_mcast_qp *qib_mcast_qp_alloc(struct qib_qp *qp)
|
|
{
|
|
struct qib_mcast_qp *mqp;
|
|
|
|
mqp = kmalloc(sizeof(*mqp), GFP_KERNEL);
|
|
if (!mqp)
|
|
goto bail;
|
|
|
|
mqp->qp = qp;
|
|
atomic_inc(&qp->refcount);
|
|
|
|
bail:
|
|
return mqp;
|
|
}
|
|
|
|
static void qib_mcast_qp_free(struct qib_mcast_qp *mqp)
|
|
{
|
|
struct qib_qp *qp = mqp->qp;
|
|
|
|
/* Notify qib_destroy_qp() if it is waiting. */
|
|
if (atomic_dec_and_test(&qp->refcount))
|
|
wake_up(&qp->wait);
|
|
|
|
kfree(mqp);
|
|
}
|
|
|
|
/**
|
|
* qib_mcast_alloc - allocate the multicast GID structure
|
|
* @mgid: the multicast GID
|
|
*
|
|
* A list of QPs will be attached to this structure.
|
|
*/
|
|
static struct qib_mcast *qib_mcast_alloc(union ib_gid *mgid)
|
|
{
|
|
struct qib_mcast *mcast;
|
|
|
|
mcast = kmalloc(sizeof(*mcast), GFP_KERNEL);
|
|
if (!mcast)
|
|
goto bail;
|
|
|
|
mcast->mgid = *mgid;
|
|
INIT_LIST_HEAD(&mcast->qp_list);
|
|
init_waitqueue_head(&mcast->wait);
|
|
atomic_set(&mcast->refcount, 0);
|
|
mcast->n_attached = 0;
|
|
|
|
bail:
|
|
return mcast;
|
|
}
|
|
|
|
static void qib_mcast_free(struct qib_mcast *mcast)
|
|
{
|
|
struct qib_mcast_qp *p, *tmp;
|
|
|
|
list_for_each_entry_safe(p, tmp, &mcast->qp_list, list)
|
|
qib_mcast_qp_free(p);
|
|
|
|
kfree(mcast);
|
|
}
|
|
|
|
/**
|
|
* qib_mcast_find - search the global table for the given multicast GID
|
|
* @ibp: the IB port structure
|
|
* @mgid: the multicast GID to search for
|
|
*
|
|
* Returns NULL if not found.
|
|
*
|
|
* The caller is responsible for decrementing the reference count if found.
|
|
*/
|
|
struct qib_mcast *qib_mcast_find(struct qib_ibport *ibp, union ib_gid *mgid)
|
|
{
|
|
struct rb_node *n;
|
|
unsigned long flags;
|
|
struct qib_mcast *mcast;
|
|
|
|
spin_lock_irqsave(&ibp->lock, flags);
|
|
n = ibp->mcast_tree.rb_node;
|
|
while (n) {
|
|
int ret;
|
|
|
|
mcast = rb_entry(n, struct qib_mcast, rb_node);
|
|
|
|
ret = memcmp(mgid->raw, mcast->mgid.raw,
|
|
sizeof(union ib_gid));
|
|
if (ret < 0)
|
|
n = n->rb_left;
|
|
else if (ret > 0)
|
|
n = n->rb_right;
|
|
else {
|
|
atomic_inc(&mcast->refcount);
|
|
spin_unlock_irqrestore(&ibp->lock, flags);
|
|
goto bail;
|
|
}
|
|
}
|
|
spin_unlock_irqrestore(&ibp->lock, flags);
|
|
|
|
mcast = NULL;
|
|
|
|
bail:
|
|
return mcast;
|
|
}
|
|
|
|
/**
 * qib_mcast_add - insert mcast GID into table and attach QP struct
 * @dev: the device data (holds the global mcast group count)
 * @ibp: the IB port whose rbtree is searched/modified
 * @mcast: the mcast GID table
 * @mqp: the QP to attach
 *
 * Return zero if both were added.  Return EEXIST if the GID was already in
 * the table but the QP was added.  Return ESRCH if the QP was already
 * attached and neither structure was added.
 *
 * NOTE: error codes are returned as positive values; the caller
 * (qib_multicast_attach) maps them to the appropriate -errno.
 */
static int qib_mcast_add(struct qib_ibdev *dev, struct qib_ibport *ibp,
			 struct qib_mcast *mcast, struct qib_mcast_qp *mqp)
{
	struct rb_node **n = &ibp->mcast_tree.rb_node;
	struct rb_node *pn = NULL;
	int ret;

	/* Tree lookups, list updates and insertion all occur under the
	 * port lock so concurrent attach/detach cannot race.
	 */
	spin_lock_irq(&ibp->lock);

	while (*n) {
		struct qib_mcast *tmcast;
		struct qib_mcast_qp *p;

		pn = *n;
		tmcast = rb_entry(pn, struct qib_mcast, rb_node);

		ret = memcmp(mcast->mgid.raw, tmcast->mgid.raw,
			     sizeof(union ib_gid));
		if (ret < 0) {
			n = &pn->rb_left;
			continue;
		}
		if (ret > 0) {
			n = &pn->rb_right;
			continue;
		}

		/* GID already present: search the QP list to see if this
		 * QP is already there.
		 */
		list_for_each_entry_rcu(p, &tmcast->qp_list, list) {
			if (p->qp == mqp->qp) {
				ret = ESRCH;
				goto bail;
			}
		}
		if (tmcast->n_attached == ib_qib_max_mcast_qp_attached) {
			ret = ENOMEM;
			goto bail;
		}

		tmcast->n_attached++;

		/* Publish to RCU readers only after the checks above. */
		list_add_tail_rcu(&mqp->list, &tmcast->qp_list);
		ret = EEXIST;
		goto bail;
	}

	/* New GID: account against the device-wide group limit first. */
	spin_lock(&dev->n_mcast_grps_lock);
	if (dev->n_mcast_grps_allocated == ib_qib_max_mcast_grps) {
		spin_unlock(&dev->n_mcast_grps_lock);
		ret = ENOMEM;
		goto bail;
	}

	dev->n_mcast_grps_allocated++;
	spin_unlock(&dev->n_mcast_grps_lock);

	mcast->n_attached++;

	list_add_tail_rcu(&mqp->list, &mcast->qp_list);

	/* The tree holds one reference on the mcast structure. */
	atomic_inc(&mcast->refcount);
	rb_link_node(&mcast->rb_node, pn, n);
	rb_insert_color(&mcast->rb_node, &ibp->mcast_tree);

	ret = 0;

bail:
	spin_unlock_irq(&ibp->lock);

	return ret;
}
|
|
|
|
/*
 * qib_multicast_attach - attach a QP to a multicast GID (ib_attach_mcast hook)
 * @ibqp: the QP to attach
 * @gid: the multicast GID
 * @lid: the multicast LID (unused here)
 *
 * Returns 0 on success, -EINVAL for special QPs or a reset QP,
 * -ENOMEM on allocation failure or when a group/attach limit is hit.
 */
int qib_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	struct qib_qp *qp = to_iqp(ibqp);
	struct qib_ibdev *dev = to_idev(ibqp->device);
	struct qib_ibport *ibp;
	struct qib_mcast *mcast;
	struct qib_mcast_qp *mqp;

	if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET)
		return -EINVAL;

	/*
	 * Allocate both structures up front since it is better to do
	 * this outside of spin locks and they will most likely be needed.
	 */
	mcast = qib_mcast_alloc(gid);
	if (!mcast)
		return -ENOMEM;

	mqp = qib_mcast_qp_alloc(qp);
	if (!mqp) {
		qib_mcast_free(mcast);
		return -ENOMEM;
	}

	ibp = to_iport(ibqp->device, qp->port_num);
	switch (qib_mcast_add(dev, ibp, mcast, mqp)) {
	case ESRCH:
		/* Neither was used: OK to attach the same QP twice. */
		qib_mcast_qp_free(mqp);
		qib_mcast_free(mcast);
		break;

	case EEXIST:
		/* The GID was already in the tree; only mqp was consumed. */
		qib_mcast_free(mcast);
		break;

	case ENOMEM:
		/* Exceeded the maximum number of mcast groups. */
		qib_mcast_qp_free(mqp);
		qib_mcast_free(mcast);
		return -ENOMEM;

	default:
		/* 0: both structures were consumed by qib_mcast_add(). */
		break;
	}

	return 0;
}
|
|
|
|
/*
 * qib_multicast_detach - detach a QP from a multicast GID
 * @ibqp: the QP to detach
 * @gid: the multicast GID
 * @lid: the multicast LID (unused here)
 *
 * Returns 0 on success, -EINVAL for special/reset QPs, an unknown GID,
 * or a QP that was never attached to this GID.
 *
 * NOTE(review): this is the post-fix version of the GPF described in the
 * commit message above — the found entry is remembered in @delp and freed
 * only after the lock is dropped; if no entry is found we return -EINVAL
 * instead of dereferencing an uninitialized pointer.
 */
int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	struct qib_qp *qp = to_iqp(ibqp);
	struct qib_ibdev *dev = to_idev(ibqp->device);
	struct qib_ibport *ibp = to_iport(ibqp->device, qp->port_num);
	struct qib_mcast *mcast = NULL;
	struct qib_mcast_qp *p, *tmp, *delp = NULL;
	struct rb_node *n;
	int last = 0;
	int ret;

	if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET)
		return -EINVAL;

	spin_lock_irq(&ibp->lock);

	/* Find the GID in the mcast table. */
	n = ibp->mcast_tree.rb_node;
	while (1) {
		if (n == NULL) {
			/* GID not in the tree at all. */
			spin_unlock_irq(&ibp->lock);
			return -EINVAL;
		}

		mcast = rb_entry(n, struct qib_mcast, rb_node);
		ret = memcmp(gid->raw, mcast->mgid.raw,
			     sizeof(union ib_gid));
		if (ret < 0)
			n = n->rb_left;
		else if (ret > 0)
			n = n->rb_right;
		else
			break;
	}

	/* Search the QP list for this QP. */
	list_for_each_entry_safe(p, tmp, &mcast->qp_list, list) {
		if (p->qp != qp)
			continue;
		/*
		 * We found it, so remove it, but don't poison the forward
		 * link until we are sure there are no list walkers.
		 */
		list_del_rcu(&p->list);
		mcast->n_attached--;
		/* Remember the entry; it is freed after the lock is dropped. */
		delp = p;

		/* If this was the last attached QP, remove the GID too. */
		if (list_empty(&mcast->qp_list)) {
			rb_erase(&mcast->rb_node, &ibp->mcast_tree);
			last = 1;
		}
		break;
	}

	spin_unlock_irq(&ibp->lock);
	/* QP not attached to this GID. */
	if (!delp)
		return -EINVAL;
	/*
	 * Wait for any list walkers to finish before freeing the
	 * list element.
	 */
	wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
	qib_mcast_qp_free(delp);

	if (last) {
		/* Drop the tree's reference and wait for readers to drain. */
		atomic_dec(&mcast->refcount);
		wait_event(mcast->wait, !atomic_read(&mcast->refcount));
		qib_mcast_free(mcast);
		spin_lock_irq(&dev->n_mcast_grps_lock);
		dev->n_mcast_grps_allocated--;
		spin_unlock_irq(&dev->n_mcast_grps_lock);
	}
	return 0;
}
|
|
|
|
int qib_mcast_tree_empty(struct qib_ibport *ibp)
|
|
{
|
|
return ibp->mcast_tree.rb_node == NULL;
|
|
}
|