RDMA/vmw_pvrdma: Avoid use after free due to QP/CQ/SRQ destroy
The use of wait queues in vmw_pvrdma for handling concurrent access to a
resource leaves a race condition that can cause a use-after-free bug.

Fix this by using the pattern from other drivers: complete() protected by
dec_and_test, to ensure complete() is called only once.

Fixes: 29c8d9eba5 ("IB: Add vmw_pvrdma driver")
Signed-off-by: Bryan Tan <bryantan@vmware.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
commit e3524b269e
parent 30a366a9da
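The race in the old scheme: the destroy path did atomic_dec() and then wait_event(), while the event and interrupt handlers did atomic_dec() followed by a conditional wake_up(). The destroyer could observe the refcount hit zero, return from wait_event(), and free the object while a handler was still about to call wake_up() on the now-freed wait queue. Below is a minimal sketch of the replacement pattern, assuming a hypothetical struct demo_res rather than any pvrdma structure; the kernel primitives shown (atomic_dec_and_test(), complete(), wait_for_completion()) are the ones the patch itself uses.

/* Sketch only: demo_res is hypothetical, not part of the pvrdma driver. */
#include <linux/atomic.h>
#include <linux/completion.h>

struct demo_res {
	atomic_t refcnt;	/* starts at 1, held by the creator */
	struct completion free;	/* completed exactly once, on the last put */
};

static void demo_res_init(struct demo_res *res)
{
	atomic_set(&res->refcnt, 1);
	init_completion(&res->free);
}

/* Transient users (event handlers, IRQ paths) drop their reference here. */
static void demo_res_put(struct demo_res *res)
{
	/* Exactly one caller observes the count reaching zero... */
	if (atomic_dec_and_test(&res->refcnt))
		complete(&res->free);	/* ...so complete() runs only once. */
}

/* The destroy path drops the creation reference, then waits everyone out. */
static void demo_res_destroy(struct demo_res *res)
{
	demo_res_put(res);
	wait_for_completion(&res->free);
	/* No other thread can still be touching *res past this point. */
}

Because atomic_dec_and_test() lets exactly one thread see the transition to zero, complete() cannot fire twice, and wait_for_completion() does not return until the last in-flight reference is gone, which is what closes the use-after-free window.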
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
@@ -94,7 +94,7 @@ struct pvrdma_cq {
 	u32 cq_handle;
 	bool is_kernel;
 	atomic_t refcnt;
-	wait_queue_head_t wait;
+	struct completion free;
 };
 
 struct pvrdma_id_table {
@@ -175,7 +175,7 @@ struct pvrdma_srq {
 	u32 srq_handle;
 	int npages;
 	refcount_t refcnt;
-	wait_queue_head_t wait;
+	struct completion free;
 };
 
 struct pvrdma_qp {
@@ -197,7 +197,7 @@ struct pvrdma_qp {
 	bool is_kernel;
 	struct mutex mutex; /* QP state mutex. */
 	atomic_t refcnt;
-	wait_queue_head_t wait;
+	struct completion free;
 };
 
 struct pvrdma_dev {
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
@@ -179,7 +179,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
 		pvrdma_page_dir_insert_umem(&cq->pdir, cq->umem, 0);
 
 	atomic_set(&cq->refcnt, 1);
-	init_waitqueue_head(&cq->wait);
+	init_completion(&cq->free);
 	spin_lock_init(&cq->cq_lock);
 
 	memset(cmd, 0, sizeof(*cmd));
@@ -230,8 +230,9 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
 
 static void pvrdma_free_cq(struct pvrdma_dev *dev, struct pvrdma_cq *cq)
 {
-	atomic_dec(&cq->refcnt);
-	wait_event(cq->wait, !atomic_read(&cq->refcnt));
+	if (atomic_dec_and_test(&cq->refcnt))
+		complete(&cq->free);
+	wait_for_completion(&cq->free);
 
 	if (!cq->is_kernel)
 		ib_umem_release(cq->umem);
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
@@ -346,9 +346,8 @@ static void pvrdma_qp_event(struct pvrdma_dev *dev, u32 qpn, int type)
 		ibqp->event_handler(&e, ibqp->qp_context);
 	}
 	if (qp) {
-		atomic_dec(&qp->refcnt);
-		if (atomic_read(&qp->refcnt) == 0)
-			wake_up(&qp->wait);
+		if (atomic_dec_and_test(&qp->refcnt))
+			complete(&qp->free);
 	}
 }
 
@@ -373,9 +372,8 @@ static void pvrdma_cq_event(struct pvrdma_dev *dev, u32 cqn, int type)
 		ibcq->event_handler(&e, ibcq->cq_context);
 	}
 	if (cq) {
-		atomic_dec(&cq->refcnt);
-		if (atomic_read(&cq->refcnt) == 0)
-			wake_up(&cq->wait);
+		if (atomic_dec_and_test(&cq->refcnt))
+			complete(&cq->free);
 	}
 }
 
@@ -404,7 +402,7 @@ static void pvrdma_srq_event(struct pvrdma_dev *dev, u32 srqn, int type)
 	}
 	if (srq) {
 		if (refcount_dec_and_test(&srq->refcnt))
-			wake_up(&srq->wait);
+			complete(&srq->free);
 	}
 }
 
@@ -539,9 +537,8 @@ static irqreturn_t pvrdma_intrx_handler(int irq, void *dev_id)
 		if (cq && cq->ibcq.comp_handler)
 			cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
 		if (cq) {
-			atomic_dec(&cq->refcnt);
-			if (atomic_read(&cq->refcnt))
-				wake_up(&cq->wait);
+			if (atomic_dec_and_test(&cq->refcnt))
+				complete(&cq->free);
 		}
 		pvrdma_idx_ring_inc(&ring->cons_head, ring_slots);
 	}
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
@@ -246,7 +246,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
 	spin_lock_init(&qp->rq.lock);
 	mutex_init(&qp->mutex);
 	atomic_set(&qp->refcnt, 1);
-	init_waitqueue_head(&qp->wait);
+	init_completion(&qp->free);
 
 	qp->state = IB_QPS_RESET;
 
@@ -428,8 +428,9 @@ static void pvrdma_free_qp(struct pvrdma_qp *qp)
 
 	pvrdma_unlock_cqs(scq, rcq, &scq_flags, &rcq_flags);
 
-	atomic_dec(&qp->refcnt);
-	wait_event(qp->wait, !atomic_read(&qp->refcnt));
+	if (atomic_dec_and_test(&qp->refcnt))
+		complete(&qp->free);
+	wait_for_completion(&qp->free);
 
 	if (!qp->is_kernel) {
 		if (qp->rumem)
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
@@ -149,7 +149,7 @@ struct ib_srq *pvrdma_create_srq(struct ib_pd *pd,
 
 	spin_lock_init(&srq->lock);
 	refcount_set(&srq->refcnt, 1);
-	init_waitqueue_head(&srq->wait);
+	init_completion(&srq->free);
 
 	dev_dbg(&dev->pdev->dev,
 		"create shared receive queue from user space\n");
@@ -236,8 +236,9 @@ static void pvrdma_free_srq(struct pvrdma_dev *dev, struct pvrdma_srq *srq)
 	dev->srq_tbl[srq->srq_handle] = NULL;
 	spin_unlock_irqrestore(&dev->srq_tbl_lock, flags);
 
-	if (!refcount_dec_and_test(&srq->refcnt))
-		wait_event(srq->wait, !refcount_read(&srq->refcnt));
+	if (refcount_dec_and_test(&srq->refcnt))
+		complete(&srq->free);
+	wait_for_completion(&srq->free);
 
 	/* There is no support for kernel clients, so this is safe. */
 	ib_umem_release(srq->umem);