staging: rdma: hfi1: remove unnecessary out of memory messages

Out of memory messages are unnecessary in the drivers as they are
reported by memory management.

Addresses checkpatch.pl: WARNING: Possible unnecessary 'out of memory' message

Signed-off-by: Alison Schofield <amsfield22@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
Alison Schofield 2015-10-12 14:28:36 -07:00 committed by Greg Kroah-Hartman
parent be036bbe2c
commit 806e6e1bec
4 changed files with 10 additions and 36 deletions

View File

@ -8991,7 +8991,6 @@ static int set_up_interrupts(struct hfi1_devdata *dd)
entries = kcalloc(total, sizeof(*entries), GFP_KERNEL);
if (!entries) {
dd_dev_err(dd, "cannot allocate msix table\n");
ret = -ENOMEM;
goto fail;
}

View File

@ -134,11 +134,8 @@ int hfi1_create_ctxts(struct hfi1_devdata *dd)
dd->assigned_node_id = local_node_id;
dd->rcd = kcalloc(dd->num_rcv_contexts, sizeof(*dd->rcd), GFP_KERNEL);
if (!dd->rcd) {
dd_dev_err(dd,
"Unable to allocate receive context array, failing\n");
if (!dd->rcd)
goto nomem;
}
/* create one or more kernel contexts */
for (i = 0; i < dd->first_user_ctxt; ++i) {
@ -320,12 +317,8 @@ struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, u32 ctxt)
if (ctxt < dd->first_user_ctxt) { /* N/A for PSM contexts */
rcd->opstats = kzalloc(sizeof(*rcd->opstats),
GFP_KERNEL);
if (!rcd->opstats) {
dd_dev_err(dd,
"ctxt%u: Unable to allocate per ctxt stats buffer\n",
rcd->ctxt);
if (!rcd->opstats)
goto bail;
}
}
}
return rcd;

View File

@ -435,7 +435,6 @@ int init_send_contexts(struct hfi1_devdata *dd)
sizeof(struct send_context_info),
GFP_KERNEL);
if (!dd->send_contexts || !dd->hw_to_sw) {
dd_dev_err(dd, "Unable to allocate send context arrays\n");
kfree(dd->hw_to_sw);
kfree(dd->send_contexts);
free_credit_return(dd);
@ -684,10 +683,8 @@ struct send_context *sc_alloc(struct hfi1_devdata *dd, int type,
return NULL;
sc = kzalloc_node(sizeof(struct send_context), GFP_KERNEL, numa);
if (!sc) {
dd_dev_err(dd, "Cannot allocate send context structure\n");
if (!sc)
return NULL;
}
spin_lock_irqsave(&dd->sc_lock, flags);
ret = sc_hw_alloc(dd, type, &sw_index, &hw_context);
@ -813,8 +810,6 @@ struct send_context *sc_alloc(struct hfi1_devdata *dd, int type,
sc->sr = kzalloc_node(sizeof(union pio_shadow_ring) *
sc->sr_size, GFP_KERNEL, numa);
if (!sc->sr) {
dd_dev_err(dd,
"Cannot allocate send context shadow ring structure\n");
sc_free(sc);
return NULL;
}

View File

@ -378,20 +378,14 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt, struct file *fp)
dd = uctxt->dd;
pq = kzalloc(sizeof(*pq), GFP_KERNEL);
if (!pq) {
dd_dev_err(dd,
"[%u:%u] Failed to allocate SDMA request struct\n",
uctxt->ctxt, subctxt_fp(fp));
if (!pq)
goto pq_nomem;
}
memsize = sizeof(*pq->reqs) * hfi1_sdma_comp_ring_size;
pq->reqs = kmalloc(memsize, GFP_KERNEL);
if (!pq->reqs) {
dd_dev_err(dd,
"[%u:%u] Failed to allocate SDMA request queue (%u)\n",
uctxt->ctxt, subctxt_fp(fp), memsize);
if (!pq->reqs)
goto pq_reqs_nomem;
}
INIT_LIST_HEAD(&pq->list);
pq->dd = dd;
pq->ctxt = uctxt->ctxt;
@ -417,22 +411,15 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt, struct file *fp)
}
user_sdma_pkt_fp(fp) = pq;
cq = kzalloc(sizeof(*cq), GFP_KERNEL);
if (!cq) {
dd_dev_err(dd,
"[%u:%u] Failed to allocate SDMA completion queue\n",
uctxt->ctxt, subctxt_fp(fp));
if (!cq)
goto cq_nomem;
}
memsize = ALIGN(sizeof(*cq->comps) * hfi1_sdma_comp_ring_size,
PAGE_SIZE);
cq->comps = vmalloc_user(memsize);
if (!cq->comps) {
dd_dev_err(dd,
"[%u:%u] Failed to allocate SDMA completion queue entries\n",
uctxt->ctxt, subctxt_fp(fp));
if (!cq->comps)
goto cq_comps_nomem;
}
cq->nentries = hfi1_sdma_comp_ring_size;
user_sdma_comp_fp(fp) = cq;