dmaengine: core: Use dev_ functions for debug and error prints

According to the dmaengine kerneldoc, struct dma_chan always has a
non-NULL pointer to its DMA device, and a test in
dma_async_device_register() validates that the DMA device in turn points
to a struct device.
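
For reference, a minimal sketch of that validation near the top of
dma_async_device_register() (from memory of dmaengine.c of this era;
the surrounding capability checks are omitted):

	/* a DMA controller must always be backed by a struct device */
	BUG_ON(!device->dev);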

All pr_ prints except the one in dma_channel_table_init() have a valid
DMA channel or DMA device pointer available, which allows converting them
to dev_ functions and thus showing the associated DMA device.
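
As a sketch of the pointer chain these conversions rely on (the helper
name show_chan() below is made up for illustration; the fields and
dma_chan_name() are the real dmaengine ones):

	#include <linux/dmaengine.h>

	static void show_chan(struct dma_chan *chan)
	{
		/* chan->device is never NULL, and chan->device->dev is the
		 * struct device validated at registration time, so it can be
		 * handed straight to dev_dbg()/dev_err().
		 */
		dev_dbg(chan->device->dev, "%s: %s\n",
			__func__, dma_chan_name(chan));
	}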

Signed-off-by: Jarkko Nikula <jarkko.nikula@linux.intel.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
Jarkko Nikula, 2016-03-14 16:51:09 +02:00, committed by Vinod Koul
parent f55532a0c0
commit ef859312c3

drivers/dma/dmaengine.c

@@ -289,7 +289,7 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
 	do {
 		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
-			pr_err("%s: timeout!\n", __func__);
+			dev_err(chan->device->dev, "%s: timeout!\n", __func__);
 			return DMA_ERROR;
 		}
 		if (status != DMA_IN_PROGRESS)
@@ -518,7 +518,7 @@ static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
 	struct dma_chan *chan;
 
 	if (mask && !__dma_device_satisfies_mask(dev, mask)) {
-		pr_debug("%s: wrong capabilities\n", __func__);
+		dev_dbg(dev->dev, "%s: wrong capabilities\n", __func__);
 		return NULL;
 	}
 	/* devices with multiple channels need special handling as we need to
@@ -533,12 +533,12 @@ static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
 
 	list_for_each_entry(chan, &dev->channels, device_node) {
 		if (chan->client_count) {
-			pr_debug("%s: %s busy\n",
+			dev_dbg(dev->dev, "%s: %s busy\n",
 				 __func__, dma_chan_name(chan));
 			continue;
 		}
 		if (fn && !fn(chan, fn_param)) {
-			pr_debug("%s: %s filter said false\n",
+			dev_dbg(dev->dev, "%s: %s filter said false\n",
 				 __func__, dma_chan_name(chan));
 			continue;
 		}
@@ -567,11 +567,12 @@ static struct dma_chan *find_candidate(struct dma_device *device,
 
 		if (err) {
 			if (err == -ENODEV) {
-				pr_debug("%s: %s module removed\n", __func__,
-					 dma_chan_name(chan));
+				dev_dbg(device->dev, "%s: %s module removed\n",
+					__func__, dma_chan_name(chan));
 				list_del_rcu(&device->global_node);
 			} else
-				pr_debug("%s: failed to get %s: (%d)\n",
+				dev_dbg(device->dev,
+					"%s: failed to get %s: (%d)\n",
 					__func__, dma_chan_name(chan), err);
 
 			if (--device->privatecnt == 0)
@@ -602,7 +603,8 @@ struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
 		device->privatecnt++;
 		err = dma_chan_get(chan);
 		if (err) {
-			pr_debug("%s: failed to get %s: (%d)\n",
+			dev_dbg(chan->device->dev,
+				"%s: failed to get %s: (%d)\n",
 				__func__, dma_chan_name(chan), err);
 			chan = NULL;
 			if (--device->privatecnt == 0)
@ -662,7 +664,7 @@ struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
}
mutex_unlock(&dma_list_mutex);
pr_debug("%s: %s (%s)\n",
dev_dbg(chan->device->dev, "%s: %s (%s)\n",
__func__,
chan ? "success" : "fail",
chan ? dma_chan_name(chan) : NULL);
@@ -814,8 +816,9 @@ void dmaengine_get(void)
 				list_del_rcu(&device->global_node);
 				break;
 			} else if (err)
-				pr_debug("%s: failed to get %s: (%d)\n",
-				       __func__, dma_chan_name(chan), err);
+				dev_dbg(chan->device->dev,
+					"%s: failed to get %s: (%d)\n",
+					__func__, dma_chan_name(chan), err);
 		}
 	}
 
@@ -1222,8 +1225,9 @@ dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
 
 	while (tx->cookie == -EBUSY) {
 		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
-			pr_err("%s timeout waiting for descriptor submission\n",
-			       __func__);
+			dev_err(tx->chan->device->dev,
+				"%s timeout waiting for descriptor submission\n",
+				__func__);
 			return DMA_ERROR;
 		}
 		cpu_relax();