// SPDX-License-Identifier: GPL-2.0
/*
 * Core driver for the Synopsys DesignWare DMA Controller
 *
 * Copyright (C) 2007-2008 Atmel Corporation
 * Copyright (C) 2010-2011 ST Microelectronics
 * Copyright (C) 2013 Intel Corporation
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>

#include "../dmaengine.h"
#include "internal.h"

/*
 * This supports the Synopsys "DesignWare AHB Central DMA Controller",
 * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all
 * of which use ARM any more). See the "Databook" from Synopsys for
 * information beyond what licensees probably provide.
 *
 * The driver has been tested with the Atmel AT32AP7000, which does not
 * support descriptor writeback.
 */

/* The set of bus widths supported by the DMA controller */
#define DW_DMA_BUSWIDTHS			  \
	BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED)	| \
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE)		| \
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES)		| \
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)

/*----------------------------------------------------------------------*/

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
{
	return to_dw_desc(dwc->active_list.next);
}

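/*
 * Queue a descriptor for transfer. Only the software queue is touched here;
 * the hardware is kicked later, from issue_pending or from the tasklet once
 * the channel goes idle.
 */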
static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dw_desc *desc = txd_to_dw_desc(tx);
	struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	cookie = dma_cookie_assign(tx);

	/*
	 * REVISIT: We should attempt to chain as many descriptors as
	 * possible, perhaps even appending to those already submitted
	 * for DMA. But this is hard to do in a race-free manner.
	 */

	list_add_tail(&desc->desc_node, &dwc->queue);
	spin_unlock_irqrestore(&dwc->lock, flags);
	dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n",
		 __func__, desc->txd.cookie);

	return cookie;
}

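/* Allocate and initialize one hardware descriptor from the controller's DMA pool */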
static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	struct dw_desc *desc;
	dma_addr_t phys;

	desc = dma_pool_zalloc(dw->desc_pool, GFP_ATOMIC, &phys);
	if (!desc)
		return NULL;

	dwc->descs_allocated++;
	INIT_LIST_HEAD(&desc->tx_list);
	dma_async_tx_descriptor_init(&desc->txd, &dwc->chan);
	desc->txd.tx_submit = dwc_tx_submit;
	desc->txd.flags = DMA_CTRL_ACK;
	desc->txd.phys = phys;
	return desc;
}

static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	struct dw_desc *child, *_next;

	if (unlikely(!desc))
		return;

	list_for_each_entry_safe(child, _next, &desc->tx_list, desc_node) {
		list_del(&child->desc_node);
		dma_pool_free(dw->desc_pool, child, child->txd.phys);
		dwc->descs_allocated--;
	}

	dma_pool_free(dw->desc_pool, desc, desc->txd.phys);
	dwc->descs_allocated--;
}

static void dwc_initialize(struct dw_dma_chan *dwc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);

	if (test_bit(DW_DMA_IS_INITIALIZED, &dwc->flags))
		return;

	dw->initialize_chan(dwc);

	/* Enable interrupts */
	channel_set_bit(dw, MASK.XFER, dwc->mask);
	channel_set_bit(dw, MASK.ERROR, dwc->mask);

	set_bit(DW_DMA_IS_INITIALIZED, &dwc->flags);
}

/*----------------------------------------------------------------------*/

static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc)
{
	dev_err(chan2dev(&dwc->chan),
		" SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
		channel_readl(dwc, SAR),
		channel_readl(dwc, DAR),
		channel_readl(dwc, LLP),
		channel_readl(dwc, CTL_HI),
		channel_readl(dwc, CTL_LO));
}

static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();
}

/*----------------------------------------------------------------------*/

/* Perform single block transfer */
static inline void dwc_do_single_block(struct dw_dma_chan *dwc,
				       struct dw_desc *desc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	u32 ctllo;

	/*
	 * Software emulation of LLP mode relies on interrupts to continue
	 * multi block transfer.
	 */
	ctllo = lli_read(desc, ctllo) | DWC_CTLL_INT_EN;

	channel_writel(dwc, SAR, lli_read(desc, sar));
	channel_writel(dwc, DAR, lli_read(desc, dar));
	channel_writel(dwc, CTL_LO, ctllo);
	channel_writel(dwc, CTL_HI, lli_read(desc, ctlhi));
	channel_set_bit(dw, CH_EN, dwc->mask);

	/* Move pointer to next descriptor */
	dwc->tx_node_active = dwc->tx_node_active->next;
}

/* Called with dwc->lock held and bh disabled */
static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	u8 lms = DWC_LLP_LMS(dwc->dws.m_master);
	unsigned long was_soft_llp;

	/* ASSERT: channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"%s: BUG: Attempted to start non-idle channel\n",
			__func__);
		dwc_dump_chan_regs(dwc);

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	if (dwc->nollp) {
		was_soft_llp = test_and_set_bit(DW_DMA_IS_SOFT_LLP,
						&dwc->flags);
		if (was_soft_llp) {
			dev_err(chan2dev(&dwc->chan),
				"BUG: Attempted to start new LLP transfer inside ongoing one\n");
			return;
		}

		dwc_initialize(dwc);

		first->residue = first->total_len;
		dwc->tx_node_active = &first->tx_list;

		/* Submit first block */
		dwc_do_single_block(dwc, first);

		return;
	}

	dwc_initialize(dwc);

	channel_writel(dwc, LLP, first->txd.phys | lms);
	channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);
	channel_set_bit(dw, CH_EN, dwc->mask);
}

static void dwc_dostart_first_queued(struct dw_dma_chan *dwc)
{
	struct dw_desc *desc;

	if (list_empty(&dwc->queue))
		return;

	list_move(dwc->queue.next, &dwc->active_list);
	desc = dwc_first_active(dwc);
	dev_vdbg(chan2dev(&dwc->chan), "%s: started %u\n", __func__, desc->txd.cookie);
	dwc_dostart(dwc, desc);
}

/*----------------------------------------------------------------------*/

static void
dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
		bool callback_required)
{
	struct dma_async_tx_descriptor *txd = &desc->txd;
	struct dw_desc *child;
	unsigned long flags;
	struct dmaengine_desc_callback cb;

	dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);

	spin_lock_irqsave(&dwc->lock, flags);
	dma_cookie_complete(txd);
	if (callback_required)
		dmaengine_desc_get_callback(txd, &cb);
	else
		memset(&cb, 0, sizeof(cb));

	/* async_tx_ack */
	list_for_each_entry(child, &desc->tx_list, desc_node)
		async_tx_ack(&child->txd);
	async_tx_ack(&desc->txd);
	dwc_desc_put(dwc, desc);
	spin_unlock_irqrestore(&dwc->lock, flags);

	dmaengine_desc_callback_invoke(&cb, NULL);
}

static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	LIST_HEAD(list);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: XFER bit set, but channel not idle!\n");

		/* Try to continue after resetting the channel... */
		dwc_chan_disable(dw, dwc);
	}

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	list_splice_init(&dwc->active_list, &list);
	dwc_dostart_first_queued(dwc);

	spin_unlock_irqrestore(&dwc->lock, flags);

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		dwc_descriptor_complete(dwc, desc, true);
}

/* Returns how many bytes were already received from source */
static inline u32 dwc_get_sent(struct dw_dma_chan *dwc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	u32 ctlhi = channel_readl(dwc, CTL_HI);
	u32 ctllo = channel_readl(dwc, CTL_LO);

	return dw->block2bytes(dwc, ctlhi, ctllo >> 4 & 7);
}

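/*
 * Walk the active list to find the descriptor the hardware is currently
 * working on, update its residue, and complete every descriptor that has
 * already finished. In soft LLP mode this also submits the next block of
 * the ongoing transfer.
 */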
static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	dma_addr_t llp;
	struct dw_desc *desc, *_desc;
	struct dw_desc *child;
	u32 status_xfer;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	llp = channel_readl(dwc, LLP);
	status_xfer = dma_readl(dw, RAW.XFER);

	if (status_xfer & dwc->mask) {
		/* Everything we've submitted is done */
		dma_writel(dw, CLEAR.XFER, dwc->mask);

		if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
			struct list_head *head, *active = dwc->tx_node_active;

			/*
			 * We are inside first active descriptor.
			 * Otherwise something is really wrong.
			 */
			desc = dwc_first_active(dwc);

			head = &desc->tx_list;
			if (active != head) {
				/* Update residue to reflect last sent descriptor */
				if (active == head->next)
					desc->residue -= desc->len;
				else
					desc->residue -= to_dw_desc(active->prev)->len;

				child = to_dw_desc(active);

				/* Submit next block */
				dwc_do_single_block(dwc, child);

				spin_unlock_irqrestore(&dwc->lock, flags);
				return;
			}

			/* We are done here */
			clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
		}

		spin_unlock_irqrestore(&dwc->lock, flags);

		dwc_complete_all(dw, dwc);
		return;
	}

	if (list_empty(&dwc->active_list)) {
		spin_unlock_irqrestore(&dwc->lock, flags);
		return;
	}

	if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
		dev_vdbg(chan2dev(&dwc->chan), "%s: soft LLP mode\n", __func__);
		spin_unlock_irqrestore(&dwc->lock, flags);
		return;
	}

	dev_vdbg(chan2dev(&dwc->chan), "%s: llp=%pad\n", __func__, &llp);

	list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
		/* Initial residue value */
		desc->residue = desc->total_len;

		/* Check first descriptors addr */
		if (desc->txd.phys == DWC_LLP_LOC(llp)) {
			spin_unlock_irqrestore(&dwc->lock, flags);
			return;
		}

		/* Check first descriptors llp */
		if (lli_read(desc, llp) == llp) {
			/* This one is currently in progress */
			desc->residue -= dwc_get_sent(dwc);
			spin_unlock_irqrestore(&dwc->lock, flags);
			return;
		}

		desc->residue -= desc->len;
		list_for_each_entry(child, &desc->tx_list, desc_node) {
			if (lli_read(child, llp) == llp) {
				/* Currently in progress */
				desc->residue -= dwc_get_sent(dwc);
				spin_unlock_irqrestore(&dwc->lock, flags);
				return;
			}
			desc->residue -= child->len;
		}

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this one must be done.
		 */
		spin_unlock_irqrestore(&dwc->lock, flags);
		dwc_descriptor_complete(dwc, desc, true);
		spin_lock_irqsave(&dwc->lock, flags);
	}

	dev_err(chan2dev(&dwc->chan),
		"BUG: All descriptors done, but channel not idle!\n");

	/* Try to continue after resetting the channel... */
	dwc_chan_disable(dw, dwc);

	dwc_dostart_first_queued(dwc);
	spin_unlock_irqrestore(&dwc->lock, flags);
}

static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	dev_crit(chan2dev(&dwc->chan), " desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
		 lli_read(desc, sar),
		 lli_read(desc, dar),
		 lli_read(desc, llp),
		 lli_read(desc, ctlhi),
		 lli_read(desc, ctllo));
}

static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *bad_desc;
	struct dw_desc *child;
	unsigned long flags;

	dwc_scan_descriptors(dw, dwc);

	spin_lock_irqsave(&dwc->lock, flags);

	/*
	 * The descriptor currently at the head of the active list is
	 * borked. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = dwc_first_active(dwc);
	list_del_init(&bad_desc->desc_node);
	list_move(dwc->queue.next, dwc->active_list.prev);

	/* Clear the error flag and try to restart the controller */
	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	if (!list_empty(&dwc->active_list))
		dwc_dostart(dwc, dwc_first_active(dwc));

	/*
	 * WARN may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n"
		 " cookie: %d\n", bad_desc->txd.cookie);
	dwc_dump_lli(dwc, bad_desc);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		dwc_dump_lli(dwc, child);

	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Pretend the descriptor completed successfully */
	dwc_descriptor_complete(dwc, bad_desc, true);
}

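/*
 * Bottom half: dispatch per-channel handling of transfer-complete and error
 * interrupts, then re-enable the interrupt masks cleared by the IRQ handler.
 */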
static void dw_dma_tasklet(unsigned long data)
{
	struct dw_dma *dw = (struct dw_dma *)data;
	struct dw_dma_chan *dwc;
	u32 status_xfer;
	u32 status_err;
	unsigned int i;

	status_xfer = dma_readl(dw, RAW.XFER);
	status_err = dma_readl(dw, RAW.ERROR);

	dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err);

	for (i = 0; i < dw->dma.chancnt; i++) {
		dwc = &dw->chan[i];
		if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
			dev_vdbg(dw->dma.dev, "Cyclic xfer is not implemented\n");
		else if (status_err & (1 << i))
			dwc_handle_error(dw, dwc);
		else if (status_xfer & (1 << i))
			dwc_scan_descriptors(dw, dwc);
	}

	/* Re-enable interrupts */
	channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
}

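/*
 * IRQ handler: reject spurious interrupts, mask further interrupts and
 * defer the real work to the tasklet.
 */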
static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
{
	struct dw_dma *dw = dev_id;
	u32 status;

	/* Check if we have any interrupt from the DMAC which is not in use */
	if (!dw->in_use)
		return IRQ_NONE;

	status = dma_readl(dw, STATUS_INT);
	dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__, status);

	/* Check if we have any interrupt from the DMAC */
	if (!status)
		return IRQ_NONE;

	/*
	 * Just disable the interrupts. We'll turn them back on in the
	 * softirq handler.
	 */
	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	status = dma_readl(dw, STATUS_INT);
	if (status) {
		dev_err(dw->dma.dev,
			"BUG: Unexpected interrupts pending: 0x%x\n",
			status);

		/* Try to recover */
		channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
	}

	tasklet_schedule(&dw->tasklet);

	return IRQ_HANDLED;
}

/*----------------------------------------------------------------------*/

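/*
 * Prepare a memory-to-memory transfer: the request is split into blocks no
 * larger than the channel's maximum block size and chained through a linked
 * list of hardware descriptors.
 */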
static struct dma_async_tx_descriptor *
dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	struct dw_desc *desc;
	struct dw_desc *first;
	struct dw_desc *prev;
	size_t xfer_count;
	size_t offset;
	u8 m_master = dwc->dws.m_master;
	unsigned int src_width;
	unsigned int dst_width;
	unsigned int data_width = dw->pdata->data_width[m_master];
	u32 ctllo, ctlhi;
	u8 lms = DWC_LLP_LMS(m_master);

	dev_vdbg(chan2dev(chan),
			"%s: d%pad s%pad l0x%zx f0x%lx\n", __func__,
			&dest, &src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
		return NULL;
	}

	dwc->direction = DMA_MEM_TO_MEM;

	src_width = dst_width = __ffs(data_width | src | dest | len);

	ctllo = dw->prepare_ctllo(dwc)
			| DWC_CTLL_DST_WIDTH(dst_width)
			| DWC_CTLL_SRC_WIDTH(src_width)
			| DWC_CTLL_DST_INC
			| DWC_CTLL_SRC_INC
			| DWC_CTLL_FC_M2M;
	prev = first = NULL;

	for (offset = 0; offset < len; offset += xfer_count) {
		desc = dwc_desc_get(dwc);
		if (!desc)
			goto err_desc_get;

		ctlhi = dw->bytes2block(dwc, len - offset, src_width, &xfer_count);

		lli_write(desc, sar, src + offset);
		lli_write(desc, dar, dest + offset);
		lli_write(desc, ctllo, ctllo);
		lli_write(desc, ctlhi, ctlhi);
		desc->len = xfer_count;

		if (!first) {
			first = desc;
		} else {
			lli_write(prev, llp, desc->txd.phys | lms);
			list_add_tail(&desc->desc_node, &first->tx_list);
		}
		prev = desc;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		lli_set(prev, ctllo, DWC_CTLL_INT_EN);

	prev->lli.llp = 0;
	lli_clear(prev, ctllo, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	first->txd.flags = flags;
	first->total_len = len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}

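/*
 * Prepare a slave transfer from a scatterlist. Each scatterlist entry may be
 * split into several hardware descriptors when it exceeds the maximum block
 * size of the channel.
 */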
static struct dma_async_tx_descriptor *
dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	struct dma_slave_config *sconfig = &dwc->dma_sconfig;
	struct dw_desc *prev;
	struct dw_desc *first;
	u32 ctllo, ctlhi;
	u8 m_master = dwc->dws.m_master;
	u8 lms = DWC_LLP_LMS(m_master);
	dma_addr_t reg;
	unsigned int reg_width;
	unsigned int mem_width;
	unsigned int data_width = dw->pdata->data_width[m_master];
	unsigned int i;
	struct scatterlist *sg;
	size_t total_len = 0;

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	if (unlikely(!is_slave_direction(direction) || !sg_len))
		return NULL;

	dwc->direction = direction;

	prev = first = NULL;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		reg_width = __ffs(sconfig->dst_addr_width);
		reg = sconfig->dst_addr;
		ctllo = dw->prepare_ctllo(dwc)
				| DWC_CTLL_DST_WIDTH(reg_width)
				| DWC_CTLL_DST_FIX
				| DWC_CTLL_SRC_INC;

		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
			DWC_CTLL_FC(DW_DMA_FC_D_M2P);

		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc *desc;
			u32 len, mem;
			size_t dlen;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);

			mem_width = __ffs(data_width | mem | len);

slave_sg_todev_fill_desc:
			desc = dwc_desc_get(dwc);
			if (!desc)
				goto err_desc_get;

			ctlhi = dw->bytes2block(dwc, len, mem_width, &dlen);

			lli_write(desc, sar, mem);
			lli_write(desc, dar, reg);
			lli_write(desc, ctlhi, ctlhi);
			lli_write(desc, ctllo, ctllo | DWC_CTLL_SRC_WIDTH(mem_width));
			desc->len = dlen;

			if (!first) {
				first = desc;
			} else {
				lli_write(prev, llp, desc->txd.phys | lms);
				list_add_tail(&desc->desc_node, &first->tx_list);
			}
			prev = desc;

			mem += dlen;
			len -= dlen;
			total_len += dlen;

			if (len)
				goto slave_sg_todev_fill_desc;
		}
		break;
	case DMA_DEV_TO_MEM:
		reg_width = __ffs(sconfig->src_addr_width);
		reg = sconfig->src_addr;
		ctllo = dw->prepare_ctllo(dwc)
				| DWC_CTLL_SRC_WIDTH(reg_width)
				| DWC_CTLL_DST_INC
				| DWC_CTLL_SRC_FIX;

		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
			DWC_CTLL_FC(DW_DMA_FC_D_P2M);

		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc *desc;
			u32 len, mem;
			size_t dlen;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);

slave_sg_fromdev_fill_desc:
			desc = dwc_desc_get(dwc);
			if (!desc)
				goto err_desc_get;

			ctlhi = dw->bytes2block(dwc, len, reg_width, &dlen);

			lli_write(desc, sar, reg);
			lli_write(desc, dar, mem);
			lli_write(desc, ctlhi, ctlhi);
			mem_width = __ffs(data_width | mem | dlen);
			lli_write(desc, ctllo, ctllo | DWC_CTLL_DST_WIDTH(mem_width));
			desc->len = dlen;

			if (!first) {
				first = desc;
			} else {
				lli_write(prev, llp, desc->txd.phys | lms);
				list_add_tail(&desc->desc_node, &first->tx_list);
			}
			prev = desc;

			mem += dlen;
			len -= dlen;
			total_len += dlen;

			if (len)
				goto slave_sg_fromdev_fill_desc;
		}
		break;
	default:
		return NULL;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		lli_set(prev, ctllo, DWC_CTLL_INT_EN);

	prev->lli.llp = 0;
	lli_clear(prev, ctllo, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	first->total_len = total_len;

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan),
		"not enough descriptors available. Direction %d\n", direction);
	dwc_desc_put(dwc, first);
	return NULL;
}

bool dw_dma_filter(struct dma_chan *chan, void *param)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma_slave *dws = param;

	if (dws->dma_dev != chan->device->dev)
		return false;

	/* We have to copy data since dws can be temporary storage */
	memcpy(&dwc->dws, dws, sizeof(struct dw_dma_slave));

	return true;
}
EXPORT_SYMBOL_GPL(dw_dma_filter);

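/* Store the slave configuration and encode the requested burst lengths */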
static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);

	memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));

	dw->encode_maxburst(dwc, &dwc->dma_sconfig.src_maxburst);
	dw->encode_maxburst(dwc, &dwc->dma_sconfig.dst_maxburst);

	return 0;
}

static void dwc_chan_pause(struct dw_dma_chan *dwc, bool drain)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	unsigned int count = 20;	/* timeout iterations */

	dw->suspend_chan(dwc, drain);

	while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--)
		udelay(2);

	set_bit(DW_DMA_IS_PAUSED, &dwc->flags);
}

static int dwc_pause(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	dwc_chan_pause(dwc, false);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}

static inline void dwc_chan_resume(struct dw_dma_chan *dwc, bool drain)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);

	dw->resume_chan(dwc, drain);

	clear_bit(DW_DMA_IS_PAUSED, &dwc->flags);
}

static int dwc_resume(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);

	if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags))
		dwc_chan_resume(dwc, false);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}

static int dwc_terminate_all(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	struct dw_desc *desc, *_desc;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&dwc->lock, flags);

	clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);

	dwc_chan_pause(dwc, true);

	dwc_chan_disable(dw, dwc);

	dwc_chan_resume(dwc, true);

	/* active_list entries will end up before queued entries */
	list_splice_init(&dwc->queue, &list);
	list_splice_init(&dwc->active_list, &list);

	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Flush all pending and queued descriptors */
	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		dwc_descriptor_complete(dwc, desc, false);

	return 0;
}

static struct dw_desc *dwc_find_desc(struct dw_dma_chan *dwc, dma_cookie_t c)
{
	struct dw_desc *desc;

	list_for_each_entry(desc, &dwc->active_list, desc_node)
		if (desc->txd.cookie == c)
			return desc;

	return NULL;
}

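/* Number of bytes still to be transferred for the descriptor with @cookie */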
static u32 dwc_get_residue(struct dw_dma_chan *dwc, dma_cookie_t cookie)
{
	struct dw_desc *desc;
	unsigned long flags;
	u32 residue;

	spin_lock_irqsave(&dwc->lock, flags);

	desc = dwc_find_desc(dwc, cookie);
	if (desc) {
		if (desc == dwc_first_active(dwc)) {
			residue = desc->residue;
			if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags) && residue)
				residue -= dwc_get_sent(dwc);
		} else {
			residue = desc->total_len;
		}
	} else {
		residue = 0;
	}

	spin_unlock_irqrestore(&dwc->lock, flags);
	return residue;
}

static enum dma_status
dwc_tx_status(struct dma_chan *chan,
	      dma_cookie_t cookie,
	      struct dma_tx_state *txstate)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	dwc_scan_descriptors(to_dw_dma(chan->device), dwc);

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	dma_set_residue(txstate, dwc_get_residue(dwc, cookie));

	if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags) && ret == DMA_IN_PROGRESS)
		return DMA_PAUSED;

	return ret;
}

static void dwc_issue_pending(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (list_empty(&dwc->active_list))
		dwc_dostart_first_queued(dwc);
	spin_unlock_irqrestore(&dwc->lock, flags);
}

/*----------------------------------------------------------------------*/

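/*
 * Turn the controller off: clear the global enable bit, mask all interrupts
 * and wait until the hardware acknowledges the disable.
 */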
void do_dw_dma_off(struct dw_dma *dw)
{
	unsigned int i;

	dma_writel(dw, CFG, 0);

	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
		cpu_relax();

	for (i = 0; i < dw->dma.chancnt; i++)
		clear_bit(DW_DMA_IS_INITIALIZED, &dw->chan[i].flags);
}

void do_dw_dma_on(struct dw_dma *dw)
{
	dma_writel(dw, CFG, DW_CFG_DMA_EN);
}

static int dwc_alloc_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	/* ASSERT: channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
		return -EIO;
	}

	dma_cookie_init(chan);

	/*
	 * NOTE: some controllers may have additional features that we
	 * need to initialize here, like "scatter-gather" (which
	 * doesn't mean what you think it means), and status writeback.
	 */

	/*
	 * We need controller-specific data to set up slave transfers.
	 */
	if (chan->private && !dw_dma_filter(chan, chan->private)) {
		dev_warn(chan2dev(chan), "Wrong controller-specific data\n");
		return -EINVAL;
	}

	/* Enable controller here if needed */
	if (!dw->in_use)
		do_dw_dma_on(dw);
	dw->in_use |= dwc->mask;

	return 0;
}

static void dwc_free_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	unsigned long flags;

	dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__,
			dwc->descs_allocated);

	/* ASSERT: channel is idle */
	BUG_ON(!list_empty(&dwc->active_list));
	BUG_ON(!list_empty(&dwc->queue));
	BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);

	spin_lock_irqsave(&dwc->lock, flags);

	/* Clear custom channel configuration */
	memset(&dwc->dws, 0, sizeof(struct dw_dma_slave));

	clear_bit(DW_DMA_IS_INITIALIZED, &dwc->flags);

	/* Disable interrupts */
	channel_clear_bit(dw, MASK.XFER, dwc->mask);
	channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
	channel_clear_bit(dw, MASK.ERROR, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Disable controller in case it was a last user */
	dw->in_use &= ~dwc->mask;
	if (!dw->in_use)
		do_dw_dma_off(dw);

	dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
}

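/*
 * Set up the controller: read the hardware parameters (or take them from the
 * platform data), initialize the channels and register the dmaengine device.
 */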
int do_dma_probe(struct dw_dma_chip *chip)
{
	struct dw_dma *dw = chip->dw;
	struct dw_dma_platform_data *pdata;
	bool autocfg = false;
	unsigned int dw_params;
	unsigned int i;
	int err;

	dw->pdata = devm_kzalloc(chip->dev, sizeof(*dw->pdata), GFP_KERNEL);
	if (!dw->pdata)
		return -ENOMEM;

	dw->regs = chip->regs;

	pm_runtime_get_sync(chip->dev);

	if (!chip->pdata) {
		dw_params = dma_readl(dw, DW_PARAMS);
		dev_dbg(chip->dev, "DW_PARAMS: 0x%08x\n", dw_params);

		autocfg = dw_params >> DW_PARAMS_EN & 1;
		if (!autocfg) {
			err = -EINVAL;
			goto err_pdata;
		}

		/* Reassign the platform data pointer */
		pdata = dw->pdata;

		/* Get hardware configuration parameters */
		pdata->nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 7) + 1;
		pdata->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1;
		for (i = 0; i < pdata->nr_masters; i++) {
			pdata->data_width[i] =
				4 << (dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3);
		}
		pdata->block_size = dma_readl(dw, MAX_BLK_SIZE);

		/* Fill platform data with the default values */
		pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING;
		pdata->chan_priority = CHAN_PRIORITY_ASCENDING;
	} else if (chip->pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) {
		err = -EINVAL;
		goto err_pdata;
	} else {
		memcpy(dw->pdata, chip->pdata, sizeof(*dw->pdata));

		/* Reassign the platform data pointer */
		pdata = dw->pdata;
	}

	dw->chan = devm_kcalloc(chip->dev, pdata->nr_channels, sizeof(*dw->chan),
				GFP_KERNEL);
	if (!dw->chan) {
		err = -ENOMEM;
		goto err_pdata;
	}

	/* Calculate all channel mask before DMA setup */
	dw->all_chan_mask = (1 << pdata->nr_channels) - 1;

	/* Force dma off, just in case */
	dw->disable(dw);

	/* Device and instance ID for IRQ and DMA pool */
	dw->set_device_name(dw, chip->id);

	/* Create a pool of consistent memory blocks for hardware descriptors */
	dw->desc_pool = dmam_pool_create(dw->name, chip->dev,
					 sizeof(struct dw_desc), 4, 0);
	if (!dw->desc_pool) {
		dev_err(chip->dev, "No memory for descriptors dma pool\n");
		err = -ENOMEM;
		goto err_pdata;
	}

	tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);

	err = request_irq(chip->irq, dw_dma_interrupt, IRQF_SHARED,
			  dw->name, dw);
	if (err)
		goto err_pdata;

	INIT_LIST_HEAD(&dw->dma.channels);
	for (i = 0; i < pdata->nr_channels; i++) {
		struct dw_dma_chan *dwc = &dw->chan[i];

		dwc->chan.device = &dw->dma;
		dma_cookie_init(&dwc->chan);
		if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
			list_add_tail(&dwc->chan.device_node,
					&dw->dma.channels);
		else
			list_add(&dwc->chan.device_node, &dw->dma.channels);

		/* 7 is highest priority & 0 is lowest. */
		if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
			dwc->priority = pdata->nr_channels - i - 1;
		else
			dwc->priority = i;

		dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
		spin_lock_init(&dwc->lock);
		dwc->mask = 1 << i;

		INIT_LIST_HEAD(&dwc->active_list);
		INIT_LIST_HEAD(&dwc->queue);

		channel_clear_bit(dw, CH_EN, dwc->mask);

		dwc->direction = DMA_TRANS_NONE;

		/* Hardware configuration */
		if (autocfg) {
			unsigned int r = DW_DMA_MAX_NR_CHANNELS - i - 1;
			void __iomem *addr = &__dw_regs(dw)->DWC_PARAMS[r];
			unsigned int dwc_params = readl(addr);

			dev_dbg(chip->dev, "DWC_PARAMS[%d]: 0x%08x\n", i,
					   dwc_params);

			/*
			 * Decode maximum block size for given channel. The
			 * stored 4 bit value represents blocks from 0x00 for 3
			 * up to 0x0a for 4095.
			 */
			dwc->block_size =
				(4 << ((pdata->block_size >> 4 * i) & 0xf)) - 1;
			dwc->nollp =
				(dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0;
		} else {
			dwc->block_size = pdata->block_size;
			dwc->nollp = !pdata->multi_block[i];
		}
	}

	/* Clear all interrupts on all channels. */
	dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
	dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
	dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);

	/* Set capabilities */
	dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
	dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);

	dw->dma.dev = chip->dev;
	dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
	dw->dma.device_free_chan_resources = dwc_free_chan_resources;

	dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;
	dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;

	dw->dma.device_config = dwc_config;
	dw->dma.device_pause = dwc_pause;
	dw->dma.device_resume = dwc_resume;
	dw->dma.device_terminate_all = dwc_terminate_all;

	dw->dma.device_tx_status = dwc_tx_status;
	dw->dma.device_issue_pending = dwc_issue_pending;

	/* DMA capabilities */
	dw->dma.src_addr_widths = DW_DMA_BUSWIDTHS;
	dw->dma.dst_addr_widths = DW_DMA_BUSWIDTHS;
	dw->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) |
			     BIT(DMA_MEM_TO_MEM);
	dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	err = dma_async_device_register(&dw->dma);
	if (err)
		goto err_dma_register;

	dev_info(chip->dev, "DesignWare DMA Controller, %d channels\n",
		 pdata->nr_channels);

	pm_runtime_put_sync_suspend(chip->dev);

	return 0;

err_dma_register:
	free_irq(chip->irq, dw);
err_pdata:
	pm_runtime_put_sync_suspend(chip->dev);
	return err;
}

int do_dma_remove(struct dw_dma_chip *chip)
{
	struct dw_dma *dw = chip->dw;
	struct dw_dma_chan *dwc, *_dwc;

	pm_runtime_get_sync(chip->dev);

	do_dw_dma_off(dw);
	dma_async_device_unregister(&dw->dma);

	free_irq(chip->irq, dw);
	tasklet_kill(&dw->tasklet);

	list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
			chan.device_node) {
		list_del(&dwc->chan.device_node);
		channel_clear_bit(dw, CH_EN, dwc->mask);
	}

	pm_runtime_put_sync_suspend(chip->dev);
	return 0;
}

int do_dw_dma_disable(struct dw_dma_chip *chip)
{
	struct dw_dma *dw = chip->dw;

	dw->disable(dw);
	return 0;
}
EXPORT_SYMBOL_GPL(do_dw_dma_disable);

int do_dw_dma_enable(struct dw_dma_chip *chip)
{
	struct dw_dma *dw = chip->dw;

	dw->enable(dw);
	return 0;
}
EXPORT_SYMBOL_GPL(do_dw_dma_enable);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller core driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");