/*
 * Dmaengine driver base library for DMA controllers, found on SH-based SoCs
 *
 * extracted from shdma.c and headers
 *
 * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 */

#ifndef SHDMA_BASE_H
#define SHDMA_BASE_H

#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/types.h>

/**
 * shdma_pm_state - DMA channel PM state
 * SHDMA_PM_ESTABLISHED: either idle or during data transfer
 * SHDMA_PM_BUSY: during the transfer preparation, when we have to
 *                drop the lock temporarily
 * SHDMA_PM_PENDING: transfers pending
 */
enum shdma_pm_state {
	SHDMA_PM_ESTABLISHED,
	SHDMA_PM_BUSY,
	SHDMA_PM_PENDING,
};

struct device;

/*
 * Drivers using this library are expected to embed struct shdma_dev,
 * struct shdma_chan, struct shdma_desc, and struct shdma_slave
 * in their respective device, channel, descriptor and slave objects.
 */
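
/*
 * A minimal sketch of that embedding, assuming a hypothetical "foo" DMA
 * controller driver (struct foo_dma_dev, struct foo_dma_chan and
 * struct foo_dma_desc are illustrative names, not part of this API):
 *
 *	struct foo_dma_chan {
 *		struct shdma_chan shdma_chan;
 *		void __iomem *base;
 *	};
 *
 *	struct foo_dma_dev {
 *		struct shdma_dev shdma_dev;
 *		struct foo_dma_chan *channels;
 *	};
 *
 *	struct foo_dma_desc {
 *		struct shdma_desc shdma_desc;
 *		u32 hw[4];
 *	};
 *
 * The driver would typically recover its own objects from the shdma_*
 * pointers passed to its callbacks with container_of().
 */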

struct shdma_slave {
	int slave_id;
};

struct shdma_desc {
	struct list_head node;
	struct dma_async_tx_descriptor async_tx;
	enum dma_transfer_direction direction;
	size_t partial;
	dma_cookie_t cookie;
	int chunks;
	int mark;
	bool cyclic;			/* used as cyclic transfer */
};

struct shdma_chan {
	spinlock_t chan_lock;		/* Channel operation lock */
	struct list_head ld_queue;	/* Link descriptors queue */
	struct list_head ld_free;	/* Free link descriptors */
	struct dma_chan dma_chan;	/* DMA channel */
	struct device *dev;		/* Channel device */
	void *desc;			/* buffer for descriptor array */
	int desc_num;			/* desc count */
	size_t max_xfer_len;		/* max transfer length */
	int id;				/* Raw id of this channel */
	int irq;			/* Channel IRQ */
	int slave_id;			/* Client ID for slave DMA */
	int real_slave_id;		/* argument passed to filter function */
	int hw_req;			/* DMA request line for slave DMA - same
					 * as MID/RID, used with DT */
	enum shdma_pm_state pm_state;
};

/**
 * struct shdma_ops - simple DMA driver operations
 * desc_completed:	return true if this descriptor has just completed (atomic)
 * halt_channel:	stop DMA channel operation (atomic)
 * channel_busy:	return true if the channel is busy (atomic)
 * slave_addr:		return the slave DMA address
 * desc_setup:		set up the hardware-specific descriptor portion (atomic)
 * set_slave:		bind a channel to a slave
 * setup_xfer:		configure channel hardware for operation (atomic)
 * start_xfer:		start the DMA transfer (atomic)
 * embedded_desc:	return the Nth struct shdma_desc pointer from the
 *			descriptor array
 * chan_irq:		process a channel IRQ, return true if a transfer has
 *			completed (atomic)
 * get_partial:		return the number of bytes transferred so far by a
 *			partially completed descriptor (atomic)
 */
struct shdma_ops {
	bool (*desc_completed)(struct shdma_chan *, struct shdma_desc *);
	void (*halt_channel)(struct shdma_chan *);
	bool (*channel_busy)(struct shdma_chan *);
	dma_addr_t (*slave_addr)(struct shdma_chan *);
	int (*desc_setup)(struct shdma_chan *, struct shdma_desc *,
			  dma_addr_t, dma_addr_t, size_t *);
	int (*set_slave)(struct shdma_chan *, int, dma_addr_t, bool);
	void (*setup_xfer)(struct shdma_chan *, int);
	void (*start_xfer)(struct shdma_chan *, struct shdma_desc *);
	struct shdma_desc *(*embedded_desc)(void *, int);
	bool (*chan_irq)(struct shdma_chan *, int);
	size_t (*get_partial)(struct shdma_chan *, struct shdma_desc *);
};
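
/*
 * A driver normally provides a const table of these callbacks and points
 * shdma_dev::ops at it. A minimal sketch, assuming hypothetical foo_*
 * implementations (all names here are illustrative, not part of this API):
 *
 *	static const struct shdma_ops foo_shdma_ops = {
 *		.desc_completed	= foo_desc_completed,
 *		.halt_channel	= foo_halt_channel,
 *		.channel_busy	= foo_channel_busy,
 *		.slave_addr	= foo_slave_addr,
 *		.desc_setup	= foo_desc_setup,
 *		.set_slave	= foo_set_slave,
 *		.setup_xfer	= foo_setup_xfer,
 *		.start_xfer	= foo_start_xfer,
 *		.embedded_desc	= foo_embedded_desc,
 *		.chan_irq	= foo_chan_irq,
 *		.get_partial	= foo_get_partial,
 *	};
 */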

struct shdma_dev {
	struct dma_device dma_dev;
	struct shdma_chan **schan;
	const struct shdma_ops *ops;
	size_t desc_size;
};

#define shdma_for_each_chan(c, d, i) for (i = 0, c = (d)->schan[0]; \
				i < (d)->dma_dev.chancnt; c = (d)->schan[++i])
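
/*
 * Iteration over all channels of a device, as a hedged usage sketch
 * ("sdev" is a struct shdma_dev pointer owned by the caller):
 *
 *	struct shdma_chan *schan;
 *	int i;
 *
 *	shdma_for_each_chan(schan, sdev, i)
 *		dev_dbg(schan->dev, "channel %d, IRQ %d\n",
 *			schan->id, schan->irq);
 */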

int shdma_request_irq(struct shdma_chan *, int,
			   unsigned long, const char *);
bool shdma_reset(struct shdma_dev *sdev);
void shdma_chan_probe(struct shdma_dev *sdev,
		       struct shdma_chan *schan, int id);
void shdma_chan_remove(struct shdma_chan *schan);
int shdma_init(struct device *dev, struct shdma_dev *sdev,
		    int chan_num);
void shdma_cleanup(struct shdma_dev *sdev);
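
/*
 * A rough probe-time call sequence for a glue driver, as a hedged sketch
 * (error handling omitted; the foo_* names are illustrative, not part of
 * this API):
 *
 *	fdev->shdma_dev.ops = &foo_shdma_ops;
 *	fdev->shdma_dev.desc_size = sizeof(struct foo_dma_desc);
 *	err = shdma_init(&pdev->dev, &fdev->shdma_dev, nr_channels);
 *
 *	for each channel:
 *		shdma_chan_probe(&fdev->shdma_dev, &fchan->shdma_chan, id);
 *		err = shdma_request_irq(&fchan->shdma_chan, irq, IRQF_SHARED,
 *					dev_name(&pdev->dev));
 *
 *	err = dma_async_device_register(&fdev->shdma_dev.dma_dev);
 *
 * Teardown reverses this with dma_async_device_unregister(), then
 * shdma_chan_remove() per channel and finally shdma_cleanup().
 */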

#if IS_ENABLED(CONFIG_SH_DMAE_BASE)
bool shdma_chan_filter(struct dma_chan *chan, void *arg);
#else
/*
 * Dummy that always fails filter-based matching, so that
 * dma_request_slave_channel_compat() does not return a bogus channel on
 * systems where this library is not built in.
 */
static inline bool shdma_chan_filter(struct dma_chan *chan, void *arg)
{
	return false;
}
#endif
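
/*
 * Legacy (non-DT) clients typically pass this filter together with a slave
 * ID to the dmaengine core, roughly as follows (a hedged sketch; the slave
 * ID value is platform specific and purely illustrative):
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, shdma_chan_filter,
 *				   (void *)(unsigned long)slave_id);
 */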

#endif