Merge commit 'd293875' into mvebu/boards

Pulled in mmc/mmc-next up to:

d293875 mmc: mvsdio: add pinctrl integration
Jason Cooper 2013-01-31 17:39:01 +00:00
commit 04ee16bdf4
22 changed files with 869 additions and 330 deletions

View File

@ -0,0 +1,18 @@
Broadcom BCM2835 SDHCI controller
This file documents differences between the core properties described
by mmc.txt and the properties that represent the BCM2835 controller.
Required properties:
- compatible : Should be "brcm,bcm2835-sdhci".
- clocks : The clock feeding the SDHCI controller.
Example:
sdhci: sdhci {
compatible = "brcm,bcm2835-sdhci";
reg = <0x7e300000 0x100>;
interrupts = <2 30>;
clocks = <&clk_mmc>;
bus-width = <4>;
};

View File

@ -0,0 +1,17 @@
* Marvell orion-sdio controller
This file documents differences between the core properties in mmc.txt
and the properties used by the orion-sdio driver.
- compatible: Should be "marvell,orion-sdio"
- clocks: reference to the clock of the SDIO interface
Example:
mvsdio@d00d4000 {
compatible = "marvell,orion-sdio";
reg = <0xd00d4000 0x200>;
interrupts = <54>;
clocks = <&gateclk 17>;
status = "disabled";
};
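
The mvsdio driver changes pulled in below also look up "cd-gpios" and "wp-gpios" in this node and select the default pinctrl state, so board device trees typically extend it. A board-level sketch (the pinctrl and GPIO phandles here are made up for illustration and are not defined by this binding):

mvsdio@d00d4000 {
compatible = "marvell,orion-sdio";
reg = <0xd00d4000 0x200>;
interrupts = <54>;
clocks = <&gateclk 17>;
pinctrl-0 = <&sdio_pins>;
pinctrl-names = "default";
cd-gpios = <&gpio0 14 0>;
wp-gpios = <&gpio0 15 0>;
status = "okay";
};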

View File

@ -113,17 +113,6 @@ struct mmc_blk_data {
static DEFINE_MUTEX(open_lock);
enum mmc_blk_status {
MMC_BLK_SUCCESS = 0,
MMC_BLK_PARTIAL,
MMC_BLK_CMD_ERR,
MMC_BLK_RETRY,
MMC_BLK_ABORT,
MMC_BLK_DATA_ERR,
MMC_BLK_ECC_ERR,
MMC_BLK_NOMEDIUM,
};
module_param(perdev_minors, int, 0444);
MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device");
@ -1364,8 +1353,11 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
} else
areq = NULL;
areq = mmc_start_req(card->host, areq, (int *) &status);
if (!areq)
if (!areq) {
if (status == MMC_BLK_NEW_REQUEST)
mq->flags |= MMC_QUEUE_NEW_REQUEST;
return 0;
}
mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
brq = &mq_rq->brq;
@ -1438,6 +1430,10 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
break;
case MMC_BLK_NOMEDIUM:
goto cmd_abort;
default:
pr_err("%s: Unhandled return value (%d)",
req->rq_disk->disk_name, status);
goto cmd_abort;
}
if (ret) {
@ -1472,6 +1468,8 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
int ret;
struct mmc_blk_data *md = mq->data;
struct mmc_card *card = md->queue.card;
struct mmc_host *host = card->host;
unsigned long flags;
if (req && !mq->mqrq_prev->req)
/* claim host only for the first request */
@ -1486,6 +1484,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
goto out;
}
mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
if (req && req->cmd_flags & REQ_DISCARD) {
/* complete ongoing async transfer before issuing discard */
if (card->host->areq)
@ -1501,11 +1500,16 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
mmc_blk_issue_rw_rq(mq, NULL);
ret = mmc_blk_issue_flush(mq, req);
} else {
if (!req && host->areq) {
spin_lock_irqsave(&host->context_info.lock, flags);
host->context_info.is_waiting_last_req = true;
spin_unlock_irqrestore(&host->context_info.lock, flags);
}
ret = mmc_blk_issue_rw_rq(mq, req);
}
out:
if (!req)
if (!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST))
/* release host only when there are no more requests */
mmc_release_host(card->host);
return ret;

View File

@ -22,7 +22,8 @@
#define MMC_QUEUE_BOUNCESZ 65536
#define MMC_QUEUE_SUSPENDED (1 << 0)
#define MMC_REQ_SPECIAL_MASK (REQ_DISCARD | REQ_FLUSH)
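/*
 * Discard and flush requests are completed synchronously by the issue
 * function, so the queue thread must not carry them over as a "previous"
 * request (see the comment in mmc_queue_thread() below).
 */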
/*
* Prepare a MMC request. This just filters out odd stuff.
@ -58,6 +59,7 @@ static int mmc_queue_thread(void *d)
do {
struct request *req = NULL;
struct mmc_queue_req *tmp;
unsigned int cmd_flags = 0;
spin_lock_irq(q->queue_lock);
set_current_state(TASK_INTERRUPTIBLE);
@ -67,12 +69,23 @@ static int mmc_queue_thread(void *d)
if (req || mq->mqrq_prev->req) {
set_current_state(TASK_RUNNING);
cmd_flags = req ? req->cmd_flags : 0;
mq->issue_fn(mq, req);
if (mq->flags & MMC_QUEUE_NEW_REQUEST) {
mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
continue; /* fetch again */
}
/*
* Current request becomes previous request
* and vice versa.
* In case of special requests, current request
* has been finished. Do not assign it to previous
* request.
*/
if (cmd_flags & MMC_REQ_SPECIAL_MASK)
mq->mqrq_cur->req = NULL;
mq->mqrq_prev->brq.mrq.data = NULL;
mq->mqrq_prev->req = NULL;
tmp = mq->mqrq_prev;
@ -103,6 +116,8 @@ static void mmc_request_fn(struct request_queue *q)
{
struct mmc_queue *mq = q->queuedata;
struct request *req;
unsigned long flags;
struct mmc_context_info *cntx;
if (!mq) {
while ((req = blk_fetch_request(q)) != NULL) {
@ -112,7 +127,20 @@ static void mmc_request_fn(struct request_queue *q)
return;
}
if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
cntx = &mq->card->host->context_info;
if (!mq->mqrq_cur->req && mq->mqrq_prev->req) {
/*
* New MMC request arrived when MMC thread may be
* blocked on the previous request to be complete
* with no current request fetched
*/
spin_lock_irqsave(&cntx->lock, flags);
if (cntx->is_waiting_last_req) {
cntx->is_new_req = true;
wake_up_interruptible(&cntx->wait);
}
spin_unlock_irqrestore(&cntx->lock, flags);
} else if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
wake_up_process(mq->thread);
}

View File

@ -27,6 +27,9 @@ struct mmc_queue {
struct task_struct *thread;
struct semaphore thread_sem;
unsigned int flags;
#define MMC_QUEUE_SUSPENDED (1 << 0)
#define MMC_QUEUE_NEW_REQUEST (1 << 1)
int (*issue_fn)(struct mmc_queue *, struct request *);
void *data;
struct request_queue *queue;

View File

@ -321,6 +321,7 @@ int mmc_add_card(struct mmc_card *card)
#ifdef CONFIG_DEBUG_FS
mmc_add_card_debugfs(card);
#endif
mmc_init_context_info(card->host);
ret = device_add(&card->dev);
if (ret)

View File

@ -319,11 +319,44 @@ void mmc_start_bkops(struct mmc_card *card, bool from_exception)
}
EXPORT_SYMBOL(mmc_start_bkops);
/*
* mmc_wait_data_done() - done callback for data request
* @mrq: done data request
*
* Wakes up mmc context, passed as a callback to host controller driver
*/
static void mmc_wait_data_done(struct mmc_request *mrq)
{
mrq->host->context_info.is_done_rcv = true;
wake_up_interruptible(&mrq->host->context_info.wait);
}
static void mmc_wait_done(struct mmc_request *mrq)
{
complete(&mrq->completion);
}
/*
*__mmc_start_data_req() - starts data request
* @host: MMC host to start the request
* @mrq: data request to start
*
* Sets the done callback to be called when request is completed by the card.
* Starts data mmc request execution
*/
static int __mmc_start_data_req(struct mmc_host *host, struct mmc_request *mrq)
{
mrq->done = mmc_wait_data_done;
mrq->host = host;
if (mmc_card_removed(host->card)) {
mrq->cmd->error = -ENOMEDIUM;
return -ENOMEDIUM;
}
mmc_start_request(host, mrq);
return 0;
}
static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
{
init_completion(&mrq->completion);
@ -337,6 +370,62 @@ static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
return 0;
}
/*
* mmc_wait_for_data_req_done() - wait for request completed
* @host: MMC host to prepare the command.
* @mrq: MMC request to wait for
*
* Blocks MMC context till host controller will ack end of data request
* execution or new request notification arrives from the block layer.
* Handles command retries.
*
* Returns enum mmc_blk_status after checking errors.
*/
static int mmc_wait_for_data_req_done(struct mmc_host *host,
struct mmc_request *mrq,
struct mmc_async_req *next_req)
{
struct mmc_command *cmd;
struct mmc_context_info *context_info = &host->context_info;
int err;
unsigned long flags;
while (1) {
wait_event_interruptible(context_info->wait,
(context_info->is_done_rcv ||
context_info->is_new_req));
spin_lock_irqsave(&context_info->lock, flags);
context_info->is_waiting_last_req = false;
spin_unlock_irqrestore(&context_info->lock, flags);
if (context_info->is_done_rcv) {
context_info->is_done_rcv = false;
context_info->is_new_req = false;
cmd = mrq->cmd;
if (!cmd->error || !cmd->retries ||
mmc_card_removed(host->card)) {
err = host->areq->err_check(host->card,
host->areq);
break; /* return err */
} else {
pr_info("%s: req failed (CMD%u): %d, retrying...\n",
mmc_hostname(host),
cmd->opcode, cmd->error);
cmd->retries--;
cmd->error = 0;
host->ops->request(host, mrq);
continue; /* wait for done/new event again */
}
} else if (context_info->is_new_req) {
context_info->is_new_req = false;
if (!next_req) {
err = MMC_BLK_NEW_REQUEST;
break; /* return err */
}
}
}
return err;
}
static void mmc_wait_for_req_done(struct mmc_host *host,
struct mmc_request *mrq)
{
@ -426,8 +515,17 @@ struct mmc_async_req *mmc_start_req(struct mmc_host *host,
mmc_pre_req(host, areq->mrq, !host->areq);
if (host->areq) {
mmc_wait_for_req_done(host, host->areq->mrq);
err = host->areq->err_check(host->card, host->areq);
err = mmc_wait_for_data_req_done(host, host->areq->mrq,
areq);
if (err == MMC_BLK_NEW_REQUEST) {
if (error)
*error = err;
/*
* The previous request was not completed,
* nothing to return
*/
return NULL;
}
/*
* Check BKOPS urgency for each R1 response
*/
@ -439,7 +537,7 @@ struct mmc_async_req *mmc_start_req(struct mmc_host *host,
}
if (!err && areq)
start_err = __mmc_start_req(host, areq->mrq);
start_err = __mmc_start_data_req(host, areq->mrq);
if (host->areq)
mmc_post_req(host, host->areq->mrq, 0);
@ -2581,6 +2679,23 @@ int mmc_pm_notify(struct notifier_block *notify_block,
}
#endif
/**
* mmc_init_context_info() - init synchronization context
* @host: mmc host
*
* Init struct context_info needed to implement asynchronous
* request mechanism, used by mmc core, host driver and mmc requests
* supplier.
*/
void mmc_init_context_info(struct mmc_host *host)
{
spin_lock_init(&host->context_info.lock);
host->context_info.is_new_req = false;
host->context_info.is_done_rcv = false;
host->context_info.is_waiting_last_req = false;
init_waitqueue_head(&host->context_info.wait);
}
static int __init mmc_init(void)
{
int ret;

View File

@ -76,5 +76,6 @@ void mmc_remove_host_debugfs(struct mmc_host *host);
void mmc_add_card_debugfs(struct mmc_card *card);
void mmc_remove_card_debugfs(struct mmc_card *card);
void mmc_init_context_info(struct mmc_host *host);
#endif

View File

@ -157,10 +157,7 @@ static int sdio_read_cccr(struct mmc_card *card, u32 ocr)
if (ret)
goto out;
if (card->host->caps &
(MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
MMC_CAP_UHS_DDR50)) {
if (mmc_host_uhs(card->host)) {
if (data & SDIO_UHS_DDR50)
card->sw_caps.sd3_bus_mode
|= SD_MODE_UHS_DDR50;
@ -478,8 +475,7 @@ static int sdio_set_bus_speed_mode(struct mmc_card *card)
* If the host doesn't support any of the UHS-I modes, fallback on
* default speed.
*/
if (!(card->host->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50)))
if (!mmc_host_uhs(card->host))
return 0;
bus_speed = SDIO_SPEED_SDR12;
@ -489,23 +485,27 @@ static int sdio_set_bus_speed_mode(struct mmc_card *card)
bus_speed = SDIO_SPEED_SDR104;
timing = MMC_TIMING_UHS_SDR104;
card->sw_caps.uhs_max_dtr = UHS_SDR104_MAX_DTR;
card->sd_bus_speed = UHS_SDR104_BUS_SPEED;
} else if ((card->host->caps & MMC_CAP_UHS_DDR50) &&
(card->sw_caps.sd3_bus_mode & SD_MODE_UHS_DDR50)) {
bus_speed = SDIO_SPEED_DDR50;
timing = MMC_TIMING_UHS_DDR50;
card->sw_caps.uhs_max_dtr = UHS_DDR50_MAX_DTR;
card->sd_bus_speed = UHS_DDR50_BUS_SPEED;
} else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
MMC_CAP_UHS_SDR50)) && (card->sw_caps.sd3_bus_mode &
SD_MODE_UHS_SDR50)) {
bus_speed = SDIO_SPEED_SDR50;
timing = MMC_TIMING_UHS_SDR50;
card->sw_caps.uhs_max_dtr = UHS_SDR50_MAX_DTR;
card->sd_bus_speed = UHS_SDR50_BUS_SPEED;
} else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25)) &&
(card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR25)) {
bus_speed = SDIO_SPEED_SDR25;
timing = MMC_TIMING_UHS_SDR25;
card->sw_caps.uhs_max_dtr = UHS_SDR25_MAX_DTR;
card->sd_bus_speed = UHS_SDR25_BUS_SPEED;
} else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25 |
MMC_CAP_UHS_SDR12)) && (card->sw_caps.sd3_bus_mode &
@ -513,6 +513,7 @@ static int sdio_set_bus_speed_mode(struct mmc_card *card)
bus_speed = SDIO_SPEED_SDR12;
timing = MMC_TIMING_UHS_SDR12;
card->sw_caps.uhs_max_dtr = UHS_SDR12_MAX_DTR;
card->sd_bus_speed = UHS_SDR12_BUS_SPEED;
}
err = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_SPEED, 0, &speed);
@ -645,11 +646,7 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
* systems that claim 1.8v signalling in fact do not support
* it.
*/
if ((ocr & R4_18V_PRESENT) &&
(host->caps &
(MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
MMC_CAP_UHS_DDR50))) {
if ((ocr & R4_18V_PRESENT) && mmc_host_uhs(host)) {
err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180,
true);
if (err) {
@ -937,10 +934,12 @@ static int mmc_sdio_resume(struct mmc_host *host)
mmc_claim_host(host);
/* No need to reinitialize powered-resumed nonremovable cards */
if (mmc_card_is_removable(host) || !mmc_card_keep_power(host))
if (mmc_card_is_removable(host) || !mmc_card_keep_power(host)) {
sdio_reset(host);
mmc_go_idle(host);
err = mmc_sdio_init_card(host, host->ocr, host->card,
mmc_card_keep_power(host));
else if (mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host)) {
} else if (mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host)) {
/* We may have switched to 1-bit mode during suspend */
err = sdio_enable_4bit_bus(host->card);
if (err > 0) {
@ -1020,6 +1019,10 @@ static int mmc_sdio_power_restore(struct mmc_host *host)
goto out;
}
if (mmc_host_uhs(host))
/* to query card if 1.8V signalling is supported */
host->ocr |= R4_18V_PRESENT;
ret = mmc_sdio_init_card(host, host->ocr, host->card,
mmc_card_keep_power(host));
if (!ret && host->sdio_irqs)
@ -1085,6 +1088,10 @@ int mmc_attach_sdio(struct mmc_host *host)
/*
* Detect and init the card.
*/
if (mmc_host_uhs(host))
/* to query card if 1.8V signalling is supported */
host->ocr |= R4_18V_PRESENT;
err = mmc_sdio_init_card(host, host->ocr, NULL, 0);
if (err) {
if (err == -EAGAIN) {

View File

@ -92,6 +92,20 @@ int mmc_gpio_get_cd(struct mmc_host *host)
}
EXPORT_SYMBOL(mmc_gpio_get_cd);
/**
* mmc_gpio_request_ro - request a gpio for write-protection
* @host: mmc host
* @gpio: gpio number requested
*
* As devm_* managed functions are used in mmc_gpio_request_ro(), client
* drivers do not need to explicitly call mmc_gpio_free_ro() for freeing up,
* if the requesting and freeing are only needed at probing and unbinding time
* for once. However, if client drivers do something special like runtime
* switching for write-protection, they are responsible for calling
* mmc_gpio_request_ro() and mmc_gpio_free_ro() as a pair on their own.
*
* Returns zero on success, else an error.
*/
int mmc_gpio_request_ro(struct mmc_host *host, unsigned int gpio)
{
struct mmc_gpio *ctx;
@ -106,7 +120,8 @@ int mmc_gpio_request_ro(struct mmc_host *host, unsigned int gpio)
ctx = host->slot.handler_priv;
ret = gpio_request_one(gpio, GPIOF_DIR_IN, ctx->ro_label);
ret = devm_gpio_request_one(&host->class_dev, gpio, GPIOF_DIR_IN,
ctx->ro_label);
if (ret < 0)
return ret;
@ -116,6 +131,20 @@ int mmc_gpio_request_ro(struct mmc_host *host, unsigned int gpio)
}
EXPORT_SYMBOL(mmc_gpio_request_ro);
/**
* mmc_gpio_request_cd - request a gpio for card-detection
* @host: mmc host
* @gpio: gpio number requested
*
* As devm_* managed functions are used in mmc_gpio_request_cd(), client
* drivers do not need to explicitly call mmc_gpio_free_cd() for freeing up,
* if the requesting and freeing are only needed at probing and unbinding time
* for once. However, if client drivers do something special like runtime
* switching for card-detection, they are responsible for calling
* mmc_gpio_request_cd() and mmc_gpio_free_cd() as a pair on their own.
*
* Returns zero on success, else an error.
*/
int mmc_gpio_request_cd(struct mmc_host *host, unsigned int gpio)
{
struct mmc_gpio *ctx;
@ -128,7 +157,8 @@ int mmc_gpio_request_cd(struct mmc_host *host, unsigned int gpio)
ctx = host->slot.handler_priv;
ret = gpio_request_one(gpio, GPIOF_DIR_IN, ctx->cd_label);
ret = devm_gpio_request_one(&host->class_dev, gpio, GPIOF_DIR_IN,
ctx->cd_label);
if (ret < 0)
/*
* don't bother freeing memory. It might still get used by other
@ -146,7 +176,8 @@ int mmc_gpio_request_cd(struct mmc_host *host, unsigned int gpio)
irq = -EINVAL;
if (irq >= 0) {
ret = request_threaded_irq(irq, NULL, mmc_gpio_cd_irqt,
ret = devm_request_threaded_irq(&host->class_dev, irq,
NULL, mmc_gpio_cd_irqt,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
ctx->cd_label, host);
if (ret < 0)
@ -164,6 +195,13 @@ int mmc_gpio_request_cd(struct mmc_host *host, unsigned int gpio)
}
EXPORT_SYMBOL(mmc_gpio_request_cd);
/**
* mmc_gpio_free_ro - free the write-protection gpio
* @host: mmc host
*
* It's provided only for cases that client drivers need to manually free
* up the write-protection gpio requested by mmc_gpio_request_ro().
*/
void mmc_gpio_free_ro(struct mmc_host *host)
{
struct mmc_gpio *ctx = host->slot.handler_priv;
@ -175,10 +213,17 @@ void mmc_gpio_free_ro(struct mmc_host *host)
gpio = ctx->ro_gpio;
ctx->ro_gpio = -EINVAL;
gpio_free(gpio);
devm_gpio_free(&host->class_dev, gpio);
}
EXPORT_SYMBOL(mmc_gpio_free_ro);
/**
* mmc_gpio_free_cd - free the card-detection gpio
* @host: mmc host
*
* It's provided only for cases that client drivers need to manually free
* up the card-detection gpio requested by mmc_gpio_request_cd().
*/
void mmc_gpio_free_cd(struct mmc_host *host)
{
struct mmc_gpio *ctx = host->slot.handler_priv;
@ -188,13 +233,13 @@ void mmc_gpio_free_cd(struct mmc_host *host)
return;
if (host->slot.cd_irq >= 0) {
free_irq(host->slot.cd_irq, host);
devm_free_irq(&host->class_dev, host->slot.cd_irq, host);
host->slot.cd_irq = -EINVAL;
}
gpio = ctx->cd_gpio;
ctx->cd_gpio = -EINVAL;
gpio_free(gpio);
devm_gpio_free(&host->class_dev, gpio);
}
EXPORT_SYMBOL(mmc_gpio_free_cd);

View File

@ -241,6 +241,17 @@ config MMC_SDHCI_S3C_DMA
YMMV.
config MMC_SDHCI_BCM2835
tristate "SDHCI platform support for the BCM2835 SD/MMC Controller"
depends on ARCH_BCM2835
depends on MMC_SDHCI_PLTFM
select MMC_SDHCI_IO_ACCESSORS
help
This selects the BCM2835 SD/MMC controller. If you have a BCM2835
platform with SD or MMC devices, say Y or M here.
If unsure, say N.
config MMC_OMAP
tristate "TI OMAP Multimedia Card Interface support"
depends on ARCH_OMAP

View File

@ -58,6 +58,7 @@ obj-$(CONFIG_MMC_SDHCI_DOVE) += sdhci-dove.o
obj-$(CONFIG_MMC_SDHCI_TEGRA) += sdhci-tegra.o
obj-$(CONFIG_MMC_SDHCI_OF_ESDHC) += sdhci-of-esdhc.o
obj-$(CONFIG_MMC_SDHCI_OF_HLWD) += sdhci-of-hlwd.o
obj-$(CONFIG_MMC_SDHCI_BCM2835) += sdhci-bcm2835.o
ifeq ($(CONFIG_CB710_DEBUG),y)
CFLAGS-cb710-mmc += -DDEBUG

View File

@ -21,7 +21,11 @@
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
#include <linux/of_irq.h>
#include <linux/mmc/host.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/pinctrl/consumer.h>
#include <asm/sizes.h>
#include <asm/unaligned.h>
@ -51,8 +55,6 @@ struct mvsd_host {
struct mmc_host *mmc;
struct device *dev;
struct clk *clk;
int gpio_card_detect;
int gpio_write_protect;
};
#define mvsd_write(offs, val) writel(val, iobase + (offs))
@ -538,13 +540,6 @@ static void mvsd_timeout_timer(unsigned long data)
mmc_request_done(host->mmc, mrq);
}
static irqreturn_t mvsd_card_detect_irq(int irq, void *dev)
{
struct mvsd_host *host = dev;
mmc_detect_change(host->mmc, msecs_to_jiffies(100));
return IRQ_HANDLED;
}
static void mvsd_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
struct mvsd_host *host = mmc_priv(mmc);
@ -564,20 +559,6 @@ static void mvsd_enable_sdio_irq(struct mmc_host *mmc, int enable)
spin_unlock_irqrestore(&host->lock, flags);
}
static int mvsd_get_ro(struct mmc_host *mmc)
{
struct mvsd_host *host = mmc_priv(mmc);
if (host->gpio_write_protect)
return gpio_get_value(host->gpio_write_protect);
/*
* Board doesn't support read only detection; let the mmc core
* decide what to do.
*/
return -ENOSYS;
}
static void mvsd_power_up(struct mvsd_host *host)
{
void __iomem *iobase = host->base;
@ -674,7 +655,7 @@ static void mvsd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
static const struct mmc_host_ops mvsd_ops = {
.request = mvsd_request,
.get_ro = mvsd_get_ro,
.get_ro = mmc_gpio_get_ro,
.set_ios = mvsd_set_ios,
.enable_sdio_irq = mvsd_enable_sdio_irq,
};
@ -703,17 +684,18 @@ mv_conf_mbus_windows(struct mvsd_host *host,
static int __init mvsd_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct mmc_host *mmc = NULL;
struct mvsd_host *host = NULL;
const struct mvsdio_platform_data *mvsd_data;
const struct mbus_dram_target_info *dram;
struct resource *r;
int ret, irq;
int gpio_card_detect, gpio_write_protect;
struct pinctrl *pinctrl;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
mvsd_data = pdev->dev.platform_data;
if (!r || irq < 0 || !mvsd_data)
if (!r || irq < 0)
return -ENXIO;
mmc = mmc_alloc_host(sizeof(struct mvsd_host), &pdev->dev);
@ -725,8 +707,43 @@ static int __init mvsd_probe(struct platform_device *pdev)
host = mmc_priv(mmc);
host->mmc = mmc;
host->dev = &pdev->dev;
host->base_clock = mvsd_data->clock / 2;
host->clk = ERR_PTR(-EINVAL);
pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
if (IS_ERR(pinctrl))
dev_warn(&pdev->dev, "no pins associated\n");
/*
* Some non-DT platforms do not pass a clock, and the clock
* frequency is passed through platform_data. On DT platforms,
* a clock must always be passed, even if there is no gatable
* clock associated to the SDIO interface (it can simply be a
* fixed rate clock).
*/
host->clk = devm_clk_get(&pdev->dev, NULL);
if (!IS_ERR(host->clk))
clk_prepare_enable(host->clk);
if (np) {
if (IS_ERR(host->clk)) {
dev_err(&pdev->dev, "DT platforms must have a clock associated\n");
ret = -EINVAL;
goto out;
}
host->base_clock = clk_get_rate(host->clk) / 2;
gpio_card_detect = of_get_named_gpio(np, "cd-gpios", 0);
gpio_write_protect = of_get_named_gpio(np, "wp-gpios", 0);
} else {
const struct mvsdio_platform_data *mvsd_data;
mvsd_data = pdev->dev.platform_data;
if (!mvsd_data) {
ret = -ENXIO;
goto out;
}
host->base_clock = mvsd_data->clock / 2;
gpio_card_detect = mvsd_data->gpio_card_detect;
gpio_write_protect = mvsd_data->gpio_write_protect;
}
mmc->ops = &mvsd_ops;
@ -765,43 +782,14 @@ static int __init mvsd_probe(struct platform_device *pdev)
goto out;
}
/* Not all platforms can gate the clock, so it is not
an error if the clock does not exists. */
host->clk = devm_clk_get(&pdev->dev, NULL);
if (!IS_ERR(host->clk))
clk_prepare_enable(host->clk);
if (mvsd_data->gpio_card_detect) {
ret = devm_gpio_request_one(&pdev->dev,
mvsd_data->gpio_card_detect,
GPIOF_IN, DRIVER_NAME " cd");
if (ret == 0) {
irq = gpio_to_irq(mvsd_data->gpio_card_detect);
ret = devm_request_irq(&pdev->dev, irq,
mvsd_card_detect_irq,
IRQ_TYPE_EDGE_RISING |
IRQ_TYPE_EDGE_FALLING,
DRIVER_NAME " cd", host);
if (ret == 0)
host->gpio_card_detect =
mvsd_data->gpio_card_detect;
else
devm_gpio_free(&pdev->dev,
mvsd_data->gpio_card_detect);
}
}
if (!host->gpio_card_detect)
if (gpio_is_valid(gpio_card_detect)) {
ret = mmc_gpio_request_cd(mmc, gpio_card_detect);
if (ret)
goto out;
} else
mmc->caps |= MMC_CAP_NEEDS_POLL;
if (mvsd_data->gpio_write_protect) {
ret = devm_gpio_request_one(&pdev->dev,
mvsd_data->gpio_write_protect,
GPIOF_IN, DRIVER_NAME " wp");
if (ret == 0) {
host->gpio_write_protect =
mvsd_data->gpio_write_protect;
}
}
mmc_gpio_request_ro(mmc, gpio_write_protect);
setup_timer(&host->timer, mvsd_timeout_timer, (unsigned long)host);
platform_set_drvdata(pdev, mmc);
@ -811,15 +799,17 @@ static int __init mvsd_probe(struct platform_device *pdev)
pr_notice("%s: %s driver initialized, ",
mmc_hostname(mmc), DRIVER_NAME);
if (host->gpio_card_detect)
if (!(mmc->caps & MMC_CAP_NEEDS_POLL))
printk("using GPIO %d for card detection\n",
host->gpio_card_detect);
gpio_card_detect);
else
printk("lacking card detect (fall back to polling)\n");
return 0;
out:
if (mmc) {
mmc_gpio_free_cd(mmc);
mmc_gpio_free_ro(mmc);
if (!IS_ERR(host->clk))
clk_disable_unprepare(host->clk);
mmc_free_host(mmc);
@ -834,6 +824,8 @@ static int __exit mvsd_remove(struct platform_device *pdev)
struct mvsd_host *host = mmc_priv(mmc);
mmc_gpio_free_cd(mmc);
mmc_gpio_free_ro(mmc);
mmc_remove_host(mmc);
del_timer_sync(&host->timer);
mvsd_power_down(host);
@ -873,12 +865,19 @@ static int mvsd_resume(struct platform_device *dev)
#define mvsd_resume NULL
#endif
static const struct of_device_id mvsdio_dt_ids[] = {
{ .compatible = "marvell,orion-sdio" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mvsdio_dt_ids);
static struct platform_driver mvsd_driver = {
.remove = __exit_p(mvsd_remove),
.suspend = mvsd_suspend,
.resume = mvsd_resume,
.driver = {
.name = DRIVER_NAME,
.of_match_table = mvsdio_dt_ids,
},
};

View File

@ -0,0 +1,227 @@
/*
* BCM2835 SDHCI
* Copyright (C) 2012 Stephen Warren
* Based on U-Boot's MMC driver for the BCM2835 by Oleksandr Tymoshenko & me
* Portions of the code there were obviously based on the Linux kernel at:
* git://github.com/raspberrypi/linux.git rpi-3.6.y
* commit f5b930b "Main bcm2708 linux port" signed-off-by Dom Cobley.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/mmc/host.h>
#include "sdhci-pltfm.h"
/*
* 400KHz is max freq for card ID etc. Use that as min card clock. We need to
* know the min to enable static calculation of max BCM2835_SDHCI_WRITE_DELAY.
*/
#define MIN_FREQ 400000
/*
* The Arasan has a bugette whereby it may lose the content of successive
* writes to registers that are within two SD-card clock cycles of each other
* (a clock domain crossing problem). It seems, however, that the data
* register does not have this problem, which is just as well - otherwise we'd
* have to nobble the DMA engine too.
*
* This should probably be dynamically calculated based on the actual card
* frequency. However, this is the longest we'll have to wait, and doesn't
* seem to slow access down too much, so the added complexity doesn't seem
* worth it for now.
*
* 1/MIN_FREQ is (max) time per tick of eMMC clock.
* 2/MIN_FREQ is time for two ticks.
* Multiply by 1000000 to get uS per two ticks.
* *1000000 for uSecs.
* +1 for hack rounding.
*/
#define BCM2835_SDHCI_WRITE_DELAY (((2 * 1000000) / MIN_FREQ) + 1)
struct bcm2835_sdhci {
struct clk *clk;
u32 shadow;
};
static void bcm2835_sdhci_writel(struct sdhci_host *host, u32 val, int reg)
{
writel(val, host->ioaddr + reg);
udelay(BCM2835_SDHCI_WRITE_DELAY);
}
static inline u32 bcm2835_sdhci_readl(struct sdhci_host *host, int reg)
{
u32 val = readl(host->ioaddr + reg);
if (reg == SDHCI_CAPABILITIES)
val |= SDHCI_CAN_VDD_330;
return val;
}
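/*
 * 16-bit registers share a 32-bit word with their neighbour, so they are
 * read-modify-written as a whole word. A write to SDHCI_TRANSFER_MODE is
 * only cached in ->shadow and reaches the hardware together with the
 * following SDHCI_COMMAND write, keeping the shared word to a single
 * 32-bit write and avoiding the back-to-back write erratum noted above.
 */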
static void bcm2835_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct bcm2835_sdhci *bcm2835_host = pltfm_host->priv;
u32 oldval = (reg == SDHCI_COMMAND) ? bcm2835_host->shadow :
bcm2835_sdhci_readl(host, reg & ~3);
u32 word_num = (reg >> 1) & 1;
u32 word_shift = word_num * 16;
u32 mask = 0xffff << word_shift;
u32 newval = (oldval & ~mask) | (val << word_shift);
if (reg == SDHCI_TRANSFER_MODE)
bcm2835_host->shadow = newval;
else
bcm2835_sdhci_writel(host, newval, reg & ~3);
}
static u16 bcm2835_sdhci_readw(struct sdhci_host *host, int reg)
{
u32 val = bcm2835_sdhci_readl(host, (reg & ~3));
u32 word_num = (reg >> 1) & 1;
u32 word_shift = word_num * 16;
u32 word = (val >> word_shift) & 0xffff;
return word;
}
static void bcm2835_sdhci_writeb(struct sdhci_host *host, u8 val, int reg)
{
u32 oldval = bcm2835_sdhci_readl(host, reg & ~3);
u32 byte_num = reg & 3;
u32 byte_shift = byte_num * 8;
u32 mask = 0xff << byte_shift;
u32 newval = (oldval & ~mask) | (val << byte_shift);
bcm2835_sdhci_writel(host, newval, reg & ~3);
}
static u8 bcm2835_sdhci_readb(struct sdhci_host *host, int reg)
{
u32 val = bcm2835_sdhci_readl(host, (reg & ~3));
u32 byte_num = reg & 3;
u32 byte_shift = byte_num * 8;
u32 byte = (val >> byte_shift) & 0xff;
return byte;
}
static unsigned int bcm2835_sdhci_get_max_clock(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct bcm2835_sdhci *bcm2835_host = pltfm_host->priv;
return clk_get_rate(bcm2835_host->clk);
}
unsigned int bcm2835_sdhci_get_min_clock(struct sdhci_host *host)
{
return MIN_FREQ;
}
unsigned int bcm2835_sdhci_get_timeout_clock(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct bcm2835_sdhci *bcm2835_host = pltfm_host->priv;
return clk_get_rate(bcm2835_host->clk);
}
static struct sdhci_ops bcm2835_sdhci_ops = {
.write_l = bcm2835_sdhci_writel,
.write_w = bcm2835_sdhci_writew,
.write_b = bcm2835_sdhci_writeb,
.read_l = bcm2835_sdhci_readl,
.read_w = bcm2835_sdhci_readw,
.read_b = bcm2835_sdhci_readb,
.get_max_clock = bcm2835_sdhci_get_max_clock,
.get_min_clock = bcm2835_sdhci_get_min_clock,
.get_timeout_clock = bcm2835_sdhci_get_timeout_clock,
};
static struct sdhci_pltfm_data bcm2835_sdhci_pdata = {
.quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION,
.ops = &bcm2835_sdhci_ops,
};
static int bcm2835_sdhci_probe(struct platform_device *pdev)
{
struct sdhci_host *host;
struct bcm2835_sdhci *bcm2835_host;
struct sdhci_pltfm_host *pltfm_host;
int ret;
host = sdhci_pltfm_init(pdev, &bcm2835_sdhci_pdata);
if (IS_ERR(host))
return PTR_ERR(host);
bcm2835_host = devm_kzalloc(&pdev->dev, sizeof(*bcm2835_host),
GFP_KERNEL);
if (!bcm2835_host) {
dev_err(mmc_dev(host->mmc),
"failed to allocate bcm2835_sdhci\n");
return -ENOMEM;
}
pltfm_host = sdhci_priv(host);
pltfm_host->priv = bcm2835_host;
bcm2835_host->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(bcm2835_host->clk)) {
ret = PTR_ERR(bcm2835_host->clk);
goto err;
}
return sdhci_add_host(host);
err:
sdhci_pltfm_free(pdev);
return ret;
}
static int bcm2835_sdhci_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
sdhci_remove_host(host, dead);
sdhci_pltfm_free(pdev);
return 0;
}
static const struct of_device_id bcm2835_sdhci_of_match[] = {
{ .compatible = "brcm,bcm2835-sdhci" },
{ }
};
MODULE_DEVICE_TABLE(of, bcm2835_sdhci_of_match);
static struct platform_driver bcm2835_sdhci_driver = {
.driver = {
.name = "sdhci-bcm2835",
.owner = THIS_MODULE,
.of_match_table = bcm2835_sdhci_of_match,
.pm = SDHCI_PLTFM_PMOPS,
},
.probe = bcm2835_sdhci_probe,
.remove = bcm2835_sdhci_remove,
};
module_platform_driver(bcm2835_sdhci_driver);
MODULE_DESCRIPTION("BCM2835 SDHCI driver");
MODULE_AUTHOR("Stephen Warren");
MODULE_LICENSE("GPL v2");

View File

@ -21,6 +21,7 @@
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
@ -147,19 +148,8 @@ static u32 esdhc_readl_le(struct sdhci_host *host, int reg)
struct pltfm_imx_data *imx_data = pltfm_host->priv;
struct esdhc_platform_data *boarddata = &imx_data->boarddata;
/* fake CARD_PRESENT flag */
u32 val = readl(host->ioaddr + reg);
if (unlikely((reg == SDHCI_PRESENT_STATE)
&& gpio_is_valid(boarddata->cd_gpio))) {
if (gpio_get_value(boarddata->cd_gpio))
/* no card, if a valid gpio says so... */
val &= ~SDHCI_CARD_PRESENT;
else
/* ... in all other cases assume card is present */
val |= SDHCI_CARD_PRESENT;
}
if (unlikely(reg == SDHCI_CAPABILITIES)) {
/* In FSL esdhc IC module, only bit20 is used to indicate the
* ADMA2 capability of esdhc, but this bit is messed up on
@ -192,13 +182,6 @@ static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg)
u32 data;
if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE)) {
if (boarddata->cd_type == ESDHC_CD_GPIO)
/*
* These interrupts won't work with a custom
* card_detect gpio (only applied to mx25/35)
*/
val &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
if (val & SDHCI_INT_CARD_INT) {
/*
* Clear and then set D3CD bit to avoid missing the
@ -362,8 +345,7 @@ static unsigned int esdhc_pltfm_get_ro(struct sdhci_host *host)
switch (boarddata->wp_type) {
case ESDHC_WP_GPIO:
if (gpio_is_valid(boarddata->wp_gpio))
return gpio_get_value(boarddata->wp_gpio);
return mmc_gpio_get_ro(host->mmc);
case ESDHC_WP_CONTROLLER:
return !(readl(host->ioaddr + SDHCI_PRESENT_STATE) &
SDHCI_WRITE_PROTECT);
@ -394,14 +376,6 @@ static struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = {
.ops = &sdhci_esdhc_ops,
};
static irqreturn_t cd_irq(int irq, void *data)
{
struct sdhci_host *sdhost = (struct sdhci_host *)data;
tasklet_schedule(&sdhost->card_tasklet);
return IRQ_HANDLED;
};
#ifdef CONFIG_OF
static int
sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
@ -527,37 +501,22 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
/* write_protect */
if (boarddata->wp_type == ESDHC_WP_GPIO) {
err = devm_gpio_request_one(&pdev->dev, boarddata->wp_gpio,
GPIOF_IN, "ESDHC_WP");
err = mmc_gpio_request_ro(host->mmc, boarddata->wp_gpio);
if (err) {
dev_warn(mmc_dev(host->mmc),
"no write-protect pin available!\n");
boarddata->wp_gpio = -EINVAL;
dev_err(mmc_dev(host->mmc),
"failed to request write-protect gpio!\n");
goto disable_clk;
}
} else {
boarddata->wp_gpio = -EINVAL;
host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
}
/* card_detect */
if (boarddata->cd_type != ESDHC_CD_GPIO)
boarddata->cd_gpio = -EINVAL;
switch (boarddata->cd_type) {
case ESDHC_CD_GPIO:
err = devm_gpio_request_one(&pdev->dev, boarddata->cd_gpio,
GPIOF_IN, "ESDHC_CD");
err = mmc_gpio_request_cd(host->mmc, boarddata->cd_gpio);
if (err) {
dev_err(mmc_dev(host->mmc),
"no card-detect pin available!\n");
goto disable_clk;
}
err = devm_request_irq(&pdev->dev,
gpio_to_irq(boarddata->cd_gpio), cd_irq,
IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
mmc_hostname(host->mmc), host);
if (err) {
dev_err(mmc_dev(host->mmc), "request irq error\n");
"failed to request card-detect gpio!\n");
goto disable_clk;
}
/* fall through */

View File

@ -311,12 +311,18 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, host);
if (pdata->pm_caps & MMC_PM_KEEP_POWER) {
device_init_wakeup(&pdev->dev, 1);
host->mmc->pm_flags |= MMC_PM_WAKE_SDIO_IRQ;
} else {
device_init_wakeup(&pdev->dev, 0);
}
return 0;
err_add_host:
clk_disable_unprepare(clk);
clk_put(clk);
mmc_gpio_free_cd(host->mmc);
err_cd_req:
err_clk_get:
sdhci_pltfm_free(pdev);
@ -329,16 +335,12 @@ static int sdhci_pxav3_remove(struct platform_device *pdev)
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_pxa *pxa = pltfm_host->priv;
struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data;
sdhci_remove_host(host, 1);
clk_disable_unprepare(pltfm_host->clk);
clk_put(pltfm_host->clk);
if (gpio_is_valid(pdata->ext_cd_gpio))
mmc_gpio_free_cd(host->mmc);
sdhci_pltfm_free(pdev);
kfree(pxa);

View File

@ -1189,6 +1189,15 @@ static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
host->clock = clock;
}
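/*
 * Clearing host->clock first forces sdhci_set_clock() to reprogram the
 * divider even though the requested frequency is unchanged.
 */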
static inline void sdhci_update_clock(struct sdhci_host *host)
{
unsigned int clock;
clock = host->clock;
host->clock = 0;
sdhci_set_clock(host, clock);
}
static int sdhci_set_power(struct sdhci_host *host, unsigned short power)
{
u8 pwr = 0;
@ -1258,7 +1267,7 @@ static int sdhci_set_power(struct sdhci_host *host, unsigned short power)
static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct sdhci_host *host;
bool present;
int present;
unsigned long flags;
u32 tuning_opcode;
@ -1287,18 +1296,21 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
host->mrq = mrq;
/* If polling, assume that the card is always present. */
if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
present = true;
else
present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
SDHCI_CARD_PRESENT;
/* If we're using a cd-gpio, testing the presence bit might fail. */
if (!present) {
int ret = mmc_gpio_get_cd(host->mmc);
if (ret > 0)
present = true;
/*
* Firstly check card presence from cd-gpio. The return could
* be one of the following possibilities:
* negative: cd-gpio is not available
* zero: cd-gpio is used, and card is removed
* one: cd-gpio is used, and card is present
*/
present = mmc_gpio_get_cd(host->mmc);
if (present < 0) {
/* If polling, assume that the card is always present. */
if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
present = 1;
else
present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
SDHCI_CARD_PRESENT;
}
if (!present || host->flags & SDHCI_DEVICE_DEAD) {
@ -1415,7 +1427,6 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
if (host->version >= SDHCI_SPEC_300) {
u16 clk, ctrl_2;
unsigned int clock;
/* In case of UHS-I modes, set High Speed Enable */
if ((ios->timing == MMC_TIMING_MMC_HS200) ||
@ -1455,9 +1466,7 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
/* Re-enable SD Clock */
clock = host->clock;
host->clock = 0;
sdhci_set_clock(host, clock);
sdhci_update_clock(host);
}
@ -1488,9 +1497,7 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
}
/* Re-enable SD Clock */
clock = host->clock;
host->clock = 0;
sdhci_set_clock(host, clock);
sdhci_update_clock(host);
} else
sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
@ -2080,14 +2087,9 @@ static void sdhci_tasklet_finish(unsigned long param)
(host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) {
/* Some controllers need this kick or reset won't work here */
if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) {
unsigned int clock;
if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
/* This is to force an update */
clock = host->clock;
host->clock = 0;
sdhci_set_clock(host, clock);
}
sdhci_update_clock(host);
/* Spec says we should do both at the same time, but Ricoh
controllers do not like that. */
@ -2455,6 +2457,32 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id)
\*****************************************************************************/
#ifdef CONFIG_PM
void sdhci_enable_irq_wakeups(struct sdhci_host *host)
{
u8 val;
u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
| SDHCI_WAKE_ON_INT;
val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
val |= mask ;
/* Avoid fake wake up */
if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
val &= ~(SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE);
sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups);
void sdhci_disable_irq_wakeups(struct sdhci_host *host)
{
u8 val;
u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
| SDHCI_WAKE_ON_INT;
val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
val &= ~mask;
sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_disable_irq_wakeups);
int sdhci_suspend_host(struct sdhci_host *host)
{
@ -2484,8 +2512,13 @@ int sdhci_suspend_host(struct sdhci_host *host)
return ret;
}
free_irq(host->irq, host);
if (!device_may_wakeup(mmc_dev(host->mmc))) {
sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
free_irq(host->irq, host);
} else {
sdhci_enable_irq_wakeups(host);
enable_irq_wake(host->irq);
}
return ret;
}
@ -2500,10 +2533,15 @@ int sdhci_resume_host(struct sdhci_host *host)
host->ops->enable_dma(host);
}
ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
mmc_hostname(host->mmc), host);
if (ret)
return ret;
if (!device_may_wakeup(mmc_dev(host->mmc))) {
ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
mmc_hostname(host->mmc), host);
if (ret)
return ret;
} else {
sdhci_disable_irq_wakeups(host);
disable_irq_wake(host->irq);
}
if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) &&
(host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
@ -2531,17 +2569,6 @@ int sdhci_resume_host(struct sdhci_host *host)
}
EXPORT_SYMBOL_GPL(sdhci_resume_host);
void sdhci_enable_irq_wakeups(struct sdhci_host *host)
{
u8 val;
val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
val |= SDHCI_WAKE_ON_INT;
sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups);
#endif /* CONFIG_PM */
#ifdef CONFIG_PM_RUNTIME
@ -3139,6 +3166,7 @@ int sdhci_add_host(struct sdhci_host *host)
#ifdef SDHCI_USE_LEDS_CLASS
reset:
sdhci_reset(host, SDHCI_RESET_ALL);
sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
free_irq(host->irq, host);
#endif
untasklet:
@ -3181,6 +3209,7 @@ void sdhci_remove_host(struct sdhci_host *host, int dead)
if (!dead)
sdhci_reset(host, SDHCI_RESET_ALL);
sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
free_irq(host->irq, host);
del_timer_sync(&host->timer);

View File

@ -56,6 +56,7 @@
#include <linux/mmc/sh_mmcif.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
@ -88,6 +89,7 @@
#define CMD_SET_TBIT (1 << 7) /* 1: tran mission bit "Low" */
#define CMD_SET_OPDM (1 << 6) /* 1: open/drain */
#define CMD_SET_CCSH (1 << 5)
#define CMD_SET_DARS (1 << 2) /* Dual Data Rate */
#define CMD_SET_DATW_1 ((0 << 1) | (0 << 0)) /* 1bit */
#define CMD_SET_DATW_4 ((0 << 1) | (1 << 0)) /* 4bit */
#define CMD_SET_DATW_8 ((1 << 1) | (0 << 0)) /* 8bit */
@ -127,6 +129,10 @@
INT_CCSTO | INT_CRCSTO | INT_WDATTO | \
INT_RDATTO | INT_RBSYTO | INT_RSPTO)
#define INT_ALL (INT_RBSYE | INT_CRSPE | INT_BUFREN | \
INT_BUFWEN | INT_CMD12DRE | INT_BUFRE | \
INT_DTRANE | INT_CMD12RBE | INT_CMD12CRE)
/* CE_INT_MASK */
#define MASK_ALL 0x00000000
#define MASK_MCCSDE (1 << 29)
@ -158,6 +164,11 @@
MASK_MCCSTO | MASK_MCRCSTO | MASK_MWDATTO | \
MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO)
#define MASK_CLEAN (INT_ERR_STS | MASK_MRBSYE | MASK_MCRSPE | \
MASK_MBUFREN | MASK_MBUFWEN | \
MASK_MCMD12DRE | MASK_MBUFRE | MASK_MDTRANE | \
MASK_MCMD12RBE | MASK_MCMD12CRE)
/* CE_HOST_STS1 */
#define STS1_CMDSEQ (1 << 31)
@ -195,6 +206,7 @@ enum mmcif_state {
STATE_IDLE,
STATE_REQUEST,
STATE_IOS,
STATE_TIMEOUT,
};
enum mmcif_wait_for {
@ -216,6 +228,7 @@ struct sh_mmcif_host {
struct clk *hclk;
unsigned int clk;
int bus_width;
unsigned char timing;
bool sd_error;
bool dying;
long timeout;
@ -230,6 +243,7 @@ struct sh_mmcif_host {
int sg_blkidx;
bool power;
bool card_present;
struct mutex thread_lock;
/* DMA support */
struct dma_chan *chan_rx;
@ -253,23 +267,14 @@ static inline void sh_mmcif_bitclr(struct sh_mmcif_host *host,
static void mmcif_dma_complete(void *arg)
{
struct sh_mmcif_host *host = arg;
struct mmc_data *data = host->mrq->data;
struct mmc_request *mrq = host->mrq;
dev_dbg(&host->pd->dev, "Command completed\n");
if (WARN(!data, "%s: NULL data in DMA completion!\n",
if (WARN(!mrq || !mrq->data, "%s: NULL data in DMA completion!\n",
dev_name(&host->pd->dev)))
return;
if (data->flags & MMC_DATA_READ)
dma_unmap_sg(host->chan_rx->device->dev,
data->sg, data->sg_len,
DMA_FROM_DEVICE);
else
dma_unmap_sg(host->chan_tx->device->dev,
data->sg, data->sg_len,
DMA_TO_DEVICE);
complete(&host->dma_complete);
}
@ -423,8 +428,6 @@ static void sh_mmcif_request_dma(struct sh_mmcif_host *host,
if (ret < 0)
goto ecfgrx;
init_completion(&host->dma_complete);
return;
ecfgrx:
@ -520,13 +523,16 @@ static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
}
if (state2 & STS2_CRC_ERR) {
dev_dbg(&host->pd->dev, ": CRC error\n");
dev_err(&host->pd->dev, " CRC error: state %u, wait %u\n",
host->state, host->wait_for);
ret = -EIO;
} else if (state2 & STS2_TIMEOUT_ERR) {
dev_dbg(&host->pd->dev, ": Timeout\n");
dev_err(&host->pd->dev, " Timeout: state %u, wait %u\n",
host->state, host->wait_for);
ret = -ETIMEDOUT;
} else {
dev_dbg(&host->pd->dev, ": End/Index error\n");
dev_dbg(&host->pd->dev, " End/Index error: state %u, wait %u\n",
host->state, host->wait_for);
ret = -EIO;
}
return ret;
@ -549,10 +555,7 @@ static bool sh_mmcif_next_block(struct sh_mmcif_host *host, u32 *p)
host->pio_ptr = p;
}
if (host->sg_idx == data->sg_len)
return false;
return true;
return host->sg_idx != data->sg_len;
}
static void sh_mmcif_single_read(struct sh_mmcif_host *host,
@ -562,7 +565,6 @@ static void sh_mmcif_single_read(struct sh_mmcif_host *host,
BLOCK_SIZE_MASK) + 3;
host->wait_for = MMCIF_WAIT_FOR_READ;
schedule_delayed_work(&host->timeout_work, host->timeout);
/* buf read enable */
sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
@ -576,6 +578,7 @@ static bool sh_mmcif_read_block(struct sh_mmcif_host *host)
if (host->sd_error) {
data->error = sh_mmcif_error_manage(host);
dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error);
return false;
}
@ -604,7 +607,7 @@ static void sh_mmcif_multi_read(struct sh_mmcif_host *host,
host->sg_idx = 0;
host->sg_blkidx = 0;
host->pio_ptr = sg_virt(data->sg);
schedule_delayed_work(&host->timeout_work, host->timeout);
sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
}
@ -616,6 +619,7 @@ static bool sh_mmcif_mread_block(struct sh_mmcif_host *host)
if (host->sd_error) {
data->error = sh_mmcif_error_manage(host);
dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error);
return false;
}
@ -627,7 +631,6 @@ static bool sh_mmcif_mread_block(struct sh_mmcif_host *host)
if (!sh_mmcif_next_block(host, p))
return false;
schedule_delayed_work(&host->timeout_work, host->timeout);
sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
return true;
@ -640,7 +643,6 @@ static void sh_mmcif_single_write(struct sh_mmcif_host *host,
BLOCK_SIZE_MASK) + 3;
host->wait_for = MMCIF_WAIT_FOR_WRITE;
schedule_delayed_work(&host->timeout_work, host->timeout);
/* buf write enable */
sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
@ -654,6 +656,7 @@ static bool sh_mmcif_write_block(struct sh_mmcif_host *host)
if (host->sd_error) {
data->error = sh_mmcif_error_manage(host);
dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error);
return false;
}
@ -682,7 +685,7 @@ static void sh_mmcif_multi_write(struct sh_mmcif_host *host,
host->sg_idx = 0;
host->sg_blkidx = 0;
host->pio_ptr = sg_virt(data->sg);
schedule_delayed_work(&host->timeout_work, host->timeout);
sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
}
@ -694,6 +697,7 @@ static bool sh_mmcif_mwrite_block(struct sh_mmcif_host *host)
if (host->sd_error) {
data->error = sh_mmcif_error_manage(host);
dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error);
return false;
}
@ -705,7 +709,6 @@ static bool sh_mmcif_mwrite_block(struct sh_mmcif_host *host)
if (!sh_mmcif_next_block(host, p))
return false;
schedule_delayed_work(&host->timeout_work, host->timeout);
sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
return true;
@ -756,6 +759,7 @@ static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
}
switch (opc) {
/* RBSY */
case MMC_SLEEP_AWAKE:
case MMC_SWITCH:
case MMC_STOP_TRANSMISSION:
case MMC_SET_WRITE_PROT:
@ -781,6 +785,17 @@ static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
dev_err(&host->pd->dev, "Unsupported bus width.\n");
break;
}
switch (host->timing) {
case MMC_TIMING_UHS_DDR50:
/*
* MMC core will only set this timing, if the host
* advertises the MMC_CAP_UHS_DDR50 capability. MMCIF
* implementations with this capability, e.g. sh73a0,
* will have to set it in their platform data.
*/
tmp |= CMD_SET_DARS;
break;
}
}
/* DWEN */
if (opc == MMC_WRITE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK)
@ -824,7 +839,7 @@ static int sh_mmcif_data_trans(struct sh_mmcif_host *host,
sh_mmcif_single_read(host, mrq);
return 0;
default:
dev_err(&host->pd->dev, "UNSUPPORTED CMD = d'%08d\n", opc);
dev_err(&host->pd->dev, "Unsupported CMD%d\n", opc);
return -EINVAL;
}
}
@ -838,6 +853,7 @@ static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
switch (opc) {
/* response busy check */
case MMC_SLEEP_AWAKE:
case MMC_SWITCH:
case MMC_STOP_TRANSMISSION:
case MMC_SET_WRITE_PROT:
@ -885,7 +901,6 @@ static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
}
host->wait_for = MMCIF_WAIT_FOR_STOP;
schedule_delayed_work(&host->timeout_work, host->timeout);
}
static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
@ -895,6 +910,7 @@ static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
spin_lock_irqsave(&host->lock, flags);
if (host->state != STATE_IDLE) {
dev_dbg(&host->pd->dev, "%s() rejected, state %u\n", __func__, host->state);
spin_unlock_irqrestore(&host->lock, flags);
mrq->cmd->error = -EAGAIN;
mmc_request_done(mmc, mrq);
@ -911,6 +927,7 @@ static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
if ((mrq->cmd->flags & MMC_CMD_MASK) != MMC_CMD_BCR)
break;
case MMC_APP_CMD:
case SD_IO_RW_DIRECT:
host->state = STATE_IDLE;
mrq->cmd->error = -ETIMEDOUT;
mmc_request_done(mmc, mrq);
@ -957,6 +974,7 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
spin_lock_irqsave(&host->lock, flags);
if (host->state != STATE_IDLE) {
dev_dbg(&host->pd->dev, "%s() rejected, state %u\n", __func__, host->state);
spin_unlock_irqrestore(&host->lock, flags);
return;
}
@ -981,7 +999,7 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
}
}
if (host->power) {
pm_runtime_put(&host->pd->dev);
pm_runtime_put_sync(&host->pd->dev);
clk_disable(host->hclk);
host->power = false;
if (ios->power_mode == MMC_POWER_OFF)
@ -1001,6 +1019,7 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
sh_mmcif_clock_control(host, ios->clock);
}
host->timing = ios->timing;
host->bus_width = ios->bus_width;
host->state = STATE_IDLE;
}
@ -1038,14 +1057,14 @@ static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
case MMC_SELECT_CARD:
case MMC_APP_CMD:
cmd->error = -ETIMEDOUT;
host->sd_error = false;
break;
default:
cmd->error = sh_mmcif_error_manage(host);
dev_dbg(&host->pd->dev, "Cmd(d'%d) error %d\n",
cmd->opcode, cmd->error);
break;
}
dev_dbg(&host->pd->dev, "CMD%d error %d\n",
cmd->opcode, cmd->error);
host->sd_error = false;
return false;
}
if (!(cmd->flags & MMC_RSP_PRESENT)) {
@ -1058,6 +1077,12 @@ static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
if (!data)
return false;
/*
* Completion can be signalled from DMA callback and error, so, have to
* reset here, before setting .dma_active
*/
init_completion(&host->dma_complete);
if (data->flags & MMC_DATA_READ) {
if (host->chan_rx)
sh_mmcif_start_dma_rx(host);
@ -1068,34 +1093,47 @@ static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
if (!host->dma_active) {
data->error = sh_mmcif_data_trans(host, host->mrq, cmd->opcode);
if (!data->error)
return true;
return false;
return !data->error;
}
/* Running in the IRQ thread, can sleep */
time = wait_for_completion_interruptible_timeout(&host->dma_complete,
host->timeout);
if (data->flags & MMC_DATA_READ)
dma_unmap_sg(host->chan_rx->device->dev,
data->sg, data->sg_len,
DMA_FROM_DEVICE);
else
dma_unmap_sg(host->chan_tx->device->dev,
data->sg, data->sg_len,
DMA_TO_DEVICE);
if (host->sd_error) {
dev_err(host->mmc->parent,
"Error IRQ while waiting for DMA completion!\n");
/* Woken up by an error IRQ: abort DMA */
if (data->flags & MMC_DATA_READ)
dmaengine_terminate_all(host->chan_rx);
else
dmaengine_terminate_all(host->chan_tx);
data->error = sh_mmcif_error_manage(host);
} else if (!time) {
dev_err(host->mmc->parent, "DMA timeout!\n");
data->error = -ETIMEDOUT;
} else if (time < 0) {
dev_err(host->mmc->parent,
"wait_for_completion_...() error %ld!\n", time);
data->error = time;
}
sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC,
BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
host->dma_active = false;
if (data->error)
if (data->error) {
data->bytes_xfered = 0;
/* Abort DMA */
if (data->flags & MMC_DATA_READ)
dmaengine_terminate_all(host->chan_rx);
else
dmaengine_terminate_all(host->chan_tx);
}
return false;
}
@ -1103,10 +1141,21 @@ static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
{
struct sh_mmcif_host *host = dev_id;
struct mmc_request *mrq = host->mrq;
struct mmc_request *mrq;
bool wait = false;
cancel_delayed_work_sync(&host->timeout_work);
mutex_lock(&host->thread_lock);
mrq = host->mrq;
if (!mrq) {
dev_dbg(&host->pd->dev, "IRQ thread state %u, wait %u: NULL mrq!\n",
host->state, host->wait_for);
mutex_unlock(&host->thread_lock);
return IRQ_HANDLED;
}
/*
* All handlers return true, if processing continues, and false, if the
* request has to be completed - successfully or not
@ -1114,35 +1163,32 @@ static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
switch (host->wait_for) {
case MMCIF_WAIT_FOR_REQUEST:
/* We're too late, the timeout has already kicked in */
mutex_unlock(&host->thread_lock);
return IRQ_HANDLED;
case MMCIF_WAIT_FOR_CMD:
if (sh_mmcif_end_cmd(host))
/* Wait for data */
return IRQ_HANDLED;
/* Wait for data? */
wait = sh_mmcif_end_cmd(host);
break;
case MMCIF_WAIT_FOR_MREAD:
if (sh_mmcif_mread_block(host))
/* Wait for more data */
return IRQ_HANDLED;
/* Wait for more data? */
wait = sh_mmcif_mread_block(host);
break;
case MMCIF_WAIT_FOR_READ:
if (sh_mmcif_read_block(host))
/* Wait for data end */
return IRQ_HANDLED;
/* Wait for data end? */
wait = sh_mmcif_read_block(host);
break;
case MMCIF_WAIT_FOR_MWRITE:
if (sh_mmcif_mwrite_block(host))
/* Wait data to write */
return IRQ_HANDLED;
/* Wait data to write? */
wait = sh_mmcif_mwrite_block(host);
break;
case MMCIF_WAIT_FOR_WRITE:
if (sh_mmcif_write_block(host))
/* Wait for data end */
return IRQ_HANDLED;
/* Wait for data end? */
wait = sh_mmcif_write_block(host);
break;
case MMCIF_WAIT_FOR_STOP:
if (host->sd_error) {
mrq->stop->error = sh_mmcif_error_manage(host);
dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, mrq->stop->error);
break;
}
sh_mmcif_get_cmd12response(host, mrq->stop);
@ -1150,13 +1196,22 @@ static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
break;
case MMCIF_WAIT_FOR_READ_END:
case MMCIF_WAIT_FOR_WRITE_END:
if (host->sd_error)
if (host->sd_error) {
mrq->data->error = sh_mmcif_error_manage(host);
dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, mrq->data->error);
}
break;
default:
BUG();
}
if (wait) {
schedule_delayed_work(&host->timeout_work, host->timeout);
/* Wait for more data */
mutex_unlock(&host->thread_lock);
return IRQ_HANDLED;
}
if (host->wait_for != MMCIF_WAIT_FOR_STOP) {
struct mmc_data *data = mrq->data;
if (!mrq->cmd->error && data && !data->error)
@ -1165,8 +1220,11 @@ static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
if (mrq->stop && !mrq->cmd->error && (!data || !data->error)) {
sh_mmcif_stop_cmd(host, mrq);
if (!mrq->stop->error)
if (!mrq->stop->error) {
schedule_delayed_work(&host->timeout_work, host->timeout);
mutex_unlock(&host->thread_lock);
return IRQ_HANDLED;
}
}
}
@ -1175,6 +1233,8 @@ static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
host->mrq = NULL;
mmc_request_done(host->mmc, mrq);
mutex_unlock(&host->thread_lock);
return IRQ_HANDLED;
}
@ -1182,56 +1242,22 @@ static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
{
struct sh_mmcif_host *host = dev_id;
u32 state;
int err = 0;
state = sh_mmcif_readl(host->addr, MMCIF_CE_INT);
sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state);
sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state & MASK_CLEAN);
if (state & INT_ERR_STS) {
/* error interrupts - process first */
sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state);
sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state);
err = 1;
} else if (state & INT_RBSYE) {
sh_mmcif_writel(host->addr, MMCIF_CE_INT,
~(INT_RBSYE | INT_CRSPE));
sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MRBSYE);
} else if (state & INT_CRSPE) {
sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_CRSPE);
sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCRSPE);
} else if (state & INT_BUFREN) {
sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_BUFREN);
sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
} else if (state & INT_BUFWEN) {
sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_BUFWEN);
sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
} else if (state & INT_CMD12DRE) {
sh_mmcif_writel(host->addr, MMCIF_CE_INT,
~(INT_CMD12DRE | INT_CMD12RBE |
INT_CMD12CRE | INT_BUFRE));
sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
} else if (state & INT_BUFRE) {
sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_BUFRE);
sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
} else if (state & INT_DTRANE) {
sh_mmcif_writel(host->addr, MMCIF_CE_INT,
~(INT_CMD12DRE | INT_CMD12RBE |
INT_CMD12CRE | INT_DTRANE));
sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
} else if (state & INT_CMD12RBE) {
sh_mmcif_writel(host->addr, MMCIF_CE_INT,
~(INT_CMD12RBE | INT_CMD12CRE));
sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
} else {
dev_dbg(&host->pd->dev, "Unsupported interrupt: 0x%x\n", state);
sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state);
sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state);
err = 1;
}
if (err) {
if (state & ~MASK_CLEAN)
dev_dbg(&host->pd->dev, "IRQ state = 0x%08x incompletely cleared\n",
state);
if (state & INT_ERR_STS || state & ~INT_ALL) {
host->sd_error = true;
dev_dbg(&host->pd->dev, "int err state = %08x\n", state);
dev_dbg(&host->pd->dev, "int err state = 0x%08x\n", state);
}
if (state & ~(INT_CMD12RBE | INT_CMD12CRE)) {
if (!host->mrq)
dev_dbg(&host->pd->dev, "NULL IRQ state = 0x%08x\n", state);
if (!host->dma_active)
return IRQ_WAKE_THREAD;
else if (host->sd_error)
@ -1248,11 +1274,24 @@ static void mmcif_timeout_work(struct work_struct *work)
struct delayed_work *d = container_of(work, struct delayed_work, work);
struct sh_mmcif_host *host = container_of(d, struct sh_mmcif_host, timeout_work);
struct mmc_request *mrq = host->mrq;
unsigned long flags;
if (host->dying)
/* Don't run after mmc_remove_host() */
return;
dev_err(&host->pd->dev, "Timeout waiting for %u on CMD%u\n",
host->wait_for, mrq->cmd->opcode);
spin_lock_irqsave(&host->lock, flags);
if (host->state == STATE_IDLE) {
spin_unlock_irqrestore(&host->lock, flags);
return;
}
host->state = STATE_TIMEOUT;
spin_unlock_irqrestore(&host->lock, flags);
/*
* Handle races with cancel_delayed_work(), unless
* cancel_delayed_work_sync() is used
@ -1306,10 +1345,11 @@ static int sh_mmcif_probe(struct platform_device *pdev)
struct sh_mmcif_plat_data *pd = pdev->dev.platform_data;
struct resource *res;
void __iomem *reg;
const char *name;
irq[0] = platform_get_irq(pdev, 0);
irq[1] = platform_get_irq(pdev, 1);
if (irq[0] < 0 || irq[1] < 0) {
if (irq[0] < 0) {
dev_err(&pdev->dev, "Get irq error\n");
return -ENXIO;
}
@ -1332,7 +1372,7 @@ static int sh_mmcif_probe(struct platform_device *pdev)
host = mmc_priv(mmc);
host->mmc = mmc;
host->addr = reg;
host->timeout = 1000;
host->timeout = msecs_to_jiffies(1000);
host->pd = pdev;
@ -1341,7 +1381,7 @@ static int sh_mmcif_probe(struct platform_device *pdev)
mmc->ops = &sh_mmcif_ops;
sh_mmcif_init_ocr(host);
mmc->caps = MMC_CAP_MMC_HIGHSPEED;
mmc->caps = MMC_CAP_MMC_HIGHSPEED | MMC_CAP_WAIT_WHILE_BUSY;
if (pd && pd->caps)
mmc->caps |= pd->caps;
mmc->max_segs = 32;
@ -1374,15 +1414,19 @@ static int sh_mmcif_probe(struct platform_device *pdev)
sh_mmcif_sync_reset(host);
sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
ret = request_threaded_irq(irq[0], sh_mmcif_intr, sh_mmcif_irqt, 0, "sh_mmc:error", host);
name = irq[1] < 0 ? dev_name(&pdev->dev) : "sh_mmc:error";
ret = request_threaded_irq(irq[0], sh_mmcif_intr, sh_mmcif_irqt, 0, name, host);
if (ret) {
dev_err(&pdev->dev, "request_irq error (sh_mmc:error)\n");
dev_err(&pdev->dev, "request_irq error (%s)\n", name);
goto ereqirq0;
}
ret = request_threaded_irq(irq[1], sh_mmcif_intr, sh_mmcif_irqt, 0, "sh_mmc:int", host);
if (ret) {
dev_err(&pdev->dev, "request_irq error (sh_mmc:int)\n");
goto ereqirq1;
if (irq[1] >= 0) {
ret = request_threaded_irq(irq[1], sh_mmcif_intr, sh_mmcif_irqt,
0, "sh_mmc:int", host);
if (ret) {
dev_err(&pdev->dev, "request_irq error (sh_mmc:int)\n");
goto ereqirq1;
}
}
if (pd && pd->use_cd_gpio) {
@ -1391,6 +1435,8 @@ static int sh_mmcif_probe(struct platform_device *pdev)
goto erqcd;
}
mutex_init(&host->thread_lock);
clk_disable(host->hclk);
ret = mmc_add_host(mmc);
if (ret < 0)
@ -1404,10 +1450,9 @@ static int sh_mmcif_probe(struct platform_device *pdev)
return ret;
emmcaddh:
if (pd && pd->use_cd_gpio)
mmc_gpio_free_cd(mmc);
erqcd:
free_irq(irq[1], host);
if (irq[1] >= 0)
free_irq(irq[1], host);
ereqirq1:
free_irq(irq[0], host);
ereqirq0:
@ -1427,7 +1472,6 @@ static int sh_mmcif_probe(struct platform_device *pdev)
static int sh_mmcif_remove(struct platform_device *pdev)
{
struct sh_mmcif_host *host = platform_get_drvdata(pdev);
struct sh_mmcif_plat_data *pd = pdev->dev.platform_data;
int irq[2];
host->dying = true;
@ -1436,9 +1480,6 @@ static int sh_mmcif_remove(struct platform_device *pdev)
dev_pm_qos_hide_latency_limit(&pdev->dev);
if (pd && pd->use_cd_gpio)
mmc_gpio_free_cd(host->mmc);
mmc_remove_host(host->mmc);
sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
@ -1456,7 +1497,8 @@ static int sh_mmcif_remove(struct platform_device *pdev)
irq[1] = platform_get_irq(pdev, 1);
free_irq(irq[0], host);
free_irq(irq[1], host);
if (irq[1] >= 0)
free_irq(irq[1], host);
platform_set_drvdata(pdev, NULL);

View File

@ -1060,16 +1060,8 @@ EXPORT_SYMBOL(tmio_mmc_host_probe);
void tmio_mmc_host_remove(struct tmio_mmc_host *host)
{
struct platform_device *pdev = host->pdev;
struct tmio_mmc_data *pdata = host->pdata;
struct mmc_host *mmc = host->mmc;
if (pdata->flags & TMIO_MMC_USE_GPIO_CD)
/*
* This means we can miss a card-eject, but this is anyway
* possible, because of delayed processing of hotplug events.
*/
mmc_gpio_free_cd(mmc);
if (!host->native_hotplug)
pm_runtime_get_sync(&pdev->dev);

View File

@ -187,6 +187,18 @@ struct sdio_func_tuple;
#define SDIO_MAX_FUNCS 7
enum mmc_blk_status {
MMC_BLK_SUCCESS = 0,
MMC_BLK_PARTIAL,
MMC_BLK_CMD_ERR,
MMC_BLK_RETRY,
MMC_BLK_ABORT,
MMC_BLK_DATA_ERR,
MMC_BLK_ECC_ERR,
MMC_BLK_NOMEDIUM,
MMC_BLK_NEW_REQUEST,
};
/* The number of MMC physical partitions. These consist of:
* boot partitions (2), general purpose partitions (4) in MMC v4.4.
*/

View File

@ -120,6 +120,7 @@ struct mmc_data {
s32 host_cookie; /* host private data */
};
struct mmc_host;
struct mmc_request {
struct mmc_command *sbc; /* SET_BLOCK_COUNT for multiblock */
struct mmc_command *cmd;
@ -128,9 +129,9 @@ struct mmc_request {
struct completion completion;
void (*done)(struct mmc_request *);/* completion function */
struct mmc_host *host;
};
struct mmc_host;
struct mmc_card;
struct mmc_async_req;

View File

@ -170,6 +170,22 @@ struct mmc_slot {
void *handler_priv;
};
/**
* mmc_context_info - synchronization details for mmc context
* @is_done_rcv wake up reason was done request
* @is_new_req wake up reason was new request
* @is_waiting_last_req mmc context waiting for single running request
* @wait wait queue
* @lock lock to protect data fields
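*
* The block layer's request function sets @is_new_req and wakes up @wait
* when a request arrives while @is_waiting_last_req is set; the host's
* data-done callback sets @is_done_rcv and wakes up @wait when the active
* request completes.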
*/
struct mmc_context_info {
bool is_done_rcv;
bool is_new_req;
bool is_waiting_last_req;
wait_queue_head_t wait;
spinlock_t lock;
};
struct regulator;
struct mmc_supply {
@ -331,6 +347,7 @@ struct mmc_host {
struct dentry *debugfs_root;
struct mmc_async_req *areq; /* active async req */
struct mmc_context_info context_info; /* async synchronization info */
#ifdef CONFIG_FAIL_MMC_REQUEST
struct fault_attr fail_mmc_request;
@ -434,6 +451,14 @@ static inline int mmc_boot_partition_access(struct mmc_host *host)
return !(host->caps2 & MMC_CAP2_BOOTPART_NOACC);
}
static inline int mmc_host_uhs(struct mmc_host *host)
{
return host->caps &
(MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
MMC_CAP_UHS_DDR50);
}
#ifdef CONFIG_MMC_CLKGATE
void mmc_host_clk_hold(struct mmc_host *host);
void mmc_host_clk_release(struct mmc_host *host);