// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/drivers/mmc/core/mmc.c
 *
 * Copyright (C) 2003-2004 Russell King, All Rights Reserved.
 * Copyright (C) 2005-2007 Pierre Ossman, All Rights Reserved.
 * MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
 */

#include <linux/err.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/pm_runtime.h>

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>

#include "core.h"
#include "card.h"
#include "host.h"
#include "bus.h"
#include "mmc_ops.h"
#include "quirks.h"
#include "sd_ops.h"
#include "pwrseq.h"

#define DEFAULT_CMD6_TIMEOUT_MS 500
#define MIN_CACHE_EN_TIMEOUT_MS 1600

static const unsigned int tran_exp[] = {
        10000, 100000, 1000000, 10000000,
        0, 0, 0, 0
};

static const unsigned char tran_mant[] = {
        0,  10, 12, 13, 15, 20, 25, 30,
        35, 40, 45, 50, 55, 60, 70, 80,
};

static const unsigned int taac_exp[] = {
        1, 10, 100, 1000, 10000, 100000, 1000000, 10000000,
};

static const unsigned int taac_mant[] = {
        0,  10, 12, 13, 15, 20, 25, 30,
        35, 40, 45, 50, 55, 60, 70, 80,
};

#define UNSTUFF_BITS(resp,start,size) \
        ({ \
                const int __size = size; \
                const u32 __mask = (__size < 32 ? 1 << __size : 0) - 1; \
                const int __off = 3 - ((start) / 32); \
                const int __shft = (start) & 31; \
                u32 __res; \
                \
                __res = resp[__off] >> __shft; \
                if (__size + __shft > 32) \
                        __res |= resp[__off-1] << ((32 - __shft) % 32); \
                __res & __mask; \
        })

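/*
 * Clarifying example (added commentary only, not functional code): the raw
 * 128-bit response is kept as four u32 words, with resp[0] holding bits
 * 127..96 and resp[3] holding bits 31..0.  UNSTUFF_BITS(resp, 126, 2)
 * therefore evaluates to (resp[0] >> 30) & 0x3, i.e. the CSD_STRUCTURE
 * field in bits [127:126].  A field that straddles a word boundary, such
 * as UNSTUFF_BITS(resp, 62, 12) for C_SIZE, additionally ORs in the
 * remaining high-order bits from the adjacent word via resp[__off - 1].
 */
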
/*
 * Given the decoded CSD structure, decode the raw CID to our CID structure.
 */
static int mmc_decode_cid(struct mmc_card *card)
{
        u32 *resp = card->raw_cid;

        /*
         * The selection of the format here is based upon published
         * specs from sandisk and from what people have reported.
         */
        switch (card->csd.mmca_vsn) {
        case 0: /* MMC v1.0 - v1.2 */
        case 1: /* MMC v1.4 */
                card->cid.manfid = UNSTUFF_BITS(resp, 104, 24);
                card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8);
                card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8);
                card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8);
                card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8);
                card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8);
                card->cid.prod_name[5] = UNSTUFF_BITS(resp, 56, 8);
                card->cid.prod_name[6] = UNSTUFF_BITS(resp, 48, 8);
                card->cid.hwrev = UNSTUFF_BITS(resp, 44, 4);
                card->cid.fwrev = UNSTUFF_BITS(resp, 40, 4);
                card->cid.serial = UNSTUFF_BITS(resp, 16, 24);
                card->cid.month = UNSTUFF_BITS(resp, 12, 4);
                card->cid.year = UNSTUFF_BITS(resp, 8, 4) + 1997;
                break;

        case 2: /* MMC v2.0 - v2.2 */
        case 3: /* MMC v3.1 - v3.3 */
        case 4: /* MMC v4 */
                card->cid.manfid = UNSTUFF_BITS(resp, 120, 8);
                card->cid.oemid = UNSTUFF_BITS(resp, 104, 16);
                card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8);
                card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8);
                card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8);
                card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8);
                card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8);
                card->cid.prod_name[5] = UNSTUFF_BITS(resp, 56, 8);
                card->cid.prv = UNSTUFF_BITS(resp, 48, 8);
                card->cid.serial = UNSTUFF_BITS(resp, 16, 32);
                card->cid.month = UNSTUFF_BITS(resp, 12, 4);
                card->cid.year = UNSTUFF_BITS(resp, 8, 4) + 1997;
                break;

        default:
                pr_err("%s: card has unknown MMCA version %d\n",
                        mmc_hostname(card->host), card->csd.mmca_vsn);
                return -EINVAL;
        }

        return 0;
}

static void mmc_set_erase_size(struct mmc_card *card)
{
        if (card->ext_csd.erase_group_def & 1)
                card->erase_size = card->ext_csd.hc_erase_size;
        else
                card->erase_size = card->csd.erase_size;

        mmc_init_erase(card);
}

/*
 * Given a 128-bit response, decode to our card CSD structure.
 */
static int mmc_decode_csd(struct mmc_card *card)
{
        struct mmc_csd *csd = &card->csd;
        unsigned int e, m, a, b;
        u32 *resp = card->raw_csd;

        /*
         * We only understand CSD structure v1.1 and v1.2.
         * v1.2 has extra information in bits 15, 11 and 10.
         * We also support eMMC v4.4 & v4.41.
         */
        csd->structure = UNSTUFF_BITS(resp, 126, 2);
        if (csd->structure == 0) {
                pr_err("%s: unrecognised CSD structure version %d\n",
                        mmc_hostname(card->host), csd->structure);
                return -EINVAL;
        }

        csd->mmca_vsn = UNSTUFF_BITS(resp, 122, 4);
        m = UNSTUFF_BITS(resp, 115, 4);
        e = UNSTUFF_BITS(resp, 112, 3);
        csd->taac_ns = (taac_exp[e] * taac_mant[m] + 9) / 10;
        csd->taac_clks = UNSTUFF_BITS(resp, 104, 8) * 100;

        m = UNSTUFF_BITS(resp, 99, 4);
        e = UNSTUFF_BITS(resp, 96, 3);
        csd->max_dtr = tran_exp[e] * tran_mant[m];
        csd->cmdclass = UNSTUFF_BITS(resp, 84, 12);

        e = UNSTUFF_BITS(resp, 47, 3);
        m = UNSTUFF_BITS(resp, 62, 12);
        csd->capacity = (1 + m) << (e + 2);

        csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4);
        csd->read_partial = UNSTUFF_BITS(resp, 79, 1);
        csd->write_misalign = UNSTUFF_BITS(resp, 78, 1);
        csd->read_misalign = UNSTUFF_BITS(resp, 77, 1);
        csd->dsr_imp = UNSTUFF_BITS(resp, 76, 1);
        csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3);
        csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4);
        csd->write_partial = UNSTUFF_BITS(resp, 21, 1);
        if (csd->write_blkbits >= 9) {
                a = UNSTUFF_BITS(resp, 42, 5);
                b = UNSTUFF_BITS(resp, 37, 5);
                csd->erase_size = (a + 1) * (b + 1);
                csd->erase_size <<= csd->write_blkbits - 9;
        }

        return 0;
}

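/*
 * Worked example for the CSD capacity math above (illustrative values only,
 * added commentary): with C_SIZE = 2047 (12 bits at offset 62),
 * C_SIZE_MULT = 7 (3 bits at offset 47) and READ_BL_LEN = 9, csd->capacity
 * becomes (1 + 2047) << (7 + 2) = 1048576 read blocks of 512 bytes, i.e. a
 * 512 MiB device.  Densities above 2 GiB cannot be expressed this way; such
 * cards report the "magic" 4096 * 512 capacity here and the real sector
 * count in EXT_CSD (see mmc_decode_ext_csd() and mmc_read_ext_csd() below).
 */
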
static void mmc_select_card_type(struct mmc_card *card)
{
        struct mmc_host *host = card->host;
        u8 card_type = card->ext_csd.raw_card_type;
        u32 caps = host->caps, caps2 = host->caps2;
        unsigned int hs_max_dtr = 0, hs200_max_dtr = 0;
        unsigned int avail_type = 0;

        if (caps & MMC_CAP_MMC_HIGHSPEED &&
            card_type & EXT_CSD_CARD_TYPE_HS_26) {
                hs_max_dtr = MMC_HIGH_26_MAX_DTR;
                avail_type |= EXT_CSD_CARD_TYPE_HS_26;
        }

        if (caps & MMC_CAP_MMC_HIGHSPEED &&
            card_type & EXT_CSD_CARD_TYPE_HS_52) {
                hs_max_dtr = MMC_HIGH_52_MAX_DTR;
                avail_type |= EXT_CSD_CARD_TYPE_HS_52;
        }

        if (caps & (MMC_CAP_1_8V_DDR | MMC_CAP_3_3V_DDR) &&
            card_type & EXT_CSD_CARD_TYPE_DDR_1_8V) {
                hs_max_dtr = MMC_HIGH_DDR_MAX_DTR;
                avail_type |= EXT_CSD_CARD_TYPE_DDR_1_8V;
        }

        if (caps & MMC_CAP_1_2V_DDR &&
            card_type & EXT_CSD_CARD_TYPE_DDR_1_2V) {
                hs_max_dtr = MMC_HIGH_DDR_MAX_DTR;
                avail_type |= EXT_CSD_CARD_TYPE_DDR_1_2V;
        }

        if (caps2 & MMC_CAP2_HS200_1_8V_SDR &&
            card_type & EXT_CSD_CARD_TYPE_HS200_1_8V) {
                hs200_max_dtr = MMC_HS200_MAX_DTR;
                avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V;
        }

        if (caps2 & MMC_CAP2_HS200_1_2V_SDR &&
            card_type & EXT_CSD_CARD_TYPE_HS200_1_2V) {
                hs200_max_dtr = MMC_HS200_MAX_DTR;
                avail_type |= EXT_CSD_CARD_TYPE_HS200_1_2V;
        }

        if (caps2 & MMC_CAP2_HS400_1_8V &&
            card_type & EXT_CSD_CARD_TYPE_HS400_1_8V) {
                hs200_max_dtr = MMC_HS200_MAX_DTR;
                avail_type |= EXT_CSD_CARD_TYPE_HS400_1_8V;
        }

        if (caps2 & MMC_CAP2_HS400_1_2V &&
            card_type & EXT_CSD_CARD_TYPE_HS400_1_2V) {
                hs200_max_dtr = MMC_HS200_MAX_DTR;
                avail_type |= EXT_CSD_CARD_TYPE_HS400_1_2V;
        }

        if ((caps2 & MMC_CAP2_HS400_ES) &&
            card->ext_csd.strobe_support &&
            (avail_type & EXT_CSD_CARD_TYPE_HS400))
                avail_type |= EXT_CSD_CARD_TYPE_HS400ES;

        card->ext_csd.hs_max_dtr = hs_max_dtr;
        card->ext_csd.hs200_max_dtr = hs200_max_dtr;
        card->mmc_avail_type = avail_type;
}

static void mmc_manage_enhanced_area(struct mmc_card *card, u8 *ext_csd)
{
        u8 hc_erase_grp_sz, hc_wp_grp_sz;

        /*
         * Disable these attributes by default
         */
        card->ext_csd.enhanced_area_offset = -EINVAL;
        card->ext_csd.enhanced_area_size = -EINVAL;

        /*
         * Enhanced area feature support -- check whether the eMMC
         * card has the Enhanced area enabled. If so, export enhanced
         * area offset and size to user by adding sysfs interface.
         */
        if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) &&
            (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) {
                if (card->ext_csd.partition_setting_completed) {
                        hc_erase_grp_sz =
                                ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
                        hc_wp_grp_sz =
                                ext_csd[EXT_CSD_HC_WP_GRP_SIZE];

                        /*
                         * calculate the enhanced data area offset, in bytes
                         */
                        card->ext_csd.enhanced_area_offset =
                                (((unsigned long long)ext_csd[139]) << 24) +
                                (((unsigned long long)ext_csd[138]) << 16) +
                                (((unsigned long long)ext_csd[137]) << 8) +
                                (((unsigned long long)ext_csd[136]));
                        if (mmc_card_blockaddr(card))
                                card->ext_csd.enhanced_area_offset <<= 9;
                        /*
                         * calculate the enhanced data area size, in kilobytes
                         */
                        card->ext_csd.enhanced_area_size =
                                (ext_csd[142] << 16) + (ext_csd[141] << 8) +
                                ext_csd[140];
                        card->ext_csd.enhanced_area_size *=
                                (size_t)(hc_erase_grp_sz * hc_wp_grp_sz);
                        card->ext_csd.enhanced_area_size <<= 9;
                } else {
                        pr_warn("%s: defines enhanced area without partition setting complete\n",
                                mmc_hostname(card->host));
                }
        }
}

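/*
 * Sizing example for the enhanced data area above (illustrative values only,
 * added commentary): with ENH_SIZE_MULT = 4 (bytes 140..142),
 * HC_ERASE_GRP_SIZE = 1 and HC_WP_GRP_SIZE = 16, the size works out to
 * 4 * 1 * 16 = 64 units of 512 KiB, i.e. 32768 KiB after the shift by 9.
 * On a sector-addressed card an ENH_START_ADDR of 0x1000 likewise becomes
 * a byte offset of 0x200000 after the shift by 9.
 */
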
static void mmc_part_add(struct mmc_card *card, u64 size,
                         unsigned int part_cfg, char *name, int idx, bool ro,
                         int area_type)
{
        card->part[card->nr_parts].size = size;
        card->part[card->nr_parts].part_cfg = part_cfg;
        sprintf(card->part[card->nr_parts].name, name, idx);
        card->part[card->nr_parts].force_ro = ro;
        card->part[card->nr_parts].area_type = area_type;
        card->nr_parts++;
}

static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd)
{
        int idx;
        u8 hc_erase_grp_sz, hc_wp_grp_sz;
        u64 part_size;

        /*
         * General purpose partition feature support --
         * If ext_csd has the size of general purpose partitions,
         * set size, part_cfg, partition name in mmc_part.
         */
        if (ext_csd[EXT_CSD_PARTITION_SUPPORT] &
            EXT_CSD_PART_SUPPORT_PART_EN) {
                hc_erase_grp_sz =
                        ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
                hc_wp_grp_sz =
                        ext_csd[EXT_CSD_HC_WP_GRP_SIZE];

                for (idx = 0; idx < MMC_NUM_GP_PARTITION; idx++) {
                        if (!ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3] &&
                            !ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1] &&
                            !ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2])
                                continue;
                        if (card->ext_csd.partition_setting_completed == 0) {
                                pr_warn("%s: has partition size defined without partition complete\n",
                                        mmc_hostname(card->host));
                                break;
                        }
                        part_size =
                                (ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2]
                                 << 16) +
                                (ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1]
                                 << 8) +
                                ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3];
                        part_size *= (hc_erase_grp_sz * hc_wp_grp_sz);
                        mmc_part_add(card, part_size << 19,
                                EXT_CSD_PART_CONFIG_ACC_GP0 + idx,
                                "gp%d", idx, false,
                                MMC_BLK_DATA_AREA_GP);
                }
        }
}

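/*
 * Sizing example for the general purpose partitions above (illustrative
 * values only, added commentary): with GP_SIZE_MULT = 2,
 * HC_ERASE_GRP_SIZE = 1 and HC_WP_GRP_SIZE = 16, part_size becomes
 * 2 * 1 * 16 = 32 units of 512 KiB, and the shift by 19 converts that to
 * bytes, so mmc_part_add() registers a 16 MiB "gp" partition.
 */
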
/* Minimum partition switch timeout in milliseconds */
#define MMC_MIN_PART_SWITCH_TIME 300

/*
 * Decode extended CSD.
 */
static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
{
        int err = 0, idx;
        u64 part_size;
        struct device_node *np;
        bool broken_hpi = false;

        /* Version is coded in the CSD_STRUCTURE byte in the EXT_CSD register */
        card->ext_csd.raw_ext_csd_structure = ext_csd[EXT_CSD_STRUCTURE];
        if (card->csd.structure == 3) {
                if (card->ext_csd.raw_ext_csd_structure > 2) {
                        pr_err("%s: unrecognised EXT_CSD structure version %d\n",
                                mmc_hostname(card->host),
                                card->ext_csd.raw_ext_csd_structure);
                        err = -EINVAL;
                        goto out;
                }
        }

        np = mmc_of_find_child_device(card->host, 0);
        if (np && of_device_is_compatible(np, "mmc-card"))
                broken_hpi = of_property_read_bool(np, "broken-hpi");
        of_node_put(np);

        /*
         * The EXT_CSD format is meant to be forward compatible. As long
         * as CSD_STRUCTURE does not change, all values for EXT_CSD_REV
         * are authorized, see JEDEC JESD84-B50 section B.8.
         */
        card->ext_csd.rev = ext_csd[EXT_CSD_REV];

        /* fixup device after ext_csd revision field is updated */
        mmc_fixup_device(card, mmc_ext_csd_fixups);

        card->ext_csd.raw_sectors[0] = ext_csd[EXT_CSD_SEC_CNT + 0];
        card->ext_csd.raw_sectors[1] = ext_csd[EXT_CSD_SEC_CNT + 1];
        card->ext_csd.raw_sectors[2] = ext_csd[EXT_CSD_SEC_CNT + 2];
        card->ext_csd.raw_sectors[3] = ext_csd[EXT_CSD_SEC_CNT + 3];
        if (card->ext_csd.rev >= 2) {
                card->ext_csd.sectors =
                        ext_csd[EXT_CSD_SEC_CNT + 0] << 0 |
                        ext_csd[EXT_CSD_SEC_CNT + 1] << 8 |
                        ext_csd[EXT_CSD_SEC_CNT + 2] << 16 |
                        ext_csd[EXT_CSD_SEC_CNT + 3] << 24;

                /* Cards with density > 2GiB are sector addressed */
                if (card->ext_csd.sectors > (2u * 1024 * 1024 * 1024) / 512)
                        mmc_card_set_blockaddr(card);
        }

        card->ext_csd.strobe_support = ext_csd[EXT_CSD_STROBE_SUPPORT];
        card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE];
        mmc_select_card_type(card);

        card->ext_csd.raw_s_a_timeout = ext_csd[EXT_CSD_S_A_TIMEOUT];
        card->ext_csd.raw_erase_timeout_mult =
                ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
        card->ext_csd.raw_hc_erase_grp_size =
                ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
        if (card->ext_csd.rev >= 3) {
                u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT];
                card->ext_csd.part_config = ext_csd[EXT_CSD_PART_CONFIG];

                /* EXT_CSD value is in units of 10ms, but we store in ms */
                card->ext_csd.part_time = 10 * ext_csd[EXT_CSD_PART_SWITCH_TIME];
                /* Some eMMC set the value too low so set a minimum */
                if (card->ext_csd.part_time &&
                    card->ext_csd.part_time < MMC_MIN_PART_SWITCH_TIME)
                        card->ext_csd.part_time = MMC_MIN_PART_SWITCH_TIME;

                /* Sleep / awake timeout in 100ns units */
                if (sa_shift > 0 && sa_shift <= 0x17)
                        card->ext_csd.sa_timeout =
                                1 << ext_csd[EXT_CSD_S_A_TIMEOUT];
                card->ext_csd.erase_group_def =
                        ext_csd[EXT_CSD_ERASE_GROUP_DEF];
                card->ext_csd.hc_erase_timeout = 300 *
                        ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
                card->ext_csd.hc_erase_size =
                        ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] << 10;

                card->ext_csd.rel_sectors = ext_csd[EXT_CSD_REL_WR_SEC_C];

                /*
                 * There are two boot regions of equal size, defined in
                 * multiples of 128K.
                 */
                if (ext_csd[EXT_CSD_BOOT_MULT] && mmc_boot_partition_access(card->host)) {
                        for (idx = 0; idx < MMC_NUM_BOOT_PARTITION; idx++) {
                                part_size = ext_csd[EXT_CSD_BOOT_MULT] << 17;
                                mmc_part_add(card, part_size,
                                        EXT_CSD_PART_CONFIG_ACC_BOOT0 + idx,
                                        "boot%d", idx, true,
                                        MMC_BLK_DATA_AREA_BOOT);
                        }
                }
        }

        card->ext_csd.raw_hc_erase_gap_size =
                ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
        card->ext_csd.raw_sec_trim_mult =
                ext_csd[EXT_CSD_SEC_TRIM_MULT];
        card->ext_csd.raw_sec_erase_mult =
                ext_csd[EXT_CSD_SEC_ERASE_MULT];
        card->ext_csd.raw_sec_feature_support =
                ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
        card->ext_csd.raw_trim_mult =
                ext_csd[EXT_CSD_TRIM_MULT];
        card->ext_csd.raw_partition_support = ext_csd[EXT_CSD_PARTITION_SUPPORT];
        card->ext_csd.raw_driver_strength = ext_csd[EXT_CSD_DRIVER_STRENGTH];
        if (card->ext_csd.rev >= 4) {
                if (ext_csd[EXT_CSD_PARTITION_SETTING_COMPLETED] &
                    EXT_CSD_PART_SETTING_COMPLETED)
                        card->ext_csd.partition_setting_completed = 1;
                else
                        card->ext_csd.partition_setting_completed = 0;

                mmc_manage_enhanced_area(card, ext_csd);

                mmc_manage_gp_partitions(card, ext_csd);
                card->ext_csd.sec_trim_mult =
                        ext_csd[EXT_CSD_SEC_TRIM_MULT];
                card->ext_csd.sec_erase_mult =
                        ext_csd[EXT_CSD_SEC_ERASE_MULT];
                card->ext_csd.sec_feature_support =
                        ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
                card->ext_csd.trim_timeout = 300 *
                        ext_csd[EXT_CSD_TRIM_MULT];

                /*
                 * Note that the call to mmc_part_add above defaults to read
                 * only. If this default assumption is changed, the call must
                 * take into account the value of boot_locked below.
                 */
                card->ext_csd.boot_ro_lock = ext_csd[EXT_CSD_BOOT_WP];
                card->ext_csd.boot_ro_lockable = true;

                /* Save power class values */
                card->ext_csd.raw_pwr_cl_52_195 =
                        ext_csd[EXT_CSD_PWR_CL_52_195];
                card->ext_csd.raw_pwr_cl_26_195 =
                        ext_csd[EXT_CSD_PWR_CL_26_195];
                card->ext_csd.raw_pwr_cl_52_360 =
                        ext_csd[EXT_CSD_PWR_CL_52_360];
                card->ext_csd.raw_pwr_cl_26_360 =
                        ext_csd[EXT_CSD_PWR_CL_26_360];
                card->ext_csd.raw_pwr_cl_200_195 =
                        ext_csd[EXT_CSD_PWR_CL_200_195];
                card->ext_csd.raw_pwr_cl_200_360 =
                        ext_csd[EXT_CSD_PWR_CL_200_360];
                card->ext_csd.raw_pwr_cl_ddr_52_195 =
                        ext_csd[EXT_CSD_PWR_CL_DDR_52_195];
                card->ext_csd.raw_pwr_cl_ddr_52_360 =
                        ext_csd[EXT_CSD_PWR_CL_DDR_52_360];
                card->ext_csd.raw_pwr_cl_ddr_200_360 =
                        ext_csd[EXT_CSD_PWR_CL_DDR_200_360];
        }

        if (card->ext_csd.rev >= 5) {
                /* Adjust production date as per JEDEC JESD84-B451 */
                if (card->cid.year < 2010)
                        card->cid.year += 16;

                /* check whether the eMMC card supports BKOPS */
                if (ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) {
                        card->ext_csd.bkops = 1;
                        card->ext_csd.man_bkops_en =
                                (ext_csd[EXT_CSD_BKOPS_EN] &
                                        EXT_CSD_MANUAL_BKOPS_MASK);
                        card->ext_csd.raw_bkops_status =
                                ext_csd[EXT_CSD_BKOPS_STATUS];
                        if (card->ext_csd.man_bkops_en)
                                pr_debug("%s: MAN_BKOPS_EN bit is set\n",
                                        mmc_hostname(card->host));
                        card->ext_csd.auto_bkops_en =
                                (ext_csd[EXT_CSD_BKOPS_EN] &
                                        EXT_CSD_AUTO_BKOPS_MASK);
                        if (card->ext_csd.auto_bkops_en)
                                pr_debug("%s: AUTO_BKOPS_EN bit is set\n",
                                        mmc_hostname(card->host));
                }

                /* check whether the eMMC card supports HPI */
                if (!mmc_card_broken_hpi(card) &&
                    !broken_hpi && (ext_csd[EXT_CSD_HPI_FEATURES] & 0x1)) {
                        card->ext_csd.hpi = 1;
                        if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x2)
                                card->ext_csd.hpi_cmd = MMC_STOP_TRANSMISSION;
                        else
                                card->ext_csd.hpi_cmd = MMC_SEND_STATUS;
                        /*
                         * Indicate the maximum timeout to close
                         * a command interrupted by HPI
                         */
                        card->ext_csd.out_of_int_time =
                                ext_csd[EXT_CSD_OUT_OF_INTERRUPT_TIME] * 10;
                }

                card->ext_csd.rel_param = ext_csd[EXT_CSD_WR_REL_PARAM];
                card->ext_csd.rst_n_function = ext_csd[EXT_CSD_RST_N_FUNCTION];

                /*
                 * RPMB regions are defined in multiples of 128K.
                 */
                card->ext_csd.raw_rpmb_size_mult = ext_csd[EXT_CSD_RPMB_MULT];
                if (ext_csd[EXT_CSD_RPMB_MULT] && mmc_host_cmd23(card->host)) {
                        mmc_part_add(card, ext_csd[EXT_CSD_RPMB_MULT] << 17,
                                EXT_CSD_PART_CONFIG_ACC_RPMB,
                                "rpmb", 0, false,
                                MMC_BLK_DATA_AREA_RPMB);
                }
        }

        card->ext_csd.raw_erased_mem_count = ext_csd[EXT_CSD_ERASED_MEM_CONT];
        if (ext_csd[EXT_CSD_ERASED_MEM_CONT])
                card->erased_byte = 0xFF;
        else
                card->erased_byte = 0x0;

        /* eMMC v4.5 or later */
        card->ext_csd.generic_cmd6_time = DEFAULT_CMD6_TIMEOUT_MS;
        if (card->ext_csd.rev >= 6) {
                card->ext_csd.feature_support |= MMC_DISCARD_FEATURE;

                card->ext_csd.generic_cmd6_time = 10 *
                        ext_csd[EXT_CSD_GENERIC_CMD6_TIME];
                card->ext_csd.power_off_longtime = 10 *
                        ext_csd[EXT_CSD_POWER_OFF_LONG_TIME];

                card->ext_csd.cache_size =
                        ext_csd[EXT_CSD_CACHE_SIZE + 0] << 0 |
                        ext_csd[EXT_CSD_CACHE_SIZE + 1] << 8 |
                        ext_csd[EXT_CSD_CACHE_SIZE + 2] << 16 |
                        ext_csd[EXT_CSD_CACHE_SIZE + 3] << 24;

                if (ext_csd[EXT_CSD_DATA_SECTOR_SIZE] == 1)
                        card->ext_csd.data_sector_size = 4096;
                else
                        card->ext_csd.data_sector_size = 512;

                if ((ext_csd[EXT_CSD_DATA_TAG_SUPPORT] & 1) &&
                    (ext_csd[EXT_CSD_TAG_UNIT_SIZE] <= 8)) {
                        card->ext_csd.data_tag_unit_size =
                                ((unsigned int) 1 << ext_csd[EXT_CSD_TAG_UNIT_SIZE]) *
                                (card->ext_csd.data_sector_size);
                } else {
                        card->ext_csd.data_tag_unit_size = 0;
                }

                card->ext_csd.max_packed_writes =
                        ext_csd[EXT_CSD_MAX_PACKED_WRITES];
                card->ext_csd.max_packed_reads =
                        ext_csd[EXT_CSD_MAX_PACKED_READS];
        } else {
                card->ext_csd.data_sector_size = 512;
        }

        /* eMMC v5 or later */
        if (card->ext_csd.rev >= 7) {
                memcpy(card->ext_csd.fwrev, &ext_csd[EXT_CSD_FIRMWARE_VERSION],
                       MMC_FIRMWARE_LEN);
                card->ext_csd.ffu_capable =
                        (ext_csd[EXT_CSD_SUPPORTED_MODE] & 0x1) &&
                        !(ext_csd[EXT_CSD_FW_CONFIG] & 0x1);

                card->ext_csd.pre_eol_info = ext_csd[EXT_CSD_PRE_EOL_INFO];
                card->ext_csd.device_life_time_est_typ_a =
                        ext_csd[EXT_CSD_DEVICE_LIFE_TIME_EST_TYP_A];
                card->ext_csd.device_life_time_est_typ_b =
                        ext_csd[EXT_CSD_DEVICE_LIFE_TIME_EST_TYP_B];
        }

        /* eMMC v5.1 or later */
        if (card->ext_csd.rev >= 8) {
                card->ext_csd.cmdq_support = ext_csd[EXT_CSD_CMDQ_SUPPORT] &
                                             EXT_CSD_CMDQ_SUPPORTED;
                card->ext_csd.cmdq_depth = (ext_csd[EXT_CSD_CMDQ_DEPTH] &
                                            EXT_CSD_CMDQ_DEPTH_MASK) + 1;
                /* Exclude inefficiently small queue depths */
                if (card->ext_csd.cmdq_depth <= 2) {
                        card->ext_csd.cmdq_support = false;
                        card->ext_csd.cmdq_depth = 0;
                }
                if (card->ext_csd.cmdq_support) {
                        pr_debug("%s: Command Queue supported depth %u\n",
                                 mmc_hostname(card->host),
                                 card->ext_csd.cmdq_depth);
                }
                card->ext_csd.enhanced_rpmb_supported =
                        (card->ext_csd.rel_param &
                         EXT_CSD_WR_REL_PARAM_EN_RPMB_REL_WR);
        }

out:
        return err;
}

static int mmc_read_ext_csd(struct mmc_card *card)
{
        u8 *ext_csd;
        int err;

        if (!mmc_can_ext_csd(card))
                return 0;

        err = mmc_get_ext_csd(card, &ext_csd);
        if (err) {
                /*
                 * If the host or the card can't do the switch,
                 * fail more gracefully.
                 */
                if ((err != -EINVAL)
                 && (err != -ENOSYS)
                 && (err != -EFAULT))
                        return err;

                /*
                 * High capacity cards should have this "magic" size
                 * stored in their CSD.
                 */
                if (card->csd.capacity == (4096 * 512)) {
                        pr_err("%s: unable to read EXT_CSD on a possible high capacity card. Card will be ignored.\n",
                                mmc_hostname(card->host));
                } else {
                        pr_warn("%s: unable to read EXT_CSD, performance might suffer\n",
                                mmc_hostname(card->host));
                        err = 0;
                }

                return err;
        }

        err = mmc_decode_ext_csd(card, ext_csd);
        kfree(ext_csd);
        return err;
}

static int mmc_compare_ext_csds(struct mmc_card *card, unsigned bus_width)
{
        u8 *bw_ext_csd;
        int err;

        if (bus_width == MMC_BUS_WIDTH_1)
                return 0;

        err = mmc_get_ext_csd(card, &bw_ext_csd);
        if (err)
                return err;

        /* only compare read only fields */
        err = !((card->ext_csd.raw_partition_support ==
                        bw_ext_csd[EXT_CSD_PARTITION_SUPPORT]) &&
                (card->ext_csd.raw_erased_mem_count ==
                        bw_ext_csd[EXT_CSD_ERASED_MEM_CONT]) &&
                (card->ext_csd.rev ==
                        bw_ext_csd[EXT_CSD_REV]) &&
                (card->ext_csd.raw_ext_csd_structure ==
                        bw_ext_csd[EXT_CSD_STRUCTURE]) &&
                (card->ext_csd.raw_card_type ==
                        bw_ext_csd[EXT_CSD_CARD_TYPE]) &&
                (card->ext_csd.raw_s_a_timeout ==
                        bw_ext_csd[EXT_CSD_S_A_TIMEOUT]) &&
                (card->ext_csd.raw_hc_erase_gap_size ==
                        bw_ext_csd[EXT_CSD_HC_WP_GRP_SIZE]) &&
                (card->ext_csd.raw_erase_timeout_mult ==
                        bw_ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT]) &&
                (card->ext_csd.raw_hc_erase_grp_size ==
                        bw_ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]) &&
                (card->ext_csd.raw_sec_trim_mult ==
                        bw_ext_csd[EXT_CSD_SEC_TRIM_MULT]) &&
                (card->ext_csd.raw_sec_erase_mult ==
                        bw_ext_csd[EXT_CSD_SEC_ERASE_MULT]) &&
                (card->ext_csd.raw_sec_feature_support ==
                        bw_ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT]) &&
                (card->ext_csd.raw_trim_mult ==
                        bw_ext_csd[EXT_CSD_TRIM_MULT]) &&
                (card->ext_csd.raw_sectors[0] ==
                        bw_ext_csd[EXT_CSD_SEC_CNT + 0]) &&
                (card->ext_csd.raw_sectors[1] ==
                        bw_ext_csd[EXT_CSD_SEC_CNT + 1]) &&
                (card->ext_csd.raw_sectors[2] ==
                        bw_ext_csd[EXT_CSD_SEC_CNT + 2]) &&
                (card->ext_csd.raw_sectors[3] ==
                        bw_ext_csd[EXT_CSD_SEC_CNT + 3]) &&
                (card->ext_csd.raw_pwr_cl_52_195 ==
                        bw_ext_csd[EXT_CSD_PWR_CL_52_195]) &&
                (card->ext_csd.raw_pwr_cl_26_195 ==
                        bw_ext_csd[EXT_CSD_PWR_CL_26_195]) &&
                (card->ext_csd.raw_pwr_cl_52_360 ==
                        bw_ext_csd[EXT_CSD_PWR_CL_52_360]) &&
                (card->ext_csd.raw_pwr_cl_26_360 ==
                        bw_ext_csd[EXT_CSD_PWR_CL_26_360]) &&
                (card->ext_csd.raw_pwr_cl_200_195 ==
                        bw_ext_csd[EXT_CSD_PWR_CL_200_195]) &&
                (card->ext_csd.raw_pwr_cl_200_360 ==
                        bw_ext_csd[EXT_CSD_PWR_CL_200_360]) &&
                (card->ext_csd.raw_pwr_cl_ddr_52_195 ==
                        bw_ext_csd[EXT_CSD_PWR_CL_DDR_52_195]) &&
                (card->ext_csd.raw_pwr_cl_ddr_52_360 ==
                        bw_ext_csd[EXT_CSD_PWR_CL_DDR_52_360]) &&
                (card->ext_csd.raw_pwr_cl_ddr_200_360 ==
                        bw_ext_csd[EXT_CSD_PWR_CL_DDR_200_360]));

if (err)
|
|
|
|
err = -EINVAL;
|
|
|
|
|
2014-10-16 21:18:51 +07:00
|
|
|
kfree(bw_ext_csd);
|
2006-12-31 06:11:32 +07:00
|
|
|
return err;
|
|
|
|
}
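
/*
 * Read-only sysfs attributes exported for each MMC card.  MMC_DEV_ATTR()
 * expands to a show() helper that formats the given card field(s) and to the
 * matching device attribute.
 */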
MMC_DEV_ATTR(cid, "%08x%08x%08x%08x\n", card->raw_cid[0], card->raw_cid[1],
	card->raw_cid[2], card->raw_cid[3]);
MMC_DEV_ATTR(csd, "%08x%08x%08x%08x\n", card->raw_csd[0], card->raw_csd[1],
	card->raw_csd[2], card->raw_csd[3]);
MMC_DEV_ATTR(date, "%02d/%04d\n", card->cid.month, card->cid.year);
MMC_DEV_ATTR(erase_size, "%u\n", card->erase_size << 9);
MMC_DEV_ATTR(preferred_erase_size, "%u\n", card->pref_erase << 9);
MMC_DEV_ATTR(ffu_capable, "%d\n", card->ext_csd.ffu_capable);
MMC_DEV_ATTR(hwrev, "0x%x\n", card->cid.hwrev);
MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid);
MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name);
MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
MMC_DEV_ATTR(prv, "0x%x\n", card->cid.prv);
MMC_DEV_ATTR(rev, "0x%x\n", card->ext_csd.rev);
MMC_DEV_ATTR(pre_eol_info, "0x%02x\n", card->ext_csd.pre_eol_info);
MMC_DEV_ATTR(life_time, "0x%02x 0x%02x\n",
	card->ext_csd.device_life_time_est_typ_a,
	card->ext_csd.device_life_time_est_typ_b);
MMC_DEV_ATTR(serial, "0x%08x\n", card->cid.serial);
MMC_DEV_ATTR(enhanced_area_offset, "%llu\n",
		card->ext_csd.enhanced_area_offset);
MMC_DEV_ATTR(enhanced_area_size, "%u\n", card->ext_csd.enhanced_area_size);
MMC_DEV_ATTR(raw_rpmb_size_mult, "%#x\n", card->ext_csd.raw_rpmb_size_mult);
MMC_DEV_ATTR(enhanced_rpmb_supported, "%#x\n",
	card->ext_csd.enhanced_rpmb_supported);
MMC_DEV_ATTR(rel_sectors, "%#x\n", card->ext_csd.rel_sectors);
MMC_DEV_ATTR(ocr, "0x%08x\n", card->ocr);
MMC_DEV_ATTR(rca, "0x%04x\n", card->rca);
MMC_DEV_ATTR(cmdq_en, "%d\n", card->ext_csd.cmdq_en);
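
/*
 * Firmware revision: devices with EXT_CSD revision < 7 only report the legacy
 * firmware field from the CID, while newer devices (eMMC 5.0 and later)
 * expose a multi-byte firmware version in EXT_CSD.
 */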
static ssize_t mmc_fwrev_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct mmc_card *card = mmc_dev_to_card(dev);

	if (card->ext_csd.rev < 7) {
		return sprintf(buf, "0x%x\n", card->cid.fwrev);
	} else {
		return sprintf(buf, "0x%*phN\n", MMC_FIRMWARE_LEN,
			       card->ext_csd.fwrev);
	}
}

static DEVICE_ATTR(fwrev, S_IRUGO, mmc_fwrev_show, NULL);
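
/*
 * Driver stage register (DSR): report the value the host requested if the
 * card implements a DSR, otherwise report the specification default 0x404.
 */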
static ssize_t mmc_dsr_show(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	struct mmc_card *card = mmc_dev_to_card(dev);
	struct mmc_host *host = card->host;

	if (card->csd.dsr_imp && host->dsr_req)
		return sprintf(buf, "0x%x\n", host->dsr);
	else
		/* return default DSR value */
		return sprintf(buf, "0x%x\n", 0x404);
}

static DEVICE_ATTR(dsr, S_IRUGO, mmc_dsr_show, NULL);
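
/*
 * Default attribute group attached to every MMC card through the mmc_type
 * device type defined below.
 */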
static struct attribute *mmc_std_attrs[] = {
	&dev_attr_cid.attr,
	&dev_attr_csd.attr,
	&dev_attr_date.attr,
	&dev_attr_erase_size.attr,
	&dev_attr_preferred_erase_size.attr,
	&dev_attr_fwrev.attr,
	&dev_attr_ffu_capable.attr,
	&dev_attr_hwrev.attr,
	&dev_attr_manfid.attr,
	&dev_attr_name.attr,
	&dev_attr_oemid.attr,
	&dev_attr_prv.attr,
	&dev_attr_rev.attr,
	&dev_attr_pre_eol_info.attr,
	&dev_attr_life_time.attr,
	&dev_attr_serial.attr,
	&dev_attr_enhanced_area_offset.attr,
	&dev_attr_enhanced_area_size.attr,
	&dev_attr_raw_rpmb_size_mult.attr,
	&dev_attr_enhanced_rpmb_supported.attr,
	&dev_attr_rel_sectors.attr,
	&dev_attr_ocr.attr,
	&dev_attr_rca.attr,
	&dev_attr_dsr.attr,
	&dev_attr_cmdq_en.attr,
	NULL,
};
ATTRIBUTE_GROUPS(mmc_std);

static struct device_type mmc_type = {
	.groups = mmc_std_groups,
};
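
/*
 * The EXT_CSD PWR_CL_* bytes pack two power classes per byte: the class for a
 * 4-bit bus in the low nibble and the class for an 8-bit bus in the high
 * nibble.  The helper below picks the raw byte matching the current voltage
 * and clock, then extracts the nibble for the requested bus width.
 */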
/*
 * Select the PowerClass for the current bus width
 * If power class is defined for 4/8 bit bus in the
 * extended CSD register, select it by executing the
 * mmc_switch command.
 */
static int __mmc_select_powerclass(struct mmc_card *card,
				   unsigned int bus_width)
{
	struct mmc_host *host = card->host;
	struct mmc_ext_csd *ext_csd = &card->ext_csd;
	unsigned int pwrclass_val = 0;
	int err = 0;

	switch (1 << host->ios.vdd) {
	case MMC_VDD_165_195:
		if (host->ios.clock <= MMC_HIGH_26_MAX_DTR)
			pwrclass_val = ext_csd->raw_pwr_cl_26_195;
		else if (host->ios.clock <= MMC_HIGH_52_MAX_DTR)
			pwrclass_val = (bus_width <= EXT_CSD_BUS_WIDTH_8) ?
				ext_csd->raw_pwr_cl_52_195 :
				ext_csd->raw_pwr_cl_ddr_52_195;
		else if (host->ios.clock <= MMC_HS200_MAX_DTR)
			pwrclass_val = ext_csd->raw_pwr_cl_200_195;
		break;
	case MMC_VDD_27_28:
	case MMC_VDD_28_29:
	case MMC_VDD_29_30:
	case MMC_VDD_30_31:
	case MMC_VDD_31_32:
	case MMC_VDD_32_33:
	case MMC_VDD_33_34:
	case MMC_VDD_34_35:
	case MMC_VDD_35_36:
		if (host->ios.clock <= MMC_HIGH_26_MAX_DTR)
			pwrclass_val = ext_csd->raw_pwr_cl_26_360;
		else if (host->ios.clock <= MMC_HIGH_52_MAX_DTR)
			pwrclass_val = (bus_width <= EXT_CSD_BUS_WIDTH_8) ?
				ext_csd->raw_pwr_cl_52_360 :
				ext_csd->raw_pwr_cl_ddr_52_360;
		else if (host->ios.clock <= MMC_HS200_MAX_DTR)
			pwrclass_val = (bus_width == EXT_CSD_DDR_BUS_WIDTH_8) ?
				ext_csd->raw_pwr_cl_ddr_200_360 :
				ext_csd->raw_pwr_cl_200_360;
		break;
	default:
		pr_warn("%s: Voltage range not supported for power class\n",
			mmc_hostname(host));
		return -EINVAL;
	}

	if (bus_width & (EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_BUS_WIDTH_8))
		pwrclass_val = (pwrclass_val & EXT_CSD_PWR_CL_8BIT_MASK) >>
				EXT_CSD_PWR_CL_8BIT_SHIFT;
	else
		pwrclass_val = (pwrclass_val & EXT_CSD_PWR_CL_4BIT_MASK) >>
				EXT_CSD_PWR_CL_4BIT_SHIFT;

	/* If the power class is different from the default value */
	if (pwrclass_val > 0) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_POWER_CLASS,
				 pwrclass_val,
				 card->ext_csd.generic_cmd6_time);
	}

	return err;
}
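
/*
 * Map the current ios bus width (and DDR mode) to the matching EXT_CSD bus
 * width encoding and program the corresponding power class.  Power class
 * selection only applies to 4-/8-bit buses on cards that carry an EXT_CSD.
 */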
static int mmc_select_powerclass(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	u32 bus_width, ext_csd_bits;
	int err, ddr;

	/* Power class selection is supported for versions >= 4.0 */
	if (!mmc_can_ext_csd(card))
		return 0;

	bus_width = host->ios.bus_width;
	/* Power class values are defined only for 4/8 bit bus */
	if (bus_width == MMC_BUS_WIDTH_1)
		return 0;

	ddr = card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_52;
	if (ddr)
		ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
			EXT_CSD_DDR_BUS_WIDTH_8 : EXT_CSD_DDR_BUS_WIDTH_4;
	else
		ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
			EXT_CSD_BUS_WIDTH_8 : EXT_CSD_BUS_WIDTH_4;

	err = __mmc_select_powerclass(card, ext_csd_bits);
	if (err)
		pr_warn("%s: power class selection to bus width %d ddr %d failed\n",
			mmc_hostname(host), 1 << bus_width, ddr);

	return err;
}

/*
 * Set the bus speed for the selected speed mode.
 */
static void mmc_set_bus_speed(struct mmc_card *card)
{
	unsigned int max_dtr = (unsigned int)-1;

	if ((mmc_card_hs200(card) || mmc_card_hs400(card)) &&
	    max_dtr > card->ext_csd.hs200_max_dtr)
		max_dtr = card->ext_csd.hs200_max_dtr;
	else if (mmc_card_hs(card) && max_dtr > card->ext_csd.hs_max_dtr)
		max_dtr = card->ext_csd.hs_max_dtr;
	else if (max_dtr > card->csd.max_dtr)
		max_dtr = card->csd.max_dtr;

	mmc_set_clock(card->host, max_dtr);
}

/*
 * Select the bus width among 4-bit and 8-bit (SDR).
 * If the bus width is changed successfully, return the selected width value.
 * Zero is returned instead of an error value if the wide width is not
 * supported.
 */
static int mmc_select_bus_width(struct mmc_card *card)
{
	static unsigned ext_csd_bits[] = {
		EXT_CSD_BUS_WIDTH_8,
		EXT_CSD_BUS_WIDTH_4,
	};
	static unsigned bus_widths[] = {
		MMC_BUS_WIDTH_8,
		MMC_BUS_WIDTH_4,
	};
	struct mmc_host *host = card->host;
	unsigned idx, bus_width = 0;
	int err = 0;

	if (!mmc_can_ext_csd(card) ||
	    !(host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA)))
		return 0;

	idx = (host->caps & MMC_CAP_8_BIT_DATA) ? 0 : 1;

	/*
	 * Unlike SD, MMC cards don't have a configuration register that
	 * reports the supported bus width.  So either the bus test command is
	 * run to identify the supported bus width, or the EXT_CSD read at the
	 * current bus width is compared against the EXT_CSD read earlier in
	 * 1-bit mode.
	 */
	for (; idx < ARRAY_SIZE(bus_widths); idx++) {
		/*
		 * If the host is capable of 8-bit transfer, switch the device
		 * to 8-bit transfer mode.  If the mmc switch command returns
		 * an error, fall back to 4-bit transfer mode.  On success set
		 * the corresponding bus width on the host.
		 */
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_BUS_WIDTH,
				 ext_csd_bits[idx],
				 card->ext_csd.generic_cmd6_time);
		if (err)
			continue;

		bus_width = bus_widths[idx];
		mmc_set_bus_width(host, bus_width);

		/*
		 * If the controller can't handle the bus width test, compare
		 * the EXT_CSD previously read in 1-bit mode against the
		 * EXT_CSD at the new bus width.
		 */
		if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST))
			err = mmc_compare_ext_csds(card, bus_width);
		else
			err = mmc_bus_test(card, bus_width);

		if (!err) {
			err = bus_width;
			break;
		} else {
			pr_warn("%s: switch to bus width %d failed\n",
				mmc_hostname(host), 1 << bus_width);
		}
	}

	return err;
}

/*
 * Switch to the high-speed mode
 */
static int mmc_select_hs(struct mmc_card *card)
{
	int err;

	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			   EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS,
			   card->ext_csd.generic_cmd6_time, MMC_TIMING_MMC_HS,
			   true, true);
	if (err)
		pr_warn("%s: switch to high-speed failed, err:%d\n",
			mmc_hostname(card->host), err);

	return err;
}
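
/*
 * mmc_select_hs_ddr() keeps whatever bus width mmc_select_bus_width() picked
 * and only moves the card and host to DDR52 timing, then negotiates the
 * lowest I/O voltage (vccq) both sides support.
 */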
/*
 * Activate wide bus and DDR if supported.
 */
static int mmc_select_hs_ddr(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	u32 bus_width, ext_csd_bits;
	int err = 0;

	if (!(card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_52))
		return 0;

	bus_width = host->ios.bus_width;
	if (bus_width == MMC_BUS_WIDTH_1)
		return 0;

	ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
		EXT_CSD_DDR_BUS_WIDTH_8 : EXT_CSD_DDR_BUS_WIDTH_4;

	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			   EXT_CSD_BUS_WIDTH,
			   ext_csd_bits,
			   card->ext_csd.generic_cmd6_time,
			   MMC_TIMING_MMC_DDR52,
			   true, true);
	if (err) {
		pr_err("%s: switch to bus width %d ddr failed\n",
			mmc_hostname(host), 1 << bus_width);
		return err;
	}

	/*
	 * eMMC cards can support 3.3V to 1.2V i/o (vccq) signaling.
	 *
	 * EXT_CSD_CARD_TYPE_DDR_1_8V means 3.3V or 1.8V vccq.
	 *
	 * 1.8V vccq at 3.3V core voltage (vcc) is not required
	 * in the JEDEC spec for DDR.
	 *
	 * Even though an (e)MMC card may support 3.3V to 1.2V vccq, not every
	 * host controller can, e.g. some SDHCI controllers wired to an eMMC
	 * device still need 1.8V vccq to support DDR mode.
	 *
	 * So the sequence will be:
	 * if (host and device can both support 1.2v IO)
	 *	use 1.2v IO;
	 * else if (host and device can both support 1.8v IO)
	 *	use 1.8v IO;
	 * so if host and device can only support 3.3v IO, this is the
	 * last choice.
	 *
	 * WARNING: eMMC rules are NOT the same as SD DDR
	 */
	if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_1_2V) {
		err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
		if (!err)
			return 0;
	}

	if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_1_8V &&
	    host->caps & MMC_CAP_1_8V_DDR)
		err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);

	/* make sure vccq is 3.3v after switching disaster */
	if (err)
		err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330);

	return err;
}
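
/*
 * mmc_select_hs400() is entered with the card already running in HS200.  The
 * card is stepped down to HS timing, switched to 8-bit DDR, then to HS400,
 * with the CMD6 status verified after each stage.
 */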
static int mmc_select_hs400(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	unsigned int max_dtr;
	int err = 0;
	u8 val;

	/*
	 * HS400 mode requires 8-bit bus width
	 */
	if (!(card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400 &&
	      host->ios.bus_width == MMC_BUS_WIDTH_8))
		return 0;

	/* Switch card to HS mode */
	val = EXT_CSD_TIMING_HS;
	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			   EXT_CSD_HS_TIMING, val,
			   card->ext_csd.generic_cmd6_time, 0,
			   false, true);
	if (err) {
		pr_err("%s: switch to high-speed from hs200 failed, err:%d\n",
			mmc_hostname(host), err);
		return err;
	}

	/* Set host controller to HS timing */
	mmc_set_timing(card->host, MMC_TIMING_MMC_HS);

	/* Prepare host to downgrade to HS timing */
	if (host->ops->hs400_downgrade)
		host->ops->hs400_downgrade(host);

	/* Reduce frequency to HS frequency */
	max_dtr = card->ext_csd.hs_max_dtr;
	mmc_set_clock(host, max_dtr);

	err = mmc_switch_status(card, true);
	if (err)
		goto out_err;

	if (host->ops->hs400_prepare_ddr)
		host->ops->hs400_prepare_ddr(host);

	/* Switch card to DDR */
	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_BUS_WIDTH,
			 EXT_CSD_DDR_BUS_WIDTH_8,
			 card->ext_csd.generic_cmd6_time);
	if (err) {
		pr_err("%s: switch to bus width for hs400 failed, err:%d\n",
			mmc_hostname(host), err);
		return err;
	}

	/* Switch card to HS400 */
	val = EXT_CSD_TIMING_HS400 |
	      card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			   EXT_CSD_HS_TIMING, val,
			   card->ext_csd.generic_cmd6_time, 0,
			   false, true);
	if (err) {
		pr_err("%s: switch to hs400 failed, err:%d\n",
			mmc_hostname(host), err);
		return err;
	}

	/* Set host controller to HS400 timing and frequency */
	mmc_set_timing(host, MMC_TIMING_MMC_HS400);
	mmc_set_bus_speed(card);

	if (host->ops->hs400_complete)
		host->ops->hs400_complete(host);

	err = mmc_switch_status(card, true);
	if (err)
		goto out_err;

	return 0;

out_err:
	pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host),
	       __func__, err);
	return err;
}
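
/*
 * Helpers used around re-tuning: mmc_hs200_to_hs400() completes the HS400
 * switch once HS200 tuning has finished, and mmc_hs400_to_hs200() walks the
 * card back down (HS400 -> HS DDR -> HS -> HS200) so tuning can be re-run.
 */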
int mmc_hs200_to_hs400(struct mmc_card *card)
{
	return mmc_select_hs400(card);
}

int mmc_hs400_to_hs200(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	unsigned int max_dtr;
	int err;
	u8 val;

	/* Reduce frequency to HS */
	max_dtr = card->ext_csd.hs_max_dtr;
	mmc_set_clock(host, max_dtr);

	/* Switch HS400 to HS DDR */
	val = EXT_CSD_TIMING_HS;
	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
			   val, card->ext_csd.generic_cmd6_time, 0,
			   false, true);
	if (err)
		goto out_err;

	mmc_set_timing(host, MMC_TIMING_MMC_DDR52);

	err = mmc_switch_status(card, true);
	if (err)
		goto out_err;

	/* Switch HS DDR to HS */
	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
			   EXT_CSD_BUS_WIDTH_8, card->ext_csd.generic_cmd6_time,
			   0, false, true);
	if (err)
		goto out_err;

	mmc_set_timing(host, MMC_TIMING_MMC_HS);

	if (host->ops->hs400_downgrade)
		host->ops->hs400_downgrade(host);

	err = mmc_switch_status(card, true);
	if (err)
		goto out_err;

	/* Switch HS to HS200 */
	val = EXT_CSD_TIMING_HS200 |
	      card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
			   val, card->ext_csd.generic_cmd6_time, 0,
			   false, true);
	if (err)
		goto out_err;

	mmc_set_timing(host, MMC_TIMING_MMC_HS200);

	/*
	 * For HS200, CRC errors are not a reliable way to know the switch
	 * failed. If there really is a problem, we would expect tuning will
	 * fail and the result ends up the same.
	 */
	err = mmc_switch_status(card, false);
	if (err)
		goto out_err;

	mmc_set_bus_speed(card);

	/* Prepare tuning for HS400 mode. */
	if (host->ops->prepare_hs400_tuning)
		host->ops->prepare_hs400_tuning(host, &host->ios);

	return 0;

out_err:
	pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host),
	       __func__, err);
	return err;
}
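
/*
 * Pick a driver strength for HS200/HS400 from the types the card reports in
 * EXT_CSD.  A host-imposed fixed_drv_type is honoured when the card supports
 * it; otherwise mmc_select_drive_strength() lets the host choose.
 */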
static void mmc_select_driver_type(struct mmc_card *card)
{
	int card_drv_type, drive_strength, drv_type = 0;
	int fixed_drv_type = card->host->fixed_drv_type;

	card_drv_type = card->ext_csd.raw_driver_strength |
			mmc_driver_type_mask(0);

	if (fixed_drv_type >= 0)
		drive_strength = card_drv_type & mmc_driver_type_mask(fixed_drv_type)
				 ? fixed_drv_type : 0;
	else
		drive_strength = mmc_select_drive_strength(card,
							   card->ext_csd.hs200_max_dtr,
							   card_drv_type, &drv_type);

	card->drive_strength = drive_strength;

	if (drv_type)
		mmc_set_driver_type(card->host, drv_type);
}
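
/*
 * HS400 with enhanced strobe: unlike plain HS400 this path needs no HS200
 * tuning.  The card is brought up in HS, switched to 8-bit DDR with the
 * strobe bit set, and then to HS400 timing with enhanced strobe enabled on
 * the host.
 */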
static int mmc_select_hs400es(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	int err = -EINVAL;
	u8 val;

	if (!(host->caps & MMC_CAP_8_BIT_DATA)) {
		err = -ENOTSUPP;
		goto out_err;
	}

	if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400_1_2V)
		err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);

	if (err && card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400_1_8V)
		err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);

	/* If fails try again during next card power cycle */
	if (err)
		goto out_err;

	err = mmc_select_bus_width(card);
	if (err != MMC_BUS_WIDTH_8) {
		pr_err("%s: switch to 8bit bus width failed, err:%d\n",
			mmc_hostname(host), err);
		err = err < 0 ? err : -ENOTSUPP;
		goto out_err;
	}

	/* Switch card to HS mode */
	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			   EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS,
			   card->ext_csd.generic_cmd6_time, 0,
			   false, true);
	if (err) {
		pr_err("%s: switch to hs for hs400es failed, err:%d\n",
			mmc_hostname(host), err);
		goto out_err;
	}

	mmc_set_timing(host, MMC_TIMING_MMC_HS);
	err = mmc_switch_status(card, true);
	if (err)
		goto out_err;

	mmc_set_clock(host, card->ext_csd.hs_max_dtr);

	/* Switch card to DDR with strobe bit */
	val = EXT_CSD_DDR_BUS_WIDTH_8 | EXT_CSD_BUS_WIDTH_STROBE;
	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_BUS_WIDTH,
			 val,
			 card->ext_csd.generic_cmd6_time);
	if (err) {
		pr_err("%s: switch to bus width for hs400es failed, err:%d\n",
			mmc_hostname(host), err);
		goto out_err;
	}

	mmc_select_driver_type(card);

	/* Switch card to HS400 */
	val = EXT_CSD_TIMING_HS400 |
	      card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			   EXT_CSD_HS_TIMING, val,
			   card->ext_csd.generic_cmd6_time, 0,
			   false, true);
	if (err) {
		pr_err("%s: switch to hs400es failed, err:%d\n",
			mmc_hostname(host), err);
		goto out_err;
	}

	/* Set host controller to HS400 timing and frequency */
	mmc_set_timing(host, MMC_TIMING_MMC_HS400);

	/* Controller enable enhanced strobe function */
	host->ios.enhanced_strobe = true;
	if (host->ops->hs400_enhanced_strobe)
		host->ops->hs400_enhanced_strobe(host, &host->ios);

	err = mmc_switch_status(card, true);
	if (err)
		goto out_err;

	return 0;

out_err:
	pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host),
	       __func__, err);
	return err;
}

/*
 * For a device supporting HS200 mode, the following sequence
 * should be done before executing the tuning process.
 * 1. set the desired bus width (4-bit or 8-bit, 1-bit is not supported)
 * 2. switch to HS200 mode
 * 3. set the clock to > 52 MHz and <= 200 MHz
 */
static int mmc_select_hs200(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	unsigned int old_timing, old_signal_voltage;
	int err = -EINVAL;
	u8 val;

	old_signal_voltage = host->ios.signal_voltage;
	if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200_1_2V)
		err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);

	if (err && card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200_1_8V)
		err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);

	/* If fails try again during next card power cycle */
	if (err)
		return err;

	mmc_select_driver_type(card);

	/*
	 * Set the bus width (4 or 8) with the host's support and
	 * switch to HS200 mode if the bus width is set successfully.
	 */
	err = mmc_select_bus_width(card);
	if (err > 0) {
		val = EXT_CSD_TIMING_HS200 |
		      card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
		err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				   EXT_CSD_HS_TIMING, val,
				   card->ext_csd.generic_cmd6_time, 0,
				   false, true);
		if (err)
			goto err;
		old_timing = host->ios.timing;
		mmc_set_timing(host, MMC_TIMING_MMC_HS200);

		/*
		 * For HS200, CRC errors are not a reliable way to know the
		 * switch failed. If there really is a problem, we would expect
		 * tuning will fail and the result ends up the same.
		 */
		err = mmc_switch_status(card, false);

		/*
		 * mmc_select_timing() assumes timing has not changed if
		 * it is a switch error.
		 */
		if (err == -EBADMSG)
			mmc_set_timing(host, old_timing);
	}
err:
	if (err) {
		/* fall back to the old signal voltage, if fails report error */
		if (mmc_set_signal_voltage(host, old_signal_voltage))
			err = -EIO;

		pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host),
		       __func__, err);
	}
	return err;
}

/*
 * Activate High Speed, HS200 or HS400ES mode if supported.
 */
static int mmc_select_timing(struct mmc_card *card)
{
	int err = 0;

	if (!mmc_can_ext_csd(card))
		goto bus_speed;

	if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400ES)
		err = mmc_select_hs400es(card);
	else if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200)
		err = mmc_select_hs200(card);
	else if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS)
		err = mmc_select_hs(card);

	if (err && err != -EBADMSG)
		return err;

bus_speed:
	/*
	 * Set the bus speed to the selected bus timing.
	 * If timing is not selected, backward compatible is the default.
	 */
	mmc_set_bus_speed(card);
	return 0;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Execute tuning sequence to seek the proper bus operating
|
2014-04-23 15:14:58 +07:00
|
|
|
* conditions for HS200 and HS400, which sends CMD21 to the device.
|
2014-04-23 15:08:44 +07:00
|
|
|
*/
|
|
|
|
static int mmc_hs200_tuning(struct mmc_card *card)
|
|
|
|
{
|
|
|
|
struct mmc_host *host = card->host;
|
|
|
|
|
2014-04-23 15:14:58 +07:00
|
|
|
/*
|
|
|
|
* Timing should be adjusted to the HS400 target
|
|
|
|
* operation frequency for the tuning process
|
|
|
|
*/
|
|
|
|
if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400 &&
|
|
|
|
host->ios.bus_width == MMC_BUS_WIDTH_8)
|
|
|
|
if (host->ops->prepare_hs400_tuning)
|
|
|
|
host->ops->prepare_hs400_tuning(host, &host->ios);
|
|
|
|
|
2014-12-06 00:40:59 +07:00
|
|
|
return mmc_execute_tuning(card);
|
2014-04-23 15:08:44 +07:00
|
|
|
}
|
|
|
|
|
2006-12-31 06:11:32 +07:00
|
|
|
/*
|
2007-05-01 21:00:02 +07:00
|
|
|
* Handle the detection and initialisation of a card.
|
|
|
|
*
|
2008-06-17 09:20:57 +07:00
|
|
|
* In the case of a resume, "oldcard" will contain the card
|
2007-05-01 21:00:02 +07:00
|
|
|
* we're trying to reinitialise.
|
2006-12-31 06:11:32 +07:00
|
|
|
*/
|
2007-05-19 21:14:43 +07:00
|
|
|
static int mmc_init_card(struct mmc_host *host, u32 ocr,
|
2007-05-01 21:00:02 +07:00
|
|
|
struct mmc_card *oldcard)
|
2006-12-31 06:11:32 +07:00
|
|
|
{
|
|
|
|
struct mmc_card *card;
|
2014-04-23 15:08:44 +07:00
|
|
|
int err;
|
2006-12-31 06:11:32 +07:00
|
|
|
u32 cid[4];
|
2011-02-14 14:13:09 +07:00
|
|
|
u32 rocr;
|
2006-12-31 06:11:32 +07:00
|
|
|
|
2007-08-09 18:23:56 +07:00
|
|
|
WARN_ON(!host->claimed);
|
2006-12-31 06:11:32 +07:00
|
|
|
|
2011-09-15 22:50:38 +07:00
|
|
|
/* Set correct bus mode for MMC before attempting init */
|
|
|
|
if (!mmc_host_is_spi(host))
|
|
|
|
mmc_set_bus_mode(host, MMC_BUSMODE_OPENDRAIN);
|
|
|
|
|
2006-12-31 06:11:32 +07:00
|
|
|
/*
|
|
|
|
* Since we're changing the OCR value, we seem to
|
|
|
|
* need to tell some cards to go back to the idle
|
|
|
|
* state. We wait 1ms to give cards time to
|
|
|
|
* respond.
|
2011-09-08 23:38:39 +07:00
|
|
|
* mmc_go_idle is needed for eMMCs that are asleep
|
2006-12-31 06:11:32 +07:00
|
|
|
*/
|
|
|
|
mmc_go_idle(host);
|
|
|
|
|
|
|
|
/* The extra bit indicates that we support high capacity */
|
2011-02-14 14:13:09 +07:00
|
|
|
err = mmc_send_op_cond(host, ocr | (1 << 30), &rocr);
|
2007-07-23 03:18:46 +07:00
|
|
|
if (err)
|
2007-05-01 21:00:02 +07:00
|
|
|
goto err;
|
2006-12-31 06:11:32 +07:00
|
|
|
|
MMC core learns about SPI
Teach the MMC/SD/SDIO core about using SPI mode.
- Use mmc_host_is_spi() so enumeration works through SPI signaling
and protocols, not just the native versions.
- Provide the SPI response type flags with each request issued,
including requests from the new lock/unlock code.
- Understand that cmd->resp[0] and mmc_get_status() results for SPI
return different values than for "native" MMC/SD protocol; this
affects resetting, checking card lock status, and some others.
- Understand that some commands act a bit differently ... notably:
* OP_COND command doesn't return the OCR
* APP_CMD status doesn't have an R1_APP_CMD analogue
Those changes required some new and updated primitives:
- Provide utilities to access two SPI-only requests, and one
request that wasn't previously needed:
* mmc_spi_read_ocr() ... SPI only
* mmc_spi_set_crc() ... SPI only (override by module parm)
* mmc_send_cid() ... for use without broadcast mode
- Updated internal routines:
* Previous mmc_send_csd() modified into mmc_send_cxd_native();
it uses native "R2" responses, which include 16 bytes of data.
* Previous mmc_send_ext_csd() becomes new mmc_send_cxd_data()
helper for command-and-data access
* Bugfix to that mmc_send_cxd_data() code: dma-to-stack is
unsafe/nonportable, so kmalloc a bounce buffer instead.
- Modified mmc_send_ext_csd() now uses mmc_send_cxd_data() helper
- Modified mmc_send_csd(), and new mmc_spi_send_cid(), routines use
those helper routines based on whether they're native or SPI
The newest categories of cards supported by the MMC stack aren't expected
to work yet with SPI: MMC or SD cards with over 4GB data, and SDIO.
All those cards support SPI mode, so eventually they should work too.
Signed-off-by: David Brownell <dbrownell@users.sourceforge.net>
Signed-off-by: Pierre Ossman <drzeus@drzeus.cx>
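One of the points above deserves a concrete illustration: DMA to an on-stack buffer is unsafe and non-portable, so the command-and-data helper allocates a bounce buffer with kmalloc() and copies the result out. A minimal, hypothetical sketch of that pattern (example_do_transfer() is an illustrative stand-in for the actual data transfer, not a real helper):

	static int example_read_reg(struct mmc_card *card, void *dst, size_t len)
	{
		void *buf;
		int err;

		/* kmalloc'ed memory is DMA-safe; the caller's buffer may not be. */
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		err = example_do_transfer(card, buf, len);	/* hypothetical transfer helper */
		if (!err)
			memcpy(dst, buf, len);

		kfree(buf);
		return err;
	}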
2007-08-08 23:11:32 +07:00
|
|
|
/*
|
|
|
|
* For SPI, enable CRC as appropriate.
|
|
|
|
*/
|
|
|
|
if (mmc_host_is_spi(host)) {
|
|
|
|
err = mmc_spi_set_crc(host, use_spi_crc);
|
|
|
|
if (err)
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
2006-12-31 06:11:32 +07:00
|
|
|
/*
|
|
|
|
* Fetch CID from card.
|
|
|
|
*/
|
2017-06-08 20:27:43 +07:00
|
|
|
err = mmc_send_cid(host, cid);
|
2007-07-23 03:18:46 +07:00
|
|
|
if (err)
|
2006-12-31 06:11:32 +07:00
|
|
|
goto err;
|
|
|
|
|
2007-05-01 21:00:02 +07:00
|
|
|
if (oldcard) {
|
2007-07-23 04:08:30 +07:00
|
|
|
if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0) {
|
2019-02-28 13:08:28 +07:00
|
|
|
pr_debug("%s: Perhaps the card was replaced\n",
|
|
|
|
mmc_hostname(host));
|
2007-07-23 04:08:30 +07:00
|
|
|
err = -ENOENT;
|
2007-05-01 21:00:02 +07:00
|
|
|
goto err;
|
2007-07-23 04:08:30 +07:00
|
|
|
}
|
2007-05-01 21:00:02 +07:00
|
|
|
|
|
|
|
card = oldcard;
|
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* Allocate card structure.
|
|
|
|
*/
|
2008-03-22 05:54:50 +07:00
|
|
|
card = mmc_alloc_card(host, &mmc_type);
|
2007-07-23 04:08:30 +07:00
|
|
|
if (IS_ERR(card)) {
|
|
|
|
err = PTR_ERR(card);
|
2007-05-01 21:00:02 +07:00
|
|
|
goto err;
|
2007-07-23 04:08:30 +07:00
|
|
|
}
|
2006-12-31 06:11:32 +07:00
|
|
|
|
2013-09-13 16:31:33 +07:00
|
|
|
card->ocr = ocr;
|
2007-05-01 21:00:02 +07:00
|
|
|
card->type = MMC_TYPE_MMC;
|
|
|
|
card->rca = 1;
|
|
|
|
memcpy(card->raw_cid, cid, sizeof(card->raw_cid));
|
|
|
|
}
|
2006-12-31 06:11:32 +07:00
|
|
|
|
2014-12-03 06:42:45 +07:00
|
|
|
/*
|
|
|
|
* Call the optional HC's init_card function to handle quirks.
|
|
|
|
*/
|
|
|
|
if (host->ops->init_card)
|
|
|
|
host->ops->init_card(host, card);
|
|
|
|
|
2006-12-31 06:11:32 +07:00
|
|
|
/*
|
2007-08-08 23:11:32 +07:00
|
|
|
* For native busses: set card RCA and quit open drain mode.
|
2006-12-31 06:11:32 +07:00
|
|
|
*/
|
2007-08-08 23:11:32 +07:00
|
|
|
if (!mmc_host_is_spi(host)) {
|
|
|
|
err = mmc_set_relative_addr(card);
|
|
|
|
if (err)
|
|
|
|
goto free_card;
|
2006-12-31 06:11:32 +07:00
|
|
|
|
2007-08-08 23:11:32 +07:00
|
|
|
mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
|
|
|
|
}
|
2006-12-31 06:11:32 +07:00
|
|
|
|
2007-05-01 21:00:02 +07:00
|
|
|
if (!oldcard) {
|
|
|
|
/*
|
|
|
|
* Fetch CSD from card.
|
|
|
|
*/
|
|
|
|
err = mmc_send_csd(card, card->raw_csd);
|
2007-07-23 03:18:46 +07:00
|
|
|
if (err)
|
2007-05-01 21:00:02 +07:00
|
|
|
goto free_card;
|
2006-12-31 06:11:32 +07:00
|
|
|
|
2007-05-01 21:11:57 +07:00
|
|
|
err = mmc_decode_csd(card);
|
2007-07-23 04:08:30 +07:00
|
|
|
if (err)
|
2007-05-01 21:11:57 +07:00
|
|
|
goto free_card;
|
|
|
|
err = mmc_decode_cid(card);
|
2007-07-23 04:08:30 +07:00
|
|
|
if (err)
|
2007-05-01 21:11:57 +07:00
|
|
|
goto free_card;
|
2007-05-01 21:00:02 +07:00
|
|
|
}
|
2006-12-31 06:11:32 +07:00
|
|
|
|
2014-08-19 15:45:51 +07:00
|
|
|
/*
|
|
|
|
* Handle DSR configuration only for cards supporting DSR and hosts
|
|
|
|
* requesting it.
|
|
|
|
*/
|
|
|
|
if (card->csd.dsr_imp && host->dsr_req)
|
|
|
|
mmc_set_dsr(host);
|
|
|
|
|
2006-12-31 06:11:32 +07:00
|
|
|
/*
|
2007-05-01 20:08:30 +07:00
|
|
|
* Select card, as all following commands rely on that.
|
2006-12-31 06:11:32 +07:00
|
|
|
*/
|
2007-08-08 23:11:32 +07:00
|
|
|
if (!mmc_host_is_spi(host)) {
|
|
|
|
err = mmc_select_card(card);
|
|
|
|
if (err)
|
|
|
|
goto free_card;
|
|
|
|
}
|
2006-12-31 06:11:32 +07:00
|
|
|
|
2007-05-01 21:00:02 +07:00
|
|
|
if (!oldcard) {
|
2014-10-20 18:37:24 +07:00
|
|
|
/* Read extended CSD. */
|
|
|
|
err = mmc_read_ext_csd(card);
|
2007-07-23 03:18:46 +07:00
|
|
|
if (err)
|
2007-05-01 21:00:02 +07:00
|
|
|
goto free_card;
|
2011-02-14 14:13:09 +07:00
|
|
|
|
2016-04-15 18:16:12 +07:00
|
|
|
/*
|
|
|
|
* If doing byte addressing, check if required to do sector
|
2011-02-14 14:13:09 +07:00
|
|
|
* addressing. Handle the case of <2GB cards needing sector
|
|
|
|
* addressing. See section 8.1 JEDEC Standard JED84-A441;
|
|
|
|
* ocr register has bit 30 set for sector addressing.
|
|
|
|
*/
|
2016-04-15 18:16:12 +07:00
|
|
|
if (rocr & BIT(30))
|
2011-02-14 14:13:09 +07:00
|
|
|
mmc_card_set_blockaddr(card);
|
|
|
|
|
mmc: add erase, secure erase, trim and secure trim operations
SD/MMC cards tend to support an erase operation. In addition, eMMC v4.4
cards can support secure erase, trim and secure trim operations that are
all variants of the basic erase command.
SD/MMC device attributes "erase_size" and "preferred_erase_size" have been
added.
"erase_size" is the minimum size, in bytes, of an erase operation. For
MMC, "erase_size" is the erase group size reported by the card. Note that
"erase_size" does not apply to trim or secure trim operations where the
minimum size is always one 512 byte sector. For SD, "erase_size" is 512
if the card is block-addressed, 0 otherwise.
SD/MMC cards can erase an arbitrarily large area up to and
including the whole card. When erasing a large area it may
be desirable to do it in smaller chunks for three reasons:
1. A single erase command will make all other I/O on the card
wait. This is not a problem if the whole card is being erased, but
erasing one partition will make I/O for another partition on the
same card wait for the duration of the erase - which could be
several minutes.
2. To be able to inform the user of erase progress.
3. The erase timeout becomes too large to be very useful.
Because the erase timeout contains a margin which is multiplied by
the size of the erase area, the value can end up being several
minutes for large areas.
"erase_size" is not the most efficient unit to erase (especially for SD
where it is just one sector), hence "preferred_erase_size" provides a good
chunk size for erasing large areas.
For MMC, "preferred_erase_size" is the high-capacity erase size if a card
specifies one, otherwise it is based on the capacity of the card.
For SD, "preferred_erase_size" is the allocation unit size specified by
the card.
"preferred_erase_size" is in bytes.
Signed-off-by: Adrian Hunter <adrian.hunter@nokia.com>
Acked-by: Jens Axboe <axboe@kernel.dk>
Cc: Kyungmin Park <kmpark@infradead.org>
Cc: Madhusudhan Chikkature <madhu.cr@ti.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Ben Gardiner <bengardiner@nanometrics.ca>
Cc: <linux-mmc@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
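A minimal, hypothetical sketch of erasing a large region in preferred-size chunks, built on the core helpers this patch adds (mmc_erase() and card->pref_erase, the preferred erase size in sectors); example_erase_region() itself is illustrative and not part of the patch:

	static int example_erase_region(struct mmc_card *card,
					unsigned int from, unsigned int nr)
	{
		unsigned int chunk = card->pref_erase;	/* preferred erase size, in sectors */
		int err = 0;

		/* Erase in chunks so other I/O on the card is not stalled for
		 * minutes and so progress can be reported to the user.
		 */
		while (nr && !err) {
			unsigned int cnt = min(nr, chunk);

			err = mmc_erase(card, from, cnt, MMC_ERASE_ARG);
			from += cnt;
			nr -= cnt;
		}
		return err;
	}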
2010-08-12 04:17:46 +07:00
|
|
|
/* Erase size depends on CSD and Extended CSD */
|
|
|
|
mmc_set_erase_size(card);
|
2007-05-01 21:00:02 +07:00
|
|
|
}
|
2006-12-31 06:11:32 +07:00
|
|
|
|
2017-06-08 20:23:08 +07:00
|
|
|
/* Enable ERASE_GRP_DEF. This bit is lost after a reset or power off. */
|
|
|
|
if (card->ext_csd.rev >= 3) {
|
2011-01-22 03:09:41 +07:00
|
|
|
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
|
2011-09-23 12:15:29 +07:00
|
|
|
EXT_CSD_ERASE_GROUP_DEF, 1,
|
|
|
|
card->ext_csd.generic_cmd6_time);
|
2011-01-22 03:09:41 +07:00
|
|
|
|
|
|
|
if (err && err != -EBADMSG)
|
|
|
|
goto free_card;
|
|
|
|
|
|
|
|
if (err) {
|
|
|
|
err = 0;
|
|
|
|
/*
|
|
|
|
* Just disable enhanced area off & sz;
|
|
|
|
* we will try to enable ERASE_GROUP_DEF
|
|
|
|
* again at the next reinit.
|
|
|
|
*/
|
|
|
|
card->ext_csd.enhanced_area_offset = -EINVAL;
|
|
|
|
card->ext_csd.enhanced_area_size = -EINVAL;
|
|
|
|
} else {
|
|
|
|
card->ext_csd.erase_group_def = 1;
|
|
|
|
/*
|
|
|
|
* ERASE_GRP_DEF was enabled successfully.
|
|
|
|
* This will affect the erase size, so
|
|
|
|
* we need to reset the erase size here.
|
|
|
|
*/
|
|
|
|
mmc_set_erase_size(card);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-03-20 01:10:33 +07:00
|
|
|
/*
|
|
|
|
* Ensure eMMC user default partition is enabled
|
|
|
|
*/
|
2011-04-12 06:10:25 +07:00
|
|
|
if (card->ext_csd.part_config & EXT_CSD_PART_CONFIG_ACC_MASK) {
|
|
|
|
card->ext_csd.part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
|
|
|
|
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONFIG,
|
|
|
|
card->ext_csd.part_config,
|
|
|
|
card->ext_csd.part_time);
|
|
|
|
if (err && err != -EBADMSG)
|
|
|
|
goto free_card;
|
2011-03-20 01:10:33 +07:00
|
|
|
}
|
|
|
|
|
2011-10-13 13:34:16 +07:00
|
|
|
/*
|
2013-06-10 22:03:45 +07:00
|
|
|
* Enable power_off_notification byte in the ext_csd register
|
2011-10-13 13:34:16 +07:00
|
|
|
*/
|
2013-06-10 22:03:45 +07:00
|
|
|
if (card->ext_csd.rev >= 6) {
|
2011-10-13 13:34:16 +07:00
|
|
|
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
|
|
|
|
EXT_CSD_POWER_OFF_NOTIFICATION,
|
|
|
|
EXT_CSD_POWER_ON,
|
|
|
|
card->ext_csd.generic_cmd6_time);
|
|
|
|
if (err && err != -EBADMSG)
|
|
|
|
goto free_card;
|
|
|
|
|
2011-11-04 17:52:47 +07:00
|
|
|
/*
|
|
|
|
* The err can be -EBADMSG or 0,
|
|
|
|
* so check for success and update the flag
|
|
|
|
*/
|
|
|
|
if (!err)
|
2012-10-05 23:45:39 +07:00
|
|
|
card->ext_csd.power_off_notification = EXT_CSD_POWER_ON;
|
2011-11-04 17:52:47 +07:00
|
|
|
}
|
2011-10-13 13:34:16 +07:00
|
|
|
|
2019-02-06 18:28:05 +07:00
|
|
|
/* set erase_arg */
|
|
|
|
if (mmc_can_discard(card))
|
|
|
|
card->erase_arg = MMC_DISCARD_ARG;
|
|
|
|
else if (mmc_can_trim(card))
|
|
|
|
card->erase_arg = MMC_TRIM_ARG;
|
|
|
|
else
|
|
|
|
card->erase_arg = MMC_ERASE_ARG;
|
|
|
|
|
2007-05-01 20:08:30 +07:00
|
|
|
/*
|
2014-04-23 15:08:44 +07:00
|
|
|
* Select timing interface
|
2010-10-01 04:37:23 +07:00
|
|
|
*/
|
2014-04-23 15:08:44 +07:00
|
|
|
err = mmc_select_timing(card);
|
|
|
|
if (err)
|
|
|
|
goto free_card;
|
2010-10-01 04:37:23 +07:00
|
|
|
|
2012-01-12 02:04:52 +07:00
|
|
|
if (mmc_card_hs200(card)) {
|
2014-04-23 15:08:44 +07:00
|
|
|
err = mmc_hs200_tuning(card);
|
|
|
|
if (err)
|
2014-10-01 19:14:11 +07:00
|
|
|
goto free_card;
|
2014-04-23 15:14:58 +07:00
|
|
|
|
|
|
|
err = mmc_select_hs400(card);
|
|
|
|
if (err)
|
2014-10-01 19:14:11 +07:00
|
|
|
goto free_card;
|
2017-03-02 05:11:47 +07:00
|
|
|
} else if (!mmc_card_hs400es(card)) {
|
2014-04-23 15:08:44 +07:00
|
|
|
/* Select the desired bus width optionally */
|
|
|
|
err = mmc_select_bus_width(card);
|
2017-02-13 18:46:41 +07:00
|
|
|
if (err > 0 && mmc_card_hs(card)) {
|
2014-04-23 15:08:44 +07:00
|
|
|
err = mmc_select_hs_ddr(card);
|
|
|
|
if (err)
|
2014-10-01 19:14:11 +07:00
|
|
|
goto free_card;
|
2009-09-23 06:44:37 +07:00
|
|
|
}
|
2007-05-01 20:08:30 +07:00
|
|
|
}
|
|
|
|
|
2014-04-23 15:08:05 +07:00
|
|
|
/*
|
|
|
|
* Choose the power class for the selected bus interface
|
|
|
|
*/
|
|
|
|
mmc_select_powerclass(card);
|
|
|
|
|
2012-03-06 19:29:12 +07:00
|
|
|
/*
|
|
|
|
* Enable HPI feature (if supported)
|
|
|
|
*/
|
|
|
|
if (card->ext_csd.hpi) {
|
|
|
|
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
|
|
|
|
EXT_CSD_HPI_MGMT, 1,
|
|
|
|
card->ext_csd.generic_cmd6_time);
|
|
|
|
if (err && err != -EBADMSG)
|
|
|
|
goto free_card;
|
|
|
|
if (err) {
|
2014-09-13 04:56:56 +07:00
|
|
|
pr_warn("%s: Enabling HPI failed\n",
|
|
|
|
mmc_hostname(card->host));
|
2018-12-10 23:52:36 +07:00
|
|
|
card->ext_csd.hpi_en = 0;
|
2012-03-06 19:29:12 +07:00
|
|
|
err = 0;
|
2018-12-10 23:52:36 +07:00
|
|
|
} else {
|
2012-03-06 19:29:12 +07:00
|
|
|
card->ext_csd.hpi_en = 1;
|
2018-12-10 23:52:36 +07:00
|
|
|
}
|
2012-03-06 19:29:12 +07:00
|
|
|
}
|
|
|
|
|
2011-10-14 12:03:21 +07:00
|
|
|
/*
|
2018-12-10 23:52:38 +07:00
|
|
|
* If cache size is higher than 0, this indicates the existence of cache
|
|
|
|
* and it can be turned on. Note that some eMMCs from Micron have been
|
|
|
|
* reported to need ~800 ms timeout when enabling the cache after
|
|
|
|
* sudden power failure tests. Let's extend the timeout to a minimum of
|
|
|
|
* MIN_CACHE_EN_TIMEOUT_MS and do it for all cards.
|
2011-10-14 12:03:21 +07:00
|
|
|
*/
|
2018-12-10 23:52:37 +07:00
|
|
|
if (card->ext_csd.cache_size > 0) {
|
2018-12-10 23:52:38 +07:00
|
|
|
unsigned int timeout_ms = MIN_CACHE_EN_TIMEOUT_MS;
|
|
|
|
|
|
|
|
timeout_ms = max(card->ext_csd.generic_cmd6_time, timeout_ms);
|
2011-10-14 12:03:21 +07:00
|
|
|
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
|
2018-12-10 23:52:38 +07:00
|
|
|
EXT_CSD_CACHE_CTRL, 1, timeout_ms);
|
2011-10-14 12:03:21 +07:00
|
|
|
if (err && err != -EBADMSG)
|
|
|
|
goto free_card;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The cache has been turned on successfully only if there was no error.
|
|
|
|
*/
|
2011-12-09 15:47:17 +07:00
|
|
|
if (err) {
|
2014-09-13 04:56:56 +07:00
|
|
|
pr_warn("%s: Cache is supported, but failed to turn on (%d)\n",
|
|
|
|
mmc_hostname(card->host), err);
|
2011-12-09 15:47:17 +07:00
|
|
|
card->ext_csd.cache_ctrl = 0;
|
|
|
|
err = 0;
|
|
|
|
} else {
|
|
|
|
card->ext_csd.cache_ctrl = 1;
|
|
|
|
}
|
2011-10-14 12:03:21 +07:00
|
|
|
}
|
|
|
|
|
2017-09-22 19:36:53 +07:00
|
|
|
/*
|
|
|
|
* Enable Command Queue if supported. Note that Packed Commands cannot
|
|
|
|
* be used with Command Queue.
|
|
|
|
*/
|
|
|
|
card->ext_csd.cmdq_en = false;
|
|
|
|
if (card->ext_csd.cmdq_support && host->caps2 & MMC_CAP2_CQE) {
|
|
|
|
err = mmc_cmdq_enable(card);
|
|
|
|
if (err && err != -EBADMSG)
|
|
|
|
goto free_card;
|
|
|
|
if (err) {
|
|
|
|
pr_warn("%s: Enabling CMDQ failed\n",
|
|
|
|
mmc_hostname(card->host));
|
|
|
|
card->ext_csd.cmdq_support = false;
|
|
|
|
card->ext_csd.cmdq_depth = 0;
|
|
|
|
err = 0;
|
|
|
|
}
|
|
|
|
}
|
2017-03-13 19:36:38 +07:00
|
|
|
/*
|
|
|
|
* In some cases (e.g. RPMB or mmc_test), the Command Queue must be
|
|
|
|
* disabled for a time, so a flag is needed to indicate that the
|
|
|
|
* Command Queue should be re-enabled.
|
|
|
|
*/
|
|
|
|
card->reenable_cmdq = card->ext_csd.cmdq_en;
|
|
|
|
|
mmc: Add MMC host software queue support
Currently the MMC read/write stack always waits for the previous request to
be completed by mmc_blk_rw_wait() before sending a new request to the
hardware, or queues a work item to complete the request. That brings
context-switching overhead and spends extra time polling the card for busy
completion (by sending CMD13) for I/O writes, which hurts I/O performance,
especially at high I/O-per-second rates.
Thus this patch introduces an MMC software queue interface built on the
hardware command queue engine's interfaces. It follows the same idea as the
hardware command queue engine and removes the context switching. Moreover,
we set the default queue depth of the software queue to 64, which allows
more requests to be prepared, merged and inserted into the I/O scheduler to
improve performance, but we only allow 2 requests in flight; that is enough
to let the irq handler always trigger the next request without a context
switch, while also avoiding long latency.
In addition, the host controller should support HW busy detection for I/O
operations when the host software queue is enabled. That means the host
controller must not complete a data transfer request until the card stops
signalling busy.
From the fio test data in the cover letter, we can see that the software
queue improves performance at a 4K block size: roughly 16% better for
random read and about 90% better for random write, though there is no
obvious improvement for sequential read and write.
Moreover, we can extend the software queue interface to support MMC packed
requests or packed commands in the future.
Reviewed-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Baolin Wang <baolin.wang@linaro.org>
Signed-off-by: Baolin Wang <baolin.wang7@gmail.com>
Link: https://lore.kernel.org/r/4409c1586a9b3ed20d57ad2faf6c262fc3ccb6e2.1581478568.git.baolin.wang7@gmail.com
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
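A conceptual, hypothetical sketch of the dispatch idea described above; struct example_hsq, example_hsq_next() and the EXAMPLE_* names are illustrative stand-ins rather than the real mmc_hsq implementation. The point is that at most two requests are in flight, so the completion path can kick off the next prepared request directly, without a context switch:

	#define EXAMPLE_HSQ_IN_FLIGHT	2

	struct example_hsq {
		struct mmc_host *host;
		int in_flight;
		/* plus a 64-slot request queue, a lock, etc. */
	};

	static void example_hsq_pump(struct example_hsq *hsq)
	{
		while (hsq->in_flight < EXAMPLE_HSQ_IN_FLIGHT) {
			/* Next prepared request from the 64-deep software queue. */
			struct mmc_request *mrq = example_hsq_next(hsq);

			if (!mrq)
				break;
			hsq->in_flight++;
			hsq->host->ops->request(hsq->host, mrq);
		}
	}

	/* Called from the host's completion (irq) path. */
	static void example_hsq_complete(struct example_hsq *hsq,
					 struct mmc_request *mrq)
	{
		hsq->in_flight--;
		mmc_request_done(hsq->host, mrq);
		example_hsq_pump(hsq);	/* issue the next request immediately */
	}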
2020-02-12 11:12:56 +07:00
|
|
|
if (host->cqe_ops && !host->cqe_enabled) {
|
2017-09-22 19:36:54 +07:00
|
|
|
err = host->cqe_ops->cqe_enable(host, card);
|
2020-02-12 11:12:56 +07:00
|
|
|
if (!err) {
|
2017-09-22 19:36:54 +07:00
|
|
|
host->cqe_enabled = true;
|
2020-02-12 11:12:56 +07:00
|
|
|
|
|
|
|
if (card->ext_csd.cmdq_en) {
|
|
|
|
pr_info("%s: Command Queue Engine enabled\n",
|
|
|
|
mmc_hostname(host));
|
|
|
|
} else {
|
|
|
|
host->hsq_enabled = true;
|
|
|
|
pr_info("%s: Host Software Queue enabled\n",
|
|
|
|
mmc_hostname(host));
|
|
|
|
}
|
2017-09-22 19:36:54 +07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-04-14 04:54:57 +07:00
|
|
|
if (host->caps2 & MMC_CAP2_AVOID_3_3V &&
|
|
|
|
host->ios.signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
|
|
|
|
pr_err("%s: Host failed to negotiate down from 3.3V\n",
|
|
|
|
mmc_hostname(host));
|
|
|
|
err = -EINVAL;
|
|
|
|
goto free_card;
|
|
|
|
}
|
|
|
|
|
2007-05-01 21:00:02 +07:00
|
|
|
if (!oldcard)
|
|
|
|
host->card = card;
|
|
|
|
|
2007-07-23 03:18:46 +07:00
|
|
|
return 0;
|
2007-05-01 21:00:02 +07:00
|
|
|
|
|
|
|
free_card:
|
|
|
|
if (!oldcard)
|
|
|
|
mmc_remove_card(card);
|
|
|
|
err:
|
2007-07-23 04:08:30 +07:00
|
|
|
return err;
|
2007-05-01 21:00:02 +07:00
|
|
|
}
|
|
|
|
|
2013-04-19 20:12:11 +07:00
|
|
|
static int mmc_can_sleep(struct mmc_card *card)
|
|
|
|
{
|
|
|
|
return (card && card->ext_csd.rev >= 3);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int mmc_sleep(struct mmc_host *host)
|
|
|
|
{
|
2016-12-19 18:51:18 +07:00
|
|
|
struct mmc_command cmd = {};
|
2013-04-19 20:12:11 +07:00
|
|
|
struct mmc_card *card = host->card;
|
2014-01-15 05:17:36 +07:00
|
|
|
unsigned int timeout_ms = DIV_ROUND_UP(card->ext_csd.sa_timeout, 10000);
|
2013-04-19 20:12:11 +07:00
|
|
|
int err;
|
|
|
|
|
2015-05-07 17:10:18 +07:00
|
|
|
/* Re-tuning can't be done once the card is deselected */
|
|
|
|
mmc_retune_hold(host);
|
|
|
|
|
2013-04-19 20:12:11 +07:00
|
|
|
err = mmc_deselect_cards(host);
|
|
|
|
if (err)
|
2015-05-07 17:10:18 +07:00
|
|
|
goto out_release;
|
2013-04-19 20:12:11 +07:00
|
|
|
|
|
|
|
cmd.opcode = MMC_SLEEP_AWAKE;
|
|
|
|
cmd.arg = card->rca << 16;
|
|
|
|
cmd.arg |= 1 << 15;
|
|
|
|
|
2014-01-15 05:17:36 +07:00
|
|
|
/*
|
|
|
|
* If the max_busy_timeout of the host is specified, validate it against
|
|
|
|
* the sleep cmd timeout. A failure means we need to prevent the host
|
|
|
|
* from doing hw busy detection, which is done by converting to an R1
|
2020-03-11 16:20:36 +07:00
|
|
|
* response instead of an R1B. Note, some hosts require R1B, which also
|
|
|
|
* means they are on their own when it comes to dealing with the busy
|
|
|
|
* timeout.
|
2014-01-15 05:17:36 +07:00
|
|
|
*/
|
2020-03-11 16:20:36 +07:00
|
|
|
if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && host->max_busy_timeout &&
|
|
|
|
(timeout_ms > host->max_busy_timeout)) {
|
2014-01-15 05:17:36 +07:00
|
|
|
cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
|
|
|
|
} else {
|
|
|
|
cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
|
|
|
|
cmd.busy_timeout = timeout_ms;
|
|
|
|
}
|
|
|
|
|
2013-04-19 20:12:11 +07:00
|
|
|
err = mmc_wait_for_cmd(host, &cmd, 0);
|
|
|
|
if (err)
|
2015-05-07 17:10:18 +07:00
|
|
|
goto out_release;
|
2013-04-19 20:12:11 +07:00
|
|
|
|
|
|
|
/*
|
|
|
|
* If the host does not wait while the card signals busy, then we will
|
|
|
|
* have to wait the sleep/awake timeout. Note, we cannot use the
|
|
|
|
* SEND_STATUS command to poll the status because that command (and most
|
|
|
|
* others) is invalid while the card sleeps.
|
|
|
|
*/
|
2014-01-15 05:17:36 +07:00
|
|
|
if (!cmd.busy_timeout || !(host->caps & MMC_CAP_WAIT_WHILE_BUSY))
|
|
|
|
mmc_delay(timeout_ms);
|
2013-04-19 20:12:11 +07:00
|
|
|
|
2015-05-07 17:10:18 +07:00
|
|
|
out_release:
|
|
|
|
mmc_retune_release(host);
|
2013-04-19 20:12:11 +07:00
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2012-10-05 23:45:39 +07:00
|
|
|
static int mmc_can_poweroff_notify(const struct mmc_card *card)
|
|
|
|
{
|
|
|
|
return card &&
|
|
|
|
mmc_card_mmc(card) &&
|
|
|
|
(card->ext_csd.power_off_notification == EXT_CSD_POWER_ON);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int mmc_poweroff_notify(struct mmc_card *card, unsigned int notify_type)
|
|
|
|
{
|
|
|
|
unsigned int timeout = card->ext_csd.generic_cmd6_time;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
/* Use EXT_CSD_POWER_OFF_SHORT as default notification type. */
|
|
|
|
if (notify_type == EXT_CSD_POWER_OFF_LONG)
|
|
|
|
timeout = card->ext_csd.power_off_longtime;
|
|
|
|
|
2013-09-09 16:57:57 +07:00
|
|
|
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
|
|
|
|
EXT_CSD_POWER_OFF_NOTIFICATION,
|
2020-02-04 15:54:41 +07:00
|
|
|
notify_type, timeout, 0, false, false);
|
2012-10-05 23:45:39 +07:00
|
|
|
if (err)
|
|
|
|
pr_err("%s: Power Off Notification timed out, %u\n",
|
|
|
|
mmc_hostname(card->host), timeout);
|
|
|
|
|
|
|
|
/* Disable the power off notification after the switch operation. */
|
|
|
|
card->ext_csd.power_off_notification = EXT_CSD_NO_POWER_NOTIFICATION;
|
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2007-05-01 21:00:02 +07:00
|
|
|
/*
|
|
|
|
* Host is being removed. Free up the current card.
|
|
|
|
*/
|
|
|
|
static void mmc_remove(struct mmc_host *host)
|
|
|
|
{
|
|
|
|
mmc_remove_card(host->card);
|
|
|
|
host->card = NULL;
|
|
|
|
}
|
|
|
|
|
2011-11-28 21:22:00 +07:00
|
|
|
/*
|
|
|
|
* Card detection - card is alive.
|
|
|
|
*/
|
|
|
|
static int mmc_alive(struct mmc_host *host)
|
|
|
|
{
|
|
|
|
return mmc_send_status(host->card, NULL);
|
|
|
|
}
|
|
|
|
|
2007-05-01 21:00:02 +07:00
|
|
|
/*
|
|
|
|
* Card detection callback from host.
|
|
|
|
*/
|
|
|
|
static void mmc_detect(struct mmc_host *host)
|
|
|
|
{
|
|
|
|
int err;
|
|
|
|
|
2017-09-22 19:36:51 +07:00
|
|
|
mmc_get_card(host->card, NULL);
|
2007-05-01 21:00:02 +07:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Just check if our card has been removed.
|
|
|
|
*/
|
2011-11-28 21:22:00 +07:00
|
|
|
err = _mmc_detect_card_removed(host);
|
2007-05-01 21:00:02 +07:00
|
|
|
|
2017-09-22 19:36:51 +07:00
|
|
|
mmc_put_card(host->card, NULL);
|
2007-05-01 21:00:02 +07:00
|
|
|
|
2007-07-23 03:18:46 +07:00
|
|
|
if (err) {
|
2007-05-19 18:39:01 +07:00
|
|
|
mmc_remove(host);
|
2007-05-01 21:00:02 +07:00
|
|
|
|
|
|
|
mmc_claim_host(host);
|
|
|
|
mmc_detach_bus(host);
|
2011-09-22 01:08:13 +07:00
|
|
|
mmc_power_off(host);
|
2007-05-01 21:00:02 +07:00
|
|
|
mmc_release_host(host);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-06-10 22:03:43 +07:00
|
|
|
static int _mmc_suspend(struct mmc_host *host, bool is_suspend)
|
2007-05-01 21:00:02 +07:00
|
|
|
{
|
2011-09-08 23:38:39 +07:00
|
|
|
int err = 0;
|
2013-06-10 22:03:43 +07:00
|
|
|
unsigned int notify_type = is_suspend ? EXT_CSD_POWER_OFF_SHORT :
|
|
|
|
EXT_CSD_POWER_OFF_LONG;
|
2011-09-08 23:38:39 +07:00
|
|
|
|
2007-05-01 21:00:02 +07:00
|
|
|
mmc_claim_host(host);
|
2013-01-29 04:44:22 +07:00
|
|
|
|
2013-10-02 22:37:09 +07:00
|
|
|
if (mmc_card_suspended(host->card))
|
|
|
|
goto out;
|
|
|
|
|
2013-12-16 22:23:22 +07:00
|
|
|
err = mmc_flush_cache(host->card);
|
2013-01-29 04:44:22 +07:00
|
|
|
if (err)
|
|
|
|
goto out;
|
|
|
|
|
2013-06-10 22:03:45 +07:00
|
|
|
if (mmc_can_poweroff_notify(host->card) &&
|
2013-06-10 22:03:46 +07:00
|
|
|
((host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) || !is_suspend))
|
2013-06-10 22:03:43 +07:00
|
|
|
err = mmc_poweroff_notify(host->card, notify_type);
|
2013-04-19 20:12:11 +07:00
|
|
|
else if (mmc_can_sleep(host->card))
|
|
|
|
err = mmc_sleep(host);
|
2012-10-05 23:45:39 +07:00
|
|
|
else if (!mmc_host_is_spi(host))
|
2012-05-31 18:31:47 +07:00
|
|
|
err = mmc_deselect_cards(host);
|
2009-09-23 06:45:29 +07:00
|
|
|
|
2013-10-02 22:37:09 +07:00
|
|
|
if (!err) {
|
2013-06-10 22:03:38 +07:00
|
|
|
mmc_power_off(host);
|
2013-10-02 22:37:09 +07:00
|
|
|
mmc_card_set_suspended(host->card);
|
|
|
|
}
|
2013-01-29 04:44:22 +07:00
|
|
|
out:
|
|
|
|
mmc_release_host(host);
|
2011-09-08 23:38:39 +07:00
|
|
|
return err;
|
2007-05-01 21:00:02 +07:00
|
|
|
}
|
2006-12-31 06:11:32 +07:00
|
|
|
|
2013-06-10 22:03:43 +07:00
|
|
|
/*
|
2013-10-10 19:20:05 +07:00
|
|
|
* Suspend callback
|
2013-06-10 22:03:43 +07:00
|
|
|
*/
|
|
|
|
static int mmc_suspend(struct mmc_host *host)
|
|
|
|
{
|
2013-10-10 19:20:05 +07:00
|
|
|
int err;
|
|
|
|
|
|
|
|
err = _mmc_suspend(host, true);
|
|
|
|
if (!err) {
|
|
|
|
pm_runtime_disable(&host->card->dev);
|
|
|
|
pm_runtime_set_suspended(&host->card->dev);
|
|
|
|
}
|
|
|
|
|
|
|
|
return err;
|
2013-06-10 22:03:43 +07:00
|
|
|
}
|
|
|
|
|
2007-05-01 21:00:02 +07:00
|
|
|
/*
|
|
|
|
* This function tries to determine if the same card is still present
|
|
|
|
* and, if so, restore all state to it.
|
|
|
|
*/
|
2013-10-10 19:20:05 +07:00
|
|
|
static int _mmc_resume(struct mmc_host *host)
|
2007-05-01 21:00:02 +07:00
|
|
|
{
|
2013-10-02 22:37:09 +07:00
|
|
|
int err = 0;
|
2007-05-01 21:00:02 +07:00
|
|
|
|
|
|
|
mmc_claim_host(host);
|
2013-10-02 22:37:09 +07:00
|
|
|
|
|
|
|
if (!mmc_card_suspended(host->card))
|
|
|
|
goto out;
|
|
|
|
|
2013-09-13 16:31:33 +07:00
|
|
|
mmc_power_up(host, host->card->ocr);
|
|
|
|
err = mmc_init_card(host, host->card->ocr, host->card);
|
2013-10-02 22:37:09 +07:00
|
|
|
mmc_card_clr_suspended(host->card);
|
2007-07-22 22:52:06 +07:00
|
|
|
|
2013-10-02 22:37:09 +07:00
|
|
|
out:
|
|
|
|
mmc_release_host(host);
|
2009-09-23 06:45:29 +07:00
|
|
|
return err;
|
2007-05-01 21:00:02 +07:00
|
|
|
}
|
|
|
|
|
2013-10-02 22:37:09 +07:00
|
|
|
/*
|
|
|
|
* Shutdown callback
|
|
|
|
*/
|
|
|
|
static int mmc_shutdown(struct mmc_host *host)
|
|
|
|
{
|
|
|
|
int err = 0;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* In a specific case for poweroff notify, we need to resume the card
|
|
|
|
* before we can shut it down properly.
|
|
|
|
*/
|
|
|
|
if (mmc_can_poweroff_notify(host->card) &&
|
|
|
|
!(host->caps2 & MMC_CAP2_FULL_PWR_CYCLE))
|
2013-10-10 19:20:05 +07:00
|
|
|
err = _mmc_resume(host);
|
2013-10-02 22:37:09 +07:00
|
|
|
|
|
|
|
if (!err)
|
|
|
|
err = _mmc_suspend(host, false);
|
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
2013-05-02 19:02:39 +07:00
|
|
|
|
2013-10-10 19:20:05 +07:00
|
|
|
/*
|
|
|
|
* Callback for resume.
|
|
|
|
*/
|
|
|
|
static int mmc_resume(struct mmc_host *host)
|
|
|
|
{
|
|
|
|
pm_runtime_enable(&host->card->dev);
|
2015-11-05 22:01:32 +07:00
|
|
|
return 0;
|
2013-10-10 19:20:05 +07:00
|
|
|
}
|
|
|
|
|
2013-05-02 19:02:39 +07:00
|
|
|
/*
|
|
|
|
* Callback for runtime_suspend.
|
|
|
|
*/
|
|
|
|
static int mmc_runtime_suspend(struct mmc_host *host)
|
|
|
|
{
|
|
|
|
int err;
|
|
|
|
|
|
|
|
if (!(host->caps & MMC_CAP_AGGRESSIVE_PM))
|
|
|
|
return 0;
|
|
|
|
|
2013-10-10 19:20:05 +07:00
|
|
|
err = _mmc_suspend(host, true);
|
2013-10-03 16:24:44 +07:00
|
|
|
if (err)
|
2015-02-24 21:11:26 +07:00
|
|
|
pr_err("%s: error %d doing aggressive suspend\n",
|
2013-05-02 19:02:39 +07:00
|
|
|
mmc_hostname(host), err);
|
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Callback for runtime_resume.
|
|
|
|
*/
|
|
|
|
static int mmc_runtime_resume(struct mmc_host *host)
|
|
|
|
{
|
|
|
|
int err;
|
|
|
|
|
2013-10-10 19:20:05 +07:00
|
|
|
err = _mmc_resume(host);
|
2015-12-14 20:51:27 +07:00
|
|
|
if (err && err != -ENOMEDIUM)
|
2015-11-05 22:01:32 +07:00
|
|
|
pr_err("%s: error %d doing runtime resume\n",
|
2013-05-02 19:02:39 +07:00
|
|
|
mmc_hostname(host), err);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-06-08 20:27:41 +07:00
|
|
|
static int mmc_can_reset(struct mmc_card *card)
|
2015-01-12 21:38:05 +07:00
|
|
|
{
|
|
|
|
u8 rst_n_function;
|
|
|
|
|
|
|
|
rst_n_function = card->ext_csd.rst_n_function;
|
|
|
|
if ((rst_n_function & EXT_CSD_RST_N_EN_MASK) != EXT_CSD_RST_N_ENABLED)
|
|
|
|
return 0;
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
2018-04-05 18:24:43 +07:00
|
|
|
static int _mmc_hw_reset(struct mmc_host *host)
|
2015-01-12 21:38:05 +07:00
|
|
|
{
|
|
|
|
struct mmc_card *card = host->card;
|
|
|
|
|
2016-05-04 14:38:21 +07:00
|
|
|
/*
|
|
|
|
* In the case of recovery, we can't expect flushing the cache to work
|
|
|
|
* always, but we have a go and ignore errors.
|
|
|
|
*/
|
|
|
|
mmc_flush_cache(host->card);
|
|
|
|
|
2016-04-02 06:04:22 +07:00
|
|
|
if ((host->caps & MMC_CAP_HW_RESET) && host->ops->hw_reset &&
|
|
|
|
mmc_can_reset(card)) {
|
|
|
|
/* If the card accepts the RST_n signal, send it. */
|
|
|
|
mmc_set_clock(host, host->f_init);
|
|
|
|
host->ops->hw_reset(host);
|
|
|
|
/* Set initial state and call mmc_set_ios */
|
|
|
|
mmc_set_initial_state(host);
|
|
|
|
} else {
|
|
|
|
/* Do a brute force power cycle */
|
|
|
|
mmc_power_cycle(host, card->ocr);
|
2017-05-09 04:52:04 +07:00
|
|
|
mmc_pwrseq_reset(host);
|
2016-04-02 06:04:22 +07:00
|
|
|
}
|
2015-06-01 16:14:57 +07:00
|
|
|
return mmc_init_card(host, card->ocr, card);
|
2015-01-12 21:38:05 +07:00
|
|
|
}
|
|
|
|
|
2007-05-01 21:00:02 +07:00
|
|
|
static const struct mmc_bus_ops mmc_ops = {
|
|
|
|
.remove = mmc_remove,
|
|
|
|
.detect = mmc_detect,
|
|
|
|
.suspend = mmc_suspend,
|
|
|
|
.resume = mmc_resume,
|
2013-05-02 19:02:39 +07:00
|
|
|
.runtime_suspend = mmc_runtime_suspend,
|
|
|
|
.runtime_resume = mmc_runtime_resume,
|
2011-11-28 21:22:00 +07:00
|
|
|
.alive = mmc_alive,
|
2013-06-10 22:03:44 +07:00
|
|
|
.shutdown = mmc_shutdown,
|
2018-04-05 18:24:43 +07:00
|
|
|
.hw_reset = _mmc_hw_reset,
|
2007-05-01 21:00:02 +07:00
|
|
|
};
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Starting point for MMC card init.
|
|
|
|
*/
|
2011-01-04 01:36:56 +07:00
|
|
|
int mmc_attach_mmc(struct mmc_host *host)
|
2007-05-01 21:00:02 +07:00
|
|
|
{
|
|
|
|
int err;
|
2013-09-13 16:31:33 +07:00
|
|
|
u32 ocr, rocr;
|
2007-05-01 21:00:02 +07:00
|
|
|
|
2007-08-09 18:23:56 +07:00
|
|
|
WARN_ON(!host->claimed);
|
2007-05-01 21:00:02 +07:00
|
|
|
|
2011-09-15 22:50:38 +07:00
|
|
|
/* Set correct bus mode for MMC before attempting attach */
|
|
|
|
if (!mmc_host_is_spi(host))
|
|
|
|
mmc_set_bus_mode(host, MMC_BUSMODE_OPENDRAIN);
|
|
|
|
|
2011-01-04 01:36:56 +07:00
|
|
|
err = mmc_send_op_cond(host, 0, &ocr);
|
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
|
2013-10-30 07:00:18 +07:00
|
|
|
mmc_attach_bus(host, &mmc_ops);
|
2010-12-08 16:04:30 +07:00
|
|
|
if (host->ocr_avail_mmc)
|
|
|
|
host->ocr_avail = host->ocr_avail_mmc;
|
2007-05-01 21:00:02 +07:00
|
|
|
|
2007-08-08 23:11:32 +07:00
|
|
|
/*
|
|
|
|
* We need to get OCR a different way for SPI.
|
|
|
|
*/
|
|
|
|
if (mmc_host_is_spi(host)) {
|
|
|
|
err = mmc_spi_read_ocr(host, 1, &ocr);
|
|
|
|
if (err)
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
2013-09-13 16:31:33 +07:00
|
|
|
rocr = mmc_select_voltage(host, ocr);
|
2007-05-01 21:00:02 +07:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Can we support the voltage of the card?
|
|
|
|
*/
|
2013-09-13 16:31:33 +07:00
|
|
|
if (!rocr) {
|
2007-07-23 05:12:10 +07:00
|
|
|
err = -EINVAL;
|
2007-05-01 21:00:02 +07:00
|
|
|
goto err;
|
2007-07-23 05:12:10 +07:00
|
|
|
}
|
2007-05-01 21:00:02 +07:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Detect and init the card.
|
|
|
|
*/
|
2013-09-13 16:31:33 +07:00
|
|
|
err = mmc_init_card(host, rocr, NULL);
|
2007-07-23 03:18:46 +07:00
|
|
|
if (err)
|
2007-05-01 21:00:02 +07:00
|
|
|
goto err;
|
|
|
|
|
|
|
|
mmc_release_host(host);
|
2007-05-19 18:39:01 +07:00
|
|
|
err = mmc_add_card(host->card);
|
2006-12-31 06:11:32 +07:00
|
|
|
if (err)
|
2007-07-22 22:52:06 +07:00
|
|
|
goto remove_card;
|
2006-12-31 06:11:32 +07:00
|
|
|
|
2015-10-15 03:53:03 +07:00
|
|
|
mmc_claim_host(host);
|
2006-12-31 06:11:32 +07:00
|
|
|
return 0;
|
|
|
|
|
2007-07-22 22:52:06 +07:00
|
|
|
remove_card:
|
2007-05-01 21:00:02 +07:00
|
|
|
mmc_remove_card(host->card);
|
2007-07-22 22:52:06 +07:00
|
|
|
mmc_claim_host(host);
|
2011-01-04 01:36:56 +07:00
|
|
|
host->card = NULL;
|
2006-12-31 06:11:32 +07:00
|
|
|
err:
|
|
|
|
mmc_detach_bus(host);
|
|
|
|
|
2011-10-11 13:14:09 +07:00
|
|
|
pr_err("%s: error %d whilst initialising MMC card\n",
|
2007-07-23 05:12:10 +07:00
|
|
|
mmc_hostname(host), err);
|
|
|
|
|
2007-07-23 04:08:30 +07:00
|
|
|
return err;
|
2006-12-31 06:11:32 +07:00
|
|
|
}
|