mmc: core: Remove bounce buffer in mmc_send_cxd_data()

The Extended CSD register (which is larger than CID/CSD) is expected to
be referenced more frequently as more fields are added to it, so
doubling the memory used for every read with a bounce buffer is not a
good option.

This patch avoids using a bounce buffer when reading the Extended CSD
register in mmc_send_cxd_data(). It improves performance by removing
the memcpy() overhead for half a KiB and the redundant bounce buffer
allocated on every call, at the cost of requiring the caller to provide
a DMA-capable buffer (an on-stack buffer is still allowed, but without
the performance gain).
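
For illustration only (not part of this patch), a minimal caller-side
sketch of the new contract: the helper name example_read_ext_csd() and
the field it prints are made up, while mmc_send_ext_csd(), kmalloc()
and kfree() are the real interfaces involved. A kmalloc()'d buffer is
DMA-capable, so mmc_send_cxd_data() can use it directly; an on-stack
buffer still works because the callee detects it via
object_is_on_stack() and falls back to a bounce buffer.

/* Hypothetical caller: read the 512-byte EXT_CSD without a bounce buffer. */
static int example_read_ext_csd(struct mmc_card *card)
{
	u8 *ext_csd;
	int err;

	/*
	 * kmalloc()'d memory is DMA-capable: no memcpy() and no extra
	 * allocation inside mmc_send_cxd_data().
	 */
	ext_csd = kmalloc(512, GFP_KERNEL);
	if (!ext_csd)
		return -ENOMEM;

	err = mmc_send_ext_csd(card, ext_csd);
	if (!err)
		pr_info("EXT_CSD revision: %u\n", ext_csd[192]); /* EXT_CSD_REV */

	kfree(ext_csd);
	return err;
}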

Signed-off-by: Kyungsik Lee <kyungsik.lee@lge.com>
Reviewed-by: Venkatraman S <svenkatr@ti.com>
Signed-off-by: Chris Ball <cjb@laptop.org>
parent 8f63795c60
commit 1a41313e7f

@@ -230,6 +230,10 @@ mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
 	return 0;
 }
 
+/*
+ * NOTE: void *buf, caller for the buf is required to use DMA-capable
+ * buffer or on-stack buffer (with some overhead in callee).
+ */
 static int
 mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
 		u32 opcode, void *buf, unsigned len)
@@ -239,13 +243,19 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
 	struct mmc_data data = {0};
 	struct scatterlist sg;
 	void *data_buf;
+	int is_on_stack;
 
-	/* dma onto stack is unsafe/nonportable, but callers to this
-	 * routine normally provide temporary on-stack buffers ...
-	 */
-	data_buf = kmalloc(len, GFP_KERNEL);
-	if (data_buf == NULL)
-		return -ENOMEM;
+	is_on_stack = object_is_on_stack(buf);
+	if (is_on_stack) {
+		/*
+		 * dma onto stack is unsafe/nonportable, but callers to this
+		 * routine normally provide temporary on-stack buffers ...
+		 */
+		data_buf = kmalloc(len, GFP_KERNEL);
+		if (!data_buf)
+			return -ENOMEM;
+	} else
+		data_buf = buf;
 
 	mrq.cmd = &cmd;
 	mrq.data = &data;
@@ -280,8 +290,10 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
 
 	mmc_wait_for_req(host, &mrq);
 
-	memcpy(buf, data_buf, len);
-	kfree(data_buf);
+	if (is_on_stack) {
+		memcpy(buf, data_buf, len);
+		kfree(data_buf);
+	}
 
 	if (cmd.error)
 		return cmd.error;
@@ -294,24 +306,32 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
 int mmc_send_csd(struct mmc_card *card, u32 *csd)
 {
 	int ret, i;
+	u32 *csd_tmp;
 
 	if (!mmc_host_is_spi(card->host))
 		return mmc_send_cxd_native(card->host, card->rca << 16,
 				csd, MMC_SEND_CSD);
 
-	ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd, 16);
+	csd_tmp = kmalloc(16, GFP_KERNEL);
+	if (!csd_tmp)
+		return -ENOMEM;
+
+	ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd_tmp, 16);
 	if (ret)
-		return ret;
+		goto err;
 
 	for (i = 0;i < 4;i++)
-		csd[i] = be32_to_cpu(csd[i]);
+		csd[i] = be32_to_cpu(csd_tmp[i]);
 
-	return 0;
+err:
+	kfree(csd_tmp);
+	return ret;
 }
 
 int mmc_send_cid(struct mmc_host *host, u32 *cid)
 {
 	int ret, i;
+	u32 *cid_tmp;
 
 	if (!mmc_host_is_spi(host)) {
 		if (!host->card)
@@ -320,14 +340,20 @@ int mmc_send_cid(struct mmc_host *host, u32 *cid)
 				cid, MMC_SEND_CID);
 	}
 
-	ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid, 16);
+	cid_tmp = kmalloc(16, GFP_KERNEL);
+	if (!cid_tmp)
+		return -ENOMEM;
+
+	ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid_tmp, 16);
 	if (ret)
-		return ret;
+		goto err;
 
 	for (i = 0;i < 4;i++)
-		cid[i] = be32_to_cpu(cid[i]);
+		cid[i] = be32_to_cpu(cid_tmp[i]);
 
-	return 0;
+err:
+	kfree(cid_tmp);
+	return ret;
 }
 
 int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd)