#ifndef MMC_QUEUE_H
#define MMC_QUEUE_H

#include <linux/types.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
static inline struct mmc_queue_req *req_to_mmc_queue_req(struct request *rq)
{
	return blk_mq_rq_to_pdu(rq);
}
struct mmc_queue_req;
static inline struct request *mmc_queue_req_to_req(struct mmc_queue_req *mqr)
{
	return blk_mq_rq_from_pdu(mqr);
}
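/*
 * The struct mmc_queue_req is allocated by the block core right behind
 * each struct request (sized via the queue's .cmd_size), so the two
 * helpers above simply translate between a request and its per-request
 * data. Illustrative sketch only, where q is the device's request_queue
 * and error handling is omitted:
 *
 *	struct request *rq = blk_get_request(q, REQ_OP_DRV_IN, __GFP_RECLAIM);
 *	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(rq);
 *
 * This works for driver-private requests as well as normal reads and
 * writes, because the per-request data exists as soon as the request
 * is allocated.
 */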
struct task_struct;
struct mmc_blk_data;
struct mmc_blk_ioc_data;
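/**
 * struct mmc_blk_request - MMC commands and data for one block transfer
 * @mrq: the MMC request submitted to the host
 * @sbc: SET_BLOCK_COUNT command (CMD23), when used
 * @cmd: the read/write command itself
 * @stop: STOP_TRANSMISSION command (CMD12), when needed
 * @data: the data transfer associated with @cmd
 * @retune_retry_done: non-zero once a retry after re-tuning has been attempted
 */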
struct mmc_blk_request {
	struct mmc_request	mrq;
	struct mmc_command	sbc;
	struct mmc_command	cmd;
	struct mmc_command	stop;
	struct mmc_data		data;
	int			retune_retry_done;
};
/**
 * enum mmc_drv_op - enumerates the operations in the mmc_queue_req
 * @MMC_DRV_OP_IOCTL: ioctl operation
 * @MMC_DRV_OP_BOOT_WP: write protect boot partitions
 * @MMC_DRV_OP_GET_CARD_STATUS: get card status
 * @MMC_DRV_OP_GET_EXT_CSD: get the EXT CSD from an eMMC card
 */
enum mmc_drv_op {
	MMC_DRV_OP_IOCTL,
	MMC_DRV_OP_BOOT_WP,
	MMC_DRV_OP_GET_CARD_STATUS,
	MMC_DRV_OP_GET_EXT_CSD,
};
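/*
 * A driver-private operation is carried by an ordinary block request:
 * the issuer picks one of the ops above, attaches any payload, executes
 * the request synchronously and reads back the result. Rough sketch of
 * the block driver side (names such as mq and status are illustrative,
 * error handling omitted); for ioctls, drv_op_data and ioc_count carry
 * the ioctl payload:
 *
 *	struct request *req = blk_get_request(mq->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
 *	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
 *
 *	mq_rq->drv_op = MMC_DRV_OP_GET_CARD_STATUS;
 *	blk_execute_rq(mq->queue, NULL, req, 0);
 *	status = mq_rq->drv_op_result;
 *	blk_put_request(req);
 */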
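/**
 * struct mmc_queue_req - per-request state, allocated behind each request
 * @brq: block request state (commands and data for the transfer)
 * @sg: scatterlist mapping the request's data buffers
 * @areq: asynchronous request state used by the MMC core
 * @drv_op: driver-private operation to perform, for non-read/write requests
 * @drv_op_result: result of the driver-private operation
 * @drv_op_data: payload for the driver-private operation (e.g. ioctl data)
 * @ioc_count: number of ioctl data structures pointed to by @drv_op_data
 */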
struct mmc_queue_req {
	struct mmc_blk_request	brq;
	struct scatterlist	*sg;
	struct mmc_async_req	areq;
	enum mmc_drv_op		drv_op;
	int			drv_op_result;
	void			*drv_op_data;
	unsigned int		ioc_count;
};
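/**
 * struct mmc_queue - per-device queue state for the MMC block driver
 * @card: the MMC card this queue serves
 * @thread: kernel thread issuing requests to the host
 * @thread_sem: semaphore coordinating the queue thread with suspend/resume
 * @suspended: true while the queue is suspended
 * @asleep: true when the queue thread found no more work and went to sleep
 * @blkdata: back pointer to the owning mmc_blk_data
 * @queue: the underlying block request_queue
 * @qcnt: count of requests currently being processed (see FIXME below)
 */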
struct mmc_queue {
	struct mmc_card		*card;
	struct task_struct	*thread;
	struct semaphore	thread_sem;
	bool			suspended;
	bool			asleep;
	struct mmc_blk_data	*blkdata;
	struct request_queue	*queue;
	/*
	 * FIXME: this counter is not a very reliable way of keeping
	 * track of how many requests are ongoing. Switch to just
	 * letting the block core keep track of requests and per-request
	 * associated mmc_queue_req data.
	 */
	int			qcnt;
};
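/*
 * Queue lifecycle as used by the MMC block driver (illustrative sketch;
 * mq, lock and subname stand in for the caller's own state, error
 * handling omitted):
 *
 *	ret = mmc_init_queue(&mq, card, &lock, subname);
 *	...
 *	mmc_queue_suspend(&mq);		-- before the host suspends
 *	mmc_queue_resume(&mq);		-- after the host resumes
 *	...
 *	mmc_cleanup_queue(&mq);
 *
 * mmc_queue_map_sg() maps a request's buffers into the mmc_queue_req's
 * scatterlist before the transfer is started.
 */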
extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
			  const char *);
extern void mmc_cleanup_queue(struct mmc_queue *);
extern void mmc_queue_suspend(struct mmc_queue *);
extern void mmc_queue_resume(struct mmc_queue *);
extern unsigned int mmc_queue_map_sg(struct mmc_queue *,
				     struct mmc_queue_req *);
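/*
 * RPMB partitions cannot be accessed with normal eMMC read/write
 * commands; mmc_access_rpmb() lets the queue detect requests aimed at
 * the RPMB area (e.g. the partition scan at boot) so they can be
 * discarded instead of timing out.
 */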
extern int mmc_access_rpmb(struct mmc_queue *);
#endif