lightnvm: move ppa erase logic to core
A device may function in single, dual or quad plane mode. The gennvm
media manager handles this with explicit helpers that convert a single
ppa to 1, 2 or 4 separate ppas in a ppa list. To aid the implementation
of recovery and system blocks, move this functionality directly into
the core.

Signed-off-by: Matias Bjørling <m@bjorling.me>
Signed-off-by: Jens Axboe <axboe@fb.com>
commit 069368e918
parent c27278bddd
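The conversion the message describes is driven by the device's plane mode: as the code below shows, the plane count is (1 << plane_mode), so dual and quad mode expand one ppa into 2 or 4 entries that differ only in their plane index. A minimal user-space sketch of that expansion (the struct and names here are toys, not the kernel's types):

#include <stdio.h>

/* Toy plane modes, encoded so that 1 << mode is the plane count,
 * mirroring the plane_cnt = (1 << dev->plane_mode) shift below. */
enum { PLANE_SINGLE = 0, PLANE_DOUBLE = 1, PLANE_QUAD = 2 };

/* Toy physical page address; stands in for struct ppa_addr. */
struct toy_ppa {
	int ch, lun, blk, pl;
};

/* Expand one ppa into one entry per plane; returns the list length. */
static int toy_expand_ppa(struct toy_ppa ppa, int plane_mode,
			  struct toy_ppa *out)
{
	int plane_cnt = 1 << plane_mode;
	int pl;

	for (pl = 0; pl < plane_cnt; pl++) {
		ppa.pl = pl;		/* only the plane index differs */
		out[pl] = ppa;
	}
	return plane_cnt;
}

int main(void)
{
	struct toy_ppa list[4];
	struct toy_ppa ppa = { .ch = 0, .lun = 2, .blk = 100, .pl = 0 };
	int i, n = toy_expand_ppa(ppa, PLANE_QUAD, list);

	for (i = 0; i < n; i++)		/* prints blk 100 on planes 0-3 */
		printf("blk %d pl %d\n", list[i].blk, list[i].pl);
	return 0;
}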
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -192,6 +192,73 @@ int nvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk)
 }
 EXPORT_SYMBOL(nvm_erase_blk);
 
+void nvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
+{
+	int i;
+
+	if (rqd->nr_pages > 1) {
+		for (i = 0; i < rqd->nr_pages; i++)
+			rqd->ppa_list[i] = dev_to_generic_addr(dev,
+							rqd->ppa_list[i]);
+	} else {
+		rqd->ppa_addr = dev_to_generic_addr(dev, rqd->ppa_addr);
+	}
+}
+EXPORT_SYMBOL(nvm_addr_to_generic_mode);
+
+void nvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
+{
+	int i;
+
+	if (rqd->nr_pages > 1) {
+		for (i = 0; i < rqd->nr_pages; i++)
+			rqd->ppa_list[i] = generic_to_dev_addr(dev,
+							rqd->ppa_list[i]);
+	} else {
+		rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr);
+	}
+}
+EXPORT_SYMBOL(nvm_generic_to_addr_mode);
+
+int nvm_erase_ppa(struct nvm_dev *dev, struct ppa_addr ppa)
+{
+	int plane_cnt = 0, pl_idx, ret;
+	struct nvm_rq rqd;
+
+	if (!dev->ops->erase_block)
+		return 0;
+
+	if (dev->plane_mode == NVM_PLANE_SINGLE) {
+		rqd.nr_pages = 1;
+		rqd.ppa_addr = ppa;
+	} else {
+		plane_cnt = (1 << dev->plane_mode);
+		rqd.nr_pages = plane_cnt;
+
+		rqd.ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL,
+							&rqd.dma_ppa_list);
+		if (!rqd.ppa_list) {
+			pr_err("nvm: failed to allocate dma memory\n");
+			return -ENOMEM;
+		}
+
+		for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
+			ppa.g.pl = pl_idx;
+			rqd.ppa_list[pl_idx] = ppa;
+		}
+	}
+
+	nvm_generic_to_addr_mode(dev, &rqd);
+
+	ret = dev->ops->erase_block(dev, &rqd);
+
+	if (plane_cnt)
+		nvm_dev_dma_free(dev, rqd.ppa_list, rqd.dma_ppa_list);
+
+	return ret;
+}
+EXPORT_SYMBOL(nvm_erase_ppa);
+
 static int nvm_core_init(struct nvm_dev *dev)
 {
 	struct nvm_id *id = &dev->identity;
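For context on the two exported helpers: targets and media managers compute addresses in a generic field layout, while each device reports its own bit layout, so requests are translated to device format just before submission and back again on completion. A rough user-space sketch of the idea, with made-up bit offsets (the real driver derives the offsets from the device's identify data):

#include <stdint.h>
#include <stdio.h>

/* Generic layout: named fields that all consumers share. */
struct gen_ppa {
	unsigned ch, lun, blk, pg;
};

/* Made-up device bit offsets; real values come from identify data. */
enum { CH_OFF = 24, LUN_OFF = 16, BLK_OFF = 8, PG_OFF = 0 };

/* generic -> device: pack the fields at the device's offsets */
static uint32_t toy_generic_to_dev(struct gen_ppa p)
{
	return ((uint32_t)p.ch << CH_OFF) | ((uint32_t)p.lun << LUN_OFF) |
	       ((uint32_t)p.blk << BLK_OFF) | ((uint32_t)p.pg << PG_OFF);
}

/* device -> generic: unpack the device bits into named fields */
static struct gen_ppa toy_dev_to_generic(uint32_t d)
{
	struct gen_ppa p = {
		.ch  = (d >> CH_OFF) & 0xff,
		.lun = (d >> LUN_OFF) & 0xff,
		.blk = (d >> BLK_OFF) & 0xff,
		.pg  = (d >> PG_OFF) & 0xff,
	};
	return p;
}

int main(void)
{
	struct gen_ppa p = { .ch = 1, .lun = 2, .blk = 100, .pg = 7 };
	struct gen_ppa back = toy_dev_to_generic(toy_generic_to_dev(p));

	/* round trip is lossless: prints 1 2 100 7 */
	printf("%u %u %u %u\n", back.ch, back.lun, back.blk, back.pg);
	return 0;
}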
--- a/drivers/lightnvm/gennvm.c
+++ b/drivers/lightnvm/gennvm.c
@@ -317,39 +317,13 @@ static void gennvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
 	spin_unlock(&vlun->lock);
 }
 
-static void gennvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
-{
-	int i;
-
-	if (rqd->nr_pages > 1) {
-		for (i = 0; i < rqd->nr_pages; i++)
-			rqd->ppa_list[i] = dev_to_generic_addr(dev,
-							rqd->ppa_list[i]);
-	} else {
-		rqd->ppa_addr = dev_to_generic_addr(dev, rqd->ppa_addr);
-	}
-}
-
-static void gennvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
-{
-	int i;
-
-	if (rqd->nr_pages > 1) {
-		for (i = 0; i < rqd->nr_pages; i++)
-			rqd->ppa_list[i] = generic_to_dev_addr(dev,
-							rqd->ppa_list[i]);
-	} else {
-		rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr);
-	}
-}
-
 static int gennvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
 {
 	if (!dev->ops->submit_io)
 		return -ENODEV;
 
 	/* Convert address space */
-	gennvm_generic_to_addr_mode(dev, rqd);
+	nvm_generic_to_addr_mode(dev, rqd);
 
 	rqd->dev = dev;
 	return dev->ops->submit_io(dev, rqd);
@@ -391,7 +365,7 @@ static void gennvm_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd)
 	if (dev->ops->set_bb_tbl(dev, rqd, 1))
 		return;
 
-	gennvm_addr_to_generic_mode(dev, rqd);
+	nvm_addr_to_generic_mode(dev, rqd);
 
 	/* look up blocks and mark them as bad */
 	if (rqd->nr_pages > 1)
@@ -425,43 +399,9 @@ static int gennvm_end_io(struct nvm_rq *rqd, int error)
 static int gennvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk,
 							unsigned long flags)
 {
-	int plane_cnt = 0, pl_idx, ret;
-	struct ppa_addr addr;
-	struct nvm_rq rqd;
+	struct ppa_addr addr = block_to_ppa(dev, blk);
 
-	if (!dev->ops->erase_block)
-		return 0;
-
-	addr = block_to_ppa(dev, blk);
-
-	if (dev->plane_mode == NVM_PLANE_SINGLE) {
-		rqd.nr_pages = 1;
-		rqd.ppa_addr = addr;
-	} else {
-		plane_cnt = (1 << dev->plane_mode);
-		rqd.nr_pages = plane_cnt;
-
-		rqd.ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL,
-							&rqd.dma_ppa_list);
-		if (!rqd.ppa_list) {
-			pr_err("gennvm: failed to allocate dma memory\n");
-			return -ENOMEM;
-		}
-
-		for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
-			addr.g.pl = pl_idx;
-			rqd.ppa_list[pl_idx] = addr;
-		}
-	}
-
-	gennvm_generic_to_addr_mode(dev, &rqd);
-
-	ret = dev->ops->erase_block(dev, &rqd);
-
-	if (plane_cnt)
-		nvm_dev_dma_free(dev, rqd.ppa_list, rqd.dma_ppa_list);
-
-	return ret;
+	return nvm_erase_ppa(dev, addr);
 }
 
 static struct nvm_lun *gennvm_get_lun(struct nvm_dev *dev, int lunid)
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -427,6 +427,9 @@ extern int nvm_register(struct request_queue *, char *,
 extern void nvm_unregister(char *);
 
 extern int nvm_submit_io(struct nvm_dev *, struct nvm_rq *);
+extern void nvm_generic_to_addr_mode(struct nvm_dev *, struct nvm_rq *);
+extern void nvm_addr_to_generic_mode(struct nvm_dev *, struct nvm_rq *);
+extern int nvm_erase_ppa(struct nvm_dev *, struct ppa_addr);
 extern int nvm_erase_blk(struct nvm_dev *, struct nvm_block *);
 #else /* CONFIG_NVM */
 struct nvm_dev_ops;
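With these declarations exported, a recovery or system-block path (the consumers the commit message names) can erase a block without going through a media manager. A hedged sketch of such a caller; where dev and ppa come from is outside this commit, and sketch_erase_one is a hypothetical helper, not part of the patch:

/* Sketch only: erase one block via the newly exported core helper.
 * Kernel context; relies on <linux/lightnvm.h> as extended above. */
static int sketch_erase_one(struct nvm_dev *dev, struct ppa_addr ppa)
{
	int ret = nvm_erase_ppa(dev, ppa);

	if (ret)
		pr_err("nvm: erase of blk %llu failed: %d\n",
		       (unsigned long long)ppa.g.blk, ret);
	return ret;
}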