// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Scatter-Gather buffer
 *
 * Copyright (c) by Takashi Iwai <tiwai@suse.de>
 */
#include <linux/slab.h>
|
|
|
|
#include <linux/mm.h>
|
|
|
|
#include <linux/vmalloc.h>
|
2012-09-21 10:29:12 +07:00
|
|
|
#include <linux/export.h>
|
2018-08-08 22:01:00 +07:00
|
|
|
#include <asm/pgtable.h>
|
2005-04-17 05:20:36 +07:00
|
|
|
#include <sound/memalloc.h>
|
|
|
|
|
|
|
|
|
|
|
|
/* table entries are align to 32 */
|
|
|
|
#define SGBUF_TBL_ALIGN 32
|
2006-10-09 13:13:32 +07:00
|
|
|
#define sgbuf_align_table(tbl) ALIGN((tbl), SGBUF_TBL_ALIGN)
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
int snd_free_sgbuf_pages(struct snd_dma_buffer *dmab)
|
|
|
|
{
|
|
|
|
struct snd_sg_buf *sgbuf = dmab->private_data;
|
|
|
|
struct snd_dma_buffer tmpb;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
if (! sgbuf)
|
|
|
|
return -EINVAL;
|
|
|
|
|
2014-11-22 00:34:48 +07:00
|
|
|
vunmap(dmab->area);
|
2009-03-17 20:00:06 +07:00
|
|
|
dmab->area = NULL;
|
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
tmpb.dev.type = SNDRV_DMA_TYPE_DEV;
|
2018-08-08 22:01:00 +07:00
|
|
|
if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC_SG)
|
|
|
|
tmpb.dev.type = SNDRV_DMA_TYPE_DEV_UC;
|
2005-04-17 05:20:36 +07:00
|
|
|
tmpb.dev.dev = sgbuf->dev;
|
|
|
|
for (i = 0; i < sgbuf->pages; i++) {
|
2008-07-30 20:13:33 +07:00
|
|
|
if (!(sgbuf->table[i].addr & ~PAGE_MASK))
|
|
|
|
continue; /* continuous pages */
|
2005-04-17 05:20:36 +07:00
|
|
|
tmpb.area = sgbuf->table[i].buf;
|
2008-07-30 20:13:33 +07:00
|
|
|
tmpb.addr = sgbuf->table[i].addr & PAGE_MASK;
|
|
|
|
tmpb.bytes = (sgbuf->table[i].addr & ~PAGE_MASK) << PAGE_SHIFT;
|
2005-04-17 05:20:36 +07:00
|
|
|
snd_dma_free_pages(&tmpb);
|
|
|
|
}
|
|
|
|
|
|
|
|
kfree(sgbuf->table);
|
|
|
|
kfree(sgbuf->page_table);
|
|
|
|
kfree(sgbuf);
|
|
|
|
dmab->private_data = NULL;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2008-07-30 20:13:33 +07:00
|
|
|
#define MAX_ALLOC_PAGES 32
|
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
void *snd_malloc_sgbuf_pages(struct device *device,
|
|
|
|
size_t size, struct snd_dma_buffer *dmab,
|
|
|
|
size_t *res_size)
|
|
|
|
{
|
|
|
|
struct snd_sg_buf *sgbuf;
|
2008-07-30 20:13:33 +07:00
|
|
|
unsigned int i, pages, chunk, maxpages;
|
2005-04-17 05:20:36 +07:00
|
|
|
struct snd_dma_buffer tmpb;
|
2008-07-30 20:13:33 +07:00
|
|
|
struct snd_sg_page *table;
|
|
|
|
struct page **pgtable;
|
2018-08-08 22:01:00 +07:00
|
|
|
int type = SNDRV_DMA_TYPE_DEV;
|
|
|
|
pgprot_t prot = PAGE_KERNEL;
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
dmab->area = NULL;
|
|
|
|
dmab->addr = 0;
|
2006-07-25 20:28:03 +07:00
|
|
|
dmab->private_data = sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
|
2005-04-17 05:20:36 +07:00
|
|
|
if (! sgbuf)
|
|
|
|
return NULL;
|
2018-08-08 22:01:00 +07:00
|
|
|
if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC_SG) {
|
|
|
|
type = SNDRV_DMA_TYPE_DEV_UC;
|
|
|
|
#ifdef pgprot_noncached
|
|
|
|
prot = pgprot_noncached(PAGE_KERNEL);
|
|
|
|
#endif
|
|
|
|
}
|
2005-04-17 05:20:36 +07:00
|
|
|
sgbuf->dev = device;
|
|
|
|
pages = snd_sgbuf_aligned_pages(size);
|
|
|
|
sgbuf->tblsize = sgbuf_align_table(pages);
|
2008-07-30 20:13:33 +07:00
|
|
|
table = kcalloc(sgbuf->tblsize, sizeof(*table), GFP_KERNEL);
|
|
|
|
if (!table)
|
2005-04-17 05:20:36 +07:00
|
|
|
goto _failed;
|
2008-07-30 20:13:33 +07:00
|
|
|
sgbuf->table = table;
|
|
|
|
pgtable = kcalloc(sgbuf->tblsize, sizeof(*pgtable), GFP_KERNEL);
|
|
|
|
if (!pgtable)
|
2005-04-17 05:20:36 +07:00
|
|
|
goto _failed;
|
2008-07-30 20:13:33 +07:00
|
|
|
sgbuf->page_table = pgtable;
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2008-07-30 20:13:33 +07:00
|
|
|
/* allocate pages */
|
|
|
|
maxpages = MAX_ALLOC_PAGES;
|
|
|
|
while (pages > 0) {
|
|
|
|
chunk = pages;
|
|
|
|
/* don't be too eager to take a huge chunk */
|
|
|
|
if (chunk > maxpages)
|
|
|
|
chunk = maxpages;
|
|
|
|
chunk <<= PAGE_SHIFT;
|
2018-08-08 22:01:00 +07:00
|
|
|
if (snd_dma_alloc_pages_fallback(type, device,
|
2008-07-30 20:13:33 +07:00
|
|
|
chunk, &tmpb) < 0) {
|
|
|
|
if (!sgbuf->pages)
|
2012-08-03 17:48:32 +07:00
|
|
|
goto _failed;
|
2008-07-30 20:13:33 +07:00
|
|
|
if (!res_size)
|
2005-04-17 05:20:36 +07:00
|
|
|
goto _failed;
|
2008-07-30 20:13:33 +07:00
|
|
|
size = sgbuf->pages * PAGE_SIZE;
|
2005-04-17 05:20:36 +07:00
|
|
|
break;
|
|
|
|
}
|
2008-07-30 20:13:33 +07:00
|
|
|
chunk = tmpb.bytes >> PAGE_SHIFT;
|
|
|
|
for (i = 0; i < chunk; i++) {
|
|
|
|
table->buf = tmpb.area;
|
|
|
|
table->addr = tmpb.addr;
|
|
|
|
if (!i)
|
|
|
|
table->addr |= chunk; /* mark head */
|
|
|
|
table++;
|
|
|
|
*pgtable++ = virt_to_page(tmpb.area);
|
|
|
|
tmpb.area += PAGE_SIZE;
|
|
|
|
tmpb.addr += PAGE_SIZE;
|
|
|
|
}
|
|
|
|
sgbuf->pages += chunk;
|
|
|
|
pages -= chunk;
|
|
|
|
if (chunk < maxpages)
|
|
|
|
maxpages = chunk;
|
2005-04-17 05:20:36 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
sgbuf->size = size;
|
2018-08-08 22:01:00 +07:00
|
|
|
dmab->area = vmap(sgbuf->page_table, sgbuf->pages, VM_MAP, prot);
|
2005-04-17 05:20:36 +07:00
|
|
|
if (! dmab->area)
|
|
|
|
goto _failed;
|
2008-07-30 20:13:33 +07:00
|
|
|
if (res_size)
|
|
|
|
*res_size = sgbuf->size;
|
2005-04-17 05:20:36 +07:00
|
|
|
return dmab->area;
|
|
|
|
|
|
|
|
_failed:
|
|
|
|
snd_free_sgbuf_pages(dmab); /* free the table */
|
|
|
|
return NULL;
|
|
|
|
}
|
2012-09-21 10:29:12 +07:00
|
|
|
|
|
|
|
/*
|
|
|
|
* compute the max chunk size with continuous pages on sg-buffer
|
|
|
|
*/
|
|
|
|
unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab,
|
|
|
|
unsigned int ofs, unsigned int size)
|
|
|
|
{
|
|
|
|
struct snd_sg_buf *sg = dmab->private_data;
|
|
|
|
unsigned int start, end, pg;
|
|
|
|
|
|
|
|
start = ofs >> PAGE_SHIFT;
|
|
|
|
end = (ofs + size - 1) >> PAGE_SHIFT;
|
|
|
|
/* check page continuity */
|
|
|
|
pg = sg->table[start].addr >> PAGE_SHIFT;
|
|
|
|
for (;;) {
|
|
|
|
start++;
|
|
|
|
if (start > end)
|
|
|
|
break;
|
|
|
|
pg++;
|
|
|
|
if ((sg->table[start].addr >> PAGE_SHIFT) != pg)
|
|
|
|
return (start << PAGE_SHIFT) - ofs;
|
|
|
|
}
|
|
|
|
/* ok, all on continuous pages */
|
|
|
|
return size;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(snd_sgbuf_get_chunk_size);
|