ALSA: memalloc: Add non-cached buffer type

In some cases (mainly on x86), we need DMA-coherent buffers backed by
non-cached pages.  So far this has been implemented in each driver
individually (e.g. HD-audio and intel8x0), but it can be done more
cleanly in the core memory allocator.

This patch adds two new types, SNDRV_DMA_TYPE_DEV_UC and
SNDRV_DMA_TYPE_DEV_UC_SG, for allocating such non-cached buffer
pages.  On non-x86 architectures they behave exactly like the
standard SNDRV_DMA_TYPE_DEV and *_SG types.
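
For illustration only (not part of this patch), a driver would request
such a buffer through the usual snd_dma_alloc_pages() call; the helper
name and parameters below are hypothetical:

/* Hypothetical example: allocate a non-cached DMA buffer.
 * On x86 the pages end up write-combined (non-cached); on other
 * architectures this is equivalent to SNDRV_DMA_TYPE_DEV.
 */
#include <linux/device.h>
#include <sound/memalloc.h>

static int alloc_uc_buffer(struct device *dev, size_t size,
                           struct snd_dma_buffer *dmab)
{
        return snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_UC, dev, size, dmab);
}

The buffer is released with snd_dma_free_pages(dmab), which restores the
default caching attribute before freeing the pages.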

One additional benefit of this move is that the non-cached pgprot is
now reliably applied to the vmapped buffer as well.  This fixes cases
such as HD-audio in non-snoop mode when the buffer is accessed without
mmap.
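
The idea, sketched here with a hypothetical helper (the real change is in
the sgbuf.c hunks below), is to pass a non-cached pgprot to vmap() wherever
the architecture provides pgprot_noncached:

#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <asm/pgtable.h>

/* Illustrative sketch: map a page array with caching disabled where
 * the architecture supports it, otherwise fall back to PAGE_KERNEL.
 */
static void *map_pages_uncached(struct page **pages, unsigned int npages)
{
        pgprot_t prot = PAGE_KERNEL;

#ifdef pgprot_noncached
        prot = pgprot_noncached(PAGE_KERNEL);
#endif
        return vmap(pages, npages, VM_MAP, prot);
}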

Signed-off-by: Takashi Iwai <tiwai@suse.de>

Author:  Takashi Iwai <tiwai@suse.de>
Date:    2018-08-08 17:01:00 +02:00
Parent:  28f3f4f685
Commit:  42e748a0b3

3 changed files with 33 additions and 2 deletions

include/sound/memalloc.h

@@ -47,10 +47,13 @@ struct snd_dma_device {
 #define SNDRV_DMA_TYPE_UNKNOWN 0 /* not defined */
 #define SNDRV_DMA_TYPE_CONTINUOUS 1 /* continuous no-DMA memory */
 #define SNDRV_DMA_TYPE_DEV 2 /* generic device continuous */
+#define SNDRV_DMA_TYPE_DEV_UC 5 /* continuous non-cached */
 #ifdef CONFIG_SND_DMA_SGBUF
 #define SNDRV_DMA_TYPE_DEV_SG 3 /* generic device SG-buffer */
+#define SNDRV_DMA_TYPE_DEV_UC_SG 6 /* SG non-cached */
 #else
 #define SNDRV_DMA_TYPE_DEV_SG SNDRV_DMA_TYPE_DEV /* no SG-buf support */
+#define SNDRV_DMA_TYPE_DEV_UC_SG SNDRV_DMA_TYPE_DEV_UC
 #endif
 #ifdef CONFIG_GENERIC_ALLOCATOR
 #define SNDRV_DMA_TYPE_DEV_IRAM 4 /* generic device iram-buffer */

sound/core/memalloc.c

@@ -25,6 +25,9 @@
 #include <linux/mm.h>
 #include <linux/dma-mapping.h>
 #include <linux/genalloc.h>
+#ifdef CONFIG_X86
+#include <asm/set_memory.h>
+#endif
 #include <sound/memalloc.h>
 
 /*
@@ -92,11 +95,21 @@ static void snd_malloc_dev_pages(struct snd_dma_buffer *dmab, size_t size)
                         | __GFP_NOWARN; /* no stack trace print - this call is non-critical */
         dmab->area = dma_alloc_coherent(dmab->dev.dev, size, &dmab->addr,
                                         gfp_flags);
+#ifdef CONFIG_X86
+        if (dmab->area && dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC)
+                set_memory_wc((unsigned long)dmab->area,
+                              PAGE_ALIGN(size) >> PAGE_SHIFT);
+#endif
 }
 
 /* free the coherent DMA pages */
 static void snd_free_dev_pages(struct snd_dma_buffer *dmab)
 {
+#ifdef CONFIG_X86
+        if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC)
+                set_memory_wb((unsigned long)dmab->area,
+                              PAGE_ALIGN(dmab->bytes) >> PAGE_SHIFT);
+#endif
         dma_free_coherent(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
 }
@@ -191,11 +204,13 @@ int snd_dma_alloc_pages(int type, struct device *device, size_t size,
                 dmab->dev.type = SNDRV_DMA_TYPE_DEV;
 #endif /* CONFIG_GENERIC_ALLOCATOR */
         case SNDRV_DMA_TYPE_DEV:
+        case SNDRV_DMA_TYPE_DEV_UC:
                 snd_malloc_dev_pages(dmab, size);
                 break;
 #endif
 #ifdef CONFIG_SND_DMA_SGBUF
         case SNDRV_DMA_TYPE_DEV_SG:
+        case SNDRV_DMA_TYPE_DEV_UC_SG:
                 snd_malloc_sgbuf_pages(device, size, dmab, NULL);
                 break;
 #endif
@@ -266,11 +281,13 @@ void snd_dma_free_pages(struct snd_dma_buffer *dmab)
                 break;
 #endif /* CONFIG_GENERIC_ALLOCATOR */
         case SNDRV_DMA_TYPE_DEV:
+        case SNDRV_DMA_TYPE_DEV_UC:
                 snd_free_dev_pages(dmab);
                 break;
 #endif
 #ifdef CONFIG_SND_DMA_SGBUF
         case SNDRV_DMA_TYPE_DEV_SG:
+        case SNDRV_DMA_TYPE_DEV_UC_SG:
                 snd_free_sgbuf_pages(dmab);
                 break;
 #endif

sound/core/sgbuf.c

@@ -23,6 +23,7 @@
 #include <linux/mm.h>
 #include <linux/vmalloc.h>
 #include <linux/export.h>
+#include <asm/pgtable.h>
 #include <sound/memalloc.h>
@@ -43,6 +44,8 @@ int snd_free_sgbuf_pages(struct snd_dma_buffer *dmab)
         dmab->area = NULL;
 
         tmpb.dev.type = SNDRV_DMA_TYPE_DEV;
+        if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC_SG)
+                tmpb.dev.type = SNDRV_DMA_TYPE_DEV_UC;
         tmpb.dev.dev = sgbuf->dev;
         for (i = 0; i < sgbuf->pages; i++) {
                 if (!(sgbuf->table[i].addr & ~PAGE_MASK))
@@ -72,12 +75,20 @@ void *snd_malloc_sgbuf_pages(struct device *device,
         struct snd_dma_buffer tmpb;
         struct snd_sg_page *table;
         struct page **pgtable;
+        int type = SNDRV_DMA_TYPE_DEV;
+        pgprot_t prot = PAGE_KERNEL;
 
         dmab->area = NULL;
         dmab->addr = 0;
         dmab->private_data = sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
         if (! sgbuf)
                 return NULL;
+        if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC_SG) {
+                type = SNDRV_DMA_TYPE_DEV_UC;
+#ifdef pgprot_noncached
+                prot = pgprot_noncached(PAGE_KERNEL);
+#endif
+        }
         sgbuf->dev = device;
         pages = snd_sgbuf_aligned_pages(size);
         sgbuf->tblsize = sgbuf_align_table(pages);
@@ -98,7 +109,7 @@ void *snd_malloc_sgbuf_pages(struct device *device,
                 if (chunk > maxpages)
                         chunk = maxpages;
                 chunk <<= PAGE_SHIFT;
-                if (snd_dma_alloc_pages_fallback(SNDRV_DMA_TYPE_DEV, device,
+                if (snd_dma_alloc_pages_fallback(type, device,
                                                  chunk, &tmpb) < 0) {
                         if (!sgbuf->pages)
                                 goto _failed;
@@ -125,7 +136,7 @@ void *snd_malloc_sgbuf_pages(struct device *device,
         }
 
         sgbuf->size = size;
-        dmab->area = vmap(sgbuf->page_table, sgbuf->pages, VM_MAP, PAGE_KERNEL);
+        dmab->area = vmap(sgbuf->page_table, sgbuf->pages, VM_MAP, prot);
         if (! dmab->area)
                 goto _failed;
         if (res_size)