// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *  Copyright (c) by Takashi Iwai <tiwai@suse.de>
 *
 *  EMU10K1 memory page allocation (PTB area)
 */

#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/time.h>
#include <linux/mutex.h>
#include <linux/export.h>

#include <sound/core.h>
#include <sound/emu10k1.h>

/* The page arguments of these two macros are Emu pages (4096 bytes),
 * not the system-aligned pages used elsewhere in this file.
 */
#define __set_ptb_entry(emu,page,addr) \
	(((__le32 *)(emu)->ptb_pages.area)[page] = \
	 cpu_to_le32(((addr) << (emu->address_mode)) | (page)))
#define __get_ptb_entry(emu, page) \
	(le32_to_cpu(((__le32 *)(emu)->ptb_pages.area)[page]))
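
/*
 * Each PTB entry is a little-endian 32-bit word packing the DMA address of
 * an Emu page together with that entry's own index.  Since addr is 4k
 * aligned, its low 12 bits are zero and the index fits below it.  A sketch,
 * assuming address_mode == 0 and a hypothetical address:
 *
 *	__set_ptb_entry(emu, 5, 0x123000);
 *	--> ptb_pages.area[5] = cpu_to_le32(0x123000 | 5) = 0x00123005
 */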

#define UNIT_PAGES		(PAGE_SIZE / EMUPAGESIZE)
#define MAX_ALIGN_PAGES0	(MAXPAGES0 / UNIT_PAGES)
#define MAX_ALIGN_PAGES1	(MAXPAGES1 / UNIT_PAGES)
/* get aligned page from offset address */
#define get_aligned_page(offset)	((offset) >> PAGE_SHIFT)
/* get offset address from aligned page */
#define aligned_page_offset(page)	((page) << PAGE_SHIFT)
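
/*
 * Example of how the two page sizes relate (PAGE_SIZE is arch-dependent):
 * with 4k system pages UNIT_PAGES == 1 and the two units coincide; with,
 * say, 16k system pages UNIT_PAGES == 4, so one aligned page spans four
 * PTB entries and set_ptb_entry() below must fill all four of them.
 */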

#if PAGE_SIZE == EMUPAGESIZE && !IS_ENABLED(CONFIG_DYNAMIC_DEBUG)
/* fill the PTB entry corresponding to page with addr */
#define set_ptb_entry(emu,page,addr)	__set_ptb_entry(emu,page,addr)
/* fill the PTB entry corresponding to page with the silence pointer */
#define set_silent_ptb(emu,page)	__set_ptb_entry(emu,page,emu->silent_page.addr)
#else
/* fill PTB entries -- we need to fill UNIT_PAGES entries */
static inline void set_ptb_entry(struct snd_emu10k1 *emu, int page, dma_addr_t addr)
{
	int i;
	page *= UNIT_PAGES;
	for (i = 0; i < UNIT_PAGES; i++, page++) {
		__set_ptb_entry(emu, page, addr);
		dev_dbg(emu->card->dev, "mapped page %d to entry %.8x\n", page,
			(unsigned int)__get_ptb_entry(emu, page));
		addr += EMUPAGESIZE;
	}
}
static inline void set_silent_ptb(struct snd_emu10k1 *emu, int page)
{
	int i;
	page *= UNIT_PAGES;
	for (i = 0; i < UNIT_PAGES; i++, page++) {
		/* do not increment the address -- every entry points at
		 * the single silent page
		 */
		__set_ptb_entry(emu, page, emu->silent_page.addr);
		dev_dbg(emu->card->dev, "mapped silent page %d to entry %.8x\n",
			page, (unsigned int)__get_ptb_entry(emu, page));
	}
}
#endif /* PAGE_SIZE */

/*
 * prototypes for the lower-level page allocators
 */
static int synth_alloc_pages(struct snd_emu10k1 *hw, struct snd_emu10k1_memblk *blk);
static int synth_free_pages(struct snd_emu10k1 *hw, struct snd_emu10k1_memblk *blk);

#define get_emu10k1_memblk(l,member)	list_entry(l, struct snd_emu10k1_memblk, member)

/* initialize the emu10k1-specific part of the memory block */
static void emu10k1_memblk_init(struct snd_emu10k1_memblk *blk)
{
	blk->mapped_page = -1;
	INIT_LIST_HEAD(&blk->mapped_link);
	INIT_LIST_HEAD(&blk->mapped_order_link);
	blk->map_locked = 0;

	blk->first_page = get_aligned_page(blk->mem.offset);
	blk->last_page = get_aligned_page(blk->mem.offset + blk->mem.size - 1);
	blk->pages = blk->last_page - blk->first_page + 1;
}

/*
 * search for an empty region on the PTB that can hold the given size
 *
 * if an empty region is found, return its first page and store the next
 * mapped block in nextp
 * if not found, return a negative error code
 */
|
2005-11-17 20:50:13 +07:00
|
|
|
static int search_empty_map_area(struct snd_emu10k1 *emu, int npages, struct list_head **nextp)
|
2005-04-17 05:20:36 +07:00
|
|
|
{
|
2018-02-14 06:04:58 +07:00
|
|
|
int page = 1, found_page = -ENOMEM;
|
2005-04-17 05:20:36 +07:00
|
|
|
int max_size = npages;
|
|
|
|
int size;
|
|
|
|
struct list_head *candidate = &emu->mapped_link_head;
|
|
|
|
struct list_head *pos;
|
|
|
|
|
|
|
|
list_for_each (pos, &emu->mapped_link_head) {
|
2005-11-17 20:50:13 +07:00
|
|
|
struct snd_emu10k1_memblk *blk = get_emu10k1_memblk(pos, mapped_link);
|
2008-08-08 22:12:14 +07:00
|
|
|
if (blk->mapped_page < 0)
|
|
|
|
continue;
|
2005-04-17 05:20:36 +07:00
|
|
|
size = blk->mapped_page - page;
|
|
|
|
if (size == npages) {
|
|
|
|
*nextp = pos;
|
|
|
|
return page;
|
|
|
|
}
|
|
|
|
else if (size > max_size) {
|
|
|
|
/* we look for the maximum empty hole */
|
|
|
|
max_size = size;
|
|
|
|
candidate = pos;
|
|
|
|
found_page = page;
|
|
|
|
}
|
|
|
|
page = blk->mapped_page + blk->pages;
|
|
|
|
}
|
2015-04-29 02:57:29 +07:00
|
|
|
size = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0) - page;
|
2005-04-17 05:20:36 +07:00
|
|
|
if (size >= max_size) {
|
|
|
|
*nextp = pos;
|
|
|
|
return page;
|
|
|
|
}
|
|
|
|
*nextp = candidate;
|
|
|
|
return found_page;
|
|
|
|
}
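
/*
 * Note on the strategy above: an exact-fit hole is taken immediately;
 * failing that, the tail region after the last mapping wins if it is at
 * least as large as the best interior hole, which is kept as a fallback.
 * Scanning starts at page 1 because PTB entry 0 is reserved -- see
 * map_memblk() below.
 */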

/*
 * map a memory block onto emu10k1's PTB
 *
 * call with memblk_lock held
 */
static int map_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int page, pg;
	struct list_head *next;

	page = search_empty_map_area(emu, blk->pages, &next);
	if (page < 0) /* not found */
		return page;
	if (page == 0) {
		dev_err(emu->card->dev, "trying to map zero (reserved) page\n");
		return -EINVAL;
	}
	/* insert this block at the proper position in the mapped list */
	list_add_tail(&blk->mapped_link, next);
	/* append this as the newest block in the order list */
	list_add_tail(&blk->mapped_order_link, &emu->mapped_order_link_head);
	blk->mapped_page = page;
	/* fill PTB */
	for (pg = blk->first_page; pg <= blk->last_page; pg++) {
		set_ptb_entry(emu, page, emu->page_addr_table[pg]);
		page++;
	}
	return 0;
}

/*
 * unmap the block
 * return the size (in pages) of the resulting empty region
 *
 * call with memblk_lock held
 */
static int unmap_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int start_page, end_page, mpage, pg;
	struct list_head *p;
	struct snd_emu10k1_memblk *q;

	/* calculate the expected size of the empty region */
	if ((p = blk->mapped_link.prev) != &emu->mapped_link_head) {
		q = get_emu10k1_memblk(p, mapped_link);
		start_page = q->mapped_page + q->pages;
	} else
		start_page = 1;
	if ((p = blk->mapped_link.next) != &emu->mapped_link_head) {
		q = get_emu10k1_memblk(p, mapped_link);
		end_page = q->mapped_page;
	} else
		end_page = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0);

	/* remove links */
	list_del(&blk->mapped_link);
	list_del(&blk->mapped_order_link);
	/* clear PTB */
	mpage = blk->mapped_page;
	for (pg = blk->first_page; pg <= blk->last_page; pg++) {
		set_silent_ptb(emu, mpage);
		mpage++;
	}
	blk->mapped_page = -1;
	return end_page - start_page; /* return the new empty size */
}
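
/*
 * Note that the vacated PTB entries are not left dangling: they are
 * re-pointed at the silent page by set_silent_ptb(), so the hardware
 * always reads valid (silent) memory even for unmapped regions.
 */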

/*
 * search for empty pages that can hold the given size, and create a memory block
 *
 * unlike synth_alloc(), the memory block is aligned to a page start
 */
static struct snd_emu10k1_memblk *
search_empty(struct snd_emu10k1 *emu, int size)
{
	struct list_head *p;
	struct snd_emu10k1_memblk *blk;
	int page, psize;

	psize = get_aligned_page(size + PAGE_SIZE - 1);
	page = 0;
	list_for_each(p, &emu->memhdr->block) {
		blk = get_emu10k1_memblk(p, mem.list);
		if (page + psize <= blk->first_page)
			goto __found_pages;
		page = blk->last_page + 1;
	}
	if (page + psize > emu->max_cache_pages)
		return NULL;

__found_pages:
	/* create a new memory block */
	blk = (struct snd_emu10k1_memblk *)__snd_util_memblk_new(emu->memhdr, psize << PAGE_SHIFT, p->prev);
	if (blk == NULL)
		return NULL;
	blk->mem.offset = aligned_page_offset(page); /* set aligned offset */
	emu10k1_memblk_init(blk);
	return blk;
}

/*
 * check if the given pointer is valid for pages
 */
static int is_valid_page(struct snd_emu10k1 *emu, dma_addr_t addr)
{
	if (addr & ~emu->dma_mask) {
		dev_err_ratelimited(emu->card->dev,
			"max memory size is 0x%lx (addr = 0x%lx)!!\n",
			emu->dma_mask, (unsigned long)addr);
		return 0;
	}
	if (addr & (EMUPAGESIZE-1)) {
		dev_err_ratelimited(emu->card->dev, "page is not aligned\n");
		return 0;
	}
	return 1;
}

/*
 * map the given memory block onto the PTB.
 * if the block is already mapped, update the link order.
 * if no empty pages are found, try to release unused memory blocks
 * and retry the mapping.
 */
int snd_emu10k1_memblk_map(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int err;
	int size;
	struct list_head *p, *nextp;
	struct snd_emu10k1_memblk *deleted;
	unsigned long flags;

	spin_lock_irqsave(&emu->memblk_lock, flags);
	if (blk->mapped_page >= 0) {
		/* update order link */
		list_move_tail(&blk->mapped_order_link,
			       &emu->mapped_order_link_head);
		spin_unlock_irqrestore(&emu->memblk_lock, flags);
		return 0;
	}
	if ((err = map_memblk(emu, blk)) < 0) {
		/* not enough pages - try to unmap some blocks,
		 * starting from the oldest block
		 */
		p = emu->mapped_order_link_head.next;
		for (; p != &emu->mapped_order_link_head; p = nextp) {
			nextp = p->next;
			deleted = get_emu10k1_memblk(p, mapped_order_link);
			if (deleted->map_locked)
				continue;
			size = unmap_memblk(emu, deleted);
			if (size >= blk->pages) {
				/* OK, the empty region is large enough */
				err = map_memblk(emu, blk);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&emu->memblk_lock, flags);
	return err;
}

EXPORT_SYMBOL(snd_emu10k1_memblk_map);

/*
 * page allocation for DMA
 */
struct snd_util_memblk *
snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_util_memhdr *hdr;
	struct snd_emu10k1_memblk *blk;
	int page, err, idx;

	if (snd_BUG_ON(!emu))
		return NULL;
	if (snd_BUG_ON(runtime->dma_bytes <= 0 ||
		       runtime->dma_bytes >= (emu->address_mode ? MAXPAGES1 : MAXPAGES0) * EMUPAGESIZE))
		return NULL;
	hdr = emu->memhdr;
	if (snd_BUG_ON(!hdr))
		return NULL;

	idx = runtime->period_size >= runtime->buffer_size ?
					(emu->delay_pcm_irq * 2) : 0;
	mutex_lock(&hdr->block_mutex);
	blk = search_empty(emu, runtime->dma_bytes + idx);
	if (blk == NULL) {
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	/* fill the buffer addresses, but do not store the kernel pointers,
	 * so that __synth_free_pages() skips these pages when the block
	 * is freed
	 */
	idx = 0;
	for (page = blk->first_page; page <= blk->last_page; page++, idx++) {
		unsigned long ofs = idx << PAGE_SHIFT;
		dma_addr_t addr;
		if (ofs >= runtime->dma_bytes)
			addr = emu->silent_page.addr;
		else
			addr = snd_pcm_sgbuf_get_addr(substream, ofs);
		if (!is_valid_page(emu, addr)) {
			dev_err_ratelimited(emu->card->dev,
				"emu: failure page = %d\n", idx);
			mutex_unlock(&hdr->block_mutex);
			return NULL;
		}
		emu->page_addr_table[page] = addr;
		emu->page_ptr_table[page] = NULL;
	}

	/* set PTB entries */
	blk->map_locked = 1; /* do not unmap this block! */
	err = snd_emu10k1_memblk_map(emu, blk);
	if (err < 0) {
		__snd_util_mem_free(hdr, (struct snd_util_memblk *)blk);
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	mutex_unlock(&hdr->block_mutex);
	return (struct snd_util_memblk *)blk;
}
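
/*
 * Note: pages past the end of the PCM buffer (and past the delay_pcm_irq
 * padding) are backed by the silent page in the loop above, so the voice
 * can over-read the buffer tail without touching unrelated memory.
 */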

/*
 * release the DMA buffer from the page table
 */
int snd_emu10k1_free_pages(struct snd_emu10k1 *emu, struct snd_util_memblk *blk)
{
	if (snd_BUG_ON(!emu || !blk))
		return -EINVAL;
	return snd_emu10k1_synth_free(emu, blk);
}

/*
 * allocate DMA pages, widening the allocation if necessary
 *
 * See the comment above snd_emu10k1_detect_iommu() in emu10k1_main.c for
 * why this might be needed.
 *
 * If you modify this function, check whether __synth_free_pages() also
 * needs changes.
 */
int snd_emu10k1_alloc_pages_maybe_wider(struct snd_emu10k1 *emu, size_t size,
					struct snd_dma_buffer *dmab)
{
	if (emu->iommu_workaround) {
		size_t npages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
		size_t size_real = npages * PAGE_SIZE;

		/*
		 * The device has been observed to access up to 256 extra
		 * bytes, but use 1k to be safe.
		 */
		if (size_real < size + 1024)
			size += PAGE_SIZE;
	}

	return snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
				   &emu->pci->dev, size, dmab);
}
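
/*
 * A worked example of the widening (assuming 4k pages and the workaround
 * enabled): size == 4096 rounds to size_real == 4096, which is less than
 * 4096 + 1024, so a full extra page is allocated and the device may safely
 * overrun into it.
 */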

/*
 * memory allocation using multiple pages (for synth)
 * Unlike the DMA allocation above, non-contiguous pages are assigned.
 */

/*
 * allocate a synth sample area
 */
struct snd_util_memblk *
snd_emu10k1_synth_alloc(struct snd_emu10k1 *hw, unsigned int size)
{
	struct snd_emu10k1_memblk *blk;
	struct snd_util_memhdr *hdr = hw->memhdr;

	mutex_lock(&hdr->block_mutex);
	blk = (struct snd_emu10k1_memblk *)__snd_util_mem_alloc(hdr, size);
	if (blk == NULL) {
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	if (synth_alloc_pages(hw, blk)) {
		__snd_util_mem_free(hdr, (struct snd_util_memblk *)blk);
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	snd_emu10k1_memblk_map(hw, blk);
	mutex_unlock(&hdr->block_mutex);
	return (struct snd_util_memblk *)blk;
}

EXPORT_SYMBOL(snd_emu10k1_synth_alloc);
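
/*
 * Typical usage by the wavetable synth code (a sketch, error handling
 * elided; "buf" is a hypothetical user-space pointer):
 *
 *	struct snd_util_memblk *blk;
 *
 *	blk = snd_emu10k1_synth_alloc(emu, sample_bytes);
 *	if (blk)
 *		snd_emu10k1_synth_copy_from_user(emu, blk, 0, buf, sample_bytes);
 */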

/*
 * free a synth sample area
 */
int
snd_emu10k1_synth_free(struct snd_emu10k1 *emu, struct snd_util_memblk *memblk)
{
	struct snd_util_memhdr *hdr = emu->memhdr;
	struct snd_emu10k1_memblk *blk = (struct snd_emu10k1_memblk *)memblk;
	unsigned long flags;

	mutex_lock(&hdr->block_mutex);
	spin_lock_irqsave(&emu->memblk_lock, flags);
	if (blk->mapped_page >= 0)
		unmap_memblk(emu, blk);
	spin_unlock_irqrestore(&emu->memblk_lock, flags);
	synth_free_pages(emu, blk);
	__snd_util_mem_free(hdr, memblk);
	mutex_unlock(&hdr->block_mutex);
	return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_free);

/* check the new allocation range */
static void get_single_page_range(struct snd_util_memhdr *hdr,
				  struct snd_emu10k1_memblk *blk,
				  int *first_page_ret, int *last_page_ret)
{
	struct list_head *p;
	struct snd_emu10k1_memblk *q;
	int first_page, last_page;
	first_page = blk->first_page;
	if ((p = blk->mem.list.prev) != &hdr->block) {
		q = get_emu10k1_memblk(p, mem.list);
		if (q->last_page == first_page)
			first_page++;  /* first page was already allocated */
	}
	last_page = blk->last_page;
	if ((p = blk->mem.list.next) != &hdr->block) {
		q = get_emu10k1_memblk(p, mem.list);
		if (q->first_page == last_page)
			last_page--; /* last page was already allocated */
	}
	*first_page_ret = first_page;
	*last_page_ret = last_page;
}
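
/*
 * Why the trimming above is needed: synth blocks are packed back to back
 * in the address space, so the first aligned page of one block can be the
 * last aligned page of its predecessor.  Such a shared page was already
 * allocated by the neighbor and must be neither allocated nor freed twice.
 */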

/* release allocated pages */
static void __synth_free_pages(struct snd_emu10k1 *emu, int first_page,
			       int last_page)
{
	struct snd_dma_buffer dmab;
	int page;

	dmab.dev.type = SNDRV_DMA_TYPE_DEV;
	dmab.dev.dev = &emu->pci->dev;

	for (page = first_page; page <= last_page; page++) {
		if (emu->page_ptr_table[page] == NULL)
			continue;
		dmab.area = emu->page_ptr_table[page];
		dmab.addr = emu->page_addr_table[page];

		/*
		 * please keep me in sync with the logic in
		 * snd_emu10k1_alloc_pages_maybe_wider()
		 */
		dmab.bytes = PAGE_SIZE;
		if (emu->iommu_workaround)
			dmab.bytes *= 2;

		snd_dma_free_pages(&dmab);
		emu->page_addr_table[page] = 0;
		emu->page_ptr_table[page] = NULL;
	}
}

/*
 * allocate kernel pages
 */
static int synth_alloc_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int page, first_page, last_page;
	struct snd_dma_buffer dmab;

	emu10k1_memblk_init(blk);
	get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
	/* allocate kernel pages */
	for (page = first_page; page <= last_page; page++) {
		if (snd_emu10k1_alloc_pages_maybe_wider(emu, PAGE_SIZE,
							&dmab) < 0)
			goto __fail;
		if (!is_valid_page(emu, dmab.addr)) {
			snd_dma_free_pages(&dmab);
			goto __fail;
		}
		emu->page_addr_table[page] = dmab.addr;
		emu->page_ptr_table[page] = dmab.area;
	}
	return 0;

__fail:
	/* release the pages allocated so far */
	last_page = page - 1;
	__synth_free_pages(emu, first_page, last_page);

	return -ENOMEM;
}

/*
 * free pages
 */
static int synth_free_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int first_page, last_page;

	get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
	__synth_free_pages(emu, first_page, last_page);
	return 0;
}

/* calculate the buffer pointer from the offset address */
static inline void *offset_ptr(struct snd_emu10k1 *emu, int page, int offset)
{
	char *ptr;
	if (snd_BUG_ON(page < 0 || page >= emu->max_cache_pages))
		return NULL;
	ptr = emu->page_ptr_table[page];
	if (!ptr) {
		dev_err(emu->card->dev,
			"access to NULL ptr: page = %d\n", page);
		return NULL;
	}
	ptr += offset & (PAGE_SIZE - 1);
	return (void *)ptr;
}

/*
 * bzero(blk + offset, size)
 */
int snd_emu10k1_synth_bzero(struct snd_emu10k1 *emu, struct snd_util_memblk *blk,
			    int offset, int size)
{
	int page, nextofs, end_offset, temp, temp1;
	void *ptr;
	struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk;

	offset += blk->offset & (PAGE_SIZE - 1);
	end_offset = offset + size;
	page = get_aligned_page(offset);
	do {
		nextofs = aligned_page_offset(page + 1);
		temp = nextofs - offset;
		temp1 = end_offset - offset;
		if (temp1 < temp)
			temp = temp1;
		ptr = offset_ptr(emu, page + p->first_page, offset);
		if (ptr)
			memset(ptr, 0, temp);
		offset = nextofs;
		page++;
	} while (offset < end_offset);
	return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_bzero);
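
/*
 * The loop above (and the one below) clips each operation at the next page
 * boundary, since the pages backing a synth block need not be contiguous
 * in kernel address space.  E.g. with 4k pages, zeroing 6000 bytes from
 * offset 1000 is split into a 3096-byte memset in the first page and a
 * 2904-byte memset in the second.
 */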

/*
 * copy_from_user(blk + offset, data, size)
 */
int snd_emu10k1_synth_copy_from_user(struct snd_emu10k1 *emu, struct snd_util_memblk *blk,
				     int offset, const char __user *data, int size)
{
	int page, nextofs, end_offset, temp, temp1;
	void *ptr;
	struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk;

	offset += blk->offset & (PAGE_SIZE - 1);
	end_offset = offset + size;
	page = get_aligned_page(offset);
	do {
		nextofs = aligned_page_offset(page + 1);
		temp = nextofs - offset;
		temp1 = end_offset - offset;
		if (temp1 < temp)
			temp = temp1;
		ptr = offset_ptr(emu, page + p->first_page, offset);
		if (ptr && copy_from_user(ptr, data, temp))
			return -EFAULT;
		offset = nextofs;
		data += temp;
		page++;
	} while (offset < end_offset);
	return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_copy_from_user);