aio: clean up and fix aio_setup_ring page mapping
Since commit 36bc08cc01 ("fs/aio: Add support to aio ring pages migration") the aio ring setup code has used a special per-ring backing inode for the page allocations, rather than just using random anonymous pages.

However, rather than remembering the pages as it allocated them, it would allocate the pages, insert them into the file mapping (dirty, so that they couldn't be free'd), and then forget about them. And then to look them up again, it would mmap the mapping, and then use "get_user_pages()" to get back an array of the pages we just created.

Now, not only is that incredibly inefficient, it also leaked all the pages if the mmap failed (which could happen due to excessive number of mappings, for example).

So clean it all up, making it much more straightforward. Also remove some left-overs of the previous (broken) mm_populate() usage that was removed in commit d6c355c7da ("aio: fix race in ring buffer page lookup introduced by page migration support") but left the pointless and now misleading MAP_POPULATE flag around.

Tested-and-acked-by: Benjamin LaHaise <bcrl@kvack.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent b7000adef1
commit 3dc9acb676

fs/aio.c (60 lines changed)
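For readers skimming the diff, the shape of the new setup path is summarized first. This is a condensed sketch assembled from the hunks that follow, with the pr_debug() calls and surrounding context trimmed; it is not a compilable excerpt of fs/aio.c.

        /* Old flow: create the pages in the ring file's mapping, forget
         * them, mmap() the file with MAP_POPULATE, then call
         * get_user_pages() to find them again -- which leaked every page
         * when the mmap() failed.
         *
         * New flow: remember each page in ctx->ring_pages[] as it is
         * created, then simply mmap() the file.  No MAP_POPULATE, no
         * get_user_pages(), and nothing left dangling on the mmap() error
         * path.
         */
        for (i = 0; i < nr_pages; i++) {
                struct page *page;

                page = find_or_create_page(file->f_inode->i_mapping,
                                           i, GFP_HIGHUSER | __GFP_ZERO);
                if (!page)
                        break;
                SetPageUptodate(page);
                SetPageDirty(page);             /* dirty so it can't be freed */
                unlock_page(page);

                ctx->ring_pages[i] = page;      /* remembered immediately */
        }
        ctx->nr_pages = i;

        if (unlikely(i != nr_pages)) {
                aio_free_ring(ctx);             /* frees the pages recorded above */
                return -EAGAIN;
        }

        down_write(&mm->mmap_sem);
        ctx->mmap_base = do_mmap_pgoff(ctx->aio_ring_file, 0, ctx->mmap_size,
                                       PROT_READ | PROT_WRITE,
                                       MAP_SHARED, 0, &unused);
        up_write(&mm->mmap_sem);        /* no get_user_pages() follows, so the
                                         * lock can be dropped right away */
        if (IS_ERR((void *)ctx->mmap_base)) {
                ctx->mmap_size = 0;
                aio_free_ring(ctx);             /* ring pages freed, not leaked */
                return -EAGAIN;
        }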
@@ -326,7 +326,7 @@ static int aio_setup_ring(struct kioctx *ctx)
         struct aio_ring *ring;
         unsigned nr_events = ctx->max_reqs;
         struct mm_struct *mm = current->mm;
-        unsigned long size, populate;
+        unsigned long size, unused;
         int nr_pages;
         int i;
         struct file *file;
@@ -347,18 +347,6 @@ static int aio_setup_ring(struct kioctx *ctx)
                 return -EAGAIN;
         }

-        for (i = 0; i < nr_pages; i++) {
-                struct page *page;
-                page = find_or_create_page(file->f_inode->i_mapping,
-                                           i, GFP_HIGHUSER | __GFP_ZERO);
-                if (!page)
-                        break;
-                pr_debug("pid(%d) page[%d]->count=%d\n",
-                         current->pid, i, page_count(page));
-                SetPageUptodate(page);
-                SetPageDirty(page);
-                unlock_page(page);
-        }
         ctx->aio_ring_file = file;
         nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring))
                         / sizeof(struct io_event);
@@ -373,15 +361,36 @@ static int aio_setup_ring(struct kioctx *ctx)
                 }
         }

+        for (i = 0; i < nr_pages; i++) {
+                struct page *page;
+                page = find_or_create_page(file->f_inode->i_mapping,
+                                           i, GFP_HIGHUSER | __GFP_ZERO);
+                if (!page)
+                        break;
+                pr_debug("pid(%d) page[%d]->count=%d\n",
+                         current->pid, i, page_count(page));
+                SetPageUptodate(page);
+                SetPageDirty(page);
+                unlock_page(page);
+
+                ctx->ring_pages[i] = page;
+        }
+        ctx->nr_pages = i;
+
+        if (unlikely(i != nr_pages)) {
+                aio_free_ring(ctx);
+                return -EAGAIN;
+        }
+
         ctx->mmap_size = nr_pages * PAGE_SIZE;
         pr_debug("attempting mmap of %lu bytes\n", ctx->mmap_size);

         down_write(&mm->mmap_sem);
         ctx->mmap_base = do_mmap_pgoff(ctx->aio_ring_file, 0, ctx->mmap_size,
                                        PROT_READ | PROT_WRITE,
-                                       MAP_SHARED | MAP_POPULATE, 0, &populate);
+                                       MAP_SHARED, 0, &unused);
+        up_write(&mm->mmap_sem);
         if (IS_ERR((void *)ctx->mmap_base)) {
-                up_write(&mm->mmap_sem);
                 ctx->mmap_size = 0;
                 aio_free_ring(ctx);
                 return -EAGAIN;
@@ -389,27 +398,6 @@ static int aio_setup_ring(struct kioctx *ctx)

         pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base);

-        /* We must do this while still holding mmap_sem for write, as we
-         * need to be protected against userspace attempting to mremap()
-         * or munmap() the ring buffer.
-         */
-        ctx->nr_pages = get_user_pages(current, mm, ctx->mmap_base, nr_pages,
-                                       1, 0, ctx->ring_pages, NULL);
-
-        /* Dropping the reference here is safe as the page cache will hold
-         * onto the pages for us.  It is also required so that page migration
-         * can unmap the pages and get the right reference count.
-         */
-        for (i = 0; i < ctx->nr_pages; i++)
-                put_page(ctx->ring_pages[i]);
-
-        up_write(&mm->mmap_sem);
-
-        if (unlikely(ctx->nr_pages != nr_pages)) {
-                aio_free_ring(ctx);
-                return -EAGAIN;
-        }
-
         ctx->user_id = ctx->mmap_base;
         ctx->nr_events = nr_events;     /* trusted copy */