[PATCH] hugetlb: overcommit accounting check
Basic overcommit checking for hugetlbfs_file_mmap() based on an
implementation used with demand faulting in SLES9.

Since demand faulting can't guarantee the availability of pages at mmap
time, this patch implements a basic sanity check to ensure that the huge
pages required to satisfy the mmap are currently available. Despite the
obvious race, I think it is a good start on doing proper accounting. I'd
like to work towards an accounting system that mimics the semantics of
normal pages (especially for the MAP_PRIVATE/COW case). That work is
underway and builds on what this patch starts.

Huge page shared memory segments are simpler and still maintain their
commit-on-shmget semantics.

Signed-off-by: Adam Litke <agl@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 4c88726597
commit 2e9b367c22
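
As a minimal userspace sketch of the behavior this check produces (the
mount point /mnt/huge and the file name are hypothetical, and a 2 MB huge
page size is assumed): with this patch applied, an oversized mapping of a
hugetlbfs file fails at mmap() time with ENOMEM instead of faulting later.

    /* Sketch only: assumes hugetlbfs mounted at /mnt/huge (hypothetical). */
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    #define LEN (16UL * 1024 * 1024)   /* 16 MB: 8 huge pages at 2 MB each */

    int main(void)
    {
            int fd = open("/mnt/huge/testfile", O_CREAT | O_RDWR, 0600);
            if (fd < 0) {
                    perror("open");
                    return 1;
            }

            void *addr = mmap(NULL, LEN, PROT_READ | PROT_WRITE,
                              MAP_SHARED, fd, 0);
            if (addr == MAP_FAILED) {
                    /* ENOMEM here if too few huge pages are currently free */
                    perror("mmap");
                    close(fd);
                    return 1;
            }

            munmap(addr, LEN);
            close(fd);
            return 0;
    }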
@@ -45,9 +45,58 @@ static struct backing_dev_info hugetlbfs_backing_dev_info = {
 
 int sysctl_hugetlb_shm_group;
 
+static void huge_pagevec_release(struct pagevec *pvec)
+{
+	int i;
+
+	for (i = 0; i < pagevec_count(pvec); ++i)
+		put_page(pvec->pages[i]);
+
+	pagevec_reinit(pvec);
+}
+
+/*
+ * huge_pages_needed tries to determine the number of new huge pages that
+ * will be required to fully populate this VMA.  This will be equal to
+ * the size of the VMA in huge pages minus the number of huge pages
+ * (covered by this VMA) that are found in the page cache.
+ *
+ * Result is in bytes to be compatible with is_hugepage_mem_enough()
+ */
+unsigned long
+huge_pages_needed(struct address_space *mapping, struct vm_area_struct *vma)
+{
+	int i;
+	struct pagevec pvec;
+	unsigned long start = vma->vm_start;
+	unsigned long end = vma->vm_end;
+	unsigned long hugepages = (end - start) >> HPAGE_SHIFT;
+	pgoff_t next = vma->vm_pgoff;
+	pgoff_t endpg = next + ((end - start) >> PAGE_SHIFT);
+
+	pagevec_init(&pvec, 0);
+	while (next < endpg) {
+		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE))
+			break;
+		for (i = 0; i < pagevec_count(&pvec); i++) {
+			struct page *page = pvec.pages[i];
+			if (page->index > next)
+				next = page->index;
+			if (page->index >= endpg)
+				break;
+			next++;
+			hugepages--;
+		}
+		huge_pagevec_release(&pvec);
+	}
+	return hugepages << HPAGE_SHIFT;
+}
+
 static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
 {
 	struct inode *inode = file->f_dentry->d_inode;
 	struct address_space *mapping = inode->i_mapping;
+	unsigned long bytes;
 	loff_t len, vma_len;
 	int ret;
 
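
To make the accounting comment above concrete, a worked example of the
arithmetic huge_pages_needed() performs. This is an illustration only,
not kernel code; the 2 MB huge page size (HPAGE_SHIFT = 21) and the
cached-page count are assumptions:

    /* Illustrative arithmetic for huge_pages_needed(), userspace sketch. */
    #include <stdio.h>

    #define HPAGE_SHIFT 21UL        /* assumes 2 MB huge pages */

    int main(void)
    {
            unsigned long vma_bytes = 8UL << 20;                 /* 8 MB VMA */
            unsigned long hugepages = vma_bytes >> HPAGE_SHIFT;  /* 4 pages  */
            unsigned long cached = 1; /* huge pages already in the page cache */

            hugepages -= cached;      /* 3 new huge pages must be available  */

            /* Result in bytes, as is_hugepage_mem_enough() expects: 6291456 */
            printf("needed: %lu bytes\n", hugepages << HPAGE_SHIFT);
            return 0;
    }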
@@ -66,6 +115,10 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
 	if (vma->vm_end - vma->vm_start < HPAGE_SIZE)
 		return -EINVAL;
 
+	bytes = huge_pages_needed(mapping, vma);
+	if (!is_hugepage_mem_enough(bytes))
+		return -ENOMEM;
+
 	vma_len = (loff_t)(vma->vm_end - vma->vm_start);
 
 	down(&inode->i_sem);
@@ -168,16 +221,6 @@ static int hugetlbfs_commit_write(struct file *file,
 	return -EINVAL;
 }
 
-static void huge_pagevec_release(struct pagevec *pvec)
-{
-	int i;
-
-	for (i = 0; i < pagevec_count(pvec); ++i)
-		put_page(pvec->pages[i]);
-
-	pagevec_reinit(pvec);
-}
-
 static void truncate_huge_page(struct page *page)
 {
 	clear_page_dirty(page);