hugetlb, rmap: fix confusing page locking in hugetlb_cow()

The "if (!trylock_page)" block in the avoidcopy path of hugetlb_cow()
looks confusing and is buggy.  Originally this trylock_page() was
intended to make sure that old_page is locked even when old_page !=
pagecache_page, because then only pagecache_page is locked.
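
Before the patch, that block read roughly as follows (a sketch assembled
from the removed lines in the diff below; the comment is an added
annotation, not part of the original source):

        if (avoidcopy) {
                /* If trylock_page() fails, old_page was already locked by
                 * the caller, so the anon rmap is moved while that lock is
                 * held; if trylock_page() succeeds, the page is unlocked
                 * again and no rmap update is done at all.  That asymmetry
                 * is the confusing behaviour described above. */
                if (!trylock_page(old_page)) {
                        if (PageAnon(old_page))
                                page_move_anon_rmap(old_page, vma, address);
                } else
                        unlock_page(old_page);
                set_huge_ptep_writable(vma, address, ptep);
                return 0;
        }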

This patch fixes it by moving page locking into hugetlb_fault().
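
After the change, the locking is done entirely by the caller; roughly (a
sketch condensed from the second hunk of the diff, with a shortened
comment, not a verbatim copy of the resulting hugetlb_fault()):

        /* Always hold the lock on pte_page(entry) before calling
         * hugetlb_cow(); take it here unless the pagecache lookup
         * already returned that same page locked.  Lock order is
         * pagecache_page -> page, so there is no deadlock. */
        page = pte_page(entry);
        if (page != pagecache_page)
                lock_page(page);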

Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Naoya Horiguchi 2010-09-10 13:23:04 +09:00 committed by Linus Torvalds
parent cd67f0d2a9
commit 56c9cfb13c

--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2324,11 +2324,8 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
          * and just make the page writable */
         avoidcopy = (page_mapcount(old_page) == 1);
         if (avoidcopy) {
-                if (!trylock_page(old_page)) {
-                        if (PageAnon(old_page))
-                                page_move_anon_rmap(old_page, vma, address);
-                } else
-                        unlock_page(old_page);
+                if (PageAnon(old_page))
+                        page_move_anon_rmap(old_page, vma, address);
                 set_huge_ptep_writable(vma, address, ptep);
                 return 0;
         }
@@ -2631,10 +2628,16 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                                                         vma, address);
         }
 
-        if (!pagecache_page) {
-                page = pte_page(entry);
+        /*
+         * hugetlb_cow() requires page locks of pte_page(entry) and
+         * pagecache_page, so here we need take the former one
+         * when page != pagecache_page or !pagecache_page.
+         * Note that locking order is always pagecache_page -> page,
+         * so no worry about deadlock.
+         */
+        page = pte_page(entry);
+        if (page != pagecache_page)
                 lock_page(page);
-        }
 
         spin_lock(&mm->page_table_lock);
         /* Check for a racing update before calling hugetlb_cow */
@@ -2661,9 +2664,8 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
         if (pagecache_page) {
                 unlock_page(pagecache_page);
                 put_page(pagecache_page);
-        } else {
-                unlock_page(page);
         }
+        unlock_page(page);
 
 out_mutex:
         mutex_unlock(&hugetlb_instantiation_mutex);