mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2025-04-16 05:17:43 +07:00)
mm: fix some typos in mm directory
No functional change.

Link: http://lkml.kernel.org/r/20190118235123.27843-1-richard.weiyang@gmail.com
Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
Reviewed-by: Pekka Enberg <penberg@kernel.org>
Acked-by: Mike Rapoport <rppt@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent 8aa49762db
commit 8bb4e7a2ee
@@ -1301,7 +1301,7 @@ void memory_present(int nid, unsigned long start, unsigned long end);
 /*
  * If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we
- * need to check pfn validility within that MAX_ORDER_NR_PAGES block.
+ * need to check pfn validity within that MAX_ORDER_NR_PAGES block.
  * pfn_valid_within() should be used in this case; we optimise this away
  * when we have no holes within a MAX_ORDER_NR_PAGES block.
  */
@@ -100,7 +100,7 @@ int isolate_movable_page(struct page *page, isolate_mode_t mode)
         /*
          * Check PageMovable before holding a PG_lock because page's owner
          * assumes anybody doesn't touch PG_lock of newly allocated page
-         * so unconditionally grapping the lock ruins page's owner side.
+         * so unconditionally grabbing the lock ruins page's owner side.
          */
         if (unlikely(!__PageMovable(page)))
                 goto out_putpage;
@@ -438,7 +438,7 @@ static void vma_gap_update(struct vm_area_struct *vma)
 {
         /*
          * As it turns out, RB_DECLARE_CALLBACKS() already created a callback
-         * function that does exacltly what we want.
+         * function that does exactly what we want.
          */
         vma_gap_callbacks_propagate(&vma->vm_rb, NULL);
 }
@@ -1012,7 +1012,7 @@ static inline int is_mergeable_vma(struct vm_area_struct *vma,
          * VM_SOFTDIRTY should not prevent from VMA merging, if we
          * match the flags but dirty bit -- the caller should mark
          * merged VMA as dirty. If dirty bit won't be excluded from
-         * comparison, we increase pressue on the memory system forcing
+         * comparison, we increase pressure on the memory system forcing
          * the kernel to generate new VMAs when old one could be
          * extended instead.
          */
@@ -1115,7 +1115,7 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
  * PPPP NNNN PPPPPPPPPPPP PPPPPPPPNNNN PPPPNNNNNNNN
  * might become case 1 below case 2 below case 3 below
  *
- * It is important for case 8 that the the vma NNNN overlapping the
+ * It is important for case 8 that the vma NNNN overlapping the
  * region AAAA is never going to extended over XXXX. Instead XXXX must
  * be extended in region AAAA and NNNN must be removed. This way in
  * all cases where vma_merge succeeds, the moment vma_adjust drops the
@@ -1645,7 +1645,7 @@ SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
 #endif /* __ARCH_WANT_SYS_OLD_MMAP */
 
 /*
- * Some shared mappigns will want the pages marked read-only
+ * Some shared mappings will want the pages marked read-only
  * to track write events. If so, we'll downgrade vm_page_prot
  * to the private version (using protection_map[] without the
  * VM_SHARED bit).
@@ -7551,7 +7551,7 @@ static void __setup_per_zone_wmarks(void)
                          * value here.
                          *
                          * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
-                         * deltas control asynch page reclaim, and so should
+                         * deltas control async page reclaim, and so should
                          * not be capped for highmem.
                          */
                         unsigned long min_pages;
@@ -8028,7 +8028,7 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
 
                 /*
                  * Hugepages are not in LRU lists, but they're movable.
-                 * We need not scan over tail pages bacause we don't
+                 * We need not scan over tail pages because we don't
                  * handle each tail page individually in migration.
                  */
                 if (PageHuge(page)) {
@@ -2129,7 +2129,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
                 if (!lock) {
                         lock = 1;
                         /*
-                         * Taking the spinlock removes the possiblity
+                         * Taking the spinlock removes the possibility
                          * that acquire_slab() will see a slab page that
                          * is frozen
                          */
@@ -3527,7 +3527,7 @@ static bool kswapd_shrink_node(pg_data_t *pgdat,
  *
  * kswapd scans the zones in the highmem->normal->dma direction. It skips
  * zones which have free_pages > high_wmark_pages(zone), but once a zone is
- * found to have free_pages <= high_wmark_pages(zone), any page is that zone
+ * found to have free_pages <= high_wmark_pages(zone), any page in that zone
  * or lower is eligible for reclaim until at least one usable zone is
  * balanced.
  */