 include/linux/mmzone.h | 2 +-
 mm/migrate.c           | 2 +-
 mm/mmap.c              | 8 ++++----
 mm/page_alloc.c        | 4 ++--
 mm/slub.c              | 2 +-
 mm/vmscan.c            | 2 +-
 6 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 90c13cdeefb5..6d3290cd1f6f 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -1301,7 +1301,7 @@ void memory_present(int nid, unsigned long start, unsigned long end);
 
 /*
  * If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we
- * need to check pfn validility within that MAX_ORDER_NR_PAGES block.
+ * need to check pfn validity within that MAX_ORDER_NR_PAGES block.
  * pfn_valid_within() should be used in this case; we optimise this away
  * when we have no holes within a MAX_ORDER_NR_PAGES block.
  */
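The comment being corrected here describes when pfn_valid_within() matters: a minimal sketch of that pattern follows, assuming a scan over one MAX_ORDER block (the helper name scan_block() and its arguments are illustrative, not code from the tree).

/*
 * Hedged sketch, not the kernel's actual code: walk a block in which
 * holes inside MAX_ORDER_NR_PAGES are possible. pfn_valid_within()
 * compiles away to true unless the configuration allows such holes.
 */
static void scan_block(unsigned long start_pfn, unsigned long nr_pages)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
		struct page *page;

		if (!pfn_valid_within(pfn))	/* skip a hole inside the block */
			continue;
		page = pfn_to_page(pfn);
		/* ... inspect or isolate the page ... */
	}
}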
diff --git a/mm/migrate.c b/mm/migrate.c
index 0e9888cb33ad..5308d6abd384 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -100,7 +100,7 @@ int isolate_movable_page(struct page *page, isolate_mode_t mode)
 	/*
 	 * Check PageMovable before holding a PG_lock because page's owner
 	 * assumes anybody doesn't touch PG_lock of newly allocated page
-	 * so unconditionally grapping the lock ruins page's owner side.
+	 * so unconditionally grabbing the lock ruins page's owner side.
 	 */
 	if (unlikely(!__PageMovable(page)))
 		goto out_putpage;
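The ordering this comment argues for can be shown in isolation: check the movable marker first, and only then attempt the page lock. The sketch below is a simplified illustration (try_isolate() is a hypothetical name, not the function in mm/migrate.c).

/*
 * Hedged sketch of the ordering described above: a freshly allocated,
 * non-movable page never sees an unexpected PG_lock holder because we
 * bail out on !__PageMovable() before touching the lock at all.
 */
static bool try_isolate(struct page *page)
{
	if (unlikely(!__PageMovable(page)))	/* owner does not expect lockers */
		return false;

	if (!trylock_page(page))		/* someone else is already on it */
		return false;

	/* ... the driver's isolation callback would run under the lock ... */
	unlock_page(page);
	return true;
}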
diff --git a/mm/mmap.c b/mm/mmap.c
index eccba2650ef6..41eb48d9b527 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -438,7 +438,7 @@ static void vma_gap_update(struct vm_area_struct *vma)
 {
 	/*
 	 * As it turns out, RB_DECLARE_CALLBACKS() already created a callback
-	 * function that does exacltly what we want.
+	 * function that does exactly what we want.
 	 */
 	vma_gap_callbacks_propagate(&vma->vm_rb, NULL);
 }
@@ -1012,7 +1012,7 @@ static inline int is_mergeable_vma(struct vm_area_struct *vma,
 	 * VM_SOFTDIRTY should not prevent from VMA merging, if we
 	 * match the flags but dirty bit -- the caller should mark
 	 * merged VMA as dirty. If dirty bit won't be excluded from
-	 * comparison, we increase pressue on the memory system forcing
+	 * comparison, we increase pressure on the memory system forcing
 	 * the kernel to generate new VMAs when old one could be
 	 * extended instead.
 	 */
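The rule this comment states boils down to one flag comparison with VM_SOFTDIRTY masked out. A minimal, self-contained illustration of that check (flags_mergeable() is a made-up helper name, not the mm/mmap.c function):

/*
 * Hedged sketch: XOR the two flag words and ignore VM_SOFTDIRTY, so a
 * soft-dirty mismatch alone never prevents two VMAs from merging.
 */
static bool flags_mergeable(unsigned long a, unsigned long b)
{
	return ((a ^ b) & ~VM_SOFTDIRTY) == 0;
}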
@@ -1115,7 +1115,7 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
 *    PPPP    NNNN    PPPPPPPPPPPP    PPPPPPPPNNNN    PPPPNNNNNNNN
 *    might become    case 1 below    case 2 below    case 3 below
 *
- * It is important for case 8 that the the vma NNNN overlapping the
+ * It is important for case 8 that the vma NNNN overlapping the
 * region AAAA is never going to extended over XXXX. Instead XXXX must
 * be extended in region AAAA and NNNN must be removed. This way in
 * all cases where vma_merge succeeds, the moment vma_adjust drops the
@@ -1645,7 +1645,7 @@ SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
 #endif /* __ARCH_WANT_SYS_OLD_MMAP */
 
 /*
- * Some shared mappigns will want the pages marked read-only
+ * Some shared mappings will want the pages marked read-only
  * to track write events. If so, we'll downgrade vm_page_prot
  * to the private version (using protection_map[] without the
  * VM_SHARED bit).
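The downgrade described here can be summarized as computing the protection as if the mapping were private, so the first write still faults and can be recorded. The one-liner below is a hedged sketch of that idea (writenotify_prot() is an illustrative name, not the kernel's helper):

/*
 * Hedged sketch: derive the "write-notify" protection by dropping
 * VM_SHARED before the protection lookup, which yields the read-only
 * private variant of the same mapping flags.
 */
static pgprot_t writenotify_prot(unsigned long vm_flags)
{
	return vm_get_page_prot(vm_flags & ~VM_SHARED);
}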
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 9be9a22ebe35..ec250453f5e8 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -7551,7 +7551,7 @@ static void __setup_per_zone_wmarks(void)
 			 * value here.
 			 *
 			 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
-			 * deltas control asynch page reclaim, and so should
+			 * deltas control async page reclaim, and so should
 			 * not be capped for highmem.
 			 */
 			unsigned long min_pages;
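The deltas this comment refers to are simply the spacing between the min, low and high watermarks. A minimal arithmetic sketch of that relationship, with the actual delta computation left out (sketch_wmarks() is a hypothetical helper):

/*
 * Hedged sketch: low and high are min plus one and two deltas. It is
 * these deltas, not min itself, that decide when kswapd is woken and
 * when it goes back to sleep, i.e. they pace asynchronous reclaim.
 */
static void sketch_wmarks(unsigned long min, unsigned long delta,
			  unsigned long *low, unsigned long *high)
{
	*low  = min + delta;		/* kswapd is woken below this */
	*high = min + 2 * delta;	/* kswapd sleeps again above this */
}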
@@ -8028,7 +8028,7 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
 
 		/*
 		 * Hugepages are not in LRU lists, but they're movable.
-		 * We need not scan over tail pages bacause we don't
+		 * We need not scan over tail pages because we don't
 		 * handle each tail page individually in migration.
 		 */
 		if (PageHuge(page)) {
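The skip this comment justifies amounts to jumping from the head page straight past all of its tails. A hedged sketch of that computation (first_pfn_after_hugepage() is an illustrative name, not the code that follows in has_unmovable_pages()):

/*
 * Hedged sketch: once the head of a compound huge page has been seen,
 * the next pfn worth examining is the one just past the whole compound
 * page, since migration treats it as a single unit.
 */
static unsigned long first_pfn_after_hugepage(struct page *page)
{
	struct page *head = compound_head(page);
	unsigned long nr = 1UL << compound_order(head);

	return page_to_pfn(head) + nr;	/* first pfn after the compound page */
}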
diff --git a/mm/slub.c b/mm/slub.c
index d8b1eee2dd86..017a2ce5ba23 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2129,7 +2129,7 @@ redo:
 		if (!lock) {
 			lock = 1;
 			/*
-			 * Taking the spinlock removes the possiblity
+			 * Taking the spinlock removes the possibility
 			 * that acquire_slab() will see a slab page that
 			 * is frozen
 			 */
diff --git a/mm/vmscan.c b/mm/vmscan.c
index e979705bbf32..63195364ab2e 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3527,7 +3527,7 @@ static bool kswapd_shrink_node(pg_data_t *pgdat,
 *
 * kswapd scans the zones in the highmem->normal->dma direction. It skips
 * zones which have free_pages > high_wmark_pages(zone), but once a zone is
- * found to have free_pages <= high_wmark_pages(zone), any page is that zone
+ * found to have free_pages <= high_wmark_pages(zone), any page in that zone
 * or lower is eligible for reclaim until at least one usable zone is
 * balanced.
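The scan direction this comment describes can be pictured as a downward walk over the node's zones that stops as soon as one zone is back above its high watermark. The sketch below is a simplified illustration, not kswapd's actual balance check (node_balanced_sketch() is a hypothetical name).

/*
 * Hedged sketch: walk from the highest eligible zone index down to DMA
 * and treat the node as balanced once one populated zone has more free
 * pages than its high watermark.
 */
static bool node_balanced_sketch(pg_data_t *pgdat, int highest_zoneidx)
{
	int i;

	for (i = highest_zoneidx; i >= 0; i--) {
		struct zone *zone = pgdat->node_zones + i;

		if (!populated_zone(zone))
			continue;
		if (zone_page_state(zone, NR_FREE_PAGES) > high_wmark_pages(zone))
			return true;	/* one usable zone is balanced */
	}
	return false;
}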