author     Lucas De Marchi <lucas.demarchi@profusion.mobi>  2011-03-30 21:57:33 -0400
committer  Lucas De Marchi <lucas.demarchi@profusion.mobi>  2011-03-31 10:26:23 -0400
commit     25985edcedea6396277003854657b5f3cb31a628 (patch)
tree       f026e810210a2ee7290caeb737c23cb6472b7c38 /mm
parent     6aba74f2791287ec407e0f92487a725a25908067 (diff)
Fix common misspellings
Fixes generated by 'codespell' and manually reviewed.

Signed-off-by: Lucas De Marchi <lucas.demarchi@profusion.mobi>
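As a rough sketch of how a sweep like this can be reproduced (assuming codespell is installed; the exact suggestions depend on the codespell version and its dictionary, and anything it flags still needs the manual review mentioned above):

    codespell mm/        # report suspected misspellings under mm/ without modifying files
    codespell -w mm/     # write the suggested corrections in place, then inspect with 'git diff'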
Diffstat (limited to 'mm')
-rw-r--r--   mm/backing-dev.c       2
-rw-r--r--   mm/hugetlb.c          10
-rw-r--r--   mm/hwpoison-inject.c   2
-rw-r--r--   mm/internal.h          2
-rw-r--r--   mm/kmemleak.c          6
-rw-r--r--   mm/ksm.c               2
-rw-r--r--   mm/memcontrol.c        8
-rw-r--r--   mm/memory-failure.c    6
-rw-r--r--   mm/memory_hotplug.c    2
-rw-r--r--   mm/migrate.c           2
-rw-r--r--   mm/nobootmem.c         2
-rw-r--r--   mm/page_alloc.c        4
-rw-r--r--   mm/page_cgroup.c       2
-rw-r--r--   mm/percpu.c           10
-rw-r--r--   mm/slab.c              4
-rw-r--r--   mm/slub.c              8
-rw-r--r--   mm/sparse.c            2
-rw-r--r--   mm/util.c              2
-rw-r--r--   mm/vmscan.c            4
19 files changed, 40 insertions, 40 deletions
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 0d9a036ada66..befc87531e4f 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -787,7 +787,7 @@ EXPORT_SYMBOL(congestion_wait);
  * jiffies for either a BDI to exit congestion of the given @sync queue
  * or a write to complete.
  *
- * In the absense of zone congestion, cond_resched() is called to yield
+ * In the absence of zone congestion, cond_resched() is called to yield
  * the processor if necessary but otherwise does not sleep.
  *
  * The return value is 0 if the sleep is for the full timeout. Otherwise,
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 06de5aa4d644..8ee3bd8ec5b5 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -146,7 +146,7 @@ static long region_chg(struct list_head *head, long f, long t)
         if (rg->from > t)
                 return chg;
 
-        /* We overlap with this area, if it extends futher than
+        /* We overlap with this area, if it extends further than
         * us then we must extend ourselves. Account for its
         * existing reservation. */
         if (rg->to > t) {
@@ -842,7 +842,7 @@ struct page *alloc_huge_page_node(struct hstate *h, int nid)
 }
 
 /*
- * Increase the hugetlb pool such that it can accomodate a reservation
+ * Increase the hugetlb pool such that it can accommodate a reservation
  * of size 'delta'.
  */
 static int gather_surplus_pages(struct hstate *h, int delta)
@@ -890,7 +890,7 @@ retry:
 
        /*
         * The surplus_list now contains _at_least_ the number of extra pages
-        * needed to accomodate the reservation. Add the appropriate number
+        * needed to accommodate the reservation. Add the appropriate number
         * of pages to the hugetlb pool and free the extras back to the buddy
         * allocator. Commit the entire reservation here to prevent another
         * process from stealing the pages as they are added to the pool but
@@ -2043,7 +2043,7 @@ static void hugetlb_vm_op_open(struct vm_area_struct *vma)
         * This new VMA should share its siblings reservation map if present.
         * The VMA will only ever have a valid reservation map pointer where
         * it is being copied for another still existing VMA. As that VMA
-        * has a reference to the reservation map it cannot dissappear until
+        * has a reference to the reservation map it cannot disappear until
         * after this open call completes. It is therefore safe to take a
         * new reference here without additional locking.
         */
@@ -2490,7 +2490,7 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
        /*
         * Currently, we are forced to kill the process in the event the
         * original mapper has unmapped pages from the child due to a failed
-        * COW. Warn that such a situation has occured as it may not be obvious
+        * COW. Warn that such a situation has occurred as it may not be obvious
         */
        if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
                printk(KERN_WARNING
diff --git a/mm/hwpoison-inject.c b/mm/hwpoison-inject.c
index 0948f1072d6b..c7fc7fd00e32 100644
--- a/mm/hwpoison-inject.c
+++ b/mm/hwpoison-inject.c
@@ -1,4 +1,4 @@
-/* Inject a hwpoison memory failure on a arbitary pfn */
+/* Inject a hwpoison memory failure on a arbitrary pfn */
 #include <linux/module.h>
 #include <linux/debugfs.h>
 #include <linux/kernel.h>
diff --git a/mm/internal.h b/mm/internal.h
index 3438dd43a062..9d0ced8e505e 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -162,7 +162,7 @@ static inline struct page *mem_map_offset(struct page *base, int offset)
 }
 
 /*
- * Iterator over all subpages withing the maximally aligned gigantic
+ * Iterator over all subpages within the maximally aligned gigantic
  * page 'base'. Handle any discontiguity in the mem_map.
  */
 static inline struct page *mem_map_next(struct page *iter,
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 84225f3b7190..c1d5867543e4 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -265,7 +265,7 @@ static void kmemleak_disable(void);
 } while (0)
 
 /*
- * Macro invoked when a serious kmemleak condition occured and cannot be
+ * Macro invoked when a serious kmemleak condition occurred and cannot be
  * recovered from. Kmemleak will be disabled and further allocation/freeing
  * tracing no longer available.
  */
@@ -1006,7 +1006,7 @@ static bool update_checksum(struct kmemleak_object *object)
 
 /*
  * Memory scanning is a long process and it needs to be interruptable. This
- * function checks whether such interrupt condition occured.
+ * function checks whether such interrupt condition occurred.
  */
 static int scan_should_stop(void)
 {
@@ -1733,7 +1733,7 @@ static int __init kmemleak_late_init(void)
 
        if (atomic_read(&kmemleak_error)) {
                /*
-                * Some error occured and kmemleak was disabled. There is a
+                * Some error occurred and kmemleak was disabled. There is a
                 * small chance that kmemleak_disable() was called immediately
                 * after setting kmemleak_initialized and we may end up with
                 * two clean-up threads but serialized by scan_mutex.
diff --git a/mm/ksm.c b/mm/ksm.c
index 1bbe785aa559..942dfc73a2ff 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -720,7 +720,7 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
        swapped = PageSwapCache(page);
        flush_cache_page(vma, addr, page_to_pfn(page));
        /*
-        * Ok this is tricky, when get_user_pages_fast() run it doesnt
+        * Ok this is tricky, when get_user_pages_fast() run it doesn't
         * take any lock, therefore the check that we are going to make
         * with the pagecount against the mapcount is racey and
         * O_DIRECT can happen right after the check.
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 1f0b460fe58c..010f9166fa6e 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1466,7 +1466,7 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
                        break;
                }
                /*
-                * We want to do more targetted reclaim.
+                * We want to do more targeted reclaim.
                 * excess >> 2 is not to excessive so as to
                 * reclaim too much, nor too less that we keep
                 * coming back to reclaim from this cgroup
@@ -2265,7 +2265,7 @@ void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail)
  * - compound_lock is held when nr_pages > 1
  *
  * This function doesn't do "charge" nor css_get to new cgroup. It should be
- * done by a caller(__mem_cgroup_try_charge would be usefull). If @uncharge is
+ * done by a caller(__mem_cgroup_try_charge would be useful). If @uncharge is
  * true, this function does "uncharge" from old cgroup, but it doesn't if
  * @uncharge is false, so a caller should do "uncharge".
  */
@@ -2318,7 +2318,7 @@ static int mem_cgroup_move_account(struct page *page,
         * We charges against "to" which may not have any tasks. Then, "to"
         * can be under rmdir(). But in current implementation, caller of
         * this function is just force_empty() and move charge, so it's
-        * garanteed that "to" is never removed. So, we don't check rmdir
+        * guaranteed that "to" is never removed. So, we don't check rmdir
         * status here.
         */
        move_unlock_page_cgroup(pc, &flags);
@@ -2648,7 +2648,7 @@ static void mem_cgroup_do_uncharge(struct mem_cgroup *mem,
        batch->memcg = mem;
        /*
         * do_batch > 0 when unmapping pages or inode invalidate/truncate.
-        * In those cases, all pages freed continously can be expected to be in
+        * In those cases, all pages freed continuously can be expected to be in
         * the same cgroup and we have chance to coalesce uncharges.
         * But we do uncharge one by one if this is killed by OOM(TIF_MEMDIE)
         * because we want to do uncharge as soon as possible.
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 37feb9fec228..2b9a5eef39e0 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -208,7 +208,7 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
         * Don't use force here, it's convenient if the signal
         * can be temporarily blocked.
         * This could cause a loop when the user sets SIGBUS
-        * to SIG_IGN, but hopefully noone will do that?
+        * to SIG_IGN, but hopefully no one will do that?
         */
        ret = send_sig_info(SIGBUS, &si, t); /* synchronous? */
        if (ret < 0)
@@ -634,7 +634,7 @@ static int me_pagecache_dirty(struct page *p, unsigned long pfn)
         * when the page is reread or dropped. If an
         * application assumes it will always get error on
         * fsync, but does other operations on the fd before
-        * and the page is dropped inbetween then the error
+        * and the page is dropped between then the error
         * will not be properly reported.
         *
         * This can already happen even without hwpoisoned
@@ -728,7 +728,7 @@ static int me_huge_page(struct page *p, unsigned long pfn)
  * The table matches them in order and calls the right handler.
  *
  * This is quite tricky because we can access page at any time
- * in its live cycle, so all accesses have to be extremly careful.
+ * in its live cycle, so all accesses have to be extremely careful.
  *
  * This is not complete. More states could be added.
  * For any missing state don't attempt recovery.
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 321fc7455df7..a2acaf820fe5 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -724,7 +724,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
                        pfn);
                dump_page(page);
 #endif
-               /* Becasue we don't have big zone->lock. we should
+               /* Because we don't have big zone->lock. we should
                   check this again here. */
                if (page_count(page)) {
                        not_managed++;
diff --git a/mm/migrate.c b/mm/migrate.c
index b0406d739ea7..34132f8e9109 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -375,7 +375,7 @@ void migrate_page_copy(struct page *newpage, struct page *page)
                 * redo the accounting that clear_page_dirty_for_io undid,
                 * but we can't use set_page_dirty because that function
                 * is actually a signal that all of the page has become dirty.
-                * Wheras only part of our page may be dirty.
+                * Whereas only part of our page may be dirty.
                 */
                __set_page_dirty_nobuffers(newpage);
        }
diff --git a/mm/nobootmem.c b/mm/nobootmem.c
index e99f6cd1da1f..9109049f0bbc 100644
--- a/mm/nobootmem.c
+++ b/mm/nobootmem.c
@@ -150,7 +150,7 @@ unsigned long __init free_all_bootmem(void)
 {
        /*
         * We need to use MAX_NUMNODES instead of NODE_DATA(0)->node_id
-        * because in some case like Node0 doesnt have RAM installed
+        * because in some case like Node0 doesn't have RAM installed
         * low ram will be on Node1
         * Use MAX_NUMNODES will make sure all ranges in early_node_map[]
         * will be used instead of only Node0 related
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d6e7ba7373be..2747f5e5abc1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -942,7 +942,7 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
                 * If breaking a large block of pages, move all free
                 * pages to the preferred allocation list. If falling
                 * back for a reclaimable kernel allocation, be more
-                * agressive about taking ownership of free pages
+                * aggressive about taking ownership of free pages
                 */
                if (unlikely(current_order >= (pageblock_order >> 1)) ||
                    start_migratetype == MIGRATE_RECLAIMABLE ||
@@ -3926,7 +3926,7 @@ static void __init find_usable_zone_for_movable(void)
 
 /*
  * The zone ranges provided by the architecture do not include ZONE_MOVABLE
- * because it is sized independant of architecture. Unlike the other zones,
+ * because it is sized independent of architecture. Unlike the other zones,
  * the starting point for ZONE_MOVABLE is not fixed. It may be different
  * in each node depending on the size of each node and how evenly kernelcore
  * is distributed. This helper function adjusts the zone ranges
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index a12cc3fa9859..99055010cece 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -377,7 +377,7 @@ not_enough_page:
  * @new: new id
  *
  * Returns old id at success, 0 at failure.
- * (There is no mem_cgroup useing 0 as its id)
+ * (There is no mem_cgroup using 0 as its id)
  */
 unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
                unsigned short old, unsigned short new)
diff --git a/mm/percpu.c b/mm/percpu.c
index 55d4d113fbd3..a160db39b810 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -342,7 +342,7 @@ static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
  * @chunk: chunk of interest
  *
  * Determine whether area map of @chunk needs to be extended to
- * accomodate a new allocation.
+ * accommodate a new allocation.
  *
  * CONTEXT:
  * pcpu_lock.
@@ -431,7 +431,7 @@ out_unlock:
  * depending on @head, is reduced by @tail bytes and @tail byte block
  * is inserted after the target block.
  *
- * @chunk->map must have enough free slots to accomodate the split.
+ * @chunk->map must have enough free slots to accommodate the split.
  *
  * CONTEXT:
  * pcpu_lock.
@@ -1435,7 +1435,7 @@ static struct pcpu_alloc_info * __init pcpu_build_alloc_info(
        /*
         * Determine min_unit_size, alloc_size and max_upa such that
         * alloc_size is multiple of atom_size and is the smallest
-        * which can accomodate 4k aligned segments which are equal to
+        * which can accommodate 4k aligned segments which are equal to
         * or larger than min_unit_size.
         */
        min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);
@@ -1550,7 +1550,7 @@ static struct pcpu_alloc_info * __init pcpu_build_alloc_info(
  * @atom_size: allocation atom size
  * @cpu_distance_fn: callback to determine distance between cpus, optional
  * @alloc_fn: function to allocate percpu page
- * @free_fn: funtion to free percpu page
+ * @free_fn: function to free percpu page
  *
  * This is a helper to ease setting up embedded first percpu chunk and
  * can be called where pcpu_setup_first_chunk() is expected.
@@ -1678,7 +1678,7 @@ out_free:
  * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
  * @reserved_size: the size of reserved percpu area in bytes
  * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
- * @free_fn: funtion to free percpu page, always called with PAGE_SIZE
+ * @free_fn: function to free percpu page, always called with PAGE_SIZE
  * @populate_pte_fn: function to populate pte
  *
  * This is a helper to ease setting up page-remapped first percpu
diff --git a/mm/slab.c b/mm/slab.c
index 568803f121a8..46a9c163a92f 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -878,7 +878,7 @@ static struct array_cache *alloc_arraycache(int node, int entries,
        nc = kmalloc_node(memsize, gfp, node);
        /*
         * The array_cache structures contain pointers to free object.
-        * However, when such objects are allocated or transfered to another
+        * However, when such objects are allocated or transferred to another
         * cache the pointers are not cleared and they could be counted as
         * valid references during a kmemleak scan. Therefore, kmemleak must
         * not scan such objects.
@@ -2606,7 +2606,7 @@ EXPORT_SYMBOL(kmem_cache_shrink);
  *
  * The cache must be empty before calling this function.
  *
- * The caller must guarantee that noone will allocate memory from the cache
+ * The caller must guarantee that no one will allocate memory from the cache
  * during the kmem_cache_destroy().
  */
 void kmem_cache_destroy(struct kmem_cache *cachep)
diff --git a/mm/slub.c b/mm/slub.c
index f881874843a5..94d2a33a866e 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -64,7 +64,7 @@
  * we must stay away from it for a while since we may cause a bouncing
  * cacheline if we try to acquire the lock. So go onto the next slab.
  * If all pages are busy then we may allocate a new slab instead of reusing
- * a partial slab. A new slab has noone operating on it and thus there is
+ * a partial slab. A new slab has no one operating on it and thus there is
  * no danger of cacheline contention.
  *
  * Interrupts are disabled during allocation and deallocation in order to
@@ -1929,7 +1929,7 @@ redo:
        else {
 #ifdef CONFIG_CMPXCHG_LOCAL
                /*
-                * The cmpxchg will only match if there was no additonal
+                * The cmpxchg will only match if there was no additional
                 * operation and if we are on the right processor.
                 *
                 * The cmpxchg does the following atomically (without lock semantics!)
@@ -3547,7 +3547,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 
        ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, caller);
 
-       /* Honor the call site pointer we recieved. */
+       /* Honor the call site pointer we received. */
        trace_kmalloc(caller, ret, size, s->size, gfpflags);
 
        return ret;
@@ -3577,7 +3577,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 
        ret = slab_alloc(s, gfpflags, node, caller);
 
-       /* Honor the call site pointer we recieved. */
+       /* Honor the call site pointer we received. */
        trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
 
        return ret;
diff --git a/mm/sparse.c b/mm/sparse.c
index 93250207c5cf..aa64b12831a2 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -500,7 +500,7 @@ void __init sparse_init(void)
         * so alloc 2M (with 2M align) and 24 bytes in turn will
         * make next 2M slip to one more 2M later.
         * then in big system, the memory will have a lot of holes...
-        * here try to allocate 2M pages continously.
+        * here try to allocate 2M pages continuously.
         *
         * powerpc need to call sparse_init_one_section right after each
         * sparse_early_mem_map_alloc, so allocate usemap_map at first.
diff --git a/mm/util.c b/mm/util.c
index f126975ef23e..e7b103a6fd21 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -227,7 +227,7 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
 /*
  * Like get_user_pages_fast() except its IRQ-safe in that it won't fall
  * back to the regular GUP.
- * If the architecture not support this fucntion, simply return with no
+ * If the architecture not support this function, simply return with no
  * page pinned
  */
 int __attribute__((weak)) __get_user_pages_fast(unsigned long start,
diff --git a/mm/vmscan.c b/mm/vmscan.c
index f73b8657c2d0..c7f5a6d4b75b 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1065,7 +1065,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
                 * surrounding the tag page. Only take those pages of
                 * the same active state as that tag page. We may safely
                 * round the target page pfn down to the requested order
-                * as the mem_map is guarenteed valid out to MAX_ORDER,
+                * as the mem_map is guaranteed valid out to MAX_ORDER,
                 * where that page is in a different zone we will detect
                 * it from its zone id and abort this block scan.
                 */
@@ -2224,7 +2224,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
  * o a 16M DMA zone that is balanced will not balance a zone on any
  *   reasonable sized machine
  * o On all other machines, the top zone must be at least a reasonable
- *   precentage of the middle zones. For example, on 32-bit x86, highmem
+ *   percentage of the middle zones. For example, on 32-bit x86, highmem
  *   would need to be at least 256M for it to be balance a whole node.
  *   Similarly, on x86-64 the Normal zone would need to be at least 1G
  *   to balance a node on its own. These seemed like reasonable ratios.