-rw-r--r--  mm/hugetlb.c        | 2 +-
-rw-r--r--  mm/memory.c         | 2 +-
-rw-r--r--  mm/memory_hotplug.c | 2 +-
-rw-r--r--  mm/mempool.c        | 2 +-
-rw-r--r--  mm/page-writeback.c | 2 +-
-rw-r--r--  mm/page_alloc.c     | 8 ++++----
-rw-r--r--  mm/prio_tree.c      | 2 +-
-rw-r--r--  mm/slab.c           | 6 +++---
-rw-r--r--  mm/swap.c           | 2 +-
-rw-r--r--  mm/vmalloc.c        | 6 +++---
-rw-r--r--  mm/vmscan.c         | 2 +-
11 files changed, 18 insertions(+), 18 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 034617f8cdb2..8b809ecefa39 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1017,7 +1017,7 @@ static long region_chg(struct list_head *head, long f, long t)
 
 	/* If we are below the current region then a new region is required.
 	 * Subtle, allocate a new region at the position but make it zero
-	 * size such that we can guarentee to record the reservation. */
+	 * size such that we can guarantee to record the reservation. */
 	if (&rg->link == head || t < rg->from) {
 		nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
 		if (!nrg)
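For context on the comment corrected above: region_chg() cannot know the final extent of the reservation yet, so it records a placeholder spanning [f, f). Because the node already exists, the later commit step can always record the reservation without a failing allocation. A hedged sketch of the lines that follow this hunk, consistent with the surrounding list code:

	/* Placeholder region: zero-sized (to == from) reserves the list
	 * slot now; the real extent is filled in at commit time, which
	 * therefore cannot fail for lack of memory. */
	nrg->from = f;
	nrg->to   = f;
	INIT_LIST_HEAD(&nrg->link);
	list_add(&nrg->link, rg->link.prev);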
diff --git a/mm/memory.c b/mm/memory.c
index 142683df8755..eefd5b68bc42 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2713,7 +2713,7 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, in
 		return 0;
 
 	down_read(&mm->mmap_sem);
-	/* ignore errors, just check how much was sucessfully transfered */
+	/* ignore errors, just check how much was successfully transferred */
 	while (len) {
 		int bytes, ret, offset;
 		void *maddr;
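The comment's point about error handling: access_process_vm() pins one page per iteration with get_user_pages(), copies through a temporary kernel mapping, and on the first failure simply returns how many bytes were already transferred. A hedged sketch of one loop iteration using the calls of this era (page and vma are declared earlier in the function):

	ret = get_user_pages(tsk, mm, addr, 1, write, 1, &page, &vma);
	if (ret <= 0)
		break;			/* caller just sees a short count */

	bytes = len;
	offset = addr & (PAGE_SIZE - 1);
	if (bytes > PAGE_SIZE - offset)
		bytes = PAGE_SIZE - offset;

	maddr = kmap(page);		/* temporary kernel mapping */
	if (write) {
		copy_to_user_page(vma, page, addr, maddr + offset, buf, bytes);
		set_page_dirty_lock(page);
	} else {
		copy_from_user_page(vma, page, addr, buf, maddr + offset, bytes);
	}
	kunmap(page);
	page_cache_release(page);
	len -= bytes; buf += bytes; addr += bytes;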
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 091b9c6c2529..1833879f8438 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -121,7 +121,7 @@ int __add_pages(struct zone *zone, unsigned long phys_start_pfn,
 		err = __add_section(zone, i << PFN_SECTION_SHIFT);
 
 		/*
-		 * EEXIST is finally dealed with by ioresource collision
+		 * EEXIST is finally dealt with by ioresource collision
 		 * check. see add_memory() => register_memory_resource()
 		 * Warning will be printed if there is collision.
 		 */
diff --git a/mm/mempool.c b/mm/mempool.c
index 02d5ec3feabc..a46eb1b4bb66 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -299,7 +299,7 @@ EXPORT_SYMBOL(mempool_free_slab);
 
 /*
  * A commonly used alloc and free fn that kmalloc/kfrees the amount of memory
- * specfied by pool_data
+ * specified by pool_data
  */
 void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
 {
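mempool_kmalloc()/mempool_kfree() are meant to be handed to mempool_create(), with the allocation size smuggled through pool_data as a cast integer. A minimal usage sketch (error handling trimmed):

	/* Pool guaranteeing at least 4 preallocated 256-byte buffers. */
	mempool_t *pool = mempool_create(4, mempool_kmalloc, mempool_kfree,
					 (void *)(unsigned long)256);

	void *buf = mempool_alloc(pool, GFP_KERNEL);	/* kmalloc(256, ...) underneath */
	/* ... use buf ... */
	mempool_free(buf, pool);
	mempool_destroy(pool);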
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 7845462064f4..838a5e31394c 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -989,7 +989,7 @@ int __set_page_dirty_no_writeback(struct page *page)
  * mapping is pinned by the vma's ->vm_file reference.
  *
  * We take care to handle the case where the page was truncated from the
- * mapping by re-checking page_mapping() insode tree_lock.
+ * mapping by re-checking page_mapping() inside tree_lock.
  */
 int __set_page_dirty_nobuffers(struct page *page)
 {
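The re-check named in the comment is the classic validate-under-the-lock pattern: page_mapping() is sampled again after tree_lock is taken, so a truncation that raced in between is noticed rather than dirtying a dead entry. A hedged sketch of the shape of __set_page_dirty_nobuffers() in this era (tree_lock was still a rwlock):

	struct address_space *mapping = page_mapping(page);

	if (mapping) {
		write_lock_irq(&mapping->tree_lock);
		/* Re-check: truncation may have unhooked the page meanwhile. */
		if (page_mapping(page) == mapping)
			radix_tree_tag_set(&mapping->page_tree,
					   page_index(page),
					   PAGECACHE_TAG_DIRTY);
		write_unlock_irq(&mapping->tree_lock);
	}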
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 43f757fcf30f..da69d833e067 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -123,7 +123,7 @@ static unsigned long __meminitdata dma_reserve;
 
 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
 /*
- * MAX_ACTIVE_REGIONS determines the maxmimum number of distinct
+ * MAX_ACTIVE_REGIONS determines the maximum number of distinct
  * ranges of memory (RAM) that may be registered with add_active_range().
  * Ranges passed to add_active_range() will be merged if possible
  * so the number of times add_active_range() can be called is
@@ -1260,7 +1260,7 @@ int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
  * skip over zones that are not allowed by the cpuset, or that have
  * been recently (in last second) found to be nearly full. See further
  * comments in mmzone.h. Reduces cache footprint of zonelist scans
- * that have to skip over alot of full or unallowed zones.
+ * that have to skip over a lot of full or unallowed zones.
  *
  * If the zonelist cache is present in the passed in zonelist, then
  * returns a pointer to the allowed node mask (either the current
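The cache this comment documents is a per-zonelist bitmap of zones recently found full; scans test the bit before doing any watermark math, and the whole bitmap is zapped roughly once a second. A hedged sketch of the idea (zlc and fullzones follow the zonelist_cache structure described in mmzone.h):

	/* Before the expensive checks: skip zones marked full recently. */
	if (test_bit(i, zlc->fullzones))
		continue;

	/* After a failed allocation attempt from zone i: */
	set_bit(i, zlc->fullzones);	/* cleared en masse about once a second */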
@@ -2358,7 +2358,7 @@ void build_all_zonelists(void)
 		__build_all_zonelists(NULL);
 		cpuset_init_current_mems_allowed();
 	} else {
-		/* we have to stop all cpus to guaranntee there is no user
+		/* we have to stop all cpus to guarantee there is no user
 		   of zonelist */
 		stop_machine_run(__build_all_zonelists, NULL, NR_CPUS);
 		/* cpuset refresh routine should be here */
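stop_machine_run() is the blunt instrument for data that is read without locks: every other CPU is parked with interrupts disabled while the callback runs, so no reader can be mid-traversal of the old zonelists. The call's shape, annotated as a sketch:

	/* Runs __build_all_zonelists(NULL) with all other CPUs halted;
	 * NR_CPUS as the third argument means "any CPU may run it".
	 * Returns the callback's return value. */
	stop_machine_run(__build_all_zonelists, NULL, NR_CPUS);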
@@ -2864,7 +2864,7 @@ static int __meminit first_active_region_index_in_nid(int nid)
 
 /*
  * Basic iterator support. Return the next active range of PFNs for a node
- * Note: nid == MAX_NUMNODES returns next region regardles of node
+ * Note: nid == MAX_NUMNODES returns next region regardless of node
  */
 static int __meminit next_active_region_index_in_nid(int index, int nid)
 {
diff --git a/mm/prio_tree.c b/mm/prio_tree.c
index b4e76c25f953..603ae98d9694 100644
--- a/mm/prio_tree.c
+++ b/mm/prio_tree.c
@@ -34,7 +34,7 @@
  * Radix priority search tree for address_space->i_mmap
  *
  * For each vma that map a unique set of file pages i.e., unique [radix_index,
- * heap_index] value, we have a corresponing priority search tree node. If
+ * heap_index] value, we have a corresponding priority search tree node. If
 * multiple vmas have identical [radix_index, heap_index] value, then one of
 * them is used as a tree node and others are stored in a vm_set list. The tree
 * node points to the first vma (head) of the list using vm_set.head.
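The [radix_index, heap_index] pair the comment refers to comes straight from a vma's file offset and size, so vmas mapping the same file range get identical indices and collapse into one tree node plus a vm_set list. A hedged sketch of the computation this file's GET_INDEX convention encodes:

	/* radix_index: first file page the vma maps;
	 * heap_index:  last file page the vma maps. */
	radix_index = vma->vm_pgoff;
	heap_index  = vma->vm_pgoff +
		      ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) - 1;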
diff --git a/mm/slab.c b/mm/slab.c
@@ -26,7 +26,7 @@
  * initialized objects.
  *
  * This means, that your constructor is used only for newly allocated
- * slabs and you must pass objects with the same intializations to
+ * slabs and you must pass objects with the same initializations to
 * kmem_cache_free.
 *
 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
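The rule stated above, that constructors run only when a fresh slab page is populated, never per kmem_cache_alloc(), means objects must go back to kmem_cache_free() still in their constructed state. A hedged usage sketch; the constructor prototype changed several times around this period, so treat the three-argument form as illustrative, and the foo_* names as hypothetical:

	static void foo_ctor(void *obj, struct kmem_cache *cachep, unsigned long flags)
	{
		struct foo *f = obj;
		spin_lock_init(&f->lock);	/* must still hold when freed */
	}

	cachep = kmem_cache_create("foo_cache", sizeof(struct foo), 0,
				   SLAB_HWCACHE_ALIGN, foo_ctor);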
@@ -1369,7 +1369,7 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
 		 * structure is usually allocated from kmem_cache_create() and
 		 * gets destroyed at kmem_cache_destroy().
 		 */
-		/* fall thru */
+		/* fall through */
 #endif
 	case CPU_UP_CANCELED:
 	case CPU_UP_CANCELED_FROZEN:
@@ -3806,7 +3806,7 @@ const char *kmem_cache_name(struct kmem_cache *cachep)
 EXPORT_SYMBOL_GPL(kmem_cache_name);
 
 /*
- * This initializes kmem_list3 or resizes varioius caches for all nodes.
+ * This initializes kmem_list3 or resizes various caches for all nodes.
  */
 static int alloc_kmemlist(struct kmem_cache *cachep)
 {
diff --git a/mm/swap.c b/mm/swap.c
@@ -5,7 +5,7 @@
  */
 
 /*
- * This file contains the default values for the opereation of the
+ * This file contains the default values for the operation of the
 * Linux VM subsystem. Fine-tuning documentation can be found in
 * Documentation/sysctl/vm.txt.
 * Started 18.12.91
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 2e01af365848..af77e171e339 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -247,7 +247,7 @@ struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
 EXPORT_SYMBOL_GPL(__get_vm_area);
 
 /**
- * get_vm_area - reserve a contingous kernel virtual area
+ * get_vm_area - reserve a contiguous kernel virtual area
  * @size:	size of the area
  * @flags:	%VM_IOREMAP for I/O mappings or VM_ALLOC
 *
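get_vm_area() only reserves the virtual range; wiring physical pages into it is a separate step, which is why it is the first half of ioremap()-style mappings. A minimal hedged sketch:

	struct vm_struct *area;

	area = get_vm_area(size, VM_IOREMAP);	/* reserve VA space only */
	if (!area)
		return NULL;
	/* area->addr is the page-aligned virtual base; the caller then
	 * maps physical pages into it (e.g. via the ioremap machinery). */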
@@ -303,7 +303,7 @@ found:
 }
 
 /**
- * remove_vm_area - find and remove a contingous kernel virtual area
+ * remove_vm_area - find and remove a continuous kernel virtual area
  * @addr:	base address
  *
  * Search for the kernel VM area starting at @addr, and remove it.
@@ -364,7 +364,7 @@ static void __vunmap(void *addr, int deallocate_pages)
  * vfree - release memory allocated by vmalloc()
  * @addr:	memory base address
  *
- * Free the virtually contiguous memory area starting at @addr, as
+ * Free the virtually continuous memory area starting at @addr, as
  * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
  * NULL, no operation is performed.
 *
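For completeness, the pairing this kernel-doc describes: every pointer obtained from vmalloc(), vmalloc_32() or __vmalloc() is returned through vfree(), and passing NULL is an explicit no-op. A minimal sketch:

	void *buf = vmalloc(1 << 20);	/* 1 MiB, virtually contiguous */
	if (!buf)
		return -ENOMEM;
	/* ... use buf ... */
	vfree(buf);			/* vfree(NULL) would simply return */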
diff --git a/mm/vmscan.c b/mm/vmscan.c
index cb474cc99645..e5a9597e3bbc 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -141,7 +141,7 @@ EXPORT_SYMBOL(unregister_shrinker);
  * percentages of the lru and ageable caches. This should balance the seeks
  * generated by these structures.
  *
- * If the vm encounted mapped pages on the LRU it increase the pressure on
+ * If the vm encountered mapped pages on the LRU it increase the pressure on
 * slab to avoid swapping.
 *
 * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
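This seeks balancing is what register_shrinker(), exported just above the hunk, feeds: each shrinker reports how many objects it caches and what recreating one costs in seeks. A hedged sketch using the two-argument callback of this era; foo_free_some() and foo_cached_count() are hypothetical:

	static int foo_shrink(int nr_to_scan, gfp_t gfp_mask)
	{
		if (nr_to_scan)
			foo_free_some(nr_to_scan);	/* drop up to nr_to_scan objects */
		return foo_cached_count();		/* remaining objects, or -1 to refuse */
	}

	static struct shrinker foo_shrinker = {
		.shrink	= foo_shrink,
		.seeks	= DEFAULT_SEEKS,	/* relative recreation cost */
	};

	register_shrinker(&foo_shrinker);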