author | Simon Arlott <simon@fire.lp0.eu> | 2007-10-19 19:27:18 -0400
committer | Adrian Bunk <bunk@kernel.org> | 2007-10-19 19:27:18 -0400
commit | 183ff22bb6bd8188c904ebfb479656ae52230b72 (patch)
tree | 425207fc9cdca03df64ee8241ba764c75db4d8d1 /mm/page_alloc.c
parent | 676b1855de0a18100b3c340084eb8ef72bde4fb1 (diff)
spelling fixes: mm/
Spelling fixes in mm/.
Signed-off-by: Simon Arlott <simon@fire.lp0.eu>
Signed-off-by: Adrian Bunk <bunk@kernel.org>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r-- | mm/page_alloc.c | 8
1 file changed, 4 insertions, 4 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 43f757fcf30f..da69d833e067 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -123,7 +123,7 @@ static unsigned long __meminitdata dma_reserve;
 
 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
 /*
- * MAX_ACTIVE_REGIONS determines the maxmimum number of distinct
+ * MAX_ACTIVE_REGIONS determines the maximum number of distinct
  * ranges of memory (RAM) that may be registered with add_active_range().
  * Ranges passed to add_active_range() will be merged if possible
  * so the number of times add_active_range() can be called is
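
The comment touched by this hunk describes add_active_range() merging adjacent registrations so that MAX_ACTIVE_REGIONS is not exhausted. The following is a standalone userspace sketch of that merging idea, not the kernel implementation; the structure names, limits, and sample data are invented for illustration.

```c
/*
 * Userspace sketch (not the kernel code): register a PFN range for a node,
 * merging with an existing entry when the ranges overlap or touch, in the
 * spirit of the add_active_range() comment above.
 */
#include <stdio.h>

#define MAX_ACTIVE_REGIONS 8

struct active_region {
	unsigned long start_pfn;
	unsigned long end_pfn;	/* exclusive */
	int nid;
};

static struct active_region regions[MAX_ACTIVE_REGIONS];
static int nr_regions;

static int add_active_range(int nid, unsigned long start_pfn,
			    unsigned long end_pfn)
{
	int i;

	/* Merge into an existing entry for the same node if possible. */
	for (i = 0; i < nr_regions; i++) {
		if (regions[i].nid != nid)
			continue;
		if (start_pfn <= regions[i].end_pfn &&
		    end_pfn >= regions[i].start_pfn) {
			if (start_pfn < regions[i].start_pfn)
				regions[i].start_pfn = start_pfn;
			if (end_pfn > regions[i].end_pfn)
				regions[i].end_pfn = end_pfn;
			return 0;
		}
	}

	/* Otherwise append a new entry, if there is room. */
	if (nr_regions >= MAX_ACTIVE_REGIONS)
		return -1;
	regions[nr_regions].start_pfn = start_pfn;
	regions[nr_regions].end_pfn = end_pfn;
	regions[nr_regions].nid = nid;
	nr_regions++;
	return 0;
}

int main(void)
{
	add_active_range(0, 0x000, 0x100);
	add_active_range(0, 0x100, 0x200);	/* touches the first range: merged */
	add_active_range(1, 0x400, 0x500);	/* different node: new entry */

	for (int i = 0; i < nr_regions; i++)
		printf("nid %d: %#lx-%#lx\n", regions[i].nid,
		       regions[i].start_pfn, regions[i].end_pfn);
	return 0;
}
```

Merging at registration time is what keeps the number of stored regions small even when firmware reports many adjoining chunks of RAM.
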
@@ -1260,7 +1260,7 @@ int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
  * skip over zones that are not allowed by the cpuset, or that have
  * been recently (in last second) found to be nearly full. See further
  * comments in mmzone.h. Reduces cache footprint of zonelist scans
- * that have to skip over alot of full or unallowed zones.
+ * that have to skip over a lot of full or unallowed zones.
  *
  * If the zonelist cache is present in the passed in zonelist, then
  * returns a pointer to the allowed node mask (either the current
@@ -2358,7 +2358,7 @@ void build_all_zonelists(void)
 		__build_all_zonelists(NULL);
 		cpuset_init_current_mems_allowed();
 	} else {
-		/* we have to stop all cpus to guaranntee there is no user
+		/* we have to stop all cpus to guarantee there is no user
 		   of zonelist */
 		stop_machine_run(__build_all_zonelists, NULL, NR_CPUS);
 		/* cpuset refresh routine should be here */
@@ -2864,7 +2864,7 @@ static int __meminit first_active_region_index_in_nid(int nid)
 
 /*
  * Basic iterator support. Return the next active range of PFNs for a node
- * Note: nid == MAX_NUMNODES returns next region regardles of node
+ * Note: nid == MAX_NUMNODES returns next region regardless of node
  */
 static int __meminit next_active_region_index_in_nid(int index, int nid)
 {
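
The final hunk corrects the comment on next_active_region_index_in_nid(), the iterator that walks the registered PFN ranges and treats nid == MAX_NUMNODES as "any node". Below is a standalone userspace sketch of that iterator pattern; it mirrors the kernel's names, but the array contents, limits, and the loop in main() are invented for illustration.

```c
/*
 * Userspace sketch (not kernel code) of the active-range iterator described
 * in the comment above: return the index of the next registered range that
 * belongs to `nid`, where nid == MAX_NUMNODES means "any node".
 */
#include <stdio.h>

#define MAX_NUMNODES 4

struct node_active_region {
	unsigned long start_pfn;
	unsigned long end_pfn;
	int nid;
};

static struct node_active_region early_node_map[] = {
	{ 0x0000, 0x00a0, 0 },
	{ 0x0100, 0x8000, 0 },
	{ 0x8000, 0xc000, 1 },
};
static const int nr_nodemap_entries =
	sizeof(early_node_map) / sizeof(early_node_map[0]);

/* Return the next active-range index for `nid` after `index`, or -1. */
static int next_active_region_index_in_nid(int index, int nid)
{
	for (index = index + 1; index < nr_nodemap_entries; index++)
		if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
			return index;
	return -1;
}

int main(void)
{
	int i, nid = 0;

	/* Walk every range on node 0, starting the search before index 0. */
	for (i = next_active_region_index_in_nid(-1, nid); i != -1;
	     i = next_active_region_index_in_nid(i, nid))
		printf("node %d: PFNs %#lx-%#lx\n", nid,
		       early_node_map[i].start_pfn, early_node_map[i].end_pfn);
	return 0;
}
```

Passing MAX_NUMNODES instead of a real node id would make the same loop visit every registered range regardless of which node owns it, which is the case the corrected comment calls out.
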