author | James Bottomley <jejb@mulgrave.il.steeleye.com> | 2006-06-10 14:47:26 -0400
---|---|---
committer | James Bottomley <jejb@mulgrave.il.steeleye.com> | 2006-06-10 14:47:26 -0400
commit | f0cd91a68acdc9b49d7f6738b514a426da627649 (patch) |
tree | 8ad73564015794197583b094217ae0a71e71e753 /mm |
parent | 60eef25701d25e99c991dd0f4a9f3832a0c3ad3e (diff) |
parent | 128e6ced247cda88f96fa9f2e4ba8b2c4a681560 (diff) |
Merge ../linux-2.6
Diffstat (limited to 'mm')
-rw-r--r-- | mm/filemap.c | 32 |
-rw-r--r-- | mm/madvise.c | 3 |
-rw-r--r-- | mm/memory_hotplug.c | 14 |
-rw-r--r-- | mm/mempolicy.c | 1 |
-rw-r--r-- | mm/migrate.c | 11 |
-rw-r--r-- | mm/oom_kill.c | 71 |
-rw-r--r-- | mm/page_alloc.c | 42 |
-rw-r--r-- | mm/shmem.c | 3 |
-rw-r--r-- | mm/slab.c | 51 |
-rw-r--r-- | mm/slob.c | 10 |
-rw-r--r-- | mm/sparse.c | 16 |
-rw-r--r-- | mm/vmscan.c | 2 |
12 files changed, 170 insertions, 86 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index 3ef20739e725..fd57442186cb 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -697,6 +697,38 @@ unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
 	return ret;
 }

+/**
+ * find_get_pages_contig - gang contiguous pagecache lookup
+ * @mapping:	The address_space to search
+ * @index:	The starting page index
+ * @nr_pages:	The maximum number of pages
+ * @pages:	Where the resulting pages are placed
+ *
+ * find_get_pages_contig() works exactly like find_get_pages(), except
+ * that the returned number of pages are guaranteed to be contiguous.
+ *
+ * find_get_pages_contig() returns the number of pages which were found.
+ */
+unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
+			       unsigned int nr_pages, struct page **pages)
+{
+	unsigned int i;
+	unsigned int ret;
+
+	read_lock_irq(&mapping->tree_lock);
+	ret = radix_tree_gang_lookup(&mapping->page_tree,
+				(void **)pages, index, nr_pages);
+	for (i = 0; i < ret; i++) {
+		if (pages[i]->mapping == NULL || pages[i]->index != index)
+			break;
+
+		page_cache_get(pages[i]);
+		index++;
+	}
+	read_unlock_irq(&mapping->tree_lock);
+	return i;
+}
+
 /*
  * Like find_get_pages, except we only return pages which are tagged with
  * `tag'.  We update *index to index the next page for the traversal.
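The new find_get_pages_contig() returns only the leading contiguous run of the gang lookup: it walks the results and stops at the first page whose index does not match the expected, incrementing index. A minimal userspace sketch of that stop-at-first-gap logic, with made-up index values standing in for page->index (this is an illustration, not kernel code):

```c
#include <stdio.h>

/*
 * Hypothetical sketch of the "leading contiguous run" check used by
 * find_get_pages_contig(): walk the lookup results and stop at the first
 * entry whose index does not match the expected (incrementing) index.
 * indexes[] stands in for the page->index values returned by the
 * radix-tree gang lookup.
 */
static unsigned int leading_contig_run(const unsigned long *indexes,
				       unsigned int nr, unsigned long start)
{
	unsigned int i;

	for (i = 0; i < nr; i++) {
		if (indexes[i] != start)
			break;		/* first gap ends the run */
		start++;
	}
	return i;	/* number of contiguous entries starting at 'start' */
}

int main(void)
{
	unsigned long idx[] = { 10, 11, 12, 14, 15 };	/* gap at 13 */

	/* prints 3: only indexes 10, 11 and 12 form a contiguous run */
	printf("%u\n", leading_contig_run(idx, 5, 10));
	return 0;
}
```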
diff --git a/mm/madvise.c b/mm/madvise.c
index af3d573b0141..4e196155a0c3 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -168,6 +168,9 @@ static long madvise_remove(struct vm_area_struct *vma,
 		return -EINVAL;
 	}

+	if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE))
+		return -EACCES;
+
 	mapping = vma->vm_file->f_mapping;

 	offset = (loff_t)(start - vma->vm_start)
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 1fe76d963ac2..70df5c0d957e 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -69,12 +69,16 @@ int __add_pages(struct zone *zone, unsigned long phys_start_pfn,
 	for (i = 0; i < nr_pages; i += PAGES_PER_SECTION) {
 		err = __add_section(zone, phys_start_pfn + i);

-		if (err)
+		/* We want to keep adding the rest of the
+		 * sections if the first ones already exist
+		 */
+		if (err && (err != -EEXIST))
 			break;
 	}

 	return err;
 }
+EXPORT_SYMBOL_GPL(__add_pages);

 static void grow_zone_span(struct zone *zone,
 		unsigned long start_pfn, unsigned long end_pfn)
@@ -87,8 +91,8 @@ static void grow_zone_span(struct zone *zone,
 	if (start_pfn < zone->zone_start_pfn)
 		zone->zone_start_pfn = start_pfn;

-	if (end_pfn > old_zone_end_pfn)
-		zone->spanned_pages = end_pfn - zone->zone_start_pfn;
+	zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
+				zone->zone_start_pfn;

 	zone_span_writeunlock(zone);
 }
@@ -102,8 +106,8 @@ static void grow_pgdat_span(struct pglist_data *pgdat,
 	if (start_pfn < pgdat->node_start_pfn)
 		pgdat->node_start_pfn = start_pfn;

-	if (end_pfn > old_pgdat_end_pfn)
-		pgdat->node_spanned_pages = end_pfn - pgdat->node_start_pfn;
+	pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) -
+					pgdat->node_start_pfn;
 }

 int online_pages(unsigned long pfn, unsigned long nr_pages)
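The grow_zone_span()/grow_pgdat_span() change matters when only the start of the hot-added range moves down: the old code skipped the spanned_pages update unless the new end was larger, leaving the span stale after the start pfn had been lowered, while the max() form always recomputes the span from whichever end is larger. A small standalone sketch of that arithmetic, with invented pfn values:

```c
#include <stdio.h>

#define max(a, b) ((a) > (b) ? (a) : (b))

/*
 * Userspace sketch of the grow_zone_span() fix, with made-up numbers.
 * Old logic: spanned_pages only recomputed when end_pfn > old_end, so
 * lowering just the start left the span too small.  Fixed logic: span is
 * always max(old end, new end) minus the (possibly lowered) start.
 */
int main(void)
{
	unsigned long zone_start = 1000, spanned = 500;	/* zone covers 1000-1500 */
	unsigned long old_end = zone_start + spanned;
	unsigned long start_pfn = 800, end_pfn = 1200;	/* new range extends downward */

	if (start_pfn < zone_start)
		zone_start = start_pfn;

	/* old logic: end_pfn <= old_end, so spanned stays 500 (wrong: 800-1500 spans 700) */

	/* fixed logic */
	spanned = max(old_end, end_pfn) - zone_start;
	printf("start=%lu spanned=%lu\n", zone_start, spanned);	/* start=800 spanned=700 */
	return 0;
}
```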
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index dec8249e972d..8778f58880c4 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1761,7 +1761,6 @@ static void gather_stats(struct page *page, void *private, int pte_dirty)
 		md->mapcount_max = count;

 	md->node[page_to_nid(page)]++;
-	cond_resched();
 }

 #ifdef CONFIG_HUGETLB_PAGE
diff --git a/mm/migrate.c b/mm/migrate.c
index d444229f2599..1c25040693d2 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -439,6 +439,17 @@ redo:
 			goto unlock_both;
 		}

+		/* Make sure the dirty bit is up to date */
+		if (try_to_unmap(page, 1) == SWAP_FAIL) {
+			rc = -EPERM;
+			goto unlock_both;
+		}
+
+		if (page_mapcount(page)) {
+			rc = -EAGAIN;
+			goto unlock_both;
+		}
+
 		/*
 		 * Default handling if a filesystem does not provide
 		 * a migration function. We can only migrate clean
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 78747afad6b0..042e6436c3ee 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -46,15 +46,25 @@
 unsigned long badness(struct task_struct *p, unsigned long uptime)
 {
 	unsigned long points, cpu_time, run_time, s;
-	struct list_head *tsk;
+	struct mm_struct *mm;
+	struct task_struct *child;

-	if (!p->mm)
+	task_lock(p);
+	mm = p->mm;
+	if (!mm) {
+		task_unlock(p);
 		return 0;
+	}

 	/*
 	 * The memory size of the process is the basis for the badness.
 	 */
-	points = p->mm->total_vm;
+	points = mm->total_vm;
+
+	/*
+	 * After this unlock we can no longer dereference local variable `mm'
+	 */
+	task_unlock(p);

 	/*
 	 * Processes which fork a lot of child processes are likely
@@ -64,11 +74,11 @@ unsigned long badness(struct task_struct *p, unsigned long uptime)
 	 * child is eating the vast majority of memory, adding only half
 	 * to the parents will make the child our kill candidate of choice.
 	 */
-	list_for_each(tsk, &p->children) {
-		struct task_struct *chld;
-		chld = list_entry(tsk, struct task_struct, sibling);
-		if (chld->mm != p->mm && chld->mm)
-			points += chld->mm->total_vm/2 + 1;
+	list_for_each_entry(child, &p->children, sibling) {
+		task_lock(child);
+		if (child->mm != mm && child->mm)
+			points += child->mm->total_vm/2 + 1;
+		task_unlock(child);
 	}

 	/*
@@ -244,17 +254,24 @@ static void __oom_kill_task(task_t *p, const char *message)
 	force_sig(SIGKILL, p);
 }

-static struct mm_struct *oom_kill_task(task_t *p, const char *message)
+static int oom_kill_task(task_t *p, const char *message)
 {
-	struct mm_struct *mm = get_task_mm(p);
+	struct mm_struct *mm;
 	task_t * g, * q;

-	if (!mm)
-		return NULL;
-	if (mm == &init_mm) {
-		mmput(mm);
-		return NULL;
-	}
+	mm = p->mm;
+
+	/* WARNING: mm may not be dereferenced since we did not obtain its
+	 * value from get_task_mm(p). This is OK since all we need to do is
+	 * compare mm to q->mm below.
+	 *
+	 * Furthermore, even if mm contains a non-NULL value, p->mm may
+	 * change to NULL at any time since we do not hold task_lock(p).
+	 * However, this is of no concern to us.
+	 */
+
+	if (mm == NULL || mm == &init_mm)
+		return 1;

 	__oom_kill_task(p, message);
 	/*
@@ -266,13 +283,12 @@ static struct mm_struct *oom_kill_task(task_t *p, const char *message)
 		__oom_kill_task(q, message);
 	while_each_thread(g, q);

-	return mm;
+	return 0;
 }

-static struct mm_struct *oom_kill_process(struct task_struct *p,
-				unsigned long points, const char *message)
+static int oom_kill_process(struct task_struct *p, unsigned long points,
+			    const char *message)
 {
-	struct mm_struct *mm;
 	struct task_struct *c;
 	struct list_head *tsk;

@@ -283,9 +299,8 @@ static struct mm_struct *oom_kill_process(struct task_struct *p,
 		c = list_entry(tsk, struct task_struct, sibling);
 		if (c->mm == p->mm)
 			continue;
-		mm = oom_kill_task(c, message);
-		if (mm)
-			return mm;
+		if (!oom_kill_task(c, message))
+			return 0;
 	}
 	return oom_kill_task(p, message);
 }
@@ -300,7 +315,6 @@ static struct mm_struct *oom_kill_process(struct task_struct *p,
  */
 void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order)
 {
-	struct mm_struct *mm = NULL;
 	task_t *p;
 	unsigned long points = 0;

@@ -320,12 +334,12 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order)
 	 */
 	switch (constrained_alloc(zonelist, gfp_mask)) {
 	case CONSTRAINT_MEMORY_POLICY:
-		mm = oom_kill_process(current, points,
+		oom_kill_process(current, points,
 				"No available memory (MPOL_BIND)");
 		break;

 	case CONSTRAINT_CPUSET:
-		mm = oom_kill_process(current, points,
+		oom_kill_process(current, points,
 				"No available memory in cpuset");
 		break;

@@ -347,8 +361,7 @@ retry:
 			panic("Out of memory and no killable processes...\n");
 		}

-		mm = oom_kill_process(p, points, "Out of memory");
-		if (!mm)
+		if (oom_kill_process(p, points, "Out of memory"))
 			goto retry;

 		break;
@@ -357,8 +370,6 @@ retry:
 out:
 	read_unlock(&tasklist_lock);
 	cpuset_unlock();
-	if (mm)
-		mmput(mm);

 	/*
 	 * Give "p" a good chance of killing itself before we
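The badness() children loop above adds half of each child's VM size (plus one) to the parent's score, so a heavily forking parent outranks any single child. A toy model of that accumulation with invented numbers (not kernel code):

```c
#include <stdio.h>

/*
 * Toy model of the badness() child accounting shown above: each child that
 * has its own mm contributes half of its total_vm plus one to the parent's
 * score.  The VM sizes below are made up for illustration.
 */
int main(void)
{
	unsigned long parent_vm = 1000;
	unsigned long child_vm[] = { 4000, 250, 0 };	/* 0: child shares the parent's mm */
	unsigned long points = parent_vm;
	int i;

	for (i = 0; i < 3; i++) {
		if (child_vm[i])	/* skip children without their own mm */
			points += child_vm[i] / 2 + 1;
	}
	/* 1000 + 2001 + 126 = 3127: the forking parent scores above each child alone */
	printf("points=%lu\n", points);
	return 0;
}
```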
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 97d6827c7d66..253a450c400d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -39,6 +39,7 @@
 #include <linux/mempolicy.h>

 #include <asm/tlbflush.h>
+#include <asm/div64.h>
 #include "internal.h"

 /*
@@ -232,11 +233,13 @@ static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
  * zone->lock is already acquired when we use these.
  * So, we don't need atomic page->flags operations here.
  */
-static inline unsigned long page_order(struct page *page) {
+static inline unsigned long page_order(struct page *page)
+{
 	return page_private(page);
 }

-static inline void set_page_order(struct page *page, int order) {
+static inline void set_page_order(struct page *page, int order)
+{
 	set_page_private(page, order);
 	__SetPageBuddy(page);
 }
@@ -299,9 +302,9 @@ static inline int page_is_buddy(struct page *page, int order)

 	if (PageBuddy(page) && page_order(page) == order) {
 		BUG_ON(page_count(page) != 0);
-               return 1;
-       }
-       return 0;
+		return 1;
+	}
+	return 0;
 }

 /*
@@ -948,7 +951,7 @@ restart:
 		goto got_pg;

 	do {
-		if (cpuset_zone_allowed(*z, gfp_mask))
+		if (cpuset_zone_allowed(*z, gfp_mask|__GFP_HARDWALL))
 			wakeup_kswapd(*z, order);
 	} while (*(++z));

@@ -967,7 +970,8 @@ restart:
 		alloc_flags |= ALLOC_HARDER;
 	if (gfp_mask & __GFP_HIGH)
 		alloc_flags |= ALLOC_HIGH;
-	alloc_flags |= ALLOC_CPUSET;
+	if (wait)
+		alloc_flags |= ALLOC_CPUSET;

 	/*
 	 * Go through the zonelist again. Let __GFP_HIGH and allocations
@@ -1960,7 +1964,7 @@ static inline void free_zone_pagesets(int cpu)
 	}
 }

-static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb,
+static int pageset_cpuup_callback(struct notifier_block *nfb,
 		unsigned long action,
 		void *hcpu)
 {
@@ -2121,14 +2125,22 @@ static void __init alloc_node_mem_map(struct pglist_data *pgdat)
 #ifdef CONFIG_FLAT_NODE_MEM_MAP
 	/* ia64 gets its own node_mem_map, before this, without bootmem */
 	if (!pgdat->node_mem_map) {
-		unsigned long size;
+		unsigned long size, start, end;
 		struct page *map;

-		size = (pgdat->node_spanned_pages + 1) * sizeof(struct page);
+		/*
+		 * The zone's endpoints aren't required to be MAX_ORDER
+		 * aligned but the node_mem_map endpoints must be in order
+		 * for the buddy allocator to function correctly.
+		 */
+		start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
+		end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
+		end = ALIGN(end, MAX_ORDER_NR_PAGES);
+		size = (end - start) * sizeof(struct page);
 		map = alloc_remap(pgdat->node_id, size);
 		if (!map)
 			map = alloc_bootmem_node(pgdat, size);
-		pgdat->node_mem_map = map;
+		pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
 	}
 #ifdef CONFIG_FLATMEM
 	/*
@@ -2564,9 +2576,11 @@ void setup_per_zone_pages_min(void)
 	}

 	for_each_zone(zone) {
-		unsigned long tmp;
+		u64 tmp;
+
 		spin_lock_irqsave(&zone->lru_lock, flags);
-		tmp = (pages_min * zone->present_pages) / lowmem_pages;
+		tmp = (u64)pages_min * zone->present_pages;
+		do_div(tmp, lowmem_pages);
 		if (is_highmem(zone)) {
 			/*
 			 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
@@ -2593,8 +2607,8 @@ void setup_per_zone_pages_min(void)
 			zone->pages_min = tmp;
 		}

-		zone->pages_low  = zone->pages_min + tmp / 4;
-		zone->pages_high = zone->pages_min + tmp / 2;
+		zone->pages_low  = zone->pages_min + (tmp >> 2);
+		zone->pages_high = zone->pages_min + (tmp >> 1);
 		spin_unlock_irqrestore(&zone->lru_lock, flags);
 	}

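The alloc_node_mem_map() hunk sizes the flat node_mem_map over the MAX_ORDER-aligned range around the node and then offsets the stored pointer so that indexing by pfn still lands on the right struct page. A userspace sketch of that sizing arithmetic, with stand-in constants (MAX_ORDER_NR_PAGES and the struct page size are chosen for illustration only):

```c
#include <stdio.h>

#define MAX_ORDER_NR_PAGES	(1UL << 10)	/* illustrative: 1024 pages per MAX_ORDER block */
#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))
#define PAGE_STRUCT_SIZE	56UL		/* stand-in for sizeof(struct page) */

/*
 * Sketch of the node_mem_map sizing above: round the node's start pfn down
 * and its end pfn up to MAX_ORDER boundaries, allocate one struct page per
 * pfn in that range, and remember how far node_start_pfn sits inside it.
 */
int main(void)
{
	unsigned long node_start_pfn = 1500, node_spanned_pages = 3000;
	unsigned long start, end, size;

	start = node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);			/* 1024 */
	end = ALIGN(node_start_pfn + node_spanned_pages, MAX_ORDER_NR_PAGES);	/* 5120 */
	size = (end - start) * PAGE_STRUCT_SIZE;

	printf("map covers pfns %lu-%lu, %lu bytes, node_mem_map offset %lu entries\n",
	       start, end, size, node_start_pfn - start);
	return 0;
}
```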
diff --git a/mm/shmem.c b/mm/shmem.c
index 37eaf42ed2c6..4c5e68e4e9ae 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -46,6 +46,8 @@
 #include <linux/mempolicy.h>
 #include <linux/namei.h>
 #include <linux/ctype.h>
+#include <linux/migrate.h>
+
 #include <asm/uaccess.h>
 #include <asm/div64.h>
 #include <asm/pgtable.h>
@@ -2173,6 +2175,7 @@ static struct address_space_operations shmem_aops = {
 	.prepare_write	= shmem_prepare_write,
 	.commit_write	= simple_commit_write,
 #endif
+	.migratepage	= migrate_page,
 };

 static struct file_operations shmem_file_operations = {
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -207,11 +207,6 @@ typedef unsigned int kmem_bufctl_t;
 #define	BUFCTL_ACTIVE	(((kmem_bufctl_t)(~0U))-2)
 #define	SLAB_LIMIT	(((kmem_bufctl_t)(~0U))-3)

-/* Max number of objs-per-slab for caches which use off-slab slabs.
- * Needed to avoid a possible looping condition in cache_grow().
- */
-static unsigned long offslab_limit;
-
 /*
  * struct slab
  *
@@ -700,6 +695,14 @@ static enum {
 	FULL
 } g_cpucache_up;

+/*
+ * used by boot code to determine if it can use slab based allocator
+ */
+int slab_is_available(void)
+{
+	return g_cpucache_up == FULL;
+}
+
 static DEFINE_PER_CPU(struct work_struct, reap_work);

 static void free_block(struct kmem_cache *cachep, void **objpp, int len,
@@ -979,7 +982,8 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
 		 * That way we could avoid the overhead of putting the objects
 		 * into the free lists and getting them back later.
 		 */
-		transfer_objects(rl3->shared, ac, ac->limit);
+		if (rl3->shared)
+			transfer_objects(rl3->shared, ac, ac->limit);

 		free_block(cachep, ac->entry, ac->avail, node);
 		ac->avail = 0;
@@ -1036,7 +1040,7 @@ static inline void free_alien_cache(struct array_cache **ac_ptr)

 #endif

-static int __devinit cpuup_callback(struct notifier_block *nfb,
+static int cpuup_callback(struct notifier_block *nfb,
 				    unsigned long action, void *hcpu)
 {
 	long cpu = (long)hcpu;
@@ -1347,12 +1351,6 @@ void __init kmem_cache_init(void)
 					NULL, NULL);
 		}

-		/* Inc off-slab bufctl limit until the ceiling is hit. */
-		if (!(OFF_SLAB(sizes->cs_cachep))) {
-			offslab_limit = sizes->cs_size - sizeof(struct slab);
-			offslab_limit /= sizeof(kmem_bufctl_t);
-		}
-
 		sizes->cs_dmacachep = kmem_cache_create(names->name_dma,
 					sizes->cs_size,
 					ARCH_KMALLOC_MINALIGN,
@@ -1771,6 +1769,7 @@ static void set_up_list3s(struct kmem_cache *cachep, int index)
 static size_t calculate_slab_order(struct kmem_cache *cachep,
 			size_t size, size_t align, unsigned long flags)
 {
+	unsigned long offslab_limit;
 	size_t left_over = 0;
 	int gfporder;

@@ -1782,9 +1781,18 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
 		if (!num)
 			continue;

-		/* More than offslab_limit objects will cause problems */
-		if ((flags & CFLGS_OFF_SLAB) && num > offslab_limit)
-			break;
+		if (flags & CFLGS_OFF_SLAB) {
+			/*
+			 * Max number of objs-per-slab for caches which
+			 * use off-slab slabs. Needed to avoid a possible
+			 * looping condition in cache_grow().
+			 */
+			offslab_limit = size - sizeof(struct slab);
+			offslab_limit /= sizeof(kmem_bufctl_t);
+
+			if (num > offslab_limit)
+				break;
+		}

 		/* Found something acceptable - save it away */
 		cachep->num = num;
@@ -2191,11 +2199,14 @@ static void drain_cpu_caches(struct kmem_cache *cachep)
 	check_irq_on();
 	for_each_online_node(node) {
 		l3 = cachep->nodelists[node];
-		if (l3) {
+		if (l3 && l3->alien)
+			drain_alien_cache(cachep, l3->alien);
+	}
+
+	for_each_online_node(node) {
+		l3 = cachep->nodelists[node];
+		if (l3)
 			drain_array(cachep, l3, l3->shared, 1, node);
-			if (l3->alien)
-				drain_alien_cache(cachep, l3->alien);
-		}
 	}
 }

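The calculate_slab_order() hunk moves the off-slab ceiling inline: the limit is simply how many kmem_bufctl_t entries fit in `size` bytes after the struct slab header, and a slab may not manage more objects than that. A sketch of that arithmetic with illustrative stand-in sizes (not the kernel's actual struct sizes):

```c
#include <stdio.h>

/*
 * Sketch of the offslab_limit arithmetic now computed inside
 * calculate_slab_order(): subtract the slab descriptor header from 'size'
 * and divide by the per-object bufctl entry size.  The constants below are
 * illustrative stand-ins for sizeof(struct slab) and sizeof(kmem_bufctl_t).
 */
int main(void)
{
	unsigned long slab_header = 32;		/* stand-in for sizeof(struct slab) */
	unsigned long bufctl = 4;		/* stand-in for sizeof(kmem_bufctl_t) */
	unsigned long size = 256;
	unsigned long offslab_limit;

	offslab_limit = (size - slab_header) / bufctl;
	/* (256 - 32) / 4 = 56: at most 56 objects per off-slab slab here */
	printf("offslab_limit=%lu\n", offslab_limit);
	return 0;
}
```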
diff --git a/mm/slob.c b/mm/slob.c
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -354,9 +354,7 @@ void *__alloc_percpu(size_t size)
 	if (!pdata)
 		return NULL;

-	for (i = 0; i < NR_CPUS; i++) {
-		if (!cpu_possible(i))
-			continue;
+	for_each_possible_cpu(i) {
 		pdata->ptrs[i] = kmalloc(size, GFP_KERNEL);
 		if (!pdata->ptrs[i])
 			goto unwind_oom;
@@ -383,11 +381,9 @@ free_percpu(const void *objp)
 	int i;
 	struct percpu_data *p = (struct percpu_data *) (~(unsigned long) objp);

-	for (i = 0; i < NR_CPUS; i++) {
-		if (!cpu_possible(i))
-			continue;
+	for_each_possible_cpu(i)
 		kfree(p->ptrs[i]);
-	}
+
 	kfree(p);
 }
 EXPORT_SYMBOL(free_percpu);
diff --git a/mm/sparse.c b/mm/sparse.c
index 0a51f36ba3a1..100040c0dfb6 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -32,7 +32,10 @@ static struct mem_section *sparse_index_alloc(int nid)
 	unsigned long array_size = SECTIONS_PER_ROOT *
 				   sizeof(struct mem_section);

-	section = alloc_bootmem_node(NODE_DATA(nid), array_size);
+	if (slab_is_available())
+		section = kmalloc_node(array_size, GFP_KERNEL, nid);
+	else
+		section = alloc_bootmem_node(NODE_DATA(nid), array_size);

 	if (section)
 		memset(section, 0, array_size);
@@ -84,11 +87,8 @@ int __section_nr(struct mem_section* ms)
 	unsigned long root_nr;
 	struct mem_section* root;

-	for (root_nr = 0;
-	     root_nr < NR_MEM_SECTIONS;
-	     root_nr += SECTIONS_PER_ROOT) {
-		root = __nr_to_section(root_nr);
-
+	for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
+		root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
 		if (!root)
 			continue;

@@ -281,9 +281,9 @@ int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,

 	ret = sparse_init_one_section(ms, section_nr, memmap);

-	if (ret <= 0)
-		__kfree_section_memmap(memmap, nr_pages);
 out:
 	pgdat_resize_unlock(pgdat, &flags);
+	if (ret <= 0)
+		__kfree_section_memmap(memmap, nr_pages);
 	return ret;
 }
diff --git a/mm/vmscan.c b/mm/vmscan.c
index acdf001d6941..4649a63a8cb6 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1328,7 +1328,7 @@ repeat:
    not required for correctness.  So if the last cpu in a node goes
    away, we get changed to run anywhere: as the first one comes back,
    restore their cpu bindings. */
-static int __devinit cpu_callback(struct notifier_block *nfb,
+static int cpu_callback(struct notifier_block *nfb,
 				  unsigned long action, void *hcpu)
 {
 	pg_data_t *pgdat;