Diffstat (limited to 'mm')
-rw-r--r--   mm/bootmem.c        |  9
-rw-r--r--   mm/migrate.c        |  8
-rw-r--r--   mm/mmap.c           | 34
-rw-r--r--   mm/nommu.c          | 18
-rw-r--r--   mm/page-writeback.c |  5
-rw-r--r--   mm/page_alloc.c     | 70
-rw-r--r--   mm/slab.c           | 30
7 files changed, 133 insertions, 41 deletions
diff --git a/mm/bootmem.c b/mm/bootmem.c
index d3e3bd2ffcea..d213feded10d 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -401,7 +401,7 @@ unsigned long __init free_all_bootmem (void)
 	return(free_all_bootmem_core(NODE_DATA(0)));
 }
 
-void * __init __alloc_bootmem(unsigned long size, unsigned long align, unsigned long goal)
+void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align, unsigned long goal)
 {
 	bootmem_data_t *bdata;
 	void *ptr;
@@ -409,7 +409,14 @@ void * __init __alloc_bootmem(unsigned long size, unsigned long align, unsigned
 	list_for_each_entry(bdata, &bdata_list, list)
 		if ((ptr = __alloc_bootmem_core(bdata, size, align, goal, 0)))
 			return(ptr);
+	return NULL;
+}
 
+void * __init __alloc_bootmem(unsigned long size, unsigned long align, unsigned long goal)
+{
+	void *mem = __alloc_bootmem_nopanic(size,align,goal);
+	if (mem)
+		return mem;
 	/*
 	 * Whoops, we cannot satisfy the allocation request.
 	 */
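The bootmem hunks above split the allocator into two entry points: __alloc_bootmem_nopanic() returns NULL when no node can satisfy the request, while __alloc_bootmem() keeps the old panic-on-failure behaviour by wrapping it. A minimal sketch of a boot-time caller that can tolerate failure and would therefore use the new variant (the function name and message below are illustrative, not part of the patch):

	/* Illustrative caller: degrade gracefully instead of panicking at boot. */
	void __init setup_optional_table(unsigned long bytes)
	{
		void *buf = __alloc_bootmem_nopanic(bytes, PAGE_SIZE, 0);

		if (!buf) {
			printk(KERN_WARNING "optional table disabled: out of bootmem\n");
			return;
		}
		/* ... initialise the table at buf ... */
	}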
diff --git a/mm/migrate.c b/mm/migrate.c
index 09f6e4aa87fc..d444229f2599 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -16,8 +16,7 @@
 #include <linux/module.h>
 #include <linux/swap.h>
 #include <linux/pagemap.h>
-#include <linux/buffer_head.h>	/* for try_to_release_page(),
-					buffer_heads_over_limit */
+#include <linux/buffer_head.h>
 #include <linux/mm_inline.h>
 #include <linux/pagevec.h>
 #include <linux/rmap.h>
@@ -28,8 +27,6 @@
 
 #include "internal.h"
 
-#include "internal.h"
-
 /* The maximum number of pages to take off the LRU for migration */
 #define MIGRATE_CHUNK_SIZE 256
 
@@ -176,7 +173,6 @@ unlock_retry:
 retry:
 	return -EAGAIN;
 }
-EXPORT_SYMBOL(swap_page);
 
 /*
  * Remove references for a page and establish the new page with the correct
@@ -234,7 +230,7 @@ int migrate_page_remove_references(struct page *newpage,
 	if (!page_mapping(page) || page_count(page) != nr_refs ||
 			*radix_pointer != page) {
 		write_unlock_irq(&mapping->tree_lock);
-		return 1;
+		return -EAGAIN;
 	}
 
 	/*
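In the last hunk, migrate_page_remove_references() now reports failure as -EAGAIN rather than a bare 1, so the condition (the page's reference count changed underneath us) reaches callers as an ordinary transient errno. A hedged sketch of how a caller might act on it; the surrounding code is assumed here, not shown in this patch:

	/* Sketch only: -EAGAIN now means "try this page again later". */
	rc = migrate_page_remove_references(newpage, page, nr_refs);
	if (rc)
		return rc;	/* was "return 1"; the errno can now be passed straight up */
	/* references are frozen; safe to copy contents and finish the migration */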
diff --git a/mm/mmap.c b/mm/mmap.c
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -121,14 +121,26 @@ int __vm_enough_memory(long pages, int cap_sys_admin)
 		 * only call if we're about to fail.
 		 */
 		n = nr_free_pages();
+
+		/*
+		 * Leave reserved pages. The pages are not for anonymous pages.
+		 */
+		if (n <= totalreserve_pages)
+			goto error;
+		else
+			n -= totalreserve_pages;
+
+		/*
+		 * Leave the last 3% for root
+		 */
 		if (!cap_sys_admin)
 			n -= n / 32;
 		free += n;
 
 		if (free > pages)
 			return 0;
-		vm_unacct_memory(pages);
-		return -ENOMEM;
+
+		goto error;
 	}
 
 	allowed = (totalram_pages - hugetlb_total_pages())
@@ -150,7 +162,7 @@ int __vm_enough_memory(long pages, int cap_sys_admin)
 	 */
 	if (atomic_read(&vm_committed_space) < (long)allowed)
 		return 0;
-
+error:
 	vm_unacct_memory(pages);
 
 	return -ENOMEM;
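Both copies of __vm_enough_memory() (this one and the mm/nommu.c one below) now subtract totalreserve_pages from the free-page estimate used by OVERCOMMIT_GUESS, so memory the kernel keeps in reserve (per-zone lowmem_reserve plus the pages_high watermark, computed by calculate_totalreserve_pages() in mm/page_alloc.c below) is no longer promised to new anonymous mappings. Illustrative arithmetic: with 10,000 free pages and totalreserve_pages = 1,200, the free-memory contribution drops from 10,000 to 8,800 pages, and the existing 3% root reservation then applies on top for non-root callers (8,800 - 8,800/32 = 8,525).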
@@ -220,6 +232,17 @@ asmlinkage unsigned long sys_brk(unsigned long brk)
 
 	if (brk < mm->end_code)
 		goto out;
+
+	/*
+	 * Check against rlimit here. If this check is done later after the test
+	 * of oldbrk with newbrk then it can escape the test and let the data
+	 * segment grow beyond its set limit in the case where the limit is
+	 * not page aligned -Ram Gupta
+	 */
+	rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
+	if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim)
+		goto out;
+
 	newbrk = PAGE_ALIGN(brk);
 	oldbrk = PAGE_ALIGN(mm->brk);
 	if (oldbrk == newbrk)
@@ -232,11 +255,6 @@ asmlinkage unsigned long sys_brk(unsigned long brk)
 			goto out;
 	}
 
-	/* Check against rlimit.. */
-	rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
-	if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim)
-		goto out;
-
 	/* Check against existing mmap mappings. */
 	if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
 		goto out;
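The relocated check fixes the ordering problem described in the new comment: previously the RLIMIT_DATA test sat after the oldbrk == newbrk early exit, so a brk request that stayed within the current last page was granted without ever being checked. Illustrative numbers: with start_data = 0x08049000, a non-page-aligned RLIMIT_DATA of 0x1800 and mm->brk = 0x0804a700, a request for brk = 0x0804a900 gives oldbrk == newbrk == 0x0804b000; the old code took the early exit and grew the data segment to 0x1900 bytes, past the limit, while the reordered check rejects it up front.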
diff --git a/mm/nommu.c b/mm/nommu.c
index db45efac17cc..029fadac0fb5 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1147,14 +1147,26 @@ int __vm_enough_memory(long pages, int cap_sys_admin)
 		 * only call if we're about to fail.
 		 */
 		n = nr_free_pages();
+
+		/*
+		 * Leave reserved pages. The pages are not for anonymous pages.
+		 */
+		if (n <= totalreserve_pages)
+			goto error;
+		else
+			n -= totalreserve_pages;
+
+		/*
+		 * Leave the last 3% for root
+		 */
 		if (!cap_sys_admin)
 			n -= n / 32;
 		free += n;
 
 		if (free > pages)
 			return 0;
-		vm_unacct_memory(pages);
-		return -ENOMEM;
+
+		goto error;
 	}
 
 	allowed = totalram_pages * sysctl_overcommit_ratio / 100;
@@ -1175,7 +1187,7 @@ int __vm_enough_memory(long pages, int cap_sys_admin)
 	 */
 	if (atomic_read(&vm_committed_space) < (long)allowed)
 		return 0;
-
+error:
 	vm_unacct_memory(pages);
 
 	return -ENOMEM;
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 6dcce3a4bbdc..75d7f48b79bb 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -72,13 +72,12 @@ int dirty_background_ratio = 10;
 int vm_dirty_ratio = 40;
 
 /*
- * The interval between `kupdate'-style writebacks, in centiseconds
- * (hundredths of a second)
+ * The interval between `kupdate'-style writebacks, in jiffies
  */
 int dirty_writeback_interval = 5 * HZ;
 
 /*
- * The longest number of centiseconds for which data is allowed to remain dirty
+ * The longest number of jiffies for which data is allowed to remain dirty
  */
 int dirty_expire_interval = 30 * HZ;
 
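The comment correction matters because both values are stored in jiffies (5 * HZ and 30 * HZ); calling them centiseconds is only accurate when HZ = 100. With HZ = 250, for instance, dirty_writeback_interval = 1250 jiffies is still a 5-second interval, not 12.5 seconds.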
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index dc523a1f270d..97d6827c7d66 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -51,6 +51,7 @@ nodemask_t node_possible_map __read_mostly = NODE_MASK_ALL;
 EXPORT_SYMBOL(node_possible_map);
 unsigned long totalram_pages __read_mostly;
 unsigned long totalhigh_pages __read_mostly;
+unsigned long totalreserve_pages __read_mostly;
 long nr_swap_pages;
 int percpu_pagelist_fraction;
 
@@ -151,7 +152,8 @@ static void bad_page(struct page *page)
 			1 << PG_reclaim |
 			1 << PG_slab    |
 			1 << PG_swapcache |
-			1 << PG_writeback );
+			1 << PG_writeback |
+			1 << PG_buddy );
 	set_page_count(page, 0);
 	reset_page_mapcount(page);
 	page->mapping = NULL;
@@ -236,12 +238,12 @@ static inline unsigned long page_order(struct page *page) {
 
 static inline void set_page_order(struct page *page, int order) {
 	set_page_private(page, order);
-	__SetPagePrivate(page);
+	__SetPageBuddy(page);
 }
 
 static inline void rmv_page_order(struct page *page)
 {
-	__ClearPagePrivate(page);
+	__ClearPageBuddy(page);
 	set_page_private(page, 0);
 }
 
@@ -280,11 +282,13 @@ __find_combined_index(unsigned long page_idx, unsigned int order)
  * This function checks whether a page is free && is the buddy
  * we can do coalesce a page and its buddy if
  * (a) the buddy is not in a hole &&
- * (b) the buddy is free &&
- * (c) the buddy is on the buddy system &&
- * (d) a page and its buddy have the same order.
- * for recording page's order, we use page_private(page) and PG_private.
+ * (b) the buddy is in the buddy system &&
+ * (c) a page and its buddy have the same order.
+ *
+ * For recording whether a page is in the buddy system, we use PG_buddy.
+ * Setting, clearing, and testing PG_buddy is serialized by zone->lock.
  *
+ * For recording page's order, we use page_private(page).
  */
 static inline int page_is_buddy(struct page *page, int order)
 {
@@ -293,10 +297,10 @@ static inline int page_is_buddy(struct page *page, int order)
 		return 0;
 #endif
 
-	if (PagePrivate(page) &&
-	    (page_order(page) == order) &&
-	     page_count(page) == 0)
+	if (PageBuddy(page) && page_order(page) == order) {
+		BUG_ON(page_count(page) != 0);
 		return 1;
+	}
 	return 0;
 }
 
@@ -313,7 +317,7 @@ static inline int page_is_buddy(struct page *page, int order)
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of continuous
- * free pages of length of (1 << order) and marked with PG_Private.Page's
+ * free pages of length of (1 << order) and marked with PG_buddy. Page's
 * order is recorded in page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other. That is, if we allocate a small block, and both were
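The rewritten comments describe the bookkeeping that the new PG_buddy flag makes explicit: the first page of each free block carries PG_buddy and its order in page_private(), so page_is_buddy() can distinguish a genuinely free block from a page whose owner merely set PG_private. The buddy and the merged block are still located with the usual index arithmetic; a small sketch of that arithmetic (the helper names here are illustrative, the kernel computes these inline and in __find_combined_index()):

	/* Sketch: buddy arithmetic for a free block of 2^order pages.
	 * page_idx is the page's index within its maximally aligned region;
	 * the helper names are illustrative.
	 */
	static inline unsigned long buddy_idx(unsigned long page_idx, unsigned int order)
	{
		return page_idx ^ (1UL << order);	/* flip the order-th bit */
	}

	static inline unsigned long combined_idx(unsigned long page_idx, unsigned int order)
	{
		return page_idx & ~(1UL << order);	/* start of the merged 2^(order+1) block */
	}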
@@ -376,7 +380,8 @@ static inline int free_pages_check(struct page *page)
 			1 << PG_slab	|
 			1 << PG_swapcache |
 			1 << PG_writeback |
-			1 << PG_reserved ))))
+			1 << PG_reserved |
+			1 << PG_buddy ))))
 		bad_page(page);
 	if (PageDirty(page))
 		__ClearPageDirty(page);
@@ -524,7 +529,8 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
 			1 << PG_slab	|
 			1 << PG_swapcache |
 			1 << PG_writeback |
-			1 << PG_reserved ))))
+			1 << PG_reserved |
+			1 << PG_buddy ))))
 		bad_page(page);
 
 	/*
@@ -2472,6 +2478,38 @@ void __init page_alloc_init(void)
 }
 
 /*
+ * calculate_totalreserve_pages - called when sysctl_lower_zone_reserve_ratio
+ *	or min_free_kbytes changes.
+ */
+static void calculate_totalreserve_pages(void)
+{
+	struct pglist_data *pgdat;
+	unsigned long reserve_pages = 0;
+	int i, j;
+
+	for_each_online_pgdat(pgdat) {
+		for (i = 0; i < MAX_NR_ZONES; i++) {
+			struct zone *zone = pgdat->node_zones + i;
+			unsigned long max = 0;
+
+			/* Find valid and maximum lowmem_reserve in the zone */
+			for (j = i; j < MAX_NR_ZONES; j++) {
+				if (zone->lowmem_reserve[j] > max)
+					max = zone->lowmem_reserve[j];
+			}
+
+			/* we treat pages_high as reserved pages. */
+			max += zone->pages_high;
+
+			if (max > zone->present_pages)
+				max = zone->present_pages;
+			reserve_pages += max;
+		}
+	}
+	totalreserve_pages = reserve_pages;
+}
+
+/*
  * setup_per_zone_lowmem_reserve - called whenever
  *	sysctl_lower_zone_reserve_ratio changes.  Ensures that each zone
  *	has a correct pages reserved value, so an adequate number of
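calculate_totalreserve_pages() sums, for every zone of every online node, the largest lowmem_reserve[] entry plus pages_high, clamped to the zone's present_pages. Illustrative arithmetic: a node whose DMA zone has a maximum lowmem_reserve of 784 pages and pages_high of 48, and whose Normal zone has a maximum lowmem_reserve of 0 and pages_high of 3,072, contributes 832 + 3,072 = 3,904 pages; the sum over all nodes becomes the totalreserve_pages that __vm_enough_memory() subtracts in mm/mmap.c and mm/nommu.c above.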
@@ -2502,6 +2540,9 @@ static void setup_per_zone_lowmem_reserve(void)
 			}
 		}
 	}
+
+	/* update totalreserve_pages */
+	calculate_totalreserve_pages();
 }
 
 /*
@@ -2556,6 +2597,9 @@ void setup_per_zone_pages_min(void)
 		zone->pages_high = zone->pages_min + tmp / 2;
 		spin_unlock_irqrestore(&zone->lru_lock, flags);
 	}
+
+	/* update totalreserve_pages */
+	calculate_totalreserve_pages();
 }
 
 /*
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -420,6 +420,7 @@ struct kmem_cache {
 	unsigned long max_freeable;
 	unsigned long node_allocs;
 	unsigned long node_frees;
+	unsigned long node_overflow;
 	atomic_t allochit;
 	atomic_t allocmiss;
 	atomic_t freehit;
@@ -465,6 +466,7 @@ struct kmem_cache {
 #define STATS_INC_ERR(x)	((x)->errors++)
 #define STATS_INC_NODEALLOCS(x)	((x)->node_allocs++)
 #define STATS_INC_NODEFREES(x)	((x)->node_frees++)
+#define STATS_INC_ACOVERFLOW(x)	((x)->node_overflow++)
 #define STATS_SET_FREEABLE(x, i) \
 	do { \
 		if ((x)->max_freeable < i) \
@@ -484,6 +486,7 @@ struct kmem_cache {
 #define STATS_INC_ERR(x)	do { } while (0)
 #define STATS_INC_NODEALLOCS(x)	do { } while (0)
 #define STATS_INC_NODEFREES(x)	do { } while (0)
+#define STATS_INC_ACOVERFLOW(x)	do { } while (0)
 #define STATS_SET_FREEABLE(x, i) do { } while (0)
 #define STATS_INC_ALLOCHIT(x)	do { } while (0)
 #define STATS_INC_ALLOCMISS(x)	do { } while (0)
@@ -1453,7 +1456,14 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 	int i;
 
 	flags |= cachep->gfpflags;
+#ifndef CONFIG_MMU
+	/* nommu uses slab's for process anonymous memory allocations, so
+	 * requires __GFP_COMP to properly refcount higher order allocations
+	 */
+	page = alloc_pages_node(nodeid, (flags | __GFP_COMP), cachep->gfporder);
+#else
 	page = alloc_pages_node(nodeid, flags, cachep->gfporder);
+#endif
 	if (!page)
 		return NULL;
 	addr = page_address(page);
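The #ifndef CONFIG_MMU branch exists because, as the new comment notes, nommu kernels back anonymous process memory with slab allocations; passing __GFP_COMP makes a higher-order slab allocation a compound page, so that get_page()/put_page() on any constituent page are redirected to the head page's reference count rather than silently manipulating an individual tail page.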
@@ -2318,13 +2328,15 @@ EXPORT_SYMBOL(kmem_cache_destroy);
 
 /* Get the memory for a slab management obj. */
 static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
-				   int colour_off, gfp_t local_flags)
+				   int colour_off, gfp_t local_flags,
+				   int nodeid)
 {
 	struct slab *slabp;
 
 	if (OFF_SLAB(cachep)) {
 		/* Slab management obj is off-slab. */
-		slabp = kmem_cache_alloc(cachep->slabp_cache, local_flags);
+		slabp = kmem_cache_alloc_node(cachep->slabp_cache,
+					      local_flags, nodeid);
 		if (!slabp)
 			return NULL;
 	} else {
@@ -2334,6 +2346,7 @@ static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
 	slabp->inuse = 0;
 	slabp->colouroff = colour_off;
 	slabp->s_mem = objp + colour_off;
+	slabp->nodeid = nodeid;
 	return slabp;
 }
 
@@ -2519,7 +2532,7 @@ static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 		goto failed;
 
 	/* Get slab management. */
-	slabp = alloc_slabmgmt(cachep, objp, offset, local_flags);
+	slabp = alloc_slabmgmt(cachep, objp, offset, local_flags, nodeid);
 	if (!slabp)
 		goto opps1;
 
@@ -3080,9 +3093,11 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp)
 		if (l3->alien && l3->alien[nodeid]) {
 			alien = l3->alien[nodeid];
 			spin_lock(&alien->lock);
-			if (unlikely(alien->avail == alien->limit))
+			if (unlikely(alien->avail == alien->limit)) {
+				STATS_INC_ACOVERFLOW(cachep);
 				__drain_alien_cache(cachep,
 						    alien, nodeid);
+			}
 			alien->entry[alien->avail++] = objp;
 			spin_unlock(&alien->lock);
 		} else {
@@ -3760,7 +3775,7 @@ static void print_slabinfo_header(struct seq_file *m)
 	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
 #if STATS
 	seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
-		 "<error> <maxfreeable> <nodeallocs> <remotefrees>");
+		 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
 	seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
 #endif
 	seq_putc(m, '\n');
@@ -3874,11 +3889,12 @@ static int s_show(struct seq_file *m, void *p)
 		unsigned long max_freeable = cachep->max_freeable;
 		unsigned long node_allocs = cachep->node_allocs;
 		unsigned long node_frees = cachep->node_frees;
+		unsigned long overflows = cachep->node_overflow;
 
 		seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu \
-				%4lu %4lu %4lu %4lu", allocs, high, grown,
+				%4lu %4lu %4lu %4lu %4lu", allocs, high, grown,
 			   reaped, errors, max_freeable, node_allocs,
-			   node_frees);
+			   node_frees, overflows);
 	}
 	/* cpu stats */
 	{