Diffstat (limited to 'mm')
-rw-r--r--  mm/Kconfig       |  6
-rw-r--r--  mm/mmap.c        |  3
-rw-r--r--  mm/nommu.c       | 10
-rw-r--r--  mm/oom_kill.c    | 64
-rw-r--r--  mm/page_alloc.c  | 10
-rw-r--r--  mm/percpu.c      | 50
-rw-r--r--  mm/rmap.c        |  1
-rw-r--r--  mm/slub.c        |  4
-rw-r--r--  mm/vmscan.c      |  9
9 files changed, 97 insertions, 60 deletions
diff --git a/mm/Kconfig b/mm/Kconfig
index c948d4ca8bde..fe5f674d7a7d 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -225,9 +225,9 @@ config DEFAULT_MMAP_MIN_ADDR
           For most ia64, ppc64 and x86 users with lots of address space
           a value of 65536 is reasonable and should cause no problems.
           On arm and other archs it should not be higher than 32768.
-          Programs which use vm86 functionality would either need additional
-          permissions from either the LSM or the capabilities module or have
-          this protection disabled.
+          Programs which use vm86 functionality or have some need to map
+          this low address space will need CAP_SYS_RAWIO or disable this
+          protection by setting the value to 0.
 
           This value can be changed after boot using the
           /proc/sys/vm/mmap_min_addr tunable.
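
The help text names the runtime knob directly; as a quick editor's illustration (not part of this patch), a userspace program can read the current floor from that procfs path:

#include <stdio.h>

int main(void)
{
        /* the tunable named in the help text above */
        FILE *f = fopen("/proc/sys/vm/mmap_min_addr", "r");
        unsigned long min_addr;

        if (!f) {
                perror("fopen");
                return 1;
        }
        if (fscanf(f, "%lu", &min_addr) == 1)
                printf("lowest mappable address: %lu\n", min_addr);
        fclose(f);
        return 0;
}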
diff --git a/mm/mmap.c b/mm/mmap.c
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -88,9 +88,6 @@ int sysctl_overcommit_ratio = 50;	/* default is 50% */
 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
 struct percpu_counter vm_committed_as;
 
-/* amount of vm to protect from userspace access */
-unsigned long mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
-
 /*
  * Check that a process has enough memory to allocate a new virtual
  * mapping. 0 means there is enough memory for the allocation to
diff --git a/mm/nommu.c b/mm/nommu.c
index 53cab10fece4..66e81e7e9fe9 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -69,9 +69,6 @@ int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
 int heap_stack_gap = 0;
 
-/* amount of vm to protect from userspace access */
-unsigned long mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
-
 atomic_long_t mmap_pages_allocated;
 
 EXPORT_SYMBOL(mem_map);
@@ -922,6 +919,10 @@ static int validate_mmap_request(struct file *file,
 		if (!file->f_op->read)
 			capabilities &= ~BDI_CAP_MAP_COPY;
 
+		/* The file shall have been opened with read permission. */
+		if (!(file->f_mode & FMODE_READ))
+			return -EACCES;
+
 		if (flags & MAP_SHARED) {
 			/* do checks for writing, appending and locking */
 			if ((prot & PROT_WRITE) &&
@@ -1351,6 +1352,7 @@ unsigned long do_mmap_pgoff(struct file *file,
 	}
 
 	vma->vm_region = region;
+	add_nommu_region(region);
 
 	/* set up the mapping */
 	if (file && vma->vm_flags & VM_SHARED)
@@ -1360,8 +1362,6 @@ unsigned long do_mmap_pgoff(struct file *file,
 	if (ret < 0)
 		goto error_put_region;
 
-	add_nommu_region(region);
-
 	/* okay... we have a mapping; now we have to register it */
 	result = vma->vm_start;
 
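
The validate_mmap_request() hunk above rejects mappings whose descriptor lacks read permission. The same rule is visible from userspace on any Linux kernel; a small standalone check (an illustrative sketch, not kernel code; the scratch path is arbitrary):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        /* a descriptor opened write-only cannot back a file mapping */
        int fd = open("/tmp/map-test", O_CREAT | O_WRONLY, 0600);

        if (fd < 0 || ftruncate(fd, 4096) < 0) {
                perror("setup");
                return 1;
        }
        if (mmap(NULL, 4096, PROT_READ, MAP_PRIVATE, fd, 0) == MAP_FAILED)
                printf("mmap failed as expected: %s\n", strerror(errno));
        else
                printf("mmap unexpectedly succeeded\n");
        close(fd);
        unlink("/tmp/map-test");
        return 0;
}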
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 175a67a78a99..a7b2460e922b 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -58,7 +58,6 @@ unsigned long badness(struct task_struct *p, unsigned long uptime)
 	unsigned long points, cpu_time, run_time;
 	struct mm_struct *mm;
 	struct task_struct *child;
-	int oom_adj;
 
 	task_lock(p);
 	mm = p->mm;
@@ -66,11 +65,6 @@ unsigned long badness(struct task_struct *p, unsigned long uptime)
 		task_unlock(p);
 		return 0;
 	}
-	oom_adj = mm->oom_adj;
-	if (oom_adj == OOM_DISABLE) {
-		task_unlock(p);
-		return 0;
-	}
 
 	/*
 	 * The memory size of the process is the basis for the badness.
@@ -154,15 +148,15 @@ unsigned long badness(struct task_struct *p, unsigned long uptime)
 	points /= 8;
 
 	/*
-	 * Adjust the score by oom_adj.
+	 * Adjust the score by oomkilladj.
 	 */
-	if (oom_adj) {
-		if (oom_adj > 0) {
+	if (p->oomkilladj) {
+		if (p->oomkilladj > 0) {
 			if (!points)
 				points = 1;
-			points <<= oom_adj;
+			points <<= p->oomkilladj;
 		} else
-			points >>= -(oom_adj);
+			points >>= -(p->oomkilladj);
 	}
 
 #ifdef DEBUG
@@ -257,8 +251,11 @@ static struct task_struct *select_bad_process(unsigned long *ppoints,
 			*ppoints = ULONG_MAX;
 		}
 
+		if (p->oomkilladj == OOM_DISABLE)
+			continue;
+
 		points = badness(p, uptime.tv_sec);
-		if (points > *ppoints) {
+		if (points > *ppoints || !chosen) {
 			chosen = p;
 			*ppoints = points;
 		}
@@ -307,7 +304,8 @@ static void dump_tasks(const struct mem_cgroup *mem)
 		}
 		printk(KERN_INFO "[%5d] %5d %5d %8lu %8lu %3d %3d %s\n",
 		       p->pid, __task_cred(p)->uid, p->tgid, mm->total_vm,
-		       get_mm_rss(mm), (int)task_cpu(p), mm->oom_adj, p->comm);
+		       get_mm_rss(mm), (int)task_cpu(p), p->oomkilladj,
+		       p->comm);
 		task_unlock(p);
 	} while_each_thread(g, p);
 }
@@ -325,8 +323,11 @@ static void __oom_kill_task(struct task_struct *p, int verbose)
 		return;
 	}
 
-	if (!p->mm)
+	if (!p->mm) {
+		WARN_ON(1);
+		printk(KERN_WARNING "tried to kill an mm-less task!\n");
 		return;
+	}
 
 	if (verbose)
 		printk(KERN_ERR "Killed process %d (%s)\n",
@@ -348,13 +349,28 @@ static int oom_kill_task(struct task_struct *p)
 	struct mm_struct *mm;
 	struct task_struct *g, *q;
 
-	task_lock(p);
 	mm = p->mm;
-	if (!mm || mm->oom_adj == OOM_DISABLE) {
-		task_unlock(p);
+
+	/* WARNING: mm may not be dereferenced since we did not obtain its
+	 * value from get_task_mm(p).  This is OK since all we need to do is
+	 * compare mm to q->mm below.
+	 *
+	 * Furthermore, even if mm contains a non-NULL value, p->mm may
+	 * change to NULL at any time since we do not hold task_lock(p).
+	 * However, this is of no concern to us.
+	 */
+
+	if (mm == NULL)
 		return 1;
-	}
-	task_unlock(p);
+
+	/*
+	 * Don't kill the process if any threads are set to OOM_DISABLE
+	 */
+	do_each_thread(g, q) {
+		if (q->mm == mm && q->oomkilladj == OOM_DISABLE)
+			return 1;
+	} while_each_thread(g, q);
+
 	__oom_kill_task(p, 1);
 
 	/*
@@ -377,11 +393,10 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
 	struct task_struct *c;
 
 	if (printk_ratelimit()) {
-		task_lock(current);
 		printk(KERN_WARNING "%s invoked oom-killer: "
-			"gfp_mask=0x%x, order=%d, oom_adj=%d\n",
-			current->comm, gfp_mask, order,
-			current->mm ? current->mm->oom_adj : OOM_DISABLE);
+			"gfp_mask=0x%x, order=%d, oomkilladj=%d\n",
+			current->comm, gfp_mask, order, current->oomkilladj);
+		task_lock(current);
 		cpuset_print_task_mems_allowed(current);
 		task_unlock(current);
 		dump_stack();
@@ -394,9 +409,8 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
 	/*
 	 * If the task is already exiting, don't alarm the sysadmin or kill
 	 * its children or threads, just set TIF_MEMDIE so it can die quickly
-	 * if its mm is still attached.
 	 */
-	if (p->mm && (p->flags & PF_EXITING)) {
+	if (p->flags & PF_EXITING) {
 		__oom_kill_task(p, 0);
 		return 0;
 	}
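
The badness() hunks revert the score adjustment from the mm-wide oom_adj back to the per-task oomkilladj; the adjustment itself is plain bit shifting. A self-contained model of just that arithmetic (adjust_badness is a made-up name; only the shift logic mirrors the kernel hunk above):

#include <stdio.h>

static unsigned long adjust_badness(unsigned long points, int oomkilladj)
{
        if (oomkilladj > 0) {
                if (!points)
                        points = 1;             /* a positive adj must raise the score */
                points <<= oomkilladj;          /* doubles per step above zero */
        } else if (oomkilladj < 0) {
                points >>= -(oomkilladj);       /* halves per step below zero */
        }
        return points;
}

int main(void)
{
        printf("%lu\n", adjust_badness(100, 3));        /* 800 */
        printf("%lu\n", adjust_badness(100, -3));       /* 12 */
        return 0;
}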
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d052abbe3063..a0de15f46987 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -817,13 +817,15 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
 		 * agressive about taking ownership of free pages
 		 */
 		if (unlikely(current_order >= (pageblock_order >> 1)) ||
-				start_migratetype == MIGRATE_RECLAIMABLE) {
+				start_migratetype == MIGRATE_RECLAIMABLE ||
+				page_group_by_mobility_disabled) {
 			unsigned long pages;
 			pages = move_freepages_block(zone, page,
 							start_migratetype);
 
 			/* Claim the whole block if over half of it is free */
-			if (pages >= (1 << (pageblock_order-1)))
+			if (pages >= (1 << (pageblock_order-1)) ||
+					page_group_by_mobility_disabled)
 				set_pageblock_migratetype(page,
 							start_migratetype);
 
@@ -2544,7 +2546,6 @@ static void build_zonelists(pg_data_t *pgdat)
 	prev_node = local_node;
 	nodes_clear(used_mask);
 
-	memset(node_load, 0, sizeof(node_load));
 	memset(node_order, 0, sizeof(node_order));
 	j = 0;
 
@@ -2653,6 +2654,9 @@ static int __build_all_zonelists(void *dummy)
 {
 	int nid;
 
+#ifdef CONFIG_NUMA
+	memset(node_load, 0, sizeof(node_load));
+#endif
 	for_each_online_node(nid) {
 		pg_data_t *pgdat = NODE_DATA(nid);
 
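
Both __rmqueue_fallback() hunks make the fallback path claim whole pageblocks whenever page_group_by_mobility_disabled is set. A reduced model of that decision (should_claim_block is a hypothetical helper; the MIGRATE_RECLAIMABLE test is collapsed to a boolean for brevity):

#include <stdbool.h>
#include <stdio.h>

static bool should_claim_block(int current_order, int pageblock_order,
                               bool reclaimable, bool mobility_disabled)
{
        return current_order >= (pageblock_order >> 1) ||
               reclaimable || mobility_disabled;
}

int main(void)
{
        /* a small-order, non-reclaimable request steals the whole
         * block only once grouping by mobility is disabled */
        printf("%d\n", should_claim_block(1, 10, false, false));        /* 0 */
        printf("%d\n", should_claim_block(1, 10, false, true));         /* 1 */
        return 0;
}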
diff --git a/mm/percpu.c b/mm/percpu.c
index b70f2acd8853..3311c8919f37 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -8,12 +8,12 @@
  *
  * This is percpu allocator which can handle both static and dynamic
  * areas.  Percpu areas are allocated in chunks in vmalloc area.  Each
- * chunk is consisted of num_possible_cpus() units and the first chunk
- * is used for static percpu variables in the kernel image (special
- * boot time alloc/init handling necessary as these areas need to be
- * brought up before allocation services are running).  Unit grows as
- * necessary and all units grow or shrink in unison.  When a chunk is
- * filled up, another chunk is allocated.  ie. in vmalloc area
+ * chunk is consisted of nr_cpu_ids units and the first chunk is used
+ * for static percpu variables in the kernel image (special boot time
+ * alloc/init handling necessary as these areas need to be brought up
+ * before allocation services are running).  Unit grows as necessary
+ * and all units grow or shrink in unison.  When a chunk is filled up,
+ * another chunk is allocated.  ie. in vmalloc area
 *
 * c0 c1 c2
 * ------------------- ------------------- ------------
@@ -197,7 +197,12 @@ static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
 static bool pcpu_chunk_page_occupied(struct pcpu_chunk *chunk,
 				     int page_idx)
 {
-	return *pcpu_chunk_pagep(chunk, 0, page_idx) != NULL;
+	/*
+	 * Any possible cpu id can be used here, so there's no need to
+	 * worry about preemption or cpu hotplug.
+	 */
+	return *pcpu_chunk_pagep(chunk, raw_smp_processor_id(),
+				 page_idx) != NULL;
 }
 
 /* set the pointer to a chunk in a page struct */
@@ -297,6 +302,14 @@ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
 		return pcpu_first_chunk;
 	}
 
+	/*
+	 * The address is relative to unit0 which might be unused and
+	 * thus unmapped.  Offset the address to the unit space of the
+	 * current processor before looking it up in the vmalloc
+	 * space.  Note that any possible cpu id can be used here, so
+	 * there's no need to worry about preemption or cpu hotplug.
+	 */
+	addr += raw_smp_processor_id() * pcpu_unit_size;
 	return pcpu_get_page_chunk(vmalloc_to_page(addr));
 }
 
@@ -558,7 +571,7 @@ static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
 static void pcpu_unmap(struct pcpu_chunk *chunk, int page_start, int page_end,
 		       bool flush_tlb)
 {
-	unsigned int last = num_possible_cpus() - 1;
+	unsigned int last = nr_cpu_ids - 1;
 	unsigned int cpu;
 
 	/* unmap must not be done on immutable chunk */
@@ -643,7 +656,7 @@ static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size,
  */
 static int pcpu_map(struct pcpu_chunk *chunk, int page_start, int page_end)
 {
-	unsigned int last = num_possible_cpus() - 1;
+	unsigned int last = nr_cpu_ids - 1;
 	unsigned int cpu;
 	int err;
 
@@ -749,7 +762,7 @@ static struct pcpu_chunk *alloc_pcpu_chunk(void)
 	chunk->map[chunk->map_used++] = pcpu_unit_size;
 	chunk->page = chunk->page_ar;
 
-	chunk->vm = get_vm_area(pcpu_chunk_size, GFP_KERNEL);
+	chunk->vm = get_vm_area(pcpu_chunk_size, VM_ALLOC);
 	if (!chunk->vm) {
 		free_pcpu_chunk(chunk);
 		return NULL;
@@ -1067,9 +1080,9 @@ size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
 					PFN_UP(size_sum));
 
 	pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
-	pcpu_chunk_size = num_possible_cpus() * pcpu_unit_size;
+	pcpu_chunk_size = nr_cpu_ids * pcpu_unit_size;
 	pcpu_chunk_struct_size = sizeof(struct pcpu_chunk)
-		+ num_possible_cpus() * pcpu_unit_pages * sizeof(struct page *);
+		+ nr_cpu_ids * pcpu_unit_pages * sizeof(struct page *);
 
 	if (dyn_size < 0)
 		dyn_size = pcpu_unit_size - static_size - reserved_size;
@@ -1248,7 +1261,7 @@ ssize_t __init pcpu_embed_first_chunk(size_t static_size, size_t reserved_size,
 	} else
 		pcpue_unit_size = max_t(size_t, pcpue_size, PCPU_MIN_UNIT_SIZE);
 
-	chunk_size = pcpue_unit_size * num_possible_cpus();
+	chunk_size = pcpue_unit_size * nr_cpu_ids;
 
 	pcpue_ptr = __alloc_bootmem_nopanic(chunk_size, PAGE_SIZE,
 					    __pa(MAX_DMA_ADDRESS));
@@ -1259,12 +1272,15 @@ ssize_t __init pcpu_embed_first_chunk(size_t static_size, size_t reserved_size,
 	}
 
 	/* return the leftover and copy */
-	for_each_possible_cpu(cpu) {
+	for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
 		void *ptr = pcpue_ptr + cpu * pcpue_unit_size;
 
-		free_bootmem(__pa(ptr + pcpue_size),
-			     pcpue_unit_size - pcpue_size);
-		memcpy(ptr, __per_cpu_load, static_size);
+		if (cpu_possible(cpu)) {
+			free_bootmem(__pa(ptr + pcpue_size),
+				     pcpue_unit_size - pcpue_size);
+			memcpy(ptr, __per_cpu_load, static_size);
+		} else
+			free_bootmem(__pa(ptr), pcpue_unit_size);
 	}
 
 	/* we're ready, commit */
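
Every percpu.c sizing hunk above switches from num_possible_cpus() to nr_cpu_ids, the highest possible CPU id plus one, and the two differ whenever the possible map has holes. A standalone illustration of the gap (userspace model with a made-up sparse map, not the kernel's cpumask code):

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 8

/* a sparse possible map: cpus 0 and 2 exist, cpu 1 is a hole */
static const bool possible[NR_CPUS] = { true, false, true };

int main(void)
{
        int nr_cpu_ids = 0, num_possible = 0;

        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
                if (possible[cpu]) {
                        nr_cpu_ids = cpu + 1;   /* highest id plus one */
                        num_possible++;
                }
        }
        /* sizing a chunk by num_possible (2) units instead of
         * nr_cpu_ids (3) would leave no unit for cpu 2 */
        printf("nr_cpu_ids=%d num_possible_cpus=%d\n",
               nr_cpu_ids, num_possible);
        return 0;
}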
diff --git a/mm/rmap.c b/mm/rmap.c
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -358,6 +358,7 @@ static int page_referenced_one(struct page *page,
 	 */
 	if (vma->vm_flags & VM_LOCKED) {
 		*mapcount = 1;	/* break early from loop */
+		*vm_flags |= VM_LOCKED;
 		goto out_unmap;
 	}
 
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2594,8 +2594,6 @@ static inline int kmem_cache_close(struct kmem_cache *s)
  */
 void kmem_cache_destroy(struct kmem_cache *s)
 {
-	if (s->flags & SLAB_DESTROY_BY_RCU)
-		rcu_barrier();
 	down_write(&slub_lock);
 	s->refcount--;
 	if (!s->refcount) {
@@ -2606,6 +2604,8 @@ void kmem_cache_destroy(struct kmem_cache *s)
 				"still has objects.\n", s->name, __func__);
 			dump_stack();
 		}
+		if (s->flags & SLAB_DESTROY_BY_RCU)
+			rcu_barrier();
 		sysfs_slab_remove(s);
 	} else
 		up_write(&slub_lock);
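
The two slub.c hunks move the rcu_barrier() inside the refcount-reaches-zero branch, so only the final release of a SLAB_DESTROY_BY_RCU cache pays for the RCU wait. A toy model of that ordering (every name here is a stand-in, not the kernel API):

#include <stdio.h>

#define SLAB_DESTROY_BY_RCU 0x1UL

struct cache {
        int refcount;
        unsigned long flags;
};

/* stand-in for rcu_barrier(): waits for pending RCU callbacks */
static void rcu_barrier_model(void)
{
        puts("rcu_barrier: all deferred frees drained");
}

static void remove_cache(struct cache *s)
{
        (void)s;
        puts("cache removed");
}

static void destroy_model(struct cache *s)
{
        if (--s->refcount == 0) {
                /* only the final release pays for the RCU wait;
                 * dropping an alias reference returns immediately */
                if (s->flags & SLAB_DESTROY_BY_RCU)
                        rcu_barrier_model();
                remove_cache(s);
        }
}

int main(void)
{
        struct cache c = { .refcount = 2, .flags = SLAB_DESTROY_BY_RCU };

        destroy_model(&c);      /* alias drop: no barrier, no removal */
        destroy_model(&c);      /* last drop: barrier, then removal */
        return 0;
}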
diff --git a/mm/vmscan.c b/mm/vmscan.c
index dea7abd31098..94e86dd6954c 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -630,9 +630,14 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 
 		referenced = page_referenced(page, 1,
 						sc->mem_cgroup, &vm_flags);
-		/* In active use or really unfreeable? Activate it. */
+		/*
+		 * In active use or really unfreeable? Activate it.
+		 * If page which have PG_mlocked lost isoltation race,
+		 * try_to_unmap moves it to unevictable list
+		 */
 		if (sc->order <= PAGE_ALLOC_COSTLY_ORDER &&
-					referenced && page_mapping_inuse(page))
+			referenced && page_mapping_inuse(page)
+			&& !(vm_flags & VM_LOCKED))
 			goto activate_locked;
 
 		/*
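
Together with the rmap.c hunk that propagates VM_LOCKED into vm_flags, this check keeps mlocked pages that lost the isolation race from being re-activated, letting try_to_unmap() route them to the unevictable list. A minimal model of the reworked decision (standalone sketch; classify() and the verdict enum are invented for illustration, the two constants match the kernel's values):

#include <stdbool.h>
#include <stdio.h>

#define VM_LOCKED               0x00002000UL
#define PAGE_ALLOC_COSTLY_ORDER 3

enum verdict { ACTIVATE, KEEP_SCANNING };

static enum verdict classify(int order, bool referenced,
                             bool mapping_inuse, unsigned long vm_flags)
{
        /* re-activate only when the page is not part of an mlocked VMA */
        if (order <= PAGE_ALLOC_COSTLY_ORDER && referenced &&
            mapping_inuse && !(vm_flags & VM_LOCKED))
                return ACTIVATE;
        return KEEP_SCANNING;   /* falls through toward try_to_unmap() */
}

int main(void)
{
        printf("%d\n", classify(0, true, true, 0));             /* 0: ACTIVATE */
        printf("%d\n", classify(0, true, true, VM_LOCKED));     /* 1: KEEP_SCANNING */
        return 0;
}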