Diffstat (limited to 'mm')
-rw-r--r--  mm/memcontrol.c | 26 +++++++++++++++++++++-----
-rw-r--r--  mm/oom_kill.c   |  4 ++--
-rw-r--r--  mm/sparse.c     | 10 ++++++++++
-rw-r--r--  mm/vmstat.c     |  1 +
4 files changed, 34 insertions(+), 7 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 9b648bd63451..2e0bfc93484b 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -533,6 +533,9 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
         unsigned long nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
         struct mem_cgroup_per_zone *mz;
 
+        if (mem_cgroup_subsys.disabled)
+                return 0;
+
         /*
          * Should page_cgroup's go to their own slab?
          * One could optimize the performance of the charging routine
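
The same guard recurs throughout this patch: every external entry point into the controller bails out as soon as it sees mem_cgroup_subsys.disabled, which the kernel sets when the controller is turned off via the cgroup_disable= boot parameter. A minimal userspace sketch of the pattern follows; the struct and function names here are hypothetical stand-ins, not the kernel's.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for mem_cgroup_subsys and its disabled flag. */
struct subsys {
        bool disabled;          /* set once at boot, read-only afterwards */
};

static struct subsys memcg_subsys = { .disabled = true };

/* Return the caller-visible "success" value before touching any
 * controller state, so callers never need to know about the flag. */
static int charge_page(void)
{
        if (memcg_subsys.disabled)
                return 0;
        /* ... real accounting work would go here ... */
        return 0;
}

int main(void)
{
        printf("charge: %d\n", charge_page());
        return 0;
}
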
@@ -665,6 +668,9 @@ void mem_cgroup_uncharge_page(struct page *page)
         struct mem_cgroup_per_zone *mz;
         unsigned long flags;
 
+        if (mem_cgroup_subsys.disabled)
+                return;
+
         /*
          * Check if our page_cgroup is valid
          */
@@ -705,6 +711,9 @@ int mem_cgroup_prepare_migration(struct page *page)
 {
         struct page_cgroup *pc;
 
+        if (mem_cgroup_subsys.disabled)
+                return 0;
+
         lock_page_cgroup(page);
         pc = page_get_page_cgroup(page);
         if (pc)
@@ -803,6 +812,9 @@ static int mem_cgroup_force_empty(struct mem_cgroup *mem)
         int ret = -EBUSY;
         int node, zid;
 
+        if (mem_cgroup_subsys.disabled)
+                return 0;
+
         css_get(&mem->css);
         /*
          * page reclaim code (kswapd etc..) will move pages between
@@ -966,7 +978,7 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
 {
         struct mem_cgroup_per_node *pn;
         struct mem_cgroup_per_zone *mz;
-        int zone;
+        int zone, tmp = node;
         /*
          * This routine is called against possible nodes.
          * But it's BUG to call kmalloc() against offline node.
@@ -975,10 +987,9 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
          * never be onlined. It's better to use memory hotplug callback
          * function.
          */
-        if (node_state(node, N_HIGH_MEMORY))
-                pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, node);
-        else
-                pn = kmalloc(sizeof(*pn), GFP_KERNEL);
+        if (!node_state(node, N_NORMAL_MEMORY))
+                tmp = -1;
+        pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
         if (!pn)
                 return 1;
 
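
The two hunks above collapse the kmalloc()/kmalloc_node() branch into a single kmalloc_node() call: a node without normal memory is demoted to -1, which the allocator of this era treated as "no preference". A runnable sketch of the fallback idiom, with hypothetical stand-ins for node_state() and kmalloc_node():

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical predicate: pretend only node 0 has normal memory. */
static int node_has_normal_memory(int node)
{
        return node == 0;
}

/* Stand-in for kmalloc_node(); in the kernel, a node id of -1
 * told the allocator to place the object on any node. */
static void *alloc_on_node(size_t size, int node)
{
        printf("alloc %zu bytes on node %d%s\n", size, node,
               node == -1 ? " (any)" : "");
        return malloc(size);
}

int main(void)
{
        int node = 3, tmp = node;

        if (!node_has_normal_memory(node))
                tmp = -1;               /* demote before allocating */
        free(alloc_on_node(128, tmp));
        return 0;
}
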
@@ -1053,6 +1064,8 @@ static void mem_cgroup_destroy(struct cgroup_subsys *ss,
 static int mem_cgroup_populate(struct cgroup_subsys *ss,
                                 struct cgroup *cont)
 {
+        if (mem_cgroup_subsys.disabled)
+                return 0;
         return cgroup_add_files(cont, ss, mem_cgroup_files,
                                 ARRAY_SIZE(mem_cgroup_files));
 }
@@ -1065,6 +1078,9 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss,
         struct mm_struct *mm;
         struct mem_cgroup *mem, *old_mem;
 
+        if (mem_cgroup_subsys.disabled)
+                return;
+
         mm = get_task_mm(p);
         if (mm == NULL)
                 return;
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index f255eda693b0..beb592fe9389 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -423,7 +423,7 @@ void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask)
         struct task_struct *p;
 
         cgroup_lock();
-        rcu_read_lock();
+        read_lock(&tasklist_lock);
 retry:
         p = select_bad_process(&points, mem);
         if (PTR_ERR(p) == -1UL)
@@ -436,7 +436,7 @@ retry:
                                 "Memory cgroup out of memory"))
                 goto retry;
 out:
-        rcu_read_unlock();
+        read_unlock(&tasklist_lock);
         cgroup_unlock();
 }
 #endif
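
The swap above holds tasklist_lock for reading across the whole select/kill/retry loop instead of an RCU read-side section. As an illustration only, not the kernel's actual locking rules, the reader/writer shape of that pattern looks like this in userspace:

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t list_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Keep the read lock across the entire scan-and-retry loop so that
 * writers (fork/exit in the kernel analogy) stay excluded throughout. */
static void scan_tasks(void)
{
        pthread_rwlock_rdlock(&list_lock);
        /* ... select a victim, possibly loop back and retry ... */
        pthread_rwlock_unlock(&list_lock);
        puts("scan done");
}

int main(void)
{
        scan_tasks();
        return 0;
}
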
diff --git a/mm/sparse.c b/mm/sparse.c
index f6a43c09c322..98d6b39c3472 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -149,8 +149,18 @@ static inline int sparse_early_nid(struct mem_section *section)
 /* Record a memory area against a node. */
 void __init memory_present(int nid, unsigned long start, unsigned long end)
 {
+        unsigned long max_arch_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);
         unsigned long pfn;
 
+        /*
+         * Sanity checks - do not allow an architecture to pass
+         * in larger pfns than the maximum scope of sparsemem:
+         */
+        if (start >= max_arch_pfn)
+                return;
+        if (end >= max_arch_pfn)
+                end = max_arch_pfn;
+
         start &= PAGE_SECTION_MASK;
         for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
                 unsigned long section = pfn_to_section_nr(pfn);
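
The new check derives the largest pfn sparsemem can describe from MAX_PHYSMEM_BITS and PAGE_SHIFT, then ignores or clamps ranges beyond it. A standalone sketch of the arithmetic, using illustrative values for the two per-architecture constants:

#include <stdio.h>

/* Illustrative values; both are per-architecture kernel constants. */
#define MAX_PHYSMEM_BITS 46
#define PAGE_SHIFT 12

int main(void)
{
        unsigned long max_arch_pfn = 1UL << (MAX_PHYSMEM_BITS - PAGE_SHIFT);
        unsigned long start = 1UL << 30, end = ~0UL;    /* sample range */

        if (start >= max_arch_pfn)
                return 0;               /* whole range out of scope */
        if (end >= max_arch_pfn)
                end = max_arch_pfn;     /* clamp the tail */
        printf("usable pfn range: [%#lx, %#lx), max_arch_pfn %#lx\n",
               start, end, max_arch_pfn);
        return 0;
}
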
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 422d960ffcd8..7c7286e9506d 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -388,6 +388,7 @@ static char * const migratetype_names[MIGRATE_TYPES] = {
         "Reclaimable",
         "Movable",
         "Reserve",
+        "Isolate",
 };
 
 static void *frag_start(struct seq_file *m, loff_t *pos)
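
This one-liner keeps migratetype_names[] in step with the enum it indexes; a table one entry short would let the /proc/pagetypeinfo printer read past the array. A compact sketch of the invariant (hypothetical enum snapshot; the real list has changed across kernel versions):

#include <stdio.h>

/* Hypothetical snapshot of the migrate-type enum this table tracks;
 * MIGRATE_TYPES doubles as the element count the array must reach. */
enum migratetype { UNMOVABLE, RECLAIMABLE, MOVABLE, RESERVE, ISOLATE,
                   MIGRATE_TYPES };

static const char * const migratetype_names[MIGRATE_TYPES] = {
        "Unmovable", "Reclaimable", "Movable", "Reserve", "Isolate",
};

int main(void)
{
        for (int i = 0; i < MIGRATE_TYPES; i++)
                printf("%d: %s\n", i, migratetype_names[i]);
        return 0;
}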