author      Ingo Molnar <mingo@elte.hu>   2010-04-02 13:37:50 -0400
committer   Ingo Molnar <mingo@elte.hu>   2010-04-02 13:38:10 -0400
commit      ec5e61aabeac58670691bd0613388d16697d0d81 (patch)
tree        59838509358f27334874b90756505785cde29b02 /mm
parent      75ec5a245c7763c397f31ec8964d0a46c54a7386 (diff)
parent      8bb39f9aa068262732fe44b965d7a6eb5a5a7d67 (diff)
Merge branch 'perf/urgent' into perf/core
Conflicts:
arch/x86/kernel/cpu/perf_event.c
Merge reason: Resolve the conflict, pick up fixes
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'mm')
-rw-r--r--  mm/bootmem.c     | 13
-rw-r--r--  mm/ksm.c         |  2
-rw-r--r--  mm/memcontrol.c  | 50
-rw-r--r--  mm/memory.c      |  1
-rw-r--r--  mm/mempolicy.c   | 50
-rw-r--r--  mm/mmu_context.c |  1
-rw-r--r--  mm/nommu.c       | 13
7 files changed, 67 insertions, 63 deletions
diff --git a/mm/bootmem.c b/mm/bootmem.c
index d7c791ef0036..9b134460b016 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -180,19 +180,12 @@ static void __init __free_pages_memory(unsigned long start, unsigned long end)
         end_aligned = end & ~(BITS_PER_LONG - 1);
 
         if (end_aligned <= start_aligned) {
-#if 1
-                printk(KERN_DEBUG " %lx - %lx\n", start, end);
-#endif
                 for (i = start; i < end; i++)
                         __free_pages_bootmem(pfn_to_page(i), 0);
 
                 return;
         }
 
-#if 1
-        printk(KERN_DEBUG " %lx %lx - %lx %lx\n",
-                start, start_aligned, end_aligned, end);
-#endif
         for (i = start; i < start_aligned; i++)
                 __free_pages_bootmem(pfn_to_page(i), 0);
 
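The debug printk()s stripped out above bracket code that splits a PFN range into an
unaligned head, a BITS_PER_LONG-aligned middle and a tail before freeing it. Below is a
standalone userspace sketch of that alignment arithmetic; the PFN values are made up,
and the round-up used for start_aligned is reconstructed from context rather than shown
in the hunk.

/*
 * Sketch (not kernel code) of the alignment split in __free_pages_memory():
 * round start up and end down to a BITS_PER_LONG boundary, and fall back to
 * a plain per-page loop when the aligned window is empty.
 */
#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG   (sizeof(long) * CHAR_BIT)

int main(void)
{
        unsigned long start = 5, end = 200;     /* hypothetical PFN range */
        unsigned long start_aligned, end_aligned;

        start_aligned = (start + BITS_PER_LONG - 1) & ~(BITS_PER_LONG - 1);
        end_aligned = end & ~(BITS_PER_LONG - 1);

        if (end_aligned <= start_aligned) {
                /* window too small: every page handled one at a time */
                printf("unaligned only: %lu - %lu\n", start, end);
                return 0;
        }

        printf("head:   %lu - %lu\n", start, start_aligned);
        printf("middle: %lu - %lu\n", start_aligned, end_aligned);
        printf("tail:   %lu - %lu\n", end_aligned, end);
        return 0;
}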
@@ -428,9 +421,6 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
 {
 #ifdef CONFIG_NO_BOOTMEM
         free_early(physaddr, physaddr + size);
-#if 0
-        printk(KERN_DEBUG "free %lx %lx\n", physaddr, size);
-#endif
 #else
         unsigned long start, end;
 
@@ -456,9 +446,6 @@ void __init free_bootmem(unsigned long addr, unsigned long size)
 {
 #ifdef CONFIG_NO_BOOTMEM
         free_early(addr, addr + size);
-#if 0
-        printk(KERN_DEBUG "free %lx %lx\n", addr, size);
-#endif
 #else
         unsigned long start, end;
 
diff --git a/mm/ksm.c b/mm/ksm.c
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -751,7 +751,7 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
          * page
          */
         if (page_mapcount(page) + 1 + swapped != page_count(page)) {
-                set_pte_at_notify(mm, addr, ptep, entry);
+                set_pte_at(mm, addr, ptep, entry);
                 goto out_unlock;
         }
         entry = pte_wrprotect(entry);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 7973b5221fb8..9ed760dc7448 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3691,8 +3691,10 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
         else
                 mem = vmalloc(size);
 
-        if (mem)
-                memset(mem, 0, size);
+        if (!mem)
+                return NULL;
+
+        memset(mem, 0, size);
         mem->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
         if (!mem->stat) {
                 if (size < PAGE_SIZE)
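In the old code the "if (mem) memset(...)" guard was followed by an unconditional
dereference of mem->stat, so a failed allocation would still be used; the hunk switches
to an early return. A minimal userspace sketch of that shape follows; allocate_stats()
and struct stats are made-up names standing in for the kmalloc()/vmalloc() paths and
struct mem_cgroup.

/*
 * Early-return allocation pattern: bail out as soon as allocation fails,
 * then zero and initialise unconditionally.
 */
#include <stdlib.h>
#include <string.h>

struct stats {
        long count[8];
};

static struct stats *allocate_stats(size_t size)
{
        struct stats *s = malloc(size);

        if (!s)                 /* early return: no deeper nesting below */
                return NULL;

        memset(s, 0, size);     /* safe: s is known to be valid here */
        return s;
}

int main(void)
{
        struct stats *s = allocate_stats(sizeof(*s));

        free(s);
        return 0;
}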
@@ -3946,28 +3948,6 @@ one_by_one:
         }
         return ret;
 }
-#else   /* !CONFIG_MMU */
-static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
-                                struct cgroup *cgroup,
-                                struct task_struct *p,
-                                bool threadgroup)
-{
-        return 0;
-}
-static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
-                                struct cgroup *cgroup,
-                                struct task_struct *p,
-                                bool threadgroup)
-{
-}
-static void mem_cgroup_move_task(struct cgroup_subsys *ss,
-                                struct cgroup *cont,
-                                struct cgroup *old_cont,
-                                struct task_struct *p,
-                                bool threadgroup)
-{
-}
-#endif
 
 /**
  * is_target_pte_for_mc - check a pte whether it is valid for move charge
@@ -4330,6 +4310,28 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss,
         }
         mem_cgroup_clear_mc();
 }
+#else   /* !CONFIG_MMU */
+static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
+                                struct cgroup *cgroup,
+                                struct task_struct *p,
+                                bool threadgroup)
+{
+        return 0;
+}
+static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
+                                struct cgroup *cgroup,
+                                struct task_struct *p,
+                                bool threadgroup)
+{
+}
+static void mem_cgroup_move_task(struct cgroup_subsys *ss,
+                                struct cgroup *cont,
+                                struct cgroup *old_cont,
+                                struct task_struct *p,
+                                bool threadgroup)
+{
+}
+#endif
 
 struct cgroup_subsys mem_cgroup_subsys = {
         .name = "memory",
diff --git a/mm/memory.c b/mm/memory.c
index 5b7f2002e54b..bc9ba5a1f5b9 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -130,6 +130,7 @@ void __sync_task_rss_stat(struct task_struct *task, struct mm_struct *mm)
 
         for (i = 0; i < NR_MM_COUNTERS; i++) {
                 if (task->rss_stat.count[i]) {
+                        BUG_ON(!mm);
                         add_mm_counter(mm, i, task->rss_stat.count[i]);
                         task->rss_stat.count[i] = 0;
                 }
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 643f66e10187..8034abd3a135 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -806,9 +806,13 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
 
         err = 0;
         if (nmask) {
-                task_lock(current);
-                get_policy_nodemask(pol, nmask);
-                task_unlock(current);
+                if (mpol_store_user_nodemask(pol)) {
+                        *nmask = pol->w.user_nodemask;
+                } else {
+                        task_lock(current);
+                        get_policy_nodemask(pol, nmask);
+                        task_unlock(current);
+                }
         }
 
 out:
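The hunk above copies pol->w.user_nodemask directly when the policy stores the user's
original nodemask, and only takes task_lock() for the derived case. A rough pthreads
sketch of that "lock only when the value can change underneath you" split is below;
struct policy, get_mask() and the fields are invented for illustration.

#include <pthread.h>

struct policy {
        int stores_user_mask;           /* decided at creation, never changes */
        unsigned long user_mask;        /* snapshot saved when the policy was made */
        unsigned long live_mask;        /* derived state, updated under the lock */
};

static pthread_mutex_t task_lock = PTHREAD_MUTEX_INITIALIZER;

static void get_mask(const struct policy *pol, unsigned long *out)
{
        if (pol->stores_user_mask) {
                *out = pol->user_mask;          /* immutable: no lock needed */
        } else {
                pthread_mutex_lock(&task_lock);
                *out = pol->live_mask;          /* may change concurrently */
                pthread_mutex_unlock(&task_lock);
        }
}

int main(void)
{
        struct policy pol = { .stores_user_mask = 1, .user_mask = 0xf0 };
        unsigned long mask;

        get_mask(&pol, &mask);
        return mask == 0xf0 ? 0 : 1;
}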
@@ -2195,8 +2199,8 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
                         char *rest = nodelist;
                         while (isdigit(*rest))
                                 rest++;
-                        if (!*rest)
-                                err = 0;
+                        if (*rest)
+                                goto out;
                 }
                 break;
         case MPOL_INTERLEAVE:
@@ -2205,7 +2209,6 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
                  */
                 if (!nodelist)
                         nodes = node_states[N_HIGH_MEMORY];
-                err = 0;
                 break;
         case MPOL_LOCAL:
                 /*
@@ -2215,11 +2218,19 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
                         goto out;
                 mode = MPOL_PREFERRED;
                 break;
-
-        /*
-         * case MPOL_BIND:    mpol_new() enforces non-empty nodemask.
-         * case MPOL_DEFAULT: mpol_new() enforces empty nodemask, ignores flags.
-         */
+        case MPOL_DEFAULT:
+                /*
+                 * Insist on a empty nodelist
+                 */
+                if (!nodelist)
+                        err = 0;
+                goto out;
+        case MPOL_BIND:
+                /*
+                 * Insist on a nodelist
+                 */
+                if (!nodelist)
+                        goto out;
         }
 
         mode_flags = 0;
@@ -2233,13 +2244,14 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
                 else if (!strcmp(flags, "relative"))
                         mode_flags |= MPOL_F_RELATIVE_NODES;
                 else
-                        err = 1;
+                        goto out;
         }
 
         new = mpol_new(mode, mode_flags, &nodes);
         if (IS_ERR(new))
-                err = 1;
-        else {
+                goto out;
+
+        {
                 int ret;
                 NODEMASK_SCRATCH(scratch);
                 if (scratch) {
@@ -2250,13 +2262,15 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
                         ret = -ENOMEM;
                 NODEMASK_SCRATCH_FREE(scratch);
                 if (ret) {
-                        err = 1;
                         mpol_put(new);
-                } else if (no_context) {
-                        /* save for contextualization */
-                        new->w.user_nodemask = nodes;
+                        goto out;
                 }
         }
+        err = 0;
+        if (no_context) {
+                /* save for contextualization */
+                new->w.user_nodemask = nodes;
+        }
 
 out:
         /* Restore string for error message */
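Taken together, the mpol_parse_str() hunks replace scattered "err = 0" / "err = 1"
assignments with a single pattern: assume failure, goto out on the first problem, and
set err = 0 only once every step has succeeded. A toy userspace parser using the same
idiom is sketched below (parse_option() and its "name=number" syntax are made up, not
the mempolicy grammar).

#include <stdlib.h>
#include <string.h>

/* parse "name=number"; returns 0 on success, 1 on any malformed input */
static int parse_option(const char *str, long *value)
{
        int err = 1;                    /* pessimistic default */
        const char *eq;
        char *rest;

        eq = strchr(str, '=');
        if (!eq || eq == str)           /* missing '=' or empty name */
                goto out;

        *value = strtol(eq + 1, &rest, 10);
        if (rest == eq + 1 || *rest)    /* no digits, or trailing junk */
                goto out;

        err = 0;                        /* only reached when every check passed */
out:
        return err;
}

int main(void)
{
        long v;

        return parse_option("nodes=4", &v);     /* expect 0 */
}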
diff --git a/mm/mmu_context.c b/mm/mmu_context.c
index 0777654147c9..9e82e937000e 100644
--- a/mm/mmu_context.c
+++ b/mm/mmu_context.c
@@ -53,6 +53,7 @@ void unuse_mm(struct mm_struct *mm)
         struct task_struct *tsk = current;
 
         task_lock(tsk);
+        sync_mm_rss(tsk, mm);
         tsk->mm = NULL;
         /* active_mm is still 'mm' */
         enter_lazy_tlb(mm, tsk);
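The one-line addition drains the task's cached RSS counters into the mm before tsk->mm
is cleared, which is what lets the BUG_ON(!mm) added in mm/memory.c above hold. A small
C11 sketch of that ordering, with made-up names in place of the task/mm pair and the
rss_stat cache:

#include <stdatomic.h>
#include <stddef.h>

struct shared_stats {
        atomic_long anon;               /* aggregate updated by many threads */
};

struct worker {
        struct shared_stats *stats;     /* plays the role of tsk->mm */
        long cached_anon;               /* plays the role of task->rss_stat.count[] */
};

static struct shared_stats stats;       /* zero-initialised aggregate */

static void sync_cached(struct worker *w)
{
        if (w->cached_anon) {
                atomic_fetch_add(&w->stats->anon, w->cached_anon);
                w->cached_anon = 0;
        }
}

static void detach(struct worker *w)
{
        sync_cached(w);         /* drain the cache first... */
        w->stats = NULL;        /* ...then drop the reference, so nothing is lost */
}

int main(void)
{
        struct worker w = { .stats = &stats, .cached_anon = 3 };

        detach(&w);
        return atomic_load(&stats.anon) == 3 ? 0 : 1;
}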
diff --git a/mm/nommu.c b/mm/nommu.c
index 605ace8982a8..63fa17d121f0 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -146,7 +146,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                       (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
 
         for (i = 0; i < nr_pages; i++) {
-                vma = find_extend_vma(mm, start);
+                vma = find_vma(mm, start);
                 if (!vma)
                         goto finish_or_fault;
 
@@ -162,7 +162,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                 }
                 if (vmas)
                         vmas[i] = vma;
-                start += PAGE_SIZE;
+                start = (start + PAGE_SIZE) & PAGE_MASK;
         }
 
         return i;
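The cursor advance above changes from "start += PAGE_SIZE" to
"(start + PAGE_SIZE) & PAGE_MASK", so a non-page-aligned start snaps to the next page
boundary instead of drifting by the original offset on every iteration. A tiny sketch
of the two behaviours, with a made-up 4 KiB page size and address:

#include <stdio.h>

#define PAGE_SIZE       4096UL
#define PAGE_MASK       (~(PAGE_SIZE - 1))

int main(void)
{
        unsigned long start = 0x10f80;  /* hypothetical unaligned user address */
        unsigned long a = start, b = start;
        int i;

        for (i = 0; i < 3; i++) {
                printf("step %d: drift %#lx  snap %#lx\n", i, a, b);
                a += PAGE_SIZE;                         /* old behaviour */
                b = (b + PAGE_SIZE) & PAGE_MASK;        /* new behaviour */
        }
        return 0;
}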
@@ -764,7 +764,7 @@ EXPORT_SYMBOL(find_vma);
  */
 struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
 {
-        return find_vma(mm, addr & PAGE_MASK);
+        return find_vma(mm, addr);
 }
 
 /*
@@ -1040,10 +1040,9 @@ static int do_mmap_shared_file(struct vm_area_struct *vma)
         if (ret != -ENOSYS)
                 return ret;
 
-        /* getting an ENOSYS error indicates that direct mmap isn't
-         * possible (as opposed to tried but failed) so we'll fall
-         * through to making a private copy of the data and mapping
-         * that if we can */
+        /* getting -ENOSYS indicates that direct mmap isn't possible (as
+         * opposed to tried but failed) so we can only give a suitable error as
+         * it's not possible to make a private copy if MAP_SHARED was given */
         return -ENODEV;
 }
 
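The reworded comment spells out the error translation: a driver hook returning -ENOSYS
means direct mmap simply is not implemented, and since a private copy cannot honour
MAP_SHARED the caller is given -ENODEV instead. A hedged userspace sketch of that
fallback shape, with invented function names:

#include <errno.h>

/* hypothetical driver hook: < 0 is an errno-style failure */
static int driver_direct_mmap(void)
{
        return -ENOSYS;         /* pretend the driver has no direct mmap */
}

static int map_shared_file(void)
{
        int ret = driver_direct_mmap();

        if (ret != -ENOSYS)
                return ret;     /* tried and failed (or succeeded): pass through */

        /* not supported at all, and MAP_SHARED rules out a private copy */
        return -ENODEV;
}

int main(void)
{
        return map_shared_file() == -ENODEV ? 0 : 1;
}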