Diffstat (limited to 'mm')
-rw-r--r--	mm/ksm.c         |  2
-rw-r--r--	mm/memcontrol.c  | 50
-rw-r--r--	mm/memory.c      |  1
-rw-r--r--	mm/mempolicy.c   | 50
-rw-r--r--	mm/mmu_context.c |  1
-rw-r--r--	mm/nommu.c       | 13
-rw-r--r--	mm/page_cgroup.c | 20
7 files changed, 83 insertions, 54 deletions
diff --git a/mm/ksm.c b/mm/ksm.c
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -751,7 +751,7 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
	 * page
	 */
	if (page_mapcount(page) + 1 + swapped != page_count(page)) {
-		set_pte_at_notify(mm, addr, ptep, entry);
+		set_pte_at(mm, addr, ptep, entry);
		goto out_unlock;
	}
	entry = pte_wrprotect(entry);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 7973b5221fb8..9ed760dc7448 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3691,8 +3691,10 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
	else
		mem = vmalloc(size);

-	if (mem)
-		memset(mem, 0, size);
+	if (!mem)
+		return NULL;
+
+	memset(mem, 0, size);
	mem->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
	if (!mem->stat) {
		if (size < PAGE_SIZE)
@@ -3946,28 +3948,6 @@ one_by_one:
	}
	return ret;
 }
-#else	/* !CONFIG_MMU */
-static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
-				struct cgroup *cgroup,
-				struct task_struct *p,
-				bool threadgroup)
-{
-	return 0;
-}
-static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
-				struct cgroup *cgroup,
-				struct task_struct *p,
-				bool threadgroup)
-{
-}
-static void mem_cgroup_move_task(struct cgroup_subsys *ss,
-				struct cgroup *cont,
-				struct cgroup *old_cont,
-				struct task_struct *p,
-				bool threadgroup)
-{
-}
-#endif

 /**
  * is_target_pte_for_mc - check a pte whether it is valid for move charge
@@ -4330,6 +4310,28 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss,
	}
	mem_cgroup_clear_mc();
 }
+#else	/* !CONFIG_MMU */
+static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
+				struct cgroup *cgroup,
+				struct task_struct *p,
+				bool threadgroup)
+{
+	return 0;
+}
+static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
+				struct cgroup *cgroup,
+				struct task_struct *p,
+				bool threadgroup)
+{
+}
+static void mem_cgroup_move_task(struct cgroup_subsys *ss,
+				struct cgroup *cont,
+				struct cgroup *old_cont,
+				struct task_struct *p,
+				bool threadgroup)
+{
+}
+#endif

 struct cgroup_subsys mem_cgroup_subsys = {
	.name = "memory",
diff --git a/mm/memory.c b/mm/memory.c
index 5b7f2002e54b..bc9ba5a1f5b9 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -130,6 +130,7 @@ void __sync_task_rss_stat(struct task_struct *task, struct mm_struct *mm)

	for (i = 0; i < NR_MM_COUNTERS; i++) {
		if (task->rss_stat.count[i]) {
+			BUG_ON(!mm);
			add_mm_counter(mm, i, task->rss_stat.count[i]);
			task->rss_stat.count[i] = 0;
		}
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 643f66e10187..8034abd3a135 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -806,9 +806,13 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,

	err = 0;
	if (nmask) {
-		task_lock(current);
-		get_policy_nodemask(pol, nmask);
-		task_unlock(current);
+		if (mpol_store_user_nodemask(pol)) {
+			*nmask = pol->w.user_nodemask;
+		} else {
+			task_lock(current);
+			get_policy_nodemask(pol, nmask);
+			task_unlock(current);
+		}
	}

 out:
@@ -2195,8 +2199,8 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
			char *rest = nodelist;
			while (isdigit(*rest))
				rest++;
-			if (!*rest)
-				err = 0;
+			if (*rest)
+				goto out;
		}
		break;
	case MPOL_INTERLEAVE:
@@ -2205,7 +2209,6 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
		 */
		if (!nodelist)
			nodes = node_states[N_HIGH_MEMORY];
-		err = 0;
		break;
	case MPOL_LOCAL:
		/*
@@ -2215,11 +2218,19 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
			goto out;
		mode = MPOL_PREFERRED;
		break;
-
-	/*
-	 * case MPOL_BIND:    mpol_new() enforces non-empty nodemask.
-	 * case MPOL_DEFAULT: mpol_new() enforces empty nodemask, ignores flags.
-	 */
+	case MPOL_DEFAULT:
+		/*
+		 * Insist on a empty nodelist
+		 */
+		if (!nodelist)
+			err = 0;
+		goto out;
+	case MPOL_BIND:
+		/*
+		 * Insist on a nodelist
+		 */
+		if (!nodelist)
+			goto out;
	}

	mode_flags = 0;
@@ -2233,13 +2244,14 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
		else if (!strcmp(flags, "relative"))
			mode_flags |= MPOL_F_RELATIVE_NODES;
		else
-			err = 1;
+			goto out;
	}

	new = mpol_new(mode, mode_flags, &nodes);
	if (IS_ERR(new))
-		err = 1;
-	else {
+		goto out;
+
+	{
		int ret;
		NODEMASK_SCRATCH(scratch);
		if (scratch) {
@@ -2250,13 +2262,15 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
			ret = -ENOMEM;
		NODEMASK_SCRATCH_FREE(scratch);
		if (ret) {
-			err = 1;
			mpol_put(new);
-		} else if (no_context) {
-			/* save for contextualization */
-			new->w.user_nodemask = nodes;
+			goto out;
		}
	}
+	err = 0;
+	if (no_context) {
+		/* save for contextualization */
+		new->w.user_nodemask = nodes;
+	}

 out:
	/* Restore string for error message */
diff --git a/mm/mmu_context.c b/mm/mmu_context.c
index 0777654147c9..9e82e937000e 100644
--- a/mm/mmu_context.c
+++ b/mm/mmu_context.c
@@ -53,6 +53,7 @@ void unuse_mm(struct mm_struct *mm)
	struct task_struct *tsk = current;

	task_lock(tsk);
+	sync_mm_rss(tsk, mm);
	tsk->mm = NULL;
	/* active_mm is still 'mm' */
	enter_lazy_tlb(mm, tsk);
diff --git a/mm/nommu.c b/mm/nommu.c
index 605ace8982a8..63fa17d121f0 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -146,7 +146,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		     (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);

	for (i = 0; i < nr_pages; i++) {
-		vma = find_extend_vma(mm, start);
+		vma = find_vma(mm, start);
		if (!vma)
			goto finish_or_fault;

@@ -162,7 +162,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		}
		if (vmas)
			vmas[i] = vma;
-		start += PAGE_SIZE;
+		start = (start + PAGE_SIZE) & PAGE_MASK;
	}

	return i;
@@ -764,7 +764,7 @@ EXPORT_SYMBOL(find_vma);
 */
 struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
 {
-	return find_vma(mm, addr & PAGE_MASK);
+	return find_vma(mm, addr);
 }

 /*
@@ -1040,10 +1040,9 @@ static int do_mmap_shared_file(struct vm_area_struct *vma)
	if (ret != -ENOSYS)
		return ret;

-	/* getting an ENOSYS error indicates that direct mmap isn't
-	 * possible (as opposed to tried but failed) so we'll fall
-	 * through to making a private copy of the data and mapping
-	 * that if we can */
+	/* getting -ENOSYS indicates that direct mmap isn't possible (as
+	 * opposed to tried but failed) so we can only give a suitable error as
+	 * it's not possible to make a private copy if MAP_SHARED was given */
	return -ENODEV;
 }

diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index 3dd88539a0e6..6c0081441a32 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -284,6 +284,7 @@ static DEFINE_MUTEX(swap_cgroup_mutex);
 struct swap_cgroup_ctrl {
	struct page **map;
	unsigned long length;
+	spinlock_t	lock;
 };

 struct swap_cgroup_ctrl swap_cgroup_ctrl[MAX_SWAPFILES];
@@ -353,16 +354,22 @@ unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
	struct swap_cgroup_ctrl *ctrl;
	struct page *mappage;
	struct swap_cgroup *sc;
+	unsigned long flags;
+	unsigned short retval;

	ctrl = &swap_cgroup_ctrl[type];

	mappage = ctrl->map[idx];
	sc = page_address(mappage);
	sc += pos;
-	if (cmpxchg(&sc->id, old, new) == old)
-		return old;
+	spin_lock_irqsave(&ctrl->lock, flags);
+	retval = sc->id;
+	if (retval == old)
+		sc->id = new;
	else
-		return 0;
+		retval = 0;
+	spin_unlock_irqrestore(&ctrl->lock, flags);
+	return retval;
 }

 /**
@@ -383,13 +390,17 @@ unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id)
	struct page *mappage;
	struct swap_cgroup *sc;
	unsigned short old;
+	unsigned long flags;

	ctrl = &swap_cgroup_ctrl[type];

	mappage = ctrl->map[idx];
	sc = page_address(mappage);
	sc += pos;
-	old = xchg(&sc->id, id);
+	spin_lock_irqsave(&ctrl->lock, flags);
+	old = sc->id;
+	sc->id = id;
+	spin_unlock_irqrestore(&ctrl->lock, flags);

	return old;
 }
@@ -441,6 +452,7 @@ int swap_cgroup_swapon(int type, unsigned long max_pages)
	mutex_lock(&swap_cgroup_mutex);
	ctrl->length = length;
	ctrl->map = array;
+	spin_lock_init(&ctrl->lock);
	if (swap_cgroup_prepare(type)) {
		/* memory shortage */
		ctrl->map = NULL;