author		Tejun Heo <tj@kernel.org>	2013-04-01 20:08:13 -0400
committer	Tejun Heo <tj@kernel.org>	2013-04-01 21:45:36 -0400
commit		229641a6f1f09e27a1f12fba38980f33f4c92975 (patch)
tree		234a6f8aea0910de3242af0bbe6d7494fcf81847 /mm
parent		d55262c4d164759a8debe772da6c9b16059dec47 (diff)
parent		07961ac7c0ee8b546658717034fe692fd12eefa9 (diff)
Merge tag 'v3.9-rc5' into wq/for-3.10
Writeback conversion to workqueue will be based on top of the
wq/for-3.10 branch to take advantage of custom attrs and NUMA support
for unbound workqueues.  Mainline currently contains two commits which
result in non-trivial merge conflicts with wq/for-3.10 and, because
block/for-3.10/core is based on v3.9-rc3 which contains one of the
conflicting commits, we need a pre-merge-window merge anyway.  Let's
pull v3.9-rc5 into wq/for-3.10 so that the block tree doesn't suffer
from workqueue merge conflicts.

The two conflicts and their resolutions:

* e68035fb65 ("workqueue: convert to idr_alloc()") in mainline changes
  worker_pool_assign_id() to use idr_alloc() instead of the old idr
  interface.  worker_pool_assign_id() goes through multiple locking
  changes in wq/for-3.10, causing the following conflict.

    static int worker_pool_assign_id(struct worker_pool *pool)
    {
            int ret;

    <<<<<<< HEAD
            lockdep_assert_held(&wq_pool_mutex);

            do {
                    if (!idr_pre_get(&worker_pool_idr, GFP_KERNEL))
                            return -ENOMEM;
                    ret = idr_get_new(&worker_pool_idr, pool, &pool->id);
            } while (ret == -EAGAIN);
    =======
            mutex_lock(&worker_pool_idr_mutex);
            ret = idr_alloc(&worker_pool_idr, pool, 0, 0, GFP_KERNEL);
            if (ret >= 0)
                    pool->id = ret;
            mutex_unlock(&worker_pool_idr_mutex);
    >>>>>>> c67bf5361e7e66a0ff1f4caf95f89347d55dfb89

            return ret < 0 ? ret : 0;
    }

  We want the locking from the former and the idr_alloc() usage from
  the latter, which can be combined into the following.

    static int worker_pool_assign_id(struct worker_pool *pool)
    {
            int ret;

            lockdep_assert_held(&wq_pool_mutex);

            ret = idr_alloc(&worker_pool_idr, pool, 0, 0, GFP_KERNEL);
            if (ret >= 0) {
                    pool->id = ret;
                    return 0;
            }
            return ret;
    }

* eb2834285c ("workqueue: fix possible pool stall bug in
  wq_unbind_fn()") updated wq_unbind_fn() such that it has a single
  larger for_each_std_worker_pool() loop instead of two separate loops
  with a schedule() call in between.  wq/for-3.10 renamed
  pool->assoc_mutex to pool->manager_mutex, causing the following
  conflict (earlier function body and comments omitted for brevity).

    static void wq_unbind_fn(struct work_struct *work)
    {
    ...
                    spin_unlock_irq(&pool->lock);
    <<<<<<< HEAD
                    mutex_unlock(&pool->manager_mutex);
            }
    =======
                    mutex_unlock(&pool->assoc_mutex);
    >>>>>>> c67bf5361e7e66a0ff1f4caf95f89347d55dfb89

            schedule();

    <<<<<<< HEAD
            for_each_cpu_worker_pool(pool, cpu)
    =======
    >>>>>>> c67bf5361e7e66a0ff1f4caf95f89347d55dfb89
                    atomic_set(&pool->nr_running, 0);

                    spin_lock_irq(&pool->lock);
                    wake_up_worker(pool);
                    spin_unlock_irq(&pool->lock);
            }
    }

  The resolution is mostly trivial.  We want the control flow of the
  latter with the rename from the former.

    static void wq_unbind_fn(struct work_struct *work)
    {
    ...
                    spin_unlock_irq(&pool->lock);
                    mutex_unlock(&pool->manager_mutex);

                    schedule();

                    atomic_set(&pool->nr_running, 0);

                    spin_lock_irq(&pool->lock);
                    wake_up_worker(pool);
                    spin_unlock_irq(&pool->lock);
            }
    }

Signed-off-by: Tejun Heo <tj@kernel.org>
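For readers unfamiliar with the idr change behind the first conflict:
v3.9 replaced the old two-step preload-and-allocate idr interface with
the single-call idr_alloc().  A minimal sketch of the two styles
(my_idr and ptr are illustrative placeholders, not names from this
tree):

    /* Old interface: preload, then allocate, retrying on races. */
    int id, ret;

    do {
            if (!idr_pre_get(&my_idr, GFP_KERNEL))
                    return -ENOMEM;
            ret = idr_get_new(&my_idr, ptr, &id);
    } while (ret == -EAGAIN);

    /*
     * New interface: one call that returns the new id or a negative
     * errno; start/end of 0, 0 means "any available id".
     */
    id = idr_alloc(&my_idr, ptr, 0, 0, GFP_KERNEL);
    if (id < 0)
            return id;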
Diffstat (limited to 'mm')
-rw-r--r--	mm/Kconfig		 8
-rw-r--r--	mm/fremap.c		17
-rw-r--r--	mm/hugetlb.c		 8
-rw-r--r--	mm/ksm.c		 2
-rw-r--r--	mm/memcontrol.c		 8
-rw-r--r--	mm/memory_hotplug.c	 8
-rw-r--r--	mm/mempolicy.c		 4
-rw-r--r--	mm/mlock.c		11
-rw-r--r--	mm/mmap.c		 4
-rw-r--r--	mm/process_vm_access.c	 8
10 files changed, 40 insertions, 38 deletions
diff --git a/mm/Kconfig b/mm/Kconfig
index ae55c1e04d10..3bea74f1ccfe 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -286,8 +286,12 @@ config NR_QUICK
 	default "1"
 
 config VIRT_TO_BUS
-	def_bool y
-	depends on HAVE_VIRT_TO_BUS
+	bool
+	help
+	  An architecture should select this if it implements the
+	  deprecated interface virt_to_bus().  All new architectures
+	  should probably not select this.
+
 
 config MMU_NOTIFIER
 	bool
diff --git a/mm/fremap.c b/mm/fremap.c
index 0cd4c11488ed..87da3590c61e 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -129,7 +129,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
 	struct vm_area_struct *vma;
 	int err = -EINVAL;
 	int has_write_lock = 0;
-	vm_flags_t vm_flags;
+	vm_flags_t vm_flags = 0;
 
 	if (prot)
 		return err;
@@ -204,10 +204,8 @@ get_write_lock:
 		unsigned long addr;
 		struct file *file = get_file(vma->vm_file);
 
-		vm_flags = vma->vm_flags;
-		if (!(flags & MAP_NONBLOCK))
-			vm_flags |= VM_POPULATE;
-		addr = mmap_region(file, start, size, vm_flags, pgoff);
+		addr = mmap_region(file, start, size,
+				vma->vm_flags, pgoff);
 		fput(file);
 		if (IS_ERR_VALUE(addr)) {
 			err = addr;
@@ -226,12 +224,6 @@ get_write_lock:
 		mutex_unlock(&mapping->i_mmap_mutex);
 	}
 
-	if (!(flags & MAP_NONBLOCK) && !(vma->vm_flags & VM_POPULATE)) {
-		if (!has_write_lock)
-			goto get_write_lock;
-		vma->vm_flags |= VM_POPULATE;
-	}
-
 	if (vma->vm_flags & VM_LOCKED) {
 		/*
 		 * drop PG_Mlocked flag for over-mapped range
@@ -254,7 +246,8 @@ get_write_lock:
 	 */
 
 out:
-	vm_flags = vma->vm_flags;
+	if (vma)
+		vm_flags = vma->vm_flags;
 	if (likely(!has_write_lock))
 		up_read(&mm->mmap_sem);
 	else
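Aside from dropping the VM_POPULATE logic, the two small fremap.c
changes close the same hole: if the vma lookup fails, control reaches
the out: label with vma == NULL, so vm_flags is now zero-initialized at
declaration and only re-read through vma when vma is non-NULL.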
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 0a0be33bb199..ca9a7c6d7e97 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2124,8 +2124,12 @@ int hugetlb_report_node_meminfo(int nid, char *buf)
 /* Return the number pages of memory we physically have, in PAGE_SIZE units. */
 unsigned long hugetlb_total_pages(void)
 {
-	struct hstate *h = &default_hstate;
-	return h->nr_huge_pages * pages_per_huge_page(h);
+	struct hstate *h;
+	unsigned long nr_total_pages = 0;
+
+	for_each_hstate(h)
+		nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
+	return nr_total_pages;
 }
 
 static int hugetlb_acct_memory(struct hstate *h, long delta)
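The old hugetlb_total_pages() counted only default_hstate,
undercounting whenever more than one huge page size is configured.  As
a worked example with 4 KiB base pages: 100 two-megabyte pages
contribute 100 * 512 = 51200 PAGE_SIZE units and 2 one-gigabyte pages
contribute 2 * 262144 = 524288, so the fixed loop returns 575488 where
the old code reported only 51200.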
diff --git a/mm/ksm.c b/mm/ksm.c
index 85bfd4c16346..b6afe0c440d8 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -489,7 +489,7 @@ out:		page = NULL;
  */
 static inline int get_kpfn_nid(unsigned long kpfn)
 {
-	return ksm_merge_across_nodes ? 0 : pfn_to_nid(kpfn);
+	return ksm_merge_across_nodes ? 0 : NUMA(pfn_to_nid(kpfn));
 }
 
 static void remove_node_from_stable_tree(struct stable_node *stable_node)
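NUMA() here wraps pfn_to_nid() so that it compiles away on !CONFIG_NUMA
kernels, where pfn_to_nid() broke builds on architectures such as m68k.
A sketch of the idea, assuming the macro is defined near the top of
mm/ksm.c:

    #ifdef CONFIG_NUMA
    #define NUMA(x)	(x)
    #else
    #define NUMA(x)	(0)
    #endif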
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 53b8201b31eb..2b552224f5cf 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3012,6 +3012,8 @@ void memcg_update_array_size(int num)
 		memcg_limited_groups_array_size = memcg_caches_array_size(num);
 }
 
+static void kmem_cache_destroy_work_func(struct work_struct *w);
+
 int memcg_update_cache_size(struct kmem_cache *s, int num_groups)
 {
 	struct memcg_cache_params *cur_params = s->memcg_params;
@@ -3031,6 +3033,8 @@ int memcg_update_cache_size(struct kmem_cache *s, int num_groups)
 			return -ENOMEM;
 		}
 
+		INIT_WORK(&s->memcg_params->destroy,
+				kmem_cache_destroy_work_func);
 		s->memcg_params->is_root_cache = true;
 
 		/*
@@ -3078,6 +3082,8 @@ int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s,
 	if (!s->memcg_params)
 		return -ENOMEM;
 
+	INIT_WORK(&s->memcg_params->destroy,
+			kmem_cache_destroy_work_func);
 	if (memcg) {
 		s->memcg_params->memcg = memcg;
 		s->memcg_params->root_cache = root_cache;
@@ -3358,8 +3364,6 @@ static void mem_cgroup_destroy_all_caches(struct mem_cgroup *memcg)
 	list_for_each_entry(params, &memcg->memcg_slab_caches, list) {
 		cachep = memcg_params_to_cache(params);
 		cachep->memcg_params->dead = true;
-		INIT_WORK(&cachep->memcg_params->destroy,
-				kmem_cache_destroy_work_func);
 		schedule_work(&cachep->memcg_params->destroy);
 	}
 	mutex_unlock(&memcg->slab_caches_mutex);
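The memcontrol.c hunks move INIT_WORK() for the destroy work item from
destruction time to the two places where memcg_params is created.
Re-initializing a work_struct that may already be queued is racy, so
the safe pattern is initialize-once, schedule-many.  A hedged sketch of
that pattern (names illustrative, not from this file):

    struct obj {
            struct work_struct destroy;
    };

    static void destroy_fn(struct work_struct *w);

    static void obj_create(struct obj *o)
    {
            INIT_WORK(&o->destroy, destroy_fn);	/* once, at setup */
    }

    static void obj_kill(struct obj *o)
    {
            schedule_work(&o->destroy);	/* no-op if already queued */
    }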
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index b81a367b9f39..ee3765760818 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1779,7 +1779,11 @@ void try_offline_node(int nid)
 	for (i = 0; i < MAX_NR_ZONES; i++) {
 		struct zone *zone = pgdat->node_zones + i;
 
-		if (zone->wait_table)
+		/*
+		 * wait_table may be allocated from boot memory,
+		 * here only free if it's allocated by vmalloc.
+		 */
+		if (is_vmalloc_addr(zone->wait_table))
 			vfree(zone->wait_table);
 	}
 
@@ -1801,7 +1805,7 @@ int __ref remove_memory(int nid, u64 start, u64 size)
 	int retry = 1;
 
 	start_pfn = PFN_DOWN(start);
-	end_pfn = start_pfn + PFN_DOWN(size);
+	end_pfn = PFN_UP(start + size - 1);
 
 	/*
 	 * When CONFIG_MEMCG is on, one memory block may be used by other
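On the remove_memory() hunk: PFN_DOWN(x) is x >> PAGE_SHIFT while
PFN_UP(x) rounds up, so the new formula covers ranges whose start or
size is not page-aligned.  A worked example with 4 KiB pages,
start = 0x1000 and size = 0x1800: the old end_pfn =
PFN_DOWN(0x1000) + PFN_DOWN(0x1800) = 1 + 1 = 2 stops short of the last
touched frame, while the new end_pfn = PFN_UP(0x1000 + 0x1800 - 1) =
PFN_UP(0x27ff) = 3 includes it.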
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 31d26637b658..74310017296e 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2390,9 +2390,9 @@ restart:
 
 		*mpol_new = *n->policy;
 		atomic_set(&mpol_new->refcnt, 1);
-		sp_node_init(n_new, n->end, end, mpol_new);
-		sp_insert(sp, n_new);
+		sp_node_init(n_new, end, n->end, mpol_new);
 		n->end = start;
+		sp_insert(sp, n_new);
 		n_new = NULL;
 		mpol_new = NULL;
 		break;
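Two fixes in one hunk: sp_node_init() takes its interval in
(start, end) order, so the node split off for [end, n->end) was
previously built with its bounds reversed, and sp_insert() is now
deferred until after n->end has been truncated to start, so the shared
policy tree never briefly holds two overlapping ranges.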
diff --git a/mm/mlock.c b/mm/mlock.c
index 1c5e33fce639..79b7cf7d1bca 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -358,7 +358,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
 
 		newflags = vma->vm_flags & ~VM_LOCKED;
 		if (on)
-			newflags |= VM_LOCKED | VM_POPULATE;
+			newflags |= VM_LOCKED;
 
 		tmp = vma->vm_end;
 		if (tmp > end)
@@ -418,8 +418,7 @@ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
 		 * range with the first VMA. Also, skip undesirable VMA types.
 		 */
 		nend = min(end, vma->vm_end);
-		if ((vma->vm_flags & (VM_IO | VM_PFNMAP | VM_POPULATE)) !=
-				VM_POPULATE)
+		if (vma->vm_flags & (VM_IO | VM_PFNMAP))
 			continue;
 		if (nstart < vma->vm_start)
 			nstart = vma->vm_start;
@@ -492,9 +491,9 @@ static int do_mlockall(int flags)
 	struct vm_area_struct * vma, * prev = NULL;
 
 	if (flags & MCL_FUTURE)
-		current->mm->def_flags |= VM_LOCKED | VM_POPULATE;
+		current->mm->def_flags |= VM_LOCKED;
 	else
-		current->mm->def_flags &= ~(VM_LOCKED | VM_POPULATE);
+		current->mm->def_flags &= ~VM_LOCKED;
 	if (flags == MCL_FUTURE)
 		goto out;
 
@@ -503,7 +502,7 @@ static int do_mlockall(int flags)
 
 		newflags = vma->vm_flags & ~VM_LOCKED;
 		if (flags & MCL_CURRENT)
-			newflags |= VM_LOCKED | VM_POPULATE;
+			newflags |= VM_LOCKED;
 
 		/* Ignore errors */
 		mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
diff --git a/mm/mmap.c b/mm/mmap.c
index 2664a47cec93..6466699b16cb 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1306,7 +1306,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
 	}
 
 	addr = mmap_region(file, addr, len, vm_flags, pgoff);
-	if (!IS_ERR_VALUE(addr) && (vm_flags & VM_POPULATE))
+	if (!IS_ERR_VALUE(addr) &&
+	    ((vm_flags & VM_LOCKED) ||
+	     (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE))
 		*populate = len;
 	return addr;
 }
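With VM_POPULATE gone, do_mmap_pgoff() derives the prefault decision
directly from its inputs: either the mapping is locked, or the caller
passed MAP_POPULATE without MAP_NONBLOCK.  A sketch of the predicate
pulled out as a standalone helper (hypothetical name, not in mm/mmap.c):

    /* Should do_mmap_pgoff() request prefaulting of the new mapping? */
    static bool want_populate(vm_flags_t vm_flags, unsigned long flags)
    {
            if (vm_flags & VM_LOCKED)
                    return true;
            return (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE;
    }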
diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
index 926b46649749..fd26d0433509 100644
--- a/mm/process_vm_access.c
+++ b/mm/process_vm_access.c
@@ -429,12 +429,6 @@ compat_process_vm_rw(compat_pid_t pid,
 	if (flags != 0)
 		return -EINVAL;
 
-	if (!access_ok(VERIFY_READ, lvec, liovcnt * sizeof(*lvec)))
-		goto out;
-
-	if (!access_ok(VERIFY_READ, rvec, riovcnt * sizeof(*rvec)))
-		goto out;
-
 	if (vm_write)
 		rc = compat_rw_copy_check_uvector(WRITE, lvec, liovcnt,
 						  UIO_FASTIOV, iovstack_l,
@@ -459,8 +453,6 @@ free_iovecs:
 		kfree(iov_r);
	if (iov_l != iovstack_l)
 		kfree(iov_l);
-
-out:
 	return rc;
 }
 
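The dropped access_ok() checks appear redundant:
compat_rw_copy_check_uvector() copies and validates the user iovec
arrays itself and fails safely on a bad pointer.  With both early
goto out paths gone, the out: label has no remaining users, hence the
second hunk.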