author		Linus Torvalds <torvalds@linux-foundation.org>	2012-03-22 12:04:48 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-03-22 12:04:48 -0400
commit		95211279c5ad00a317c98221d7e4365e02f20836 (patch)
tree		2ddc8625378d2915b8c96392f3cf6663b705ed55 /kernel
parent		5375871d432ae9fc581014ac117b96aaee3cd0c7 (diff)
parent		12724850e8064f64b6223d26d78c0597c742c65a (diff)
Merge branch 'akpm' (Andrew's patch-bomb)
Merge first batch of patches from Andrew Morton:
 "A few misc things and all the MM queue"

* emailed from Andrew Morton <akpm@linux-foundation.org>: (92 commits)
  memcg: avoid THP split in task migration
  thp: add HPAGE_PMD_* definitions for !CONFIG_TRANSPARENT_HUGEPAGE
  memcg: clean up existing move charge code
  mm/memcontrol.c: remove unnecessary 'break' in mem_cgroup_read()
  mm/memcontrol.c: remove redundant BUG_ON() in mem_cgroup_usage_unregister_event()
  mm/memcontrol.c: s/stealed/stolen/
  memcg: fix performance of mem_cgroup_begin_update_page_stat()
  memcg: remove PCG_FILE_MAPPED
  memcg: use new logic for page stat accounting
  memcg: remove PCG_MOVE_LOCK flag from page_cgroup
  memcg: simplify move_account() check
  memcg: remove EXPORT_SYMBOL(mem_cgroup_update_page_stat)
  memcg: kill dead prev_priority stubs
  memcg: remove PCG_CACHE page_cgroup flag
  memcg: let css_get_next() rely upon rcu_read_lock()
  cgroup: revert ss_id_lock to spinlock
  idr: make idr_get_next() good for rcu_read_lock()
  memcg: remove unnecessary thp check in page stat accounting
  memcg: remove redundant returns
  memcg: enum lru_list lru
  ...
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/cgroup.c	19
-rw-r--r--	kernel/cpuset.c	43
-rw-r--r--	kernel/exit.c	 2
-rw-r--r--	kernel/fork.c	22
4 files changed, 37 insertions, 49 deletions
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 1ece8e20fdb5..f4ea4b6f3cf1 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -4881,9 +4881,9 @@ void free_css_id(struct cgroup_subsys *ss, struct cgroup_subsys_state *css)
 
 	rcu_assign_pointer(id->css, NULL);
 	rcu_assign_pointer(css->id, NULL);
-	write_lock(&ss->id_lock);
+	spin_lock(&ss->id_lock);
 	idr_remove(&ss->idr, id->id);
-	write_unlock(&ss->id_lock);
+	spin_unlock(&ss->id_lock);
 	kfree_rcu(id, rcu_head);
 }
 EXPORT_SYMBOL_GPL(free_css_id);
@@ -4909,10 +4909,10 @@ static struct css_id *get_new_cssid(struct cgroup_subsys *ss, int depth)
 		error = -ENOMEM;
 		goto err_out;
 	}
-	write_lock(&ss->id_lock);
+	spin_lock(&ss->id_lock);
 	/* Don't use 0. allocates an ID of 1-65535 */
 	error = idr_get_new_above(&ss->idr, newid, 1, &myid);
-	write_unlock(&ss->id_lock);
+	spin_unlock(&ss->id_lock);
 
 	/* Returns error when there are no free spaces for new ID.*/
 	if (error) {
@@ -4927,9 +4927,9 @@ static struct css_id *get_new_cssid(struct cgroup_subsys *ss, int depth)
 	return newid;
 remove_idr:
 	error = -ENOSPC;
-	write_lock(&ss->id_lock);
+	spin_lock(&ss->id_lock);
 	idr_remove(&ss->idr, myid);
-	write_unlock(&ss->id_lock);
+	spin_unlock(&ss->id_lock);
 err_out:
 	kfree(newid);
 	return ERR_PTR(error);
@@ -4941,7 +4941,7 @@ static int __init_or_module cgroup_init_idr(struct cgroup_subsys *ss,
 {
 	struct css_id *newid;
 
-	rwlock_init(&ss->id_lock);
+	spin_lock_init(&ss->id_lock);
 	idr_init(&ss->idr);
 
 	newid = get_new_cssid(ss, 0);
@@ -5029,6 +5029,8 @@ css_get_next(struct cgroup_subsys *ss, int id,
 		return NULL;
 
 	BUG_ON(!ss->use_id);
+	WARN_ON_ONCE(!rcu_read_lock_held());
+
 	/* fill start point for scan */
 	tmpid = id;
 	while (1) {
@@ -5036,10 +5038,7 @@ css_get_next(struct cgroup_subsys *ss, int id,
 		 * scan next entry from bitmap(tree), tmpid is updated after
 		 * idr_get_next().
 		 */
-		read_lock(&ss->id_lock);
 		tmp = idr_get_next(&ss->idr, &tmpid);
-		read_unlock(&ss->id_lock);
-
 		if (!tmp)
			break;
 		if (tmp->depth >= depth && tmp->stack[depth] == rootid) {
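
[Note: the two cgroup.c changes above work together: idr_get_next() was made safe under rcu_read_lock() elsewhere in this merge, so css_get_next() can stop taking ss->id_lock on the read side and instead assert that the caller holds the RCU read lock. A minimal sketch of what a conforming caller looks like; walk_subsys_states() is a hypothetical illustration, only css_get_next() and the RCU primitives are from the actual source.]

	/* Sketch: iterate the css objects under a root, as css_get_next()
	 * now expects -- the caller, not the iterator, takes rcu_read_lock().
	 * walk_subsys_states() is a hypothetical helper, not kernel code. */
	static void walk_subsys_states(struct cgroup_subsys *ss,
				       struct cgroup_subsys_state *root)
	{
		struct cgroup_subsys_state *css;
		int id = 1, found;	/* css ids start at 1; 0 is unused */

		rcu_read_lock();	/* satisfies the new WARN_ON_ONCE() */
		while ((css = css_get_next(ss, id, root, &found)) != NULL) {
			/* use css here; it is only stable inside this
			 * RCU read-side critical section */
			id = found + 1;	/* resume the scan past the hit */
		}
		rcu_read_unlock();
	}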
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 5d575836dba6..1010cc61931f 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -964,7 +964,6 @@ static void cpuset_change_task_nodemask(struct task_struct *tsk,
 {
 	bool need_loop;
 
-repeat:
 	/*
 	 * Allow tasks that have access to memory reserves because they have
 	 * been OOM killed to get memory anywhere.
@@ -983,45 +982,19 @@ repeat:
 	 */
 	need_loop = task_has_mempolicy(tsk) ||
 			!nodes_intersects(*newmems, tsk->mems_allowed);
-	nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
-	mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1);
 
-	/*
-	 * ensure checking ->mems_allowed_change_disable after setting all new
-	 * allowed nodes.
-	 *
-	 * the read-side task can see an nodemask with new allowed nodes and
-	 * old allowed nodes. and if it allocates page when cpuset clears newly
-	 * disallowed ones continuous, it can see the new allowed bits.
-	 *
-	 * And if setting all new allowed nodes is after the checking, setting
-	 * all new allowed nodes and clearing newly disallowed ones will be done
-	 * continuous, and the read-side task may find no node to alloc page.
-	 */
-	smp_mb();
+	if (need_loop)
+		write_seqcount_begin(&tsk->mems_allowed_seq);
 
-	/*
-	 * Allocation of memory is very fast, we needn't sleep when waiting
-	 * for the read-side.
-	 */
-	while (need_loop && ACCESS_ONCE(tsk->mems_allowed_change_disable)) {
-		task_unlock(tsk);
-		if (!task_curr(tsk))
-			yield();
-		goto repeat;
-	}
-
-	/*
-	 * ensure checking ->mems_allowed_change_disable before clearing all new
-	 * disallowed nodes.
-	 *
-	 * if clearing newly disallowed bits before the checking, the read-side
-	 * task may find no node to alloc page.
-	 */
-	smp_mb();
+	nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
+	mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1);
 
 	mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP2);
 	tsk->mems_allowed = *newmems;
+
+	if (need_loop)
+		write_seqcount_end(&tsk->mems_allowed_seq);
+
 	task_unlock(tsk);
 }
 
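[Note: the write_seqcount_begin()/write_seqcount_end() pair above replaces the old barrier-and-yield handshake: the writer bumps tsk->mems_allowed_seq around the nodemask update, and readers retry if they raced with an update instead of blocking the writer. A minimal sketch of the matching read side, modeled on the get_mems_allowed()/put_mems_allowed() helpers this series adds to include/linux/cpuset.h; the names follow the series, but treat the exact bodies as an approximation.]

	/* Read side of the seqcount protocol (approximation of the
	 * cpuset.h helpers added by this series). */
	static inline unsigned int get_mems_allowed(void)
	{
		return read_seqcount_begin(&current->mems_allowed_seq);
	}

	static inline bool put_mems_allowed(unsigned int seq)
	{
		/* true if mems_allowed did not change while we used it */
		return !read_seqcount_retry(&current->mems_allowed_seq, seq);
	}
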
diff --git a/kernel/exit.c b/kernel/exit.c
index 7ad335c3045a..16b07bfac224 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -935,7 +935,7 @@ void do_exit(long code)
 	acct_update_integrals(tsk);
 	/* sync mm's RSS info before statistics gathering */
 	if (tsk->mm)
-		sync_mm_rss(tsk, tsk->mm);
+		sync_mm_rss(tsk->mm);
 	group_dead = atomic_dec_and_test(&tsk->signal->live);
 	if (group_dead) {
 		hrtimer_cancel(&tsk->signal->real_timer);
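
[Note: sync_mm_rss() loses its task_struct argument in this series because it only ever operated on current; the one-liner above just tracks the new signature. Roughly, the function folds the calling task's cached per-counter RSS deltas back into the shared mm->rss_stat counters. A sketch of the mm/memory.c implementation, simplified and assuming split per-task RSS accounting is enabled.]

	/* Sketch: flush current's cached RSS deltas into the shared mm
	 * counters (simplified from mm/memory.c). */
	void sync_mm_rss(struct mm_struct *mm)
	{
		int i;

		for (i = 0; i < NR_MM_COUNTERS; i++) {
			if (current->rss_stat.count[i]) {
				add_mm_counter(mm, i, current->rss_stat.count[i]);
				current->rss_stat.count[i] = 0;
			}
		}
		current->rss_stat.events = 0;
	}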
diff --git a/kernel/fork.c b/kernel/fork.c
index 26a7138bb849..37674ec55cde 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -512,6 +512,23 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p)
 	return NULL;
 }
 
+static void check_mm(struct mm_struct *mm)
+{
+	int i;
+
+	for (i = 0; i < NR_MM_COUNTERS; i++) {
+		long x = atomic_long_read(&mm->rss_stat.count[i]);
+
+		if (unlikely(x))
+			printk(KERN_ALERT "BUG: Bad rss-counter state "
+					  "mm:%p idx:%d val:%ld\n", mm, i, x);
+	}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	VM_BUG_ON(mm->pmd_huge_pte);
+#endif
+}
+
 /*
  * Allocate and initialize an mm_struct.
  */
@@ -539,9 +556,7 @@ void __mmdrop(struct mm_struct *mm)
 	mm_free_pgd(mm);
 	destroy_context(mm);
 	mmu_notifier_mm_destroy(mm);
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-	VM_BUG_ON(mm->pmd_huge_pte);
-#endif
+	check_mm(mm);
 	free_mm(mm);
 }
 EXPORT_SYMBOL_GPL(__mmdrop);
@@ -1223,6 +1238,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 #ifdef CONFIG_CPUSETS
 	p->cpuset_mem_spread_rotor = NUMA_NO_NODE;
 	p->cpuset_slab_spread_rotor = NUMA_NO_NODE;
+	seqcount_init(&p->mems_allowed_seq);
 #endif
 #ifdef CONFIG_TRACE_IRQFLAGS
 	p->irq_events = 0;
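
[Note: the seqcount_init() hunk above completes the protocol: every new task starts with an initialized mems_allowed_seq, the cpuset.c writer bumps it, and allocators read it. Elsewhere in this merge the page allocator's cpuset handling becomes a retry loop along these lines; this is a sketch modeled on that change, where alloc_one_page() is a hypothetical stand-in for the real freelist walk.]

	/* Sketch of the allocator-side retry pattern this seqcount enables
	 * (modeled on the page allocator change in this merge;
	 * alloc_one_page() is a hypothetical stand-in). */
	static struct page *alloc_with_mems_cookie(gfp_t gfp_mask,
						   unsigned int order)
	{
		struct page *page;
		unsigned int cpuset_mems_cookie;

	retry_cpuset:
		cpuset_mems_cookie = get_mems_allowed();

		page = alloc_one_page(gfp_mask, order);	/* hypothetical */

		/*
		 * A failed allocation may only mean mems_allowed changed
		 * under us mid-scan; if the cookie is stale and we got no
		 * page, the failure may be spurious, so retry.
		 */
		if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
			goto retry_cpuset;

		return page;
	}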