author    Ingo Molnar <mingo@elte.hu>    2009-11-09 22:10:31 -0500
committer Ingo Molnar <mingo@elte.hu>    2009-11-09 22:10:35 -0500
commit    7e1a2766e67a529f62c8cfba0a47d63fc4f7fa8a (patch)
tree      197950369a773afdf04af9bdfc4a2ce1d2b5d3af
parent    c5e0cb3ddc5f14cedcfc50c0fb3b5fc6b56576da (diff)
parent    83f5b01ffbbaea6f97c9a79d21e240dbfb69f2f1 (diff)

Merge branch 'core/urgent' into core/rcu

Merge reason: Pick up RCU fixlet to base further commits on.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--  kernel/futex.c            9
-rw-r--r--  kernel/rcutree.c         31
-rw-r--r--  kernel/rcutree.h         13
-rw-r--r--  kernel/rcutree_plugin.h  25
-rw-r--r--  kernel/user.c             2

5 files changed, 53 insertions, 27 deletions
diff --git a/kernel/futex.c b/kernel/futex.c
index 06938e560ac9..fb65e822fc41 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1029,7 +1029,6 @@ static inline
 void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
                            struct futex_hash_bucket *hb)
 {
-        drop_futex_key_refs(&q->key);
         get_futex_key_refs(key);
         q->key = *key;
 
@@ -1227,6 +1226,7 @@ retry_private:
          */
         if (ret == 1) {
                 WARN_ON(pi_state);
+                drop_count++;
                 task_count++;
                 ret = get_futex_value_locked(&curval2, uaddr2);
                 if (!ret)
@@ -1305,6 +1305,7 @@ retry_private:
                 if (ret == 1) {
                         /* We got the lock. */
                         requeue_pi_wake_futex(this, &key2, hb2);
+                        drop_count++;
                         continue;
                 } else if (ret) {
                         /* -EDEADLK */
@@ -2126,7 +2127,7 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
         plist_del(&q->list, &q->list.plist);
 
         /* Handle spurious wakeups gracefully */
-        ret = -EAGAIN;
+        ret = -EWOULDBLOCK;
         if (timeout && !timeout->task)
                 ret = -ETIMEDOUT;
         else if (signal_pending(current))
@@ -2207,7 +2208,6 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared,
         debug_rt_mutex_init_waiter(&rt_waiter);
         rt_waiter.task = NULL;
 
-retry:
         key2 = FUTEX_KEY_INIT;
         ret = get_futex_key(uaddr2, fshared, &key2, VERIFY_WRITE);
         if (unlikely(ret != 0))
@@ -2302,9 +2302,6 @@ out_put_keys:
 out_key2:
         put_futex_key(fshared, &key2);
 
-        /* Spurious wakeup ? */
-        if (ret == -EAGAIN)
-                goto retry;
 out:
         if (to) {
                 hrtimer_cancel(&to->timer);
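
The futex.c change, in short: requeue_pi_wake_futex() no longer drops the old key reference inline; futex_requeue() instead tallies each requeue in drop_count and drops the references in bulk after the requeue loop, once the hash-bucket locks are released, and the in-kernel spurious-wakeup retry (retry: / goto retry) is replaced by returning -EWOULDBLOCK to the caller. Below is a minimal userspace sketch of the deferred-drop idea; every type and helper in it is a hypothetical stand-in, not the kernel API.

#include <stdio.h>

/* Hypothetical refcounted key and waiter, standing in for
 * union futex_key and struct futex_q -- illustration only. */
struct toy_key    { int refs; };
struct toy_waiter { struct toy_key *key; };

static void get_key_refs(struct toy_key *k)  { k->refs++; }
static void drop_key_refs(struct toy_key *k) { k->refs--; }

/* Requeue one waiter onto the new key: take the new reference now,
 * but only *count* the drop of the old one.  The caller applies the
 * drops in bulk after the (hypothetical) hash-bucket locks are
 * released, mirroring drop_count in futex_requeue(). */
static void requeue_one(struct toy_waiter *w, struct toy_key *newkey,
                        int *drop_count)
{
        get_key_refs(newkey);
        w->key = newkey;
        (*drop_count)++;
}

int main(void)
{
        struct toy_key key1 = { .refs = 2 }, key2 = { .refs = 0 };
        struct toy_waiter w[2] = { { &key1 }, { &key1 } };
        int drop_count = 0;

        for (int i = 0; i < 2; i++)
                requeue_one(&w[i], &key2, &drop_count);

        while (--drop_count >= 0)   /* deferred drops, outside the "locks" */
                drop_key_refs(&key1);

        printf("key1.refs=%d key2.refs=%d\n", key1.refs, key2.refs); /* 0 2 */
        return 0;
}

Counting the drops instead of performing them inline keeps the reference release out of the lock-protected section, which is the point of the revert above.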
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 055f1a941a9e..69f4efec3449 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -61,7 +61,7 @@ static struct lock_class_key rcu_root_class;
                 NUM_RCU_LVL_2, \
                 NUM_RCU_LVL_3, /* == MAX_RCU_LVLS */ \
         }, \
-        .signaled = RCU_SIGNAL_INIT, \
+        .signaled = RCU_GP_IDLE, \
         .gpnum = -300, \
         .completed = -300, \
         .onofflock = __SPIN_LOCK_UNLOCKED(&name.onofflock), \
@@ -659,14 +659,17 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
          * irqs disabled.
          */
         rcu_for_each_node_breadth_first(rsp, rnp) {
                 spin_lock(&rnp->lock);  /* irqs already disabled. */
                 rcu_preempt_check_blocked_tasks(rnp);
                 rnp->qsmask = rnp->qsmaskinit;
                 rnp->gpnum = rsp->gpnum;
-                spin_unlock(&rnp->lock); /* irqs already disabled. */
+                spin_unlock(&rnp->lock); /* irqs remain disabled. */
         }
 
+        rnp = rcu_get_root(rsp);
+        spin_lock(&rnp->lock);           /* irqs already disabled. */
         rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state now OK. */
+        spin_unlock(&rnp->lock);         /* irqs remain disabled. */
         spin_unlock_irqrestore(&rsp->onofflock, flags);
 }
 
@@ -708,6 +711,7 @@ static void cpu_quiet_msk_finish(struct rcu_state *rsp, unsigned long flags)
 {
         WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
         rsp->completed = rsp->gpnum;
+        rsp->signaled = RCU_GP_IDLE;
         rcu_process_gp_end(rsp, rsp->rda[smp_processor_id()]);
         rcu_start_gp(rsp, flags); /* releases root node's rnp->lock. */
 }
@@ -915,7 +919,20 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
                         spin_unlock(&rnp->lock); /* irqs remain disabled. */
                         break;
                 }
-                rcu_preempt_offline_tasks(rsp, rnp, rdp);
+
+                /*
+                 * If there was a task blocking the current grace period,
+                 * and if all CPUs have checked in, we need to propagate
+                 * the quiescent state up the rcu_node hierarchy.  But that
+                 * is inconvenient at the moment due to deadlock issues if
+                 * this should end the current grace period.  So set the
+                 * offlined CPU's bit in ->qsmask in order to force the
+                 * next force_quiescent_state() invocation to clean up this
+                 * mess in a deadlock-free manner.
+                 */
+                if (rcu_preempt_offline_tasks(rsp, rnp, rdp) && !rnp->qsmask)
+                        rnp->qsmask |= mask;
+
                 mask = rnp->grpmask;
                 spin_unlock(&rnp->lock); /* irqs remain disabled. */
                 rnp = rnp->parent;
@@ -1151,9 +1168,10 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
         }
         spin_unlock(&rnp->lock);
         switch (signaled) {
+        case RCU_GP_IDLE:
         case RCU_GP_INIT:
 
-                break; /* grace period still initializing, ignore. */
+                break; /* grace period idle or initializing, ignore. */
 
         case RCU_SAVE_DYNTICK:
 
@@ -1167,7 +1185,8 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
 
                 /* Update state, record completion counter. */
                 spin_lock(&rnp->lock);
-                if (lastcomp == rsp->completed) {
+                if (lastcomp == rsp->completed &&
+                    rsp->signaled == RCU_SAVE_DYNTICK) {
                         rsp->signaled = RCU_FORCE_QS;
                         dyntick_record_completed(rsp, lastcomp);
                 }
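
The key trick in the __rcu_offline_cpu() hunk: rather than propagating a quiescent state up the rcu_node hierarchy from the CPU-offline path, which the comment above calls deadlock-prone while the node locks are held, the offlined CPU's bit is re-set in ->qsmask so that the next force_quiescent_state() pass finishes the job from a safe context. A toy model of that deferral, as this diff reads; the types and the simplified locking-free flow are hypothetical, for illustration only.

#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for one rcu_node: qsmask tracks which CPUs/groups
 * still owe a quiescent state for the current grace period. */
struct toy_node { uint64_t qsmask; };

/* Model of the new logic: if the outgoing CPU's tasks were blocking
 * the grace period and every other CPU has checked in (qsmask now
 * empty), reporting the quiescent state here could deadlock -- so
 * re-set the CPU's bit and let the next force_quiescent_state()
 * pass clean up. */
static void offline_cpu(struct toy_node *rnp, uint64_t mask,
                        int tasks_were_blocking)
{
        rnp->qsmask &= ~mask;            /* CPU leaves the hierarchy */
        if (tasks_were_blocking && !rnp->qsmask)
                rnp->qsmask |= mask;     /* defer cleanup to fqs */
}

int main(void)
{
        struct toy_node rnp = { .qsmask = 0x4 }; /* only CPU 2 outstanding */
        offline_cpu(&rnp, 0x4, 1);
        printf("qsmask = %#llx (nonzero: fqs will finish the grace period)\n",
               (unsigned long long)rnp.qsmask);
        return 0;
}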
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 599161f309fb..1899023b0962 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -201,9 +201,10 @@ struct rcu_data {
 };
 
 /* Values for signaled field in struct rcu_state. */
-#define RCU_GP_INIT             0       /* Grace period being initialized. */
-#define RCU_SAVE_DYNTICK        1       /* Need to scan dyntick state. */
-#define RCU_FORCE_QS            2       /* Need to force quiescent state. */
+#define RCU_GP_IDLE             0       /* No grace period in progress. */
+#define RCU_GP_INIT             1       /* Grace period being initialized. */
+#define RCU_SAVE_DYNTICK        2       /* Need to scan dyntick state. */
+#define RCU_FORCE_QS            3       /* Need to force quiescent state. */
 #ifdef CONFIG_NO_HZ
 #define RCU_SIGNAL_INIT         RCU_SAVE_DYNTICK
 #else /* #ifdef CONFIG_NO_HZ */
@@ -306,9 +307,9 @@ static void rcu_print_task_stall(struct rcu_node *rnp);
 #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
 #ifdef CONFIG_HOTPLUG_CPU
-static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
-                                      struct rcu_node *rnp,
-                                      struct rcu_data *rdp);
+static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
+                                     struct rcu_node *rnp,
+                                     struct rcu_data *rdp);
 static void rcu_preempt_offline_cpu(int cpu);
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
 static void rcu_preempt_check_callbacks(int cpu);
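
The rcutree.h hunk gives ->signaled an explicit "no grace period" state, RCU_GP_IDLE, and shifts the other values up by one. Read together with the rcutree.c hunks (rcu_start_gp() advances the state to RCU_SIGNAL_INIT, cpu_quiet_msk_finish() now resets it to RCU_GP_IDLE, and force_quiescent_state() checks it under the lock before moving to RCU_FORCE_QS), one normal cycle runs roughly as the sketch below prints. This is a reading of the diff, not a complete model of the kernel's state machine.

#include <stddef.h>
#include <stdio.h>

/* The four values of ->signaled after this change, numbered as in
 * the new rcutree.h. */
enum gp_state { GP_IDLE, GP_INIT, SAVE_DYNTICK, FORCE_QS };

static const char *const names[] = {
        "RCU_GP_IDLE", "RCU_GP_INIT", "RCU_SAVE_DYNTICK", "RCU_FORCE_QS",
};

int main(void)
{
        /* rcu_start_gp(): idle -> init -> RCU_SIGNAL_INIT (which is
         * RCU_SAVE_DYNTICK under CONFIG_NO_HZ); force_quiescent_state():
         * -> RCU_FORCE_QS; cpu_quiet_msk_finish(): back to idle. */
        enum gp_state cycle[] = { GP_IDLE, GP_INIT, SAVE_DYNTICK,
                                  FORCE_QS, GP_IDLE };

        for (size_t i = 0; i < sizeof cycle / sizeof cycle[0]; i++)
                printf("%s%s", i ? " -> " : "", names[cycle[i]]);
        printf("\n");
        return 0;
}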
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index ebd20ee7707d..ef2a58c2b9d5 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -304,21 +304,25 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
  * parent is to remove the need for rcu_read_unlock_special() to
  * make more than two attempts to acquire the target rcu_node's lock.
  *
+ * Returns 1 if there was previously a task blocking the current grace
+ * period on the specified rcu_node structure.
+ *
  * The caller must hold rnp->lock with irqs disabled.
  */
-static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
-                                      struct rcu_node *rnp,
-                                      struct rcu_data *rdp)
+static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
+                                     struct rcu_node *rnp,
+                                     struct rcu_data *rdp)
 {
         int i;
         struct list_head *lp;
         struct list_head *lp_root;
+        int retval = rcu_preempted_readers(rnp);
         struct rcu_node *rnp_root = rcu_get_root(rsp);
         struct task_struct *tp;
 
         if (rnp == rnp_root) {
                 WARN_ONCE(1, "Last CPU thought to be offlined?");
-                return; /* Shouldn't happen: at least one CPU online. */
+                return 0; /* Shouldn't happen: at least one CPU online. */
         }
         WARN_ON_ONCE(rnp != rdp->mynode &&
                      (!list_empty(&rnp->blocked_tasks[0]) ||
@@ -342,6 +346,8 @@ static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
                         spin_unlock(&rnp_root->lock); /* irqs remain disabled */
                 }
         }
+
+        return retval;
 }
 
 /*
@@ -532,12 +538,15 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
 
 /*
  * Because preemptable RCU does not exist, it never needs to migrate
- * tasks that were blocked within RCU read-side critical sections.
+ * tasks that were blocked within RCU read-side critical sections, and
+ * such non-existent tasks cannot possibly have been blocking the current
+ * grace period.
  */
-static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
-                                      struct rcu_node *rnp,
-                                      struct rcu_data *rdp)
+static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
+                                     struct rcu_node *rnp,
+                                     struct rcu_data *rdp)
 {
+        return 0;
 }
 
 /*
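
The rcutree_plugin.h hunks change the contract of rcu_preempt_offline_tasks(): it samples rcu_preempted_readers(rnp) before migrating the blocked tasks away and returns that flag, which __rcu_offline_cpu() uses to decide whether to set the offlined CPU's ->qsmask bit (see the rcutree.c hunk above); the non-preemptible stub simply returns 0. A toy stand-in for this capture-before-mutate contract, hypothetical names throughout:

#include <stdio.h>

/* Hypothetical stand-in for an rcu_node's lists of tasks blocked in
 * RCU read-side critical sections. */
struct toy_node { int nblocked; };

/* Mirrors the new contract: record whether any task was blocking the
 * current grace period *before* the tasks are migrated, and return
 * that flag so the caller can decide whether to set the offlined
 * CPU's qsmask bit. */
static int offline_tasks(struct toy_node *rnp)
{
        int retval = (rnp->nblocked != 0); /* rcu_preempted_readers() analogue */
        rnp->nblocked = 0;                 /* "migrate" tasks to the root node */
        return retval;
}

int main(void)
{
        struct toy_node rnp = { .nblocked = 2 };
        printf("blocking GP: %d\n", offline_tasks(&rnp)); /* 1 */
        printf("blocking GP: %d\n", offline_tasks(&rnp)); /* 0 */
        return 0;
}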
diff --git a/kernel/user.c b/kernel/user.c
index 2c000e7132ac..46d0165ca70c 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -330,9 +330,9 @@ done:
  */
 static void free_user(struct user_struct *up, unsigned long flags)
 {
-        spin_unlock_irqrestore(&uidhash_lock, flags);
         INIT_DELAYED_WORK(&up->work, cleanup_user_struct);
         schedule_delayed_work(&up->work, msecs_to_jiffies(1000));
+        spin_unlock_irqrestore(&uidhash_lock, flags);
 }
 
 #else   /* CONFIG_USER_SCHED && CONFIG_SYSFS */
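
The user.c hunk reorders free_user(): previously it dropped uidhash_lock before queueing the deferred cleanup, leaving a window in which a concurrent lookup could race with the teardown of the user_struct; moving spin_unlock_irqrestore() after schedule_delayed_work() closes that window, as this diff reads. A userspace sketch of the queue-then-unlock ordering, with a pthread mutex standing in for the spinlock and all names hypothetical:

#include <pthread.h>
#include <stdio.h>

/* hash_lock stands in for uidhash_lock; queue_cleanup() for
 * schedule_delayed_work().  Both are hypothetical. */
static pthread_mutex_t hash_lock = PTHREAD_MUTEX_INITIALIZER;
static int cleanup_queued;

static void queue_cleanup(void)
{
        cleanup_queued = 1;   /* a real version would enqueue work here */
}

/* Called with hash_lock held, as free_user() is entered with
 * uidhash_lock held by its caller. */
static void free_user_fixed(void)
{
        queue_cleanup();                  /* first: queue the teardown */
        pthread_mutex_unlock(&hash_lock); /* then: let other threads in */
}

int main(void)
{
        pthread_mutex_lock(&hash_lock);
        free_user_fixed();
        printf("cleanup queued under lock: %d\n", cleanup_queued);
        return 0;
}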