diff options
-rw-r--r--   kernel/rcutree.c        | 15
-rw-r--r--   kernel/rcutree.h        |  6
-rw-r--r--   kernel/rcutree_plugin.h | 25
3 files changed, 34 insertions(+), 12 deletions(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index ddbf111e9e18..0536125b0497 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -913,7 +913,20 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
 			spin_unlock(&rnp->lock); /* irqs remain disabled. */
 			break;
 		}
-		rcu_preempt_offline_tasks(rsp, rnp, rdp);
+
+		/*
+		 * If there was a task blocking the current grace period,
+		 * and if all CPUs have checked in, we need to propagate
+		 * the quiescent state up the rcu_node hierarchy.  But that
+		 * is inconvenient at the moment due to deadlock issues if
+		 * this should end the current grace period.  So set the
+		 * offlined CPU's bit in ->qsmask in order to force the
+		 * next force_quiescent_state() invocation to clean up this
+		 * mess in a deadlock-free manner.
+		 */
+		if (rcu_preempt_offline_tasks(rsp, rnp, rdp) && !rnp->qsmask)
+			rnp->qsmask |= mask;
+
 		mask = rnp->grpmask;
 		spin_unlock(&rnp->lock); /* irqs remain disabled. */
 		rnp = rnp->parent;
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 599161f309fb..1823c6e20609 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -306,9 +306,9 @@ static void rcu_print_task_stall(struct rcu_node *rnp);
 #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
 #ifdef CONFIG_HOTPLUG_CPU
-static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
-				      struct rcu_node *rnp,
-				      struct rcu_data *rdp);
+static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
+				     struct rcu_node *rnp,
+				     struct rcu_data *rdp);
 static void rcu_preempt_offline_cpu(int cpu);
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
 static void rcu_preempt_check_callbacks(int cpu);
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index ebd20ee7707d..ef2a58c2b9d5 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -304,21 +304,25 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
  * parent is to remove the need for rcu_read_unlock_special() to
  * make more than two attempts to acquire the target rcu_node's lock.
  *
+ * Returns 1 if there was previously a task blocking the current grace
+ * period on the specified rcu_node structure.
+ *
  * The caller must hold rnp->lock with irqs disabled.
  */
-static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
-				      struct rcu_node *rnp,
-				      struct rcu_data *rdp)
+static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
+				     struct rcu_node *rnp,
+				     struct rcu_data *rdp)
 {
 	int i;
 	struct list_head *lp;
 	struct list_head *lp_root;
+	int retval = rcu_preempted_readers(rnp);
 	struct rcu_node *rnp_root = rcu_get_root(rsp);
 	struct task_struct *tp;
 
 	if (rnp == rnp_root) {
 		WARN_ONCE(1, "Last CPU thought to be offlined?");
-		return;  /* Shouldn't happen: at least one CPU online. */
+		return 0;  /* Shouldn't happen: at least one CPU online. */
 	}
 	WARN_ON_ONCE(rnp != rdp->mynode &&
 		     (!list_empty(&rnp->blocked_tasks[0]) ||
@@ -342,6 +346,8 @@ static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
 			spin_unlock(&rnp_root->lock); /* irqs remain disabled */
 		}
 	}
+
+	return retval;
 }
 
 /*
@@ -532,12 +538,15 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
 
 /*
  * Because preemptable RCU does not exist, it never needs to migrate
- * tasks that were blocked within RCU read-side critical sections.
+ * tasks that were blocked within RCU read-side critical sections, and
+ * such non-existent tasks cannot possibly have been blocking the current
+ * grace period.
  */
-static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
-				      struct rcu_node *rnp,
-				      struct rcu_data *rdp)
+static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
+				     struct rcu_node *rnp,
+				     struct rcu_data *rdp)
 {
+	return 0;
 }
 
 /*
