diff options
Diffstat (limited to 'kernel/rcutree_plugin.h')
| -rw-r--r-- | kernel/rcutree_plugin.h | 46 |
1 file changed, 38 insertions, 8 deletions
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h index c0cb783aa16a..ef2a58c2b9d5 100644 --- a/kernel/rcutree_plugin.h +++ b/kernel/rcutree_plugin.h | |||
| @@ -304,21 +304,25 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp) | |||
| 304 | * parent is to remove the need for rcu_read_unlock_special() to | 304 | * parent is to remove the need for rcu_read_unlock_special() to |
| 305 | * make more than two attempts to acquire the target rcu_node's lock. | 305 | * make more than two attempts to acquire the target rcu_node's lock. |
| 306 | * | 306 | * |
| 307 | * Returns 1 if there was previously a task blocking the current grace | ||
| 308 | * period on the specified rcu_node structure. | ||
| 309 | * | ||
| 307 | * The caller must hold rnp->lock with irqs disabled. | 310 | * The caller must hold rnp->lock with irqs disabled. |
| 308 | */ | 311 | */ |
| 309 | static void rcu_preempt_offline_tasks(struct rcu_state *rsp, | 312 | static int rcu_preempt_offline_tasks(struct rcu_state *rsp, |
| 310 | struct rcu_node *rnp, | 313 | struct rcu_node *rnp, |
| 311 | struct rcu_data *rdp) | 314 | struct rcu_data *rdp) |
| 312 | { | 315 | { |
| 313 | int i; | 316 | int i; |
| 314 | struct list_head *lp; | 317 | struct list_head *lp; |
| 315 | struct list_head *lp_root; | 318 | struct list_head *lp_root; |
| 319 | int retval = rcu_preempted_readers(rnp); | ||
| 316 | struct rcu_node *rnp_root = rcu_get_root(rsp); | 320 | struct rcu_node *rnp_root = rcu_get_root(rsp); |
| 317 | struct task_struct *tp; | 321 | struct task_struct *tp; |
| 318 | 322 | ||
| 319 | if (rnp == rnp_root) { | 323 | if (rnp == rnp_root) { |
| 320 | WARN_ONCE(1, "Last CPU thought to be offlined?"); | 324 | WARN_ONCE(1, "Last CPU thought to be offlined?"); |
| 321 | return; /* Shouldn't happen: at least one CPU online. */ | 325 | return 0; /* Shouldn't happen: at least one CPU online. */ |
| 322 | } | 326 | } |
| 323 | WARN_ON_ONCE(rnp != rdp->mynode && | 327 | WARN_ON_ONCE(rnp != rdp->mynode && |
| 324 | (!list_empty(&rnp->blocked_tasks[0]) || | 328 | (!list_empty(&rnp->blocked_tasks[0]) || |
| @@ -342,6 +346,8 @@ static void rcu_preempt_offline_tasks(struct rcu_state *rsp, | |||
| 342 | spin_unlock(&rnp_root->lock); /* irqs remain disabled */ | 346 | spin_unlock(&rnp_root->lock); /* irqs remain disabled */ |
| 343 | } | 347 | } |
| 344 | } | 348 | } |
| 349 | |||
| 350 | return retval; | ||
| 345 | } | 351 | } |
| 346 | 352 | ||
| 347 | /* | 353 | /* |
| @@ -393,6 +399,17 @@ void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) | |||
| 393 | EXPORT_SYMBOL_GPL(call_rcu); | 399 | EXPORT_SYMBOL_GPL(call_rcu); |
| 394 | 400 | ||
| 395 | /* | 401 | /* |
| 402 | * Wait for an rcu-preempt grace period. We are supposed to expedite the | ||
| 403 | * grace period, but this is the crude slow compatibility hack, so just | ||
| 404 | * invoke synchronize_rcu(). | ||
| 405 | */ | ||
| 406 | void synchronize_rcu_expedited(void) | ||
| 407 | { | ||
| 408 | synchronize_rcu(); | ||
| 409 | } | ||
| 410 | EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); | ||
| 411 | |||
| 412 | /* | ||
| 396 | * Check to see if there is any immediate preemptable-RCU-related work | 413 | * Check to see if there is any immediate preemptable-RCU-related work |
| 397 | * to be done. | 414 | * to be done. |
| 398 | */ | 415 | */ |
| @@ -521,12 +538,15 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp) | |||
| 521 | 538 | ||
| 522 | /* | 539 | /* |
| 523 | * Because preemptable RCU does not exist, it never needs to migrate | 540 | * Because preemptable RCU does not exist, it never needs to migrate |
| 524 | * tasks that were blocked within RCU read-side critical sections. | 541 | * tasks that were blocked within RCU read-side critical sections, and |
| 542 | * such non-existent tasks cannot possibly have been blocking the current | ||
| 543 | * grace period. | ||
| 525 | */ | 544 | */ |
| 526 | static void rcu_preempt_offline_tasks(struct rcu_state *rsp, | 545 | static int rcu_preempt_offline_tasks(struct rcu_state *rsp, |
| 527 | struct rcu_node *rnp, | 546 | struct rcu_node *rnp, |
| 528 | struct rcu_data *rdp) | 547 | struct rcu_data *rdp) |
| 529 | { | 548 | { |
| 549 | return 0; | ||
| 530 | } | 550 | } |
| 531 | 551 | ||
| 532 | /* | 552 | /* |
| @@ -565,6 +585,16 @@ void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) | |||
| 565 | EXPORT_SYMBOL_GPL(call_rcu); | 585 | EXPORT_SYMBOL_GPL(call_rcu); |
| 566 | 586 | ||
| 567 | /* | 587 | /* |
| 588 | * Wait for an rcu-preempt grace period, but make it happen quickly. | ||
| 589 | * But because preemptable RCU does not exist, map to rcu-sched. | ||
| 590 | */ | ||
| 591 | void synchronize_rcu_expedited(void) | ||
| 592 | { | ||
| 593 | synchronize_sched_expedited(); | ||
| 594 | } | ||
| 595 | EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); | ||
| 596 | |||
| 597 | /* | ||
| 568 | * Because preemptable RCU does not exist, it never has any work to do. | 598 | * Because preemptable RCU does not exist, it never has any work to do. |
| 569 | */ | 599 | */ |
| 570 | static int rcu_preempt_pending(int cpu) | 600 | static int rcu_preempt_pending(int cpu) |
