Diffstat (limited to 'kernel/rcu/tree_exp.h')
-rw-r--r--	kernel/rcu/tree_exp.h	124
1 file changed, 77 insertions, 47 deletions
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index 6d86ab6ec2c9..24343eb87b58 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -359,7 +359,8 @@ static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
 			struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
 
 			if (raw_smp_processor_id() == cpu ||
-			    !(atomic_add_return(0, &rdtp->dynticks) & 0x1))
+			    !(atomic_add_return(0, &rdtp->dynticks) & 0x1) ||
+			    !(rnp->qsmaskinitnext & rdp->grpmask))
 				mask_ofl_test |= rdp->grpmask;
 		}
 		mask_ofl_ipi = rnp->expmask & ~mask_ofl_test;
@@ -384,17 +385,16 @@ retry_ipi:
 				mask_ofl_ipi &= ~mask;
 				continue;
 			}
-			/* Failed, raced with offline. */
+			/* Failed, raced with CPU hotplug operation. */
 			raw_spin_lock_irqsave_rcu_node(rnp, flags);
-			if (cpu_online(cpu) &&
+			if ((rnp->qsmaskinitnext & mask) &&
 			    (rnp->expmask & mask)) {
+				/* Online, so delay for a bit and try again. */
 				raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 				schedule_timeout_uninterruptible(1);
-				if (cpu_online(cpu) &&
-				    (rnp->expmask & mask))
-					goto retry_ipi;
-				raw_spin_lock_irqsave_rcu_node(rnp, flags);
+				goto retry_ipi;
 			}
+			/* CPU really is offline, so we can ignore it. */
 			if (!(rnp->expmask & mask))
 				mask_ofl_ipi &= ~mask;
 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
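The retry path in this hunk relies on smp_call_function_single() returning a negative errno (such as -ENXIO) when the target CPU cannot be IPIed, and on ->qsmaskinitnext, rather than cpu_online(), to decide whether the CPU is expected to come back. A rough, hypothetical sketch of that IPI-and-retry idea follows; the function and handler names are made up, and the real code rechecks the mask under the rcu_node lock instead of blindly retrying:

#include <linux/types.h>
#include <linux/smp.h>
#include <linux/delay.h>

static void my_ipi_handler(void *info)
{
	/* Runs on the target CPU in interrupt context. */
}

/* Try to IPI @cpu, tolerating a concurrent CPU-hotplug operation. */
static bool my_ipi_cpu(int cpu)
{
	int tries;

	for (tries = 0; tries < 10; tries++) {
		/* Returns 0 on success, a negative errno if the CPU is offline. */
		if (!smp_call_function_single(cpu, my_ipi_handler, NULL, 0))
			return true;
		msleep(1);	/* raced with hotplug: back off and retry */
	}
	return false;		/* treat the CPU as genuinely offline */
}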
@@ -427,12 +427,10 @@ static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
 				jiffies_stall);
 		if (ret > 0 || sync_rcu_preempt_exp_done(rnp_root))
 			return;
-		if (ret < 0) {
-			/* Hit a signal, disable CPU stall warnings. */
-			swait_event(rsp->expedited_wq,
-				    sync_rcu_preempt_exp_done(rnp_root));
-			return;
-		}
+		WARN_ON(ret < 0);  /* workqueues should not be signaled. */
+		if (rcu_cpu_stall_suppress)
+			continue;
+		panic_on_rcu_stall();
 		pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {",
 		       rsp->name);
 		ndetected = 0;
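With the expedited grace period now driven from a workqueue handler (see the helper added further down), the task doing this wait is a kthread and should never receive a signal, which is why the old ret < 0 branch becomes a WARN_ON() and the stall path falls through to panic_on_rcu_stall() and the printout unless rcu_cpu_stall_suppress is set. For reference, the three-way return convention that WARN_ON(ret < 0) leans on is the usual one for interruptible timed waits; a small sketch using the stock wait_event_interruptible_timeout() (an illustration only, not the swait variant used above, with hypothetical names) would be:

#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/printk.h>

/* Hypothetical: poll 'cond' on wait queue 'wq', both assumed to exist. */
static void wait_for_cond(wait_queue_head_t *wq, bool *cond)
{
	long ret;

	for (;;) {
		ret = wait_event_interruptible_timeout(*wq, READ_ONCE(*cond), HZ);
		if (ret > 0)		/* condition became true in time */
			return;
		if (ret < 0)		/* -ERESTARTSYS: a signal arrived */
			return;
		/* ret == 0: timed out with the condition still false. */
		pr_info("still waiting for condition\n");
	}
}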
@@ -500,7 +498,6 @@ static void rcu_exp_wait_wake(struct rcu_state *rsp, unsigned long s)
 	 * next GP, to proceed.
 	 */
 	mutex_lock(&rsp->exp_wake_mutex);
-	mutex_unlock(&rsp->exp_mutex);
 
 	rcu_for_each_node_breadth_first(rsp, rnp) {
 		if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) {
@@ -516,6 +513,70 @@ static void rcu_exp_wait_wake(struct rcu_state *rsp, unsigned long s)
 	mutex_unlock(&rsp->exp_wake_mutex);
 }
 
+/* Let the workqueue handler know what it is supposed to do. */
+struct rcu_exp_work {
+	smp_call_func_t rew_func;
+	struct rcu_state *rew_rsp;
+	unsigned long rew_s;
+	struct work_struct rew_work;
+};
+
+/*
+ * Work-queue handler to drive an expedited grace period forward.
+ */
+static void wait_rcu_exp_gp(struct work_struct *wp)
+{
+	struct rcu_exp_work *rewp;
+
+	/* Initialize the rcu_node tree in preparation for the wait. */
+	rewp = container_of(wp, struct rcu_exp_work, rew_work);
+	sync_rcu_exp_select_cpus(rewp->rew_rsp, rewp->rew_func);
+
+	/* Wait and clean up, including waking everyone. */
+	rcu_exp_wait_wake(rewp->rew_rsp, rewp->rew_s);
+}
+
+/*
+ * Given an rcu_state pointer and a smp_call_function() handler, kick
+ * off the specified flavor of expedited grace period.
+ */
+static void _synchronize_rcu_expedited(struct rcu_state *rsp,
+				       smp_call_func_t func)
+{
+	struct rcu_data *rdp;
+	struct rcu_exp_work rew;
+	struct rcu_node *rnp;
+	unsigned long s;
+
+	/* If expedited grace periods are prohibited, fall back to normal. */
+	if (rcu_gp_is_normal()) {
+		wait_rcu_gp(rsp->call);
+		return;
+	}
+
+	/* Take a snapshot of the sequence number. */
+	s = rcu_exp_gp_seq_snap(rsp);
+	if (exp_funnel_lock(rsp, s))
+		return;  /* Someone else did our work for us. */
+
+	/* Marshall arguments and schedule the expedited grace period. */
+	rew.rew_func = func;
+	rew.rew_rsp = rsp;
+	rew.rew_s = s;
+	INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
+	schedule_work(&rew.rew_work);
+
+	/* Wait for expedited grace period to complete. */
+	rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id());
+	rnp = rcu_get_root(rsp);
+	wait_event(rnp->exp_wq[(s >> 1) & 0x3],
+		   sync_exp_work_done(rsp,
+				      &rdp->exp_workdone0, s));
+
+	/* Let the next expedited grace period start. */
+	mutex_unlock(&rsp->exp_mutex);
+}
+
 /**
  * synchronize_sched_expedited - Brute-force RCU-sched grace period
  *
@@ -534,29 +595,13 @@ static void rcu_exp_wait_wake(struct rcu_state *rsp, unsigned long s)
  */
 void synchronize_sched_expedited(void)
 {
-	unsigned long s;
 	struct rcu_state *rsp = &rcu_sched_state;
 
 	/* If only one CPU, this is automatically a grace period. */
 	if (rcu_blocking_is_gp())
 		return;
 
-	/* If expedited grace periods are prohibited, fall back to normal. */
-	if (rcu_gp_is_normal()) {
-		wait_rcu_gp(call_rcu_sched);
-		return;
-	}
-
-	/* Take a snapshot of the sequence number. */
-	s = rcu_exp_gp_seq_snap(rsp);
-	if (exp_funnel_lock(rsp, s))
-		return; /* Someone else did our work for us. */
-
-	/* Initialize the rcu_node tree in preparation for the wait. */
-	sync_rcu_exp_select_cpus(rsp, sync_sched_exp_handler);
-
-	/* Wait and clean up, including waking everyone. */
-	rcu_exp_wait_wake(rsp, s);
+	_synchronize_rcu_expedited(rsp, sync_sched_exp_handler);
 }
 EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
 
@@ -620,23 +665,8 @@ static void sync_rcu_exp_handler(void *info)
 void synchronize_rcu_expedited(void)
 {
 	struct rcu_state *rsp = rcu_state_p;
-	unsigned long s;
-
-	/* If expedited grace periods are prohibited, fall back to normal. */
-	if (rcu_gp_is_normal()) {
-		wait_rcu_gp(call_rcu);
-		return;
-	}
-
-	s = rcu_exp_gp_seq_snap(rsp);
-	if (exp_funnel_lock(rsp, s))
-		return; /* Someone else did our work for us. */
-
-	/* Initialize the rcu_node tree in preparation for the wait. */
-	sync_rcu_exp_select_cpus(rsp, sync_rcu_exp_handler);
 
-	/* Wait for ->blkd_tasks lists to drain, then wake everyone up. */
-	rcu_exp_wait_wake(rsp, s);
+	_synchronize_rcu_expedited(rsp, sync_rcu_exp_handler);
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
 