Diffstat (limited to 'kernel/rcutree.c')
 kernel/rcutree.c | 210 ++++++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 183 insertions(+), 27 deletions(-)

diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 7387e46009d9..4fb2376ddf06 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -206,6 +206,9 @@ EXPORT_SYMBOL_GPL(rcu_note_context_switch);
 DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
 	.dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
 	.dynticks = ATOMIC_INIT(1),
+#if defined(CONFIG_RCU_USER_QS) && !defined(CONFIG_RCU_USER_QS_FORCE)
+	.ignore_user_qs = true,
+#endif
 };
 
 static int blimit = 10;		/* Maximum callbacks per rcu_do_batch. */
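The ATOMIC_INIT(1) initializer above encodes the convention the rest of this patch leans on: ->dynticks is odd while the CPU is outside an extended quiescent state (EQS) and even while inside one; the WARN_ON_ONCE() checks in rcu_eqs_enter_common() and rcu_eqs_exit_common() below verify exactly this. A minimal sketch of that invariant (illustrative only, not part of the patch; the helper name is made up):

	/* Illustrative helper, not from this patch: a CPU sits in an
	 * extended quiescent state iff the low bit of ->dynticks is clear. */
	static bool rdtp_in_eqs(struct rcu_dynticks *rdtp)
	{
		return !(atomic_read(&rdtp->dynticks) & 0x1);
	}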
@@ -322,16 +325,17 @@ static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
 }
 
 /*
- * rcu_idle_enter_common - inform RCU that current CPU is moving towards idle
+ * rcu_eqs_enter_common - current CPU is moving towards extended quiescent state
  *
  * If the new value of the ->dynticks_nesting counter now is zero,
  * we really have entered idle, and must do the appropriate accounting.
  * The caller must have disabled interrupts.
  */
-static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval)
+static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
+				 bool user)
 {
 	trace_rcu_dyntick("Start", oldval, 0);
-	if (!is_idle_task(current)) {
+	if (!user && !is_idle_task(current)) {
 		struct task_struct *idle = idle_task(smp_processor_id());
 
 		trace_rcu_dyntick("Error on entry: not idle task", oldval, 0);
@@ -348,7 +352,7 @@ static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval)
 	WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
 
 	/*
-	 * The idle task is not permitted to enter the idle loop while
+	 * It is illegal to enter an extended quiescent state while
 	 * in an RCU read-side critical section.
 	 */
 	rcu_lockdep_assert(!lock_is_held(&rcu_lock_map),
@@ -359,6 +363,25 @@ static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval)
 			   "Illegal idle entry in RCU-sched read-side critical section.");
 }
 
+/*
+ * Enter an RCU extended quiescent state, which can be either the
+ * idle loop or adaptive-tickless usermode execution.
+ */
+static void rcu_eqs_enter(bool user)
+{
+	long long oldval;
+	struct rcu_dynticks *rdtp;
+
+	rdtp = &__get_cpu_var(rcu_dynticks);
+	oldval = rdtp->dynticks_nesting;
+	WARN_ON_ONCE((oldval & DYNTICK_TASK_NEST_MASK) == 0);
+	if ((oldval & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE)
+		rdtp->dynticks_nesting = 0;
+	else
+		rdtp->dynticks_nesting -= DYNTICK_TASK_NEST_VALUE;
+	rcu_eqs_enter_common(rdtp, oldval, user);
+}
+
 /**
  * rcu_idle_enter - inform RCU that current CPU is entering idle
  *
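The new rcu_eqs_enter() collapses the task-level nesting count: when exactly one DYNTICK_TASK_NEST_VALUE level remains, ->dynticks_nesting drops to zero and the CPU really enters the EQS; otherwise one level is peeled off. A toy model of that arithmetic, with made-up constants standing in for the real DYNTICK_TASK_* definitions:

	/* Toy model only: hypothetical constants, not the kernel's values. */
	#define TOY_NEST_VALUE	0x10	/* one task-level nesting step */
	#define TOY_NEST_MASK	0xf0	/* selects the nesting bits    */

	static long long toy_eqs_enter(long long nesting)
	{
		if ((nesting & TOY_NEST_MASK) == TOY_NEST_VALUE)
			return 0;			/* last level: really enter the EQS */
		return nesting - TOY_NEST_VALUE;	/* still nested: count one level down */
	}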
@@ -374,21 +397,70 @@ static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval)
 void rcu_idle_enter(void)
 {
 	unsigned long flags;
-	long long oldval;
+
+	local_irq_save(flags);
+	rcu_eqs_enter(false);
+	local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(rcu_idle_enter);
+
+#ifdef CONFIG_RCU_USER_QS
+/**
+ * rcu_user_enter - inform RCU that we are resuming userspace.
+ *
+ * Enter RCU idle mode right before resuming userspace. No use of RCU
+ * is permitted between this call and rcu_user_exit(). This way the
+ * CPU doesn't need to maintain the tick for RCU maintenance purposes
+ * when the CPU runs in userspace.
+ */
+void rcu_user_enter(void)
+{
+	unsigned long flags;
 	struct rcu_dynticks *rdtp;
 
+	/*
+	 * Some contexts may involve an exception occurring in an irq,
+	 * leading to that nesting:
+	 * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
+	 * This would mess up the dyntick_nesting count though. And rcu_irq_*()
+	 * helpers are enough to protect RCU uses inside the exception. So
+	 * just return immediately if we detect we are in an IRQ.
+	 */
+	if (in_interrupt())
+		return;
+
+	WARN_ON_ONCE(!current->mm);
+
 	local_irq_save(flags);
 	rdtp = &__get_cpu_var(rcu_dynticks);
-	oldval = rdtp->dynticks_nesting;
-	WARN_ON_ONCE((oldval & DYNTICK_TASK_NEST_MASK) == 0);
-	if ((oldval & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE)
-		rdtp->dynticks_nesting = 0;
-	else
-		rdtp->dynticks_nesting -= DYNTICK_TASK_NEST_VALUE;
-	rcu_idle_enter_common(rdtp, oldval);
+	if (!rdtp->ignore_user_qs && !rdtp->in_user) {
+		rdtp->in_user = true;
+		rcu_eqs_enter(true);
+	}
 	local_irq_restore(flags);
 }
-EXPORT_SYMBOL_GPL(rcu_idle_enter);
+
+/**
+ * rcu_user_enter_after_irq - inform RCU that we are going to resume userspace
+ * after the current irq returns.
+ *
+ * This is similar to rcu_user_enter() but in the context of a non-nesting
+ * irq. After this call, RCU enters into idle mode when the interrupt
+ * returns.
+ */
+void rcu_user_enter_after_irq(void)
+{
+	unsigned long flags;
+	struct rcu_dynticks *rdtp;
+
+	local_irq_save(flags);
+	rdtp = &__get_cpu_var(rcu_dynticks);
+	/* Ensure this irq is interrupting a non-idle RCU state. */
+	WARN_ON_ONCE(!(rdtp->dynticks_nesting & DYNTICK_TASK_MASK));
+	rdtp->dynticks_nesting = 1;
+	local_irq_restore(flags);
+}
+#endif /* CONFIG_RCU_USER_QS */
 
 /**
  * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
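Per the docblocks above, rcu_user_enter() belongs on the return-to-userspace path, while rcu_user_enter_after_irq() serves a non-nesting irq that will resume userspace. Setting ->dynticks_nesting to 1 in the latter works because the enclosing rcu_irq_exit() then decrements it to zero and performs the actual EQS entry on irq return. A hedged sketch of a caller (the arch hook name is an assumption, not from this patch):

	/* Hypothetical arch glue, assuming CONFIG_RCU_USER_QS=y. */
	static void arch_return_to_user(void)
	{
		rcu_user_enter();	/* no RCU use allowed past this point... */
		/* ...until the next kernel entry calls rcu_user_exit(). */
	}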
@@ -420,18 +492,19 @@ void rcu_irq_exit(void)
 	if (rdtp->dynticks_nesting)
 		trace_rcu_dyntick("--=", oldval, rdtp->dynticks_nesting);
 	else
-		rcu_idle_enter_common(rdtp, oldval);
+		rcu_eqs_enter_common(rdtp, oldval, true);
 	local_irq_restore(flags);
 }
 
 /*
- * rcu_idle_exit_common - inform RCU that current CPU is moving away from idle
+ * rcu_eqs_exit_common - current CPU moving away from extended quiescent state
  *
  * If the new value of the ->dynticks_nesting counter was previously zero,
  * we really have exited idle, and must do the appropriate accounting.
  * The caller must have disabled interrupts.
  */
-static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
+static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
+				int user)
 {
 	smp_mb__before_atomic_inc();  /* Force ordering w/previous sojourn. */
 	atomic_inc(&rdtp->dynticks);
@@ -440,7 +513,7 @@ static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
 	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
 	rcu_cleanup_after_idle(smp_processor_id());
 	trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
-	if (!is_idle_task(current)) {
+	if (!user && !is_idle_task(current)) {
 		struct task_struct *idle = idle_task(smp_processor_id());
 
 		trace_rcu_dyntick("Error on exit: not idle task",
@@ -452,6 +525,25 @@ static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
 	}
 }
 
+/*
+ * Exit an RCU extended quiescent state, which can be either the
+ * idle loop or adaptive-tickless usermode execution.
+ */
+static void rcu_eqs_exit(bool user)
+{
+	struct rcu_dynticks *rdtp;
+	long long oldval;
+
+	rdtp = &__get_cpu_var(rcu_dynticks);
+	oldval = rdtp->dynticks_nesting;
+	WARN_ON_ONCE(oldval < 0);
+	if (oldval & DYNTICK_TASK_NEST_MASK)
+		rdtp->dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
+	else
+		rdtp->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
+	rcu_eqs_exit_common(rdtp, oldval, user);
+}
+
 /**
  * rcu_idle_exit - inform RCU that current CPU is leaving idle
  *
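rcu_eqs_exit() mirrors the enter side: leaving the EQS altogether restores the full DYNTICK_TASK_EXIT_IDLE value, while a nested exit just stacks one more DYNTICK_TASK_NEST_VALUE level. Continuing the toy model from above (TOY_EXIT_IDLE is a stand-in for DYNTICK_TASK_EXIT_IDLE, not its real definition):

	/* Toy model continued: exit-side counterpart of toy_eqs_enter(). */
	#define TOY_EXIT_IDLE	(TOY_NEST_VALUE + 1)	/* stand-in value only */

	static long long toy_eqs_exit(long long nesting)
	{
		if (nesting & TOY_NEST_MASK)
			return nesting + TOY_NEST_VALUE;	/* nested: one more level */
		return TOY_EXIT_IDLE;				/* fully out of the EQS */
	}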
@@ -466,21 +558,67 @@ static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
 void rcu_idle_exit(void)
 {
 	unsigned long flags;
+
+	local_irq_save(flags);
+	rcu_eqs_exit(false);
+	local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(rcu_idle_exit);
+
+#ifdef CONFIG_RCU_USER_QS
+/**
+ * rcu_user_exit - inform RCU that we are exiting userspace.
+ *
+ * Exit RCU idle mode while entering the kernel because it can
+ * run an RCU read-side critical section anytime.
+ */
+void rcu_user_exit(void)
+{
+	unsigned long flags;
 	struct rcu_dynticks *rdtp;
-	long long oldval;
+
+	/*
+	 * Some contexts may involve an exception occurring in an irq,
+	 * leading to that nesting:
+	 * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
+	 * This would mess up the dyntick_nesting count though. And rcu_irq_*()
+	 * helpers are enough to protect RCU uses inside the exception. So
+	 * just return immediately if we detect we are in an IRQ.
+	 */
+	if (in_interrupt())
+		return;
 
 	local_irq_save(flags);
 	rdtp = &__get_cpu_var(rcu_dynticks);
-	oldval = rdtp->dynticks_nesting;
-	WARN_ON_ONCE(oldval < 0);
-	if (oldval & DYNTICK_TASK_NEST_MASK)
-		rdtp->dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
-	else
-		rdtp->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
-	rcu_idle_exit_common(rdtp, oldval);
+	if (rdtp->in_user) {
+		rdtp->in_user = false;
+		rcu_eqs_exit(true);
+	}
 	local_irq_restore(flags);
 }
-EXPORT_SYMBOL_GPL(rcu_idle_exit);
+
+/**
+ * rcu_user_exit_after_irq - inform RCU that we won't resume to userspace
+ * idle mode after the current non-nesting irq returns.
+ *
+ * This is similar to rcu_user_exit() but in the context of an irq.
+ * This is called when the irq has interrupted a userspace RCU idle mode
+ * context. When the current non-nesting interrupt returns after this call,
+ * the CPU won't restore the RCU idle mode.
+ */
+void rcu_user_exit_after_irq(void)
+{
+	unsigned long flags;
+	struct rcu_dynticks *rdtp;
+
+	local_irq_save(flags);
+	rdtp = &__get_cpu_var(rcu_dynticks);
+	/* Ensure we are interrupting an RCU idle mode. */
+	WARN_ON_ONCE(rdtp->dynticks_nesting & DYNTICK_TASK_NEST_MASK);
+	rdtp->dynticks_nesting += DYNTICK_TASK_EXIT_IDLE;
+	local_irq_restore(flags);
+}
+#endif /* CONFIG_RCU_USER_QS */
 
 /**
  * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
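The in_interrupt() early return in rcu_user_exit() means exceptions that fire inside an irq rely on the rcu_irq_*() pair alone. For an irq that interrupts userspace EQS and must leave the CPU in the kernel afterwards, the pieces above compose as in this sketch (the handler name and the need_resched() trigger are illustrative assumptions):

	/* Hypothetical irq path on a CPU sitting in userspace EQS. */
	static void arch_irq_from_user(void)
	{
		rcu_irq_enter();	/* leave the EQS for the irq's duration */
		/* ... handle the irq; RCU read-side sections are legal here ... */
		if (need_resched())
			rcu_user_exit_after_irq();	/* don't resume the EQS */
		rcu_irq_exit();		/* re-enters the EQS unless told not to */
	}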
@@ -515,7 +653,7 @@ void rcu_irq_enter(void)
 	if (oldval)
 		trace_rcu_dyntick("++=", oldval, rdtp->dynticks_nesting);
 	else
-		rcu_idle_exit_common(rdtp, oldval);
+		rcu_eqs_exit_common(rdtp, oldval, true);
 	local_irq_restore(flags);
 }
 
@@ -579,6 +717,21 @@ int rcu_is_cpu_idle(void)
 }
 EXPORT_SYMBOL(rcu_is_cpu_idle);
 
+#ifdef CONFIG_RCU_USER_QS
+void rcu_user_hooks_switch(struct task_struct *prev,
+			   struct task_struct *next)
+{
+	struct rcu_dynticks *rdtp;
+
+	/* Interrupts are disabled in context switch */
+	rdtp = &__get_cpu_var(rcu_dynticks);
+	if (!rdtp->ignore_user_qs) {
+		clear_tsk_thread_flag(prev, TIF_NOHZ);
+		set_tsk_thread_flag(next, TIF_NOHZ);
+	}
+}
+#endif /* #ifdef CONFIG_RCU_USER_QS */
+
 #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
 
 /*
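rcu_user_hooks_switch() hands the TIF_NOHZ thread flag from the outgoing task to the incoming one, so only the task currently running on the CPU trips the user-QS hooks on kernel entry and exit. A hedged sketch of how such a flag might gate the syscall path (the hook and its call site are assumptions, not part of this patch):

	/* Illustrative assumption: a syscall-entry hook gated on TIF_NOHZ. */
	static void syscall_entry_rcu_hook(void)
	{
		if (test_thread_flag(TIF_NOHZ))
			rcu_user_exit();	/* leave userspace EQS on kernel entry */
	}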
@@ -2473,6 +2626,9 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
 	rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
 	WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
 	WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
+#ifdef CONFIG_RCU_USER_QS
+	WARN_ON_ONCE(rdp->dynticks->in_user);
+#endif
 	rdp->cpu = cpu;
 	rdp->rsp = rsp;
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);