Diffstat (limited to 'kernel')
-rw-r--r--  kernel/rcutree.c        | 22
-rw-r--r--  kernel/rcutree_plugin.h | 14
2 files changed, 18 insertions, 18 deletions
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 2712b8991143..8eb9cfd9e2b1 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -407,7 +407,7 @@ static void rcu_eqs_enter(bool user)
 	long long oldval;
 	struct rcu_dynticks *rdtp;
 
-	rdtp = &__get_cpu_var(rcu_dynticks);
+	rdtp = this_cpu_ptr(&rcu_dynticks);
 	oldval = rdtp->dynticks_nesting;
 	WARN_ON_ONCE((oldval & DYNTICK_TASK_NEST_MASK) == 0);
 	if ((oldval & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE)
@@ -435,7 +435,7 @@ void rcu_idle_enter(void)
 
 	local_irq_save(flags);
 	rcu_eqs_enter(false);
-	rcu_sysidle_enter(&__get_cpu_var(rcu_dynticks), 0);
+	rcu_sysidle_enter(this_cpu_ptr(&rcu_dynticks), 0);
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(rcu_idle_enter);
@@ -478,7 +478,7 @@ void rcu_irq_exit(void)
 	struct rcu_dynticks *rdtp;
 
 	local_irq_save(flags);
-	rdtp = &__get_cpu_var(rcu_dynticks);
+	rdtp = this_cpu_ptr(&rcu_dynticks);
 	oldval = rdtp->dynticks_nesting;
 	rdtp->dynticks_nesting--;
 	WARN_ON_ONCE(rdtp->dynticks_nesting < 0);
@@ -528,7 +528,7 @@ static void rcu_eqs_exit(bool user)
 	struct rcu_dynticks *rdtp;
 	long long oldval;
 
-	rdtp = &__get_cpu_var(rcu_dynticks);
+	rdtp = this_cpu_ptr(&rcu_dynticks);
 	oldval = rdtp->dynticks_nesting;
 	WARN_ON_ONCE(oldval < 0);
 	if (oldval & DYNTICK_TASK_NEST_MASK)
@@ -555,7 +555,7 @@ void rcu_idle_exit(void)
 
 	local_irq_save(flags);
 	rcu_eqs_exit(false);
-	rcu_sysidle_exit(&__get_cpu_var(rcu_dynticks), 0);
+	rcu_sysidle_exit(this_cpu_ptr(&rcu_dynticks), 0);
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(rcu_idle_exit);
@@ -599,7 +599,7 @@ void rcu_irq_enter(void)
 	long long oldval;
 
 	local_irq_save(flags);
-	rdtp = &__get_cpu_var(rcu_dynticks);
+	rdtp = this_cpu_ptr(&rcu_dynticks);
 	oldval = rdtp->dynticks_nesting;
 	rdtp->dynticks_nesting++;
 	WARN_ON_ONCE(rdtp->dynticks_nesting == 0);
@@ -620,7 +620,7 @@ void rcu_irq_enter(void)
  */
 void rcu_nmi_enter(void)
 {
-	struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
+	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
 
 	if (rdtp->dynticks_nmi_nesting == 0 &&
 	    (atomic_read(&rdtp->dynticks) & 0x1))
@@ -642,7 +642,7 @@ void rcu_nmi_enter(void)
  */
 void rcu_nmi_exit(void)
 {
-	struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
+	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
 
 	if (rdtp->dynticks_nmi_nesting == 0 ||
 	    --rdtp->dynticks_nmi_nesting != 0)
@@ -665,7 +665,7 @@ int rcu_is_cpu_idle(void)
 	int ret;
 
 	preempt_disable();
-	ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
+	ret = (atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1) == 0;
 	preempt_enable();
 	return ret;
 }
@@ -703,7 +703,7 @@ bool rcu_lockdep_current_cpu_online(void)
 	if (in_nmi())
 		return 1;
 	preempt_disable();
-	rdp = &__get_cpu_var(rcu_sched_data);
+	rdp = this_cpu_ptr(&rcu_sched_data);
 	rnp = rdp->mynode;
 	ret = (rdp->grpmask & rnp->qsmaskinit) ||
 	      !rcu_scheduler_fully_active;
@@ -723,7 +723,7 @@ EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
  */
 static int rcu_is_cpu_rrupt_from_idle(void)
 {
-	return __get_cpu_var(rcu_dynticks).dynticks_nesting <= 1;
+	return __this_cpu_read(rcu_dynticks.dynticks_nesting) <= 1;
 }
 
 /*
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 6f9aecef8ab6..c684f7ab37fa 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -660,7 +660,7 @@ static void rcu_preempt_check_callbacks(int cpu)
 
 static void rcu_preempt_do_callbacks(void)
 {
-	rcu_do_batch(&rcu_preempt_state, &__get_cpu_var(rcu_preempt_data));
+	rcu_do_batch(&rcu_preempt_state, this_cpu_ptr(&rcu_preempt_data));
 }
 
 #endif /* #ifdef CONFIG_RCU_BOOST */
@@ -1332,7 +1332,7 @@ static void invoke_rcu_callbacks_kthread(void)
  */
 static bool rcu_is_callbacks_kthread(void)
 {
-	return __get_cpu_var(rcu_cpu_kthread_task) == current;
+	return __this_cpu_read(rcu_cpu_kthread_task) == current;
 }
 
 #define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)
@@ -1382,8 +1382,8 @@ static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 
 static void rcu_kthread_do_work(void)
 {
-	rcu_do_batch(&rcu_sched_state, &__get_cpu_var(rcu_sched_data));
-	rcu_do_batch(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
+	rcu_do_batch(&rcu_sched_state, this_cpu_ptr(&rcu_sched_data));
+	rcu_do_batch(&rcu_bh_state, this_cpu_ptr(&rcu_bh_data));
 	rcu_preempt_do_callbacks();
 }
 
@@ -1402,7 +1402,7 @@ static void rcu_cpu_kthread_park(unsigned int cpu)
 
 static int rcu_cpu_kthread_should_run(unsigned int cpu)
 {
-	return __get_cpu_var(rcu_cpu_has_work);
+	return __this_cpu_read(rcu_cpu_has_work);
 }
 
 /*
@@ -1412,8 +1412,8 @@ static int rcu_cpu_kthread_should_run(unsigned int cpu)
  */
 static void rcu_cpu_kthread(unsigned int cpu)
 {
-	unsigned int *statusp = &__get_cpu_var(rcu_cpu_kthread_status);
-	char work, *workp = &__get_cpu_var(rcu_cpu_has_work);
+	unsigned int *statusp = this_cpu_ptr(&rcu_cpu_kthread_status);
+	char work, *workp = this_cpu_ptr(&rcu_cpu_has_work);
 	int spincnt;
 
 	for (spincnt = 0; spincnt < 10; spincnt++) {
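
The conversion throughout this patch is mechanical: taking a pointer with &__get_cpu_var(var) becomes this_cpu_ptr(&var), and a plain read of a per-CPU scalar or member becomes __this_cpu_read(var) or __this_cpu_read(var.field). A minimal sketch of the two idioms follows; the per-CPU variable demo_pcpu and the function demo_accessors() are hypothetical and not part of this patch.

#include <linux/percpu.h>
#include <linux/bug.h>

/* Hypothetical per-CPU state, for illustration only. */
struct demo_pcpu {
	long long nesting;
};
static DEFINE_PER_CPU(struct demo_pcpu, demo_pcpu);

static void demo_accessors(void)
{
	/*
	 * Old idiom: __get_cpu_var() yields the lvalue itself, so a
	 * pointer needs an explicit address-of:
	 *
	 *	struct demo_pcpu *p = &__get_cpu_var(demo_pcpu);
	 *
	 * New idiom: pass the address of the per-CPU symbol and let
	 * this_cpu_ptr() return this CPU's instance directly.
	 */
	struct demo_pcpu *p = this_cpu_ptr(&demo_pcpu);

	p->nesting++;

	/*
	 * A plain read goes through __this_cpu_read() instead of
	 * dereferencing __get_cpu_var().
	 */
	WARN_ON_ONCE(__this_cpu_read(demo_pcpu.nesting) < 0);
}

The double-underscore __this_cpu_read() form is intended for contexts where preemption is already disabled, which matches how these RCU paths are entered (under local_irq_save(), preempt_disable(), or from interrupt context); __get_cpu_var() was being phased out of the tree, so the new accessors express the same per-CPU accesses through the surviving API.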