| author | Robert Richter <robert.richter@amd.com> | 2010-10-25 10:28:14 -0400 |
|---|---|---|
| committer | Robert Richter <robert.richter@amd.com> | 2010-10-25 10:29:12 -0400 |
| commit | dbd1e66e04558a582e673bc4a9cd933ce0228d93 (patch) | |
| tree | 85f3633276282cde0a3ac558d988704eaa3e68af /kernel/rcutree.c | |
| parent | 328b8f1ba50b708a1b3c0acd7c41ee1b356822f6 (diff) | |
| parent | 4a60cfa9457749f7987fd4f3c956dbba5a281129 (diff) | |
Merge commit 'linux-2.6/master' (early part) into oprofile/core
This branch depends on these apic patches:
      apic, x86: Use BIOS settings for IBS and MCE threshold interrupt LVT offsets
      apic, x86: Check if EILVT APIC registers are available (AMD only)
Signed-off-by: Robert Richter <robert.richter@amd.com>
Diffstat (limited to 'kernel/rcutree.c')
| -rw-r--r-- | kernel/rcutree.c | 92 |

1 file changed, 58 insertions(+), 34 deletions(-)
```diff
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index d5bc43976c5a..ccdc04c47981 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -143,6 +143,11 @@ module_param(blimit, int, 0);
 module_param(qhimark, int, 0);
 module_param(qlowmark, int, 0);
 
+#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
+int rcu_cpu_stall_suppress __read_mostly = RCU_CPU_STALL_SUPPRESS_INIT;
+module_param(rcu_cpu_stall_suppress, int, 0644);
+#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
+
 static void force_quiescent_state(struct rcu_state *rsp, int relaxed);
 static int rcu_pending(int cpu);
 
```
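The stall-suppress knob becomes an ordinary integer module parameter. A minimal sketch (not part of the patch; `demo_suppress` is a hypothetical stand-in for the real variable) of what `module_param()` with mode 0644 provides: the value is readable and root-writable under /sys/module/<name>/parameters/, and for built-in code like this it can typically also be set on the kernel command line (e.g. `rcutree.rcu_cpu_stall_suppress=1`).

```c
#include <linux/module.h>
#include <linux/moduleparam.h>

/* Hypothetical stand-in for rcu_cpu_stall_suppress. */
static int demo_suppress;

/* Mode 0644: world-readable, root-writable via
 * /sys/module/<name>/parameters/demo_suppress. */
module_param(demo_suppress, int, 0644);
MODULE_PARM_DESC(demo_suppress, "Suppress stall warnings when non-zero");
```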
```diff
@@ -450,7 +455,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
 
 #ifdef CONFIG_RCU_CPU_STALL_DETECTOR
 
-int rcu_cpu_stall_panicking __read_mostly;
+int rcu_cpu_stall_suppress __read_mostly;
 
 static void record_gp_stall_check_time(struct rcu_state *rsp)
 {
@@ -482,8 +487,11 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
 	rcu_print_task_stall(rnp);
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 
-	/* OK, time to rat on our buddy... */
-
+	/*
+	 * OK, time to rat on our buddy...
+	 * See Documentation/RCU/stallwarn.txt for info on how to debug
+	 * RCU CPU stall warnings.
+	 */
 	printk(KERN_ERR "INFO: %s detected stalls on CPUs/tasks: {",
 	       rsp->name);
 	rcu_for_each_leaf_node(rsp, rnp) {
@@ -512,6 +520,11 @@ static void print_cpu_stall(struct rcu_state *rsp)
 	unsigned long flags;
 	struct rcu_node *rnp = rcu_get_root(rsp);
 
+	/*
+	 * OK, time to rat on ourselves...
+	 * See Documentation/RCU/stallwarn.txt for info on how to debug
+	 * RCU CPU stall warnings.
+	 */
 	printk(KERN_ERR "INFO: %s detected stall on CPU %d (t=%lu jiffies)\n",
 	       rsp->name, smp_processor_id(), jiffies - rsp->gp_start);
 	trigger_all_cpu_backtrace();
@@ -530,11 +543,11 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
 	long delta;
 	struct rcu_node *rnp;
 
-	if (rcu_cpu_stall_panicking)
+	if (rcu_cpu_stall_suppress)
 		return;
-	delta = jiffies - rsp->jiffies_stall;
+	delta = jiffies - ACCESS_ONCE(rsp->jiffies_stall);
 	rnp = rdp->mynode;
-	if ((rnp->qsmask & rdp->grpmask) && delta >= 0) {
+	if ((ACCESS_ONCE(rnp->qsmask) & rdp->grpmask) && delta >= 0) {
 
 		/* We haven't checked in, so go dump stack. */
 		print_cpu_stall(rsp);
```
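The stall check runs locklessly, so the rewritten `check_cpu_stall()` reads `jiffies_stall` and `qsmask` through `ACCESS_ONCE()`, which forces the compiler to emit exactly one load rather than caching or refetching a value that another CPU may be updating concurrently. For reference, the macro's definition in `include/linux/compiler.h` of this era:

```c
/* The volatile cast defeats compiler caching/refetching of x,
 * guaranteeing a single access per source-level use. */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
```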
```diff
@@ -548,10 +561,26 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
 
 static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
 {
-	rcu_cpu_stall_panicking = 1;
+	rcu_cpu_stall_suppress = 1;
 	return NOTIFY_DONE;
 }
 
+/**
+ * rcu_cpu_stall_reset - prevent further stall warnings in current grace period
+ *
+ * Set the stall-warning timeout way off into the future, thus preventing
+ * any RCU CPU stall-warning messages from appearing in the current set of
+ * RCU grace periods.
+ *
+ * The caller must disable hard irqs.
+ */
+void rcu_cpu_stall_reset(void)
+{
+	rcu_sched_state.jiffies_stall = jiffies + ULONG_MAX / 2;
+	rcu_bh_state.jiffies_stall = jiffies + ULONG_MAX / 2;
+	rcu_preempt_stall_reset();
+}
+
 static struct notifier_block rcu_panic_block = {
 	.notifier_call = rcu_panic,
 };
```
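The `jiffies + ULONG_MAX / 2` assignment in `rcu_cpu_stall_reset()` exploits the wrap-safe comparison shown above: `check_cpu_stall()` warns only once the signed difference `jiffies - jiffies_stall` turns non-negative, and pushing the timeout half the counter's range into the future keeps that difference negative for the current grace periods. A standalone userspace sketch of the arithmetic:

```c
#include <stdio.h>

int main(void)
{
	unsigned long jiffies = 0xfffffff0UL;	    /* counter about to wrap */
	unsigned long stall = jiffies + (~0UL) / 2; /* timeout pushed far out */
	long delta = (long)(jiffies - stall);	    /* signed view of the gap */

	/* delta is hugely negative, so a "delta >= 0" stall test cannot fire. */
	printf("delta = %ld\n", delta);
	return 0;
}
```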
```diff
@@ -571,6 +600,10 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
 {
 }
 
+void rcu_cpu_stall_reset(void)
+{
+}
+
 static void __init check_cpu_stall_init(void)
 {
 }
@@ -712,7 +745,7 @@ static void
 rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
 	__releases(rcu_get_root(rsp)->lock)
 {
-	struct rcu_data *rdp = rsp->rda[smp_processor_id()];
+	struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
 	struct rcu_node *rnp = rcu_get_root(rsp);
 
 	if (!cpu_needs_another_gp(rsp, rdp) || rsp->fqs_active) {
```
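This is the first of many `rsp->rda[smp_processor_id()]` → `this_cpu_ptr(rsp->rda)` conversions in the merge: `rda` stops being an NR_CPUS-sized array of pointers and becomes a genuine `__percpu` pointer, resolved on demand (with `per_cpu_ptr(rsp->rda, cpu)` for a specific CPU, as in the hunks below). A simplified sketch of the two access styles, using hypothetical `demo_*` names:

```c
#include <linux/percpu.h>

struct demo_data { long qlen; };

static DEFINE_PER_CPU(struct demo_data, demo_data);

struct demo_state {
	/* was: struct demo_data *rda[NR_CPUS]; */
	struct demo_data __percpu *rda;
};

static struct demo_state demo_state = { .rda = &demo_data };

static void demo_touch(int cpu)
{
	struct demo_data *remote = per_cpu_ptr(demo_state.rda, cpu); /* any CPU */
	struct demo_data *local = this_cpu_ptr(demo_state.rda);      /* this CPU */

	remote->qlen++;
	local->qlen++;
}
```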
```diff
@@ -960,7 +993,7 @@ rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
 static void rcu_send_cbs_to_orphanage(struct rcu_state *rsp)
 {
 	int i;
-	struct rcu_data *rdp = rsp->rda[smp_processor_id()];
+	struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
 
 	if (rdp->nxtlist == NULL)
 		return;  /* irqs disabled, so comparison is stable. */
@@ -971,6 +1004,7 @@ static void rcu_send_cbs_to_orphanage(struct rcu_state *rsp)
 	for (i = 0; i < RCU_NEXT_SIZE; i++)
 		rdp->nxttail[i] = &rdp->nxtlist;
 	rsp->orphan_qlen += rdp->qlen;
+	rdp->n_cbs_orphaned += rdp->qlen;
 	rdp->qlen = 0;
 	raw_spin_unlock(&rsp->onofflock);  /* irqs remain disabled. */
 }
@@ -984,7 +1018,7 @@ static void rcu_adopt_orphan_cbs(struct rcu_state *rsp)
 	struct rcu_data *rdp;
 
 	raw_spin_lock_irqsave(&rsp->onofflock, flags);
-	rdp = rsp->rda[smp_processor_id()];
+	rdp = this_cpu_ptr(rsp->rda);
 	if (rsp->orphan_cbs_list == NULL) {
 		raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
 		return;
@@ -992,6 +1026,7 @@ static void rcu_adopt_orphan_cbs(struct rcu_state *rsp)
 	*rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_cbs_list;
 	rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_cbs_tail;
 	rdp->qlen += rsp->orphan_qlen;
+	rdp->n_cbs_adopted += rsp->orphan_qlen;
 	rsp->orphan_cbs_list = NULL;
 	rsp->orphan_cbs_tail = &rsp->orphan_cbs_list;
 	rsp->orphan_qlen = 0;
@@ -1007,7 +1042,7 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
 	unsigned long flags;
 	unsigned long mask;
 	int need_report = 0;
-	struct rcu_data *rdp = rsp->rda[cpu];
+	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
 	struct rcu_node *rnp;
 
 	/* Exclude any attempts to start a new grace period. */
@@ -1123,6 +1158,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
 
 	/* Update count, and requeue any remaining callbacks. */
 	rdp->qlen -= count;
+	rdp->n_cbs_invoked += count;
 	if (list != NULL) {
 		*tail = rdp->nxtlist;
 		rdp->nxtlist = list;
```
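Together, the three new `rcu_data` counters give each CPU an audit trail of callback flow: `n_cbs_invoked` callbacks executed, `n_cbs_orphaned` callbacks handed off when the CPU went offline, and `n_cbs_adopted` callbacks inherited from dead CPUs. A hedged sketch (the helper is hypothetical, not in the patch; the likely consumer is RCU's debugfs tracing) of rolling them up:

```c
/* Hypothetical helper: total the new per-CPU callback counters
 * across all possible CPUs, e.g. for a tracing/debug interface. */
static void demo_sum_cb_counts(struct rcu_state *rsp, unsigned long *invoked,
			       unsigned long *orphaned, unsigned long *adopted)
{
	int cpu;

	*invoked = *orphaned = *adopted = 0;
	for_each_possible_cpu(cpu) {
		struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);

		*invoked += rdp->n_cbs_invoked;
		*orphaned += rdp->n_cbs_orphaned;
		*adopted += rdp->n_cbs_adopted;
	}
}
```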
```diff
@@ -1226,7 +1262,8 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *))
 		cpu = rnp->grplo;
 		bit = 1;
 		for (; cpu <= rnp->grphi; cpu++, bit <<= 1) {
-			if ((rnp->qsmask & bit) != 0 && f(rsp->rda[cpu]))
+			if ((rnp->qsmask & bit) != 0 &&
+			    f(per_cpu_ptr(rsp->rda, cpu)))
 				mask |= bit;
 		}
 		if (mask != 0) {
@@ -1402,7 +1439,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 	 * a quiescent state betweentimes.
 	 */
 	local_irq_save(flags);
-	rdp = rsp->rda[smp_processor_id()];
+	rdp = this_cpu_ptr(rsp->rda);
 	rcu_process_gp_end(rsp, rdp);
 	check_for_new_grace_period(rsp, rdp);
 
@@ -1701,7 +1738,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
 {
 	unsigned long flags;
 	int i;
-	struct rcu_data *rdp = rsp->rda[cpu];
+	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
 	struct rcu_node *rnp = rcu_get_root(rsp);
 
 	/* Set up local state, ensuring consistent view of global state. */
@@ -1729,7 +1766,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable)
 {
 	unsigned long flags;
 	unsigned long mask;
-	struct rcu_data *rdp = rsp->rda[cpu];
+	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
 	struct rcu_node *rnp = rcu_get_root(rsp);
 
 	/* Set up local state, ensuring consistent view of global state. */
@@ -1865,7 +1902,8 @@ static void __init rcu_init_levelspread(struct rcu_state *rsp)
 /*
  * Helper function for rcu_init() that initializes one rcu_state structure.
  */
-static void __init rcu_init_one(struct rcu_state *rsp)
+static void __init rcu_init_one(struct rcu_state *rsp,
+		struct rcu_data __percpu *rda)
 {
 	static char *buf[] = { "rcu_node_level_0",
 			       "rcu_node_level_1",
```
```diff
@@ -1918,37 +1956,23 @@ static void __init rcu_init_one(struct rcu_state *rsp)
 		}
 	}
 
+	rsp->rda = rda;
 	rnp = rsp->level[NUM_RCU_LVLS - 1];
 	for_each_possible_cpu(i) {
 		while (i > rnp->grphi)
 			rnp++;
-		rsp->rda[i]->mynode = rnp;
+		per_cpu_ptr(rsp->rda, i)->mynode = rnp;
 		rcu_boot_init_percpu_data(i, rsp);
 	}
 }
 
-/*
- * Helper macro for __rcu_init() and __rcu_init_preempt().  To be used
- * nowhere else!  Assigns leaf node pointers into each CPU's rcu_data
- * structure.
- */
-#define RCU_INIT_FLAVOR(rsp, rcu_data) \
-do { \
-	int i; \
-	\
-	for_each_possible_cpu(i) { \
-		(rsp)->rda[i] = &per_cpu(rcu_data, i); \
-	} \
-	rcu_init_one(rsp); \
-} while (0)
-
 void __init rcu_init(void)
 {
 	int cpu;
 
 	rcu_bootup_announce();
-	RCU_INIT_FLAVOR(&rcu_sched_state, rcu_sched_data);
-	RCU_INIT_FLAVOR(&rcu_bh_state, rcu_bh_data);
+	rcu_init_one(&rcu_sched_state, &rcu_sched_data);
+	rcu_init_one(&rcu_bh_state, &rcu_bh_data);
 	__rcu_init_preempt();
 	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
 
```
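Deleting `RCU_INIT_FLAVOR()` is the structural payoff of the `__percpu` conversion: `per_cpu(var, cpu)` needs the per-CPU variable's *identifier* spliced in at the call site, so only a macro could populate each flavor's old `rda[]` array, whereas a `__percpu` pointer is an ordinary value that `rcu_init_one()` can take as a parameter. A small sketch of the distinction (`demo_*` names are hypothetical):

```c
#include <linux/init.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(int, demo_counter);

/* New style: an ordinary function receives the per-CPU object by
 * address and resolves it with per_cpu_ptr(). */
static void demo_zero(int __percpu *p)
{
	int cpu;

	for_each_possible_cpu(cpu)
		*per_cpu_ptr(p, cpu) = 0;
}

/* Old style: per_cpu() wants the variable *name*, which only a
 * macro can paste in. */
#define DEMO_ZERO(var)				\
do {						\
	int cpu;				\
	for_each_possible_cpu(cpu)		\
		per_cpu(var, cpu) = 0;		\
} while (0)

static void __init demo_init(void)
{
	demo_zero(&demo_counter);	/* function call, like rcu_init_one() */
	DEMO_ZERO(demo_counter);	/* macro expansion, like RCU_INIT_FLAVOR() */
}
```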
