author		Cliff Wickman <cpw@sgi.com>	2009-04-03 09:34:05 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-04-03 12:25:26 -0400
commit		9674f35b1ec17577163897f052f405c1e9e5893d
tree		88dad89d3ba2a736f780abccf87a833a9726355c	/arch/x86/kernel/tlb_uv.c
parent		484cad34dd667235565c14a40e2f5a8143184aaa
x86: UV BAU and nodes with no memory
This patch fixes BAU initialization for systems containing
nodes with no memory and for systems with non-consecutive
node numbers.
It also fixes and clarifies situations where the pnode (physical node
number) should be used instead of the node id.
Tested on the UV hardware simulator.
Signed-off-by: Cliff Wickman <cpw@sgi.com>
LKML-Reference: <E1LpjX3-00007N-12@eag09.americas.sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/tlb_uv.c')
-rw-r--r--	arch/x86/kernel/tlb_uv.c	108
1 file changed, 61 insertions(+), 47 deletions(-)
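To make the shape of the fix easier to see before reading the hunks, here is a condensed sketch of how uv_bau_init() walks the machine before and after this patch. It is an illustration distilled from the diff below, not the literal patch text:

	/*
	 * Before: blades were found by scanning online nodes, and cur_cpu
	 * assumed cpu numbers accumulate consecutively per blade; with
	 * memoryless nodes or non-consecutive node numbers this could skip
	 * a blade or hand uv_init_blade() the wrong starting cpu.
	 */
	last_blade = -1;
	cur_cpu = 0;
	for_each_online_node(node) {
		blade = uv_node_to_blade_id(node);
		if (blade == last_blade)
			continue;
		last_blade = blade;
		uv_init_blade(blade, node, cur_cpu);
		cur_cpu += uv_blade_nr_possible_cpus(blade);
	}

	/*
	 * After: iterate blade numbers directly; each blade looks up its own
	 * first node and first apicid via blade_to_first_node() and
	 * blade_to_first_apicid(), so the node topology no longer matters.
	 */
	for (blade = 0; blade < uv_num_possible_blades(); blade++)
		if (uv_blade_nr_possible_cpus(blade))
			uv_init_blade(blade);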
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c
index 79c073247284..b833bc634d17 100644
--- a/arch/x86/kernel/tlb_uv.c
+++ b/arch/x86/kernel/tlb_uv.c
@@ -32,6 +32,34 @@ static DEFINE_PER_CPU(struct ptc_stats, ptcstats);
 static DEFINE_PER_CPU(struct bau_control, bau_control);
 
 /*
+ * Determine the first node on a blade.
+ */
+static int __init blade_to_first_node(int blade)
+{
+	int node, b;
+
+	for_each_online_node(node) {
+		b = uv_node_to_blade_id(node);
+		if (blade == b)
+			return node;
+	}
+	BUG();
+}
+
+/*
+ * Determine the apicid of the first cpu on a blade.
+ */
+static int __init blade_to_first_apicid(int blade)
+{
+	int cpu;
+
+	for_each_present_cpu(cpu)
+		if (blade == uv_cpu_to_blade_id(cpu))
+			return per_cpu(x86_cpu_to_apicid, cpu);
+	return -1;
+}
+
+/*
  * Free a software acknowledge hardware resource by clearing its Pending
  * bit. This will return a reply to the sender.
  * If the message has timed out, a reply has already been sent by the
@@ -67,7 +95,7 @@ static void uv_bau_process_message(struct bau_payload_queue_entry *msg,
 	msp = __get_cpu_var(bau_control).msg_statuses + msg_slot;
 	cpu = uv_blade_processor_id();
 	msg->number_of_cpus =
-	    uv_blade_nr_online_cpus(uv_node_to_blade_id(numa_node_id()));
+		uv_blade_nr_online_cpus(uv_node_to_blade_id(numa_node_id()));
 	this_cpu_mask = 1UL << cpu;
 	if (msp->seen_by.bits & this_cpu_mask)
 		return;
@@ -215,14 +243,14 @@ static int uv_wait_completion(struct bau_desc *bau_desc,
  * Returns @flush_mask if some remote flushing remains to be done. The
  * mask will have some bits still set.
  */
-const struct cpumask *uv_flush_send_and_wait(int cpu, int this_blade,
+const struct cpumask *uv_flush_send_and_wait(int cpu, int this_pnode,
 					     struct bau_desc *bau_desc,
 					     struct cpumask *flush_mask)
 {
 	int completion_status = 0;
 	int right_shift;
 	int tries = 0;
-	int blade;
+	int pnode;
 	int bit;
 	unsigned long mmr_offset;
 	unsigned long index;
@@ -265,8 +293,8 @@ const struct cpumask *uv_flush_send_and_wait(int cpu, int this_blade,
 	 * use the IPI method of shootdown on them.
 	 */
 	for_each_cpu(bit, flush_mask) {
-		blade = uv_cpu_to_blade_id(bit);
-		if (blade == this_blade)
+		pnode = uv_cpu_to_pnode(bit);
+		if (pnode == this_pnode)
 			continue;
 		cpumask_clear_cpu(bit, flush_mask);
 	}
@@ -308,16 +336,16 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
 	struct cpumask *flush_mask = &__get_cpu_var(flush_tlb_mask);
 	int i;
 	int bit;
-	int blade;
+	int pnode;
 	int uv_cpu;
-	int this_blade;
+	int this_pnode;
 	int locals = 0;
 	struct bau_desc *bau_desc;
 
 	cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));
 
 	uv_cpu = uv_blade_processor_id();
-	this_blade = uv_numa_blade_id();
+	this_pnode = uv_hub_info->pnode;
 	bau_desc = __get_cpu_var(bau_control).descriptor_base;
 	bau_desc += UV_ITEMS_PER_DESCRIPTOR * uv_cpu;
 
@@ -325,13 +353,13 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
 
 	i = 0;
 	for_each_cpu(bit, flush_mask) {
-		blade = uv_cpu_to_blade_id(bit);
-		BUG_ON(blade > (UV_DISTRIBUTION_SIZE - 1));
-		if (blade == this_blade) {
+		pnode = uv_cpu_to_pnode(bit);
+		BUG_ON(pnode > (UV_DISTRIBUTION_SIZE - 1));
+		if (pnode == this_pnode) {
 			locals++;
 			continue;
 		}
-		bau_node_set(blade, &bau_desc->distribution);
+		bau_node_set(pnode, &bau_desc->distribution);
 		i++;
 	}
 	if (i == 0) {
@@ -349,7 +377,7 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
 	bau_desc->payload.address = va;
 	bau_desc->payload.sending_cpu = cpu;
 
-	return uv_flush_send_and_wait(uv_cpu, this_blade, bau_desc, flush_mask);
+	return uv_flush_send_and_wait(uv_cpu, this_pnode, bau_desc, flush_mask);
 }
 
 /*
@@ -481,8 +509,7 @@ static int uv_ptc_seq_show(struct seq_file *file, void *data)
 			   stat->requestee, stat->onetlb, stat->alltlb,
 			   stat->s_retry, stat->d_retry, stat->ptc_i);
 		seq_printf(file, "%lx %ld %ld %ld %ld %ld %ld\n",
-			   uv_read_global_mmr64(uv_blade_to_pnode
-					(uv_cpu_to_blade_id(cpu)),
+			   uv_read_global_mmr64(uv_cpu_to_pnode(cpu),
 					UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE),
 			   stat->sflush, stat->dflush,
 			   stat->retriesok, stat->nomsg,
@@ -616,16 +643,18 @@ static struct bau_control * __init uv_table_bases_init(int blade, int node)
  * finish the initialization of the per-blade control structures
  */
 static void __init
-uv_table_bases_finish(int blade, int node, int cur_cpu,
+uv_table_bases_finish(int blade,
 		      struct bau_control *bau_tablesp,
 		      struct bau_desc *adp)
 {
 	struct bau_control *bcp;
-	int i;
+	int cpu;
 
-	for (i = cur_cpu; i < cur_cpu + uv_blade_nr_possible_cpus(blade); i++) {
-		bcp = (struct bau_control *)&per_cpu(bau_control, i);
+	for_each_present_cpu(cpu) {
+		if (blade != uv_cpu_to_blade_id(cpu))
+			continue;
 
+		bcp = (struct bau_control *)&per_cpu(bau_control, cpu);
 		bcp->bau_msg_head = bau_tablesp->va_queue_first;
 		bcp->va_queue_first = bau_tablesp->va_queue_first;
 		bcp->va_queue_last = bau_tablesp->va_queue_last;
@@ -648,8 +677,7 @@ uv_activation_descriptor_init(int node, int pnode)
 	struct bau_desc *adp;
 	struct bau_desc *ad2;
 
-	adp = (struct bau_desc *)
-	    kmalloc_node(16384, GFP_KERNEL, node);
+	adp = (struct bau_desc *)kmalloc_node(16384, GFP_KERNEL, node);
 	BUG_ON(!adp);
 
 	pa = __pa((unsigned long)adp);
@@ -666,8 +694,7 @@ uv_activation_descriptor_init(int node, int pnode)
 	for (i = 0, ad2 = adp; i < UV_ACTIVATION_DESCRIPTOR_SIZE; i++, ad2++) {
 		memset(ad2, 0, sizeof(struct bau_desc));
 		ad2->header.sw_ack_flag = 1;
-		ad2->header.base_dest_nodeid =
-			uv_blade_to_pnode(uv_cpu_to_blade_id(0));
+		ad2->header.base_dest_nodeid = uv_cpu_to_pnode(0);
 		ad2->header.command = UV_NET_ENDPOINT_INTD;
 		ad2->header.int_both = 1;
 		/*
@@ -714,8 +741,9 @@ uv_payload_queue_init(int node, int pnode, struct bau_control *bau_tablesp)
 /*
  * Initialization of each UV blade's structures
  */
-static int __init uv_init_blade(int blade, int node, int cur_cpu)
+static int __init uv_init_blade(int blade)
 {
+	int node;
 	int pnode;
 	unsigned long pa;
 	unsigned long apicid;
@@ -723,16 +751,17 @@ static int __init uv_init_blade(int blade, int node, int cur_cpu)
 	struct bau_payload_queue_entry *pqp;
 	struct bau_control *bau_tablesp;
 
+	node = blade_to_first_node(blade);
 	bau_tablesp = uv_table_bases_init(blade, node);
 	pnode = uv_blade_to_pnode(blade);
 	adp = uv_activation_descriptor_init(node, pnode);
 	pqp = uv_payload_queue_init(node, pnode, bau_tablesp);
-	uv_table_bases_finish(blade, node, cur_cpu, bau_tablesp, adp);
+	uv_table_bases_finish(blade, bau_tablesp, adp);
 	/*
 	 * the below initialization can't be in firmware because the
 	 * messaging IRQ will be determined by the OS
 	 */
-	apicid = per_cpu(x86_cpu_to_apicid, cur_cpu);
+	apicid = blade_to_first_apicid(blade);
 	pa = uv_read_global_mmr64(pnode, UVH_BAU_DATA_CONFIG);
 	if ((pa & 0xff) != UV_BAU_MESSAGE) {
 		uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG,
@@ -747,9 +776,7 @@ static int __init uv_init_blade(int blade, int node, int cur_cpu)
 static int __init uv_bau_init(void)
 {
 	int blade;
-	int node;
 	int nblades;
-	int last_blade;
 	int cur_cpu;
 
 	if (!is_uv_system())
@@ -758,29 +785,16 @@ static int __init uv_bau_init(void)
 	uv_bau_retry_limit = 1;
 	uv_nshift = uv_hub_info->n_val;
 	uv_mmask = (1UL << uv_hub_info->n_val) - 1;
-	nblades = 0;
-	last_blade = -1;
-	cur_cpu = 0;
-	for_each_online_node(node) {
-		blade = uv_node_to_blade_id(node);
-		if (blade == last_blade)
-			continue;
-		last_blade = blade;
-		nblades++;
-	}
+	nblades = uv_num_possible_blades();
+
 	uv_bau_table_bases = (struct bau_control **)
 		kmalloc(nblades * sizeof(struct bau_control *), GFP_KERNEL);
 	BUG_ON(!uv_bau_table_bases);
 
-	last_blade = -1;
-	for_each_online_node(node) {
-		blade = uv_node_to_blade_id(node);
-		if (blade == last_blade)
-			continue;
-		last_blade = blade;
-		uv_init_blade(blade, node, cur_cpu);
-		cur_cpu += uv_blade_nr_possible_cpus(blade);
-	}
+	for (blade = 0; blade < nblades; blade++)
+		if (uv_blade_nr_possible_cpus(blade))
+			uv_init_blade(blade);
+
 	alloc_intr_gate(UV_BAU_MESSAGE, uv_bau_message_intr1);
 	uv_enable_timeouts();
 