author		Ingo Molnar <mingo@elte.hu>	2008-06-18 08:28:19 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-07-08 06:23:26 -0400
commit		b4c286e6af24a116228c8c9f58b9a9eb5b7c000a (patch)
tree		18b9c2a22bd17874fd5e3517266b2d6791a803a2 /arch
parent		dc163a41ffba22a6ef70b51e7ddf68aa13b4b414 (diff)
SGI UV: clean up arch/x86/kernel/tlb_uv.c

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/kernel/tlb_uv.c	107
1 file changed, 62 insertions(+), 45 deletions(-)
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c
index d8705e97e8d0..7bdbf67a2d79 100644
--- a/arch/x86/kernel/tlb_uv.c
+++ b/arch/x86/kernel/tlb_uv.c
@@ -11,19 +11,22 @@
 #include <linux/kernel.h>
 
 #include <asm/mmu_context.h>
-#include <asm/idle.h>
-#include <asm/genapic.h>
-#include <asm/uv/uv_hub.h>
 #include <asm/uv/uv_mmrs.h>
+#include <asm/uv/uv_hub.h>
 #include <asm/uv/uv_bau.h>
+#include <asm/genapic.h>
+#include <asm/idle.h>
 #include <asm/tsc.h>
 
 #include <mach_apic.h>
 
 static struct bau_control **uv_bau_table_bases __read_mostly;
 static int uv_bau_retry_limit __read_mostly;
-static int uv_nshift __read_mostly; /* position of pnode (which is nasid>>1) */
+
+/* position of pnode (which is nasid>>1): */
+static int uv_nshift __read_mostly;
+
 static unsigned long uv_mmask __read_mostly;
 
 static DEFINE_PER_CPU(struct ptc_stats, ptcstats);
 static DEFINE_PER_CPU(struct bau_control, bau_control);
@@ -37,8 +40,8 @@ static DEFINE_PER_CPU(struct bau_control, bau_control);
  * be sent (the hardware will only do one reply per message).
  */
 static void uv_reply_to_message(int resource,
-		    struct bau_payload_queue_entry *msg,
-		    struct bau_msg_status *msp)
+				struct bau_payload_queue_entry *msg,
+				struct bau_msg_status *msp)
 {
 	unsigned long dw;
 
@@ -55,11 +58,11 @@ static void uv_reply_to_message(int resource,
  * Other cpu's may come here at the same time for this message.
  */
 static void uv_bau_process_message(struct bau_payload_queue_entry *msg,
-			int msg_slot, int sw_ack_slot)
+				   int msg_slot, int sw_ack_slot)
 {
-	int cpu;
 	unsigned long this_cpu_mask;
 	struct bau_msg_status *msp;
+	int cpu;
 
 	msp = __get_cpu_var(bau_control).msg_statuses + msg_slot;
 	cpu = uv_blade_processor_id();
@@ -96,11 +99,11 @@ static void uv_bau_process_message(struct bau_payload_queue_entry *msg,
  */
 static int uv_examine_destination(struct bau_control *bau_tablesp, int sender)
 {
-	int i;
-	int j;
-	int count = 0;
 	struct bau_payload_queue_entry *msg;
 	struct bau_msg_status *msp;
+	int count = 0;
+	int i;
+	int j;
 
 	for (msg = bau_tablesp->va_queue_first, i = 0; i < DEST_Q_SIZE;
 	     msg++, i++) {
@@ -111,7 +114,7 @@ static int uv_examine_destination(struct bau_control *bau_tablesp, int sender)
 				i, msg->address, msg->acknowledge_count,
 				msg->number_of_cpus);
 		for (j = 0; j < msg->number_of_cpus; j++) {
-			if (!((long)1 << j & msp-> seen_by.bits)) {
+			if (!((1L << j) & msp->seen_by.bits)) {
 				count++;
 				printk("%d ", j);
 			}
@@ -135,8 +138,7 @@ static int uv_examine_destinations(struct bau_target_nodemask *distribution)
 	int count = 0;
 
 	sender = smp_processor_id();
-	for (i = 0; i < (sizeof(struct bau_target_nodemask) * BITSPERBYTE);
-	     i++) {
+	for (i = 0; i < sizeof(struct bau_target_nodemask) * BITSPERBYTE; i++) {
 		if (!bau_node_isset(i, distribution))
 			continue;
 		count += uv_examine_destination(uv_bau_table_bases[i], sender);
@@ -217,11 +219,11 @@ int uv_flush_send_and_wait(int cpu, int this_blade, struct bau_desc *bau_desc,
 {
 	int completion_status = 0;
 	int right_shift;
-	int bit;
-	int blade;
 	int tries = 0;
-	unsigned long index;
+	int blade;
+	int bit;
 	unsigned long mmr_offset;
+	unsigned long index;
 	cycles_t time1;
 	cycles_t time2;
 
@@ -294,7 +296,7 @@ int uv_flush_send_and_wait(int cpu, int this_blade, struct bau_desc *bau_desc,
  * Returns 0 if some remote flushing remains to be done.
  */
 int uv_flush_tlb_others(cpumask_t *cpumaskp, struct mm_struct *mm,
-		       unsigned long va)
+			unsigned long va)
 {
 	int i;
 	int bit;
@@ -356,12 +358,12 @@ int uv_flush_tlb_others(cpumask_t *cpumaskp, struct mm_struct *mm,
  */
 void uv_bau_message_interrupt(struct pt_regs *regs)
 {
-	struct bau_payload_queue_entry *pqp;
-	struct bau_payload_queue_entry *msg;
 	struct bau_payload_queue_entry *va_queue_first;
 	struct bau_payload_queue_entry *va_queue_last;
+	struct bau_payload_queue_entry *msg;
 	struct pt_regs *old_regs = set_irq_regs(regs);
-	cycles_t time1, time2;
+	cycles_t time1;
+	cycles_t time2;
 	int msg_slot;
 	int sw_ack_slot;
 	int fw;
@@ -376,13 +378,14 @@ void uv_bau_message_interrupt(struct pt_regs *regs)
 
 	local_pnode = uv_blade_to_pnode(uv_numa_blade_id());
 
-	pqp = va_queue_first = __get_cpu_var(bau_control).va_queue_first;
+	va_queue_first = __get_cpu_var(bau_control).va_queue_first;
 	va_queue_last = __get_cpu_var(bau_control).va_queue_last;
+
 	msg = __get_cpu_var(bau_control).bau_msg_head;
 	while (msg->sw_ack_vector) {
 		count++;
 		fw = msg->sw_ack_vector;
-		msg_slot = msg - pqp;
+		msg_slot = msg - va_queue_first;
 		sw_ack_slot = ffs(fw) - 1;
 
 		uv_bau_process_message(msg, msg_slot, sw_ack_slot);
@@ -484,7 +487,7 @@ static int uv_ptc_seq_show(struct seq_file *file, void *data)
  * >0: retry limit
  */
 static ssize_t uv_ptc_proc_write(struct file *file, const char __user *user,
-		     size_t count, loff_t *data)
+				 size_t count, loff_t *data)
 {
 	long newmode;
 	char optstr[64];
@@ -587,42 +590,48 @@ static struct bau_control * __init uv_table_bases_init(int blade, int node)
 	bau_tabp =
 	    kmalloc_node(sizeof(struct bau_control), GFP_KERNEL, node);
 	BUG_ON(!bau_tabp);
+
 	bau_tabp->msg_statuses =
 	    kmalloc_node(sizeof(struct bau_msg_status) *
 			 DEST_Q_SIZE, GFP_KERNEL, node);
 	BUG_ON(!bau_tabp->msg_statuses);
+
 	for (i = 0, msp = bau_tabp->msg_statuses; i < DEST_Q_SIZE; i++, msp++)
 		bau_cpubits_clear(&msp->seen_by, (int)
 				  uv_blade_nr_possible_cpus(blade));
+
 	bau_tabp->watching =
 	    kmalloc_node(sizeof(int) * DEST_NUM_RESOURCES, GFP_KERNEL, node);
 	BUG_ON(!bau_tabp->watching);
-	for (i = 0, ip = bau_tabp->watching; i < DEST_Q_SIZE; i++, ip++) {
+
+	for (i = 0, ip = bau_tabp->watching; i < DEST_Q_SIZE; i++, ip++)
 		*ip = 0;
-	}
+
 	uv_bau_table_bases[blade] = bau_tabp;
+
 	return bau_tabsp;
 }
 
 /*
  * finish the initialization of the per-blade control structures
  */
-static void __init uv_table_bases_finish(int blade, int node, int cur_cpu,
-		       struct bau_control *bau_tablesp,
-		       struct bau_desc *adp)
+static void __init
+uv_table_bases_finish(int blade, int node, int cur_cpu,
+		      struct bau_control *bau_tablesp,
+		      struct bau_desc *adp)
 {
-	int i;
 	struct bau_control *bcp;
+	int i;
 
-	for (i = cur_cpu; i < (cur_cpu + uv_blade_nr_possible_cpus(blade));
-	     i++) {
+	for (i = cur_cpu; i < cur_cpu + uv_blade_nr_possible_cpus(blade); i++) {
 		bcp = (struct bau_control *)&per_cpu(bau_control, i);
-		bcp->bau_msg_head = bau_tablesp->va_queue_first;
-		bcp->va_queue_first = bau_tablesp->va_queue_first;
-		bcp->va_queue_last = bau_tablesp->va_queue_last;
-		bcp->watching = bau_tablesp->watching;
-		bcp->msg_statuses = bau_tablesp->msg_statuses;
-		bcp->descriptor_base = adp;
+
+		bcp->bau_msg_head	= bau_tablesp->va_queue_first;
+		bcp->va_queue_first	= bau_tablesp->va_queue_first;
+		bcp->va_queue_last	= bau_tablesp->va_queue_last;
+		bcp->watching		= bau_tablesp->watching;
+		bcp->msg_statuses	= bau_tablesp->msg_statuses;
+		bcp->descriptor_base	= adp;
 	}
 }
 
@@ -643,14 +652,18 @@ uv_activation_descriptor_init(int node, int pnode)
 	adp = (struct bau_desc *)
 	    kmalloc_node(16384, GFP_KERNEL, node);
 	BUG_ON(!adp);
+
 	pa = __pa((unsigned long)adp);
 	n = pa >> uv_nshift;
 	m = pa & uv_mmask;
+
 	mmr_image = uv_read_global_mmr64(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE);
-	if (mmr_image)
+	if (mmr_image) {
 		uv_write_global_mmr64(pnode, (unsigned long)
 				      UVH_LB_BAU_SB_DESCRIPTOR_BASE,
 				      (n << UV_DESC_BASE_PNODE_SHIFT | m));
+	}
+
 	for (i = 0, ad2 = adp; i < UV_ACTIVATION_DESCRIPTOR_SIZE; i++, ad2++) {
 		memset(ad2, 0, sizeof(struct bau_desc));
 		ad2->header.sw_ack_flag = 1;
@@ -669,16 +682,17 @@ uv_activation_descriptor_init(int node, int pnode)
 /*
  * initialize the destination side's receiving buffers
  */
-static struct bau_payload_queue_entry * __init uv_payload_queue_init(int node,
-	int pnode, struct bau_control *bau_tablesp)
+static struct bau_payload_queue_entry * __init
+uv_payload_queue_init(int node, int pnode, struct bau_control *bau_tablesp)
 {
-	char *cp;
 	struct bau_payload_queue_entry *pqp;
+	char *cp;
 
 	pqp = (struct bau_payload_queue_entry *) kmalloc_node(
 	    (DEST_Q_SIZE + 1) * sizeof(struct bau_payload_queue_entry),
 	    GFP_KERNEL, node);
 	BUG_ON(!pqp);
+
 	cp = (char *)pqp + 31;
 	pqp = (struct bau_payload_queue_entry *)(((unsigned long)cp >> 5) << 5);
 	bau_tablesp->va_queue_first = pqp;
@@ -694,6 +708,7 @@ static struct bau_payload_queue_entry * __init uv_payload_queue_init(int node,
 	uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST,
 			      (unsigned long)
 			      uv_physnodeaddr(bau_tablesp->va_queue_last));
 	memset(pqp, 0, sizeof(struct bau_payload_queue_entry) * DEST_Q_SIZE);
+
 	return pqp;
 }
699 714
@@ -756,6 +771,7 @@ static int __init uv_bau_init(void)
 	uv_bau_table_bases = (struct bau_control **)
 	    kmalloc(nblades * sizeof(struct bau_control *), GFP_KERNEL);
 	BUG_ON(!uv_bau_table_bases);
+
 	last_blade = -1;
 	for_each_online_node(node) {
 		blade = uv_node_to_blade_id(node);
@@ -767,6 +783,7 @@ static int __init uv_bau_init(void)
 	}
 	set_intr_gate(UV_BAU_MESSAGE, uv_bau_message_intr1);
 	uv_enable_timeouts();
+
 	return 0;
 }
 __initcall(uv_bau_init);
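
A note on the one hunk above that could be mistaken for a logic change: the
rewrite of the seen_by bit test is purely cosmetic. Below is a minimal,
standalone C sketch (not part of the commit; the bitmap value and loop bound
are made-up sample data) showing that "<<" binds tighter than "&" in C, so the
old spelling already parsed as ((long)1 << j) & bits and the new (1L << j)
form only makes that grouping explicit:

	#include <stdio.h>

	int main(void)
	{
		unsigned long bits = 0x5UL;	/* sample bitmap: cpus 0 and 2 "seen" */
		int j;

		for (j = 0; j < 4; j++) {
			/* old spelling: relies on << binding tighter than & */
			long old_form = (long)1 << j & bits;

			/* new spelling: explicit parentheses, same result */
			long new_form = (1L << j) & bits;

			printf("cpu %d: old=%ld new=%ld %s\n", j, old_form,
			       new_form, old_form == new_form ? "(equal)" : "(differ)");
		}
		return 0;
	}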