path: root/arch/x86/kernel/tlb_uv.c
author		Ingo Molnar <mingo@elte.hu>	2008-06-18 08:15:43 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-07-08 06:23:25 -0400
commit		dc163a41ffba22a6ef70b51e7ddf68aa13b4b414 (patch)
tree		ec70a09841104730a130577e069c21c4f16b38ef /arch/x86/kernel/tlb_uv.c
parent		b194b120507276b4f09e2e14f941884e777fc7c8 (diff)
SGI UV: TLB shootdown using broadcast assist unit
TLB shootdown for SGI UV.

v5: 6/12 corrections/improvements per Ingo's second review

Signed-off-by: Cliff Wickman <cpw@sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
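Most of the changes in this revision follow a few recurring cleanup patterns: open-coded allocation-failure checks are collapsed into BUG_ON(), the long DESTINATION_* constants become DEST_*, and repeated __get_cpu_var(bau_control) lookups in the interrupt handler are hoisted into locals. A minimal before/after sketch of the BUG_ON() conversion, for reference (illustrative only; alloc_table_old()/alloc_table_new() are placeholder names, not functions from this patch):

/* Illustrative sketch only -- these helpers are placeholders, not part of the patch. */
#include <linux/bug.h>		/* BUG_ON() */
#include <linux/slab.h>		/* kmalloc_node(), GFP_KERNEL */

/* Old style, as removed throughout this patch: explicit test plus BUG(). */
static void *alloc_table_old(int node)
{
	void *p = kmalloc_node(16384, GFP_KERNEL, node);

	if (!p)
		BUG();
	return p;
}

/* New style, as introduced throughout this patch: a single BUG_ON() check. */
static void *alloc_table_new(int node)
{
	void *p = kmalloc_node(16384, GFP_KERNEL, node);

	BUG_ON(!p);
	return p;
}

The same simplification is applied below to the msg_statuses, watching, payload-queue and uv_bau_table_bases allocations.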
Diffstat (limited to 'arch/x86/kernel/tlb_uv.c')
-rw-r--r--	arch/x86/kernel/tlb_uv.c	183
1 file changed, 85 insertions(+), 98 deletions(-)
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c
index f7bc6a6fbe49..d8705e97e8d0 100644
--- a/arch/x86/kernel/tlb_uv.c
+++ b/arch/x86/kernel/tlb_uv.c
@@ -25,15 +25,8 @@ static int uv_bau_retry_limit __read_mostly;
 static int uv_nshift __read_mostly;	/* position of pnode (which is nasid>>1) */
 static unsigned long uv_mmask __read_mostly;
 
-char *status_table[] = {
-	"IDLE",
-	"ACTIVE",
-	"DESTINATION TIMEOUT",
-	"SOURCE TIMEOUT"
-};
-
-DEFINE_PER_CPU(struct ptc_stats, ptcstats);
-DEFINE_PER_CPU(struct bau_control, bau_control);
+static DEFINE_PER_CPU(struct ptc_stats, ptcstats);
+static DEFINE_PER_CPU(struct bau_control, bau_control);
 
 /*
  * Free a software acknowledge hardware resource by clearing its Pending
@@ -55,7 +48,6 @@ static void uv_reply_to_message(int resource,
 	if (msp)
 		msp->seen_by.bits = 0;
 	uv_write_local_mmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, dw);
-	return;
 }
 
 /*
@@ -73,7 +65,7 @@ static void uv_bau_process_message(struct bau_payload_queue_entry *msg,
 	cpu = uv_blade_processor_id();
 	msg->number_of_cpus =
 	    uv_blade_nr_online_cpus(uv_node_to_blade_id(numa_node_id()));
-	this_cpu_mask = (unsigned long)1 << cpu;
+	this_cpu_mask = 1UL << cpu;
 	if (msp->seen_by.bits & this_cpu_mask)
 		return;
 	atomic_or_long(&msp->seen_by.bits, this_cpu_mask);
@@ -94,53 +86,60 @@ static void uv_bau_process_message(struct bau_payload_queue_entry *msg,
 	atomic_inc_short(&msg->acknowledge_count);
 	if (msg->number_of_cpus == msg->acknowledge_count)
 		uv_reply_to_message(sw_ack_slot, msg, msp);
-	return;
 }
 
 /*
- * Examine the payload queue on all the distribution nodes to see
+ * Examine the payload queue on one distribution node to see
  * which messages have not been seen, and which cpu(s) have not seen them.
  *
  * Returns the number of cpu's that have not responded.
  */
-static int uv_examine_destinations(struct bau_target_nodemask *distribution)
+static int uv_examine_destination(struct bau_control *bau_tablesp, int sender)
 {
-	int sender;
 	int i;
 	int j;
-	int k;
 	int count = 0;
-	struct bau_control *bau_tablesp;
 	struct bau_payload_queue_entry *msg;
 	struct bau_msg_status *msp;
 
+	for (msg = bau_tablesp->va_queue_first, i = 0; i < DEST_Q_SIZE;
+	     msg++, i++) {
+		if ((msg->sending_cpu == sender) && (!msg->replied_to)) {
+			msp = bau_tablesp->msg_statuses + i;
+			printk(KERN_DEBUG
+			       "blade %d: address:%#lx %d of %d, not cpu(s): ",
+			       i, msg->address, msg->acknowledge_count,
+			       msg->number_of_cpus);
+			for (j = 0; j < msg->number_of_cpus; j++) {
+				if (!((long)1 << j & msp->seen_by.bits)) {
+					count++;
+					printk("%d ", j);
+				}
+			}
+			printk("\n");
+		}
+	}
+	return count;
+}
+
+/*
+ * Examine the payload queue on all the distribution nodes to see
+ * which messages have not been seen, and which cpu(s) have not seen them.
+ *
+ * Returns the number of cpu's that have not responded.
+ */
+static int uv_examine_destinations(struct bau_target_nodemask *distribution)
+{
+	int sender;
+	int i;
+	int count = 0;
+
 	sender = smp_processor_id();
 	for (i = 0; i < (sizeof(struct bau_target_nodemask) * BITSPERBYTE);
 	     i++) {
 		if (!bau_node_isset(i, distribution))
 			continue;
-		bau_tablesp = uv_bau_table_bases[i];
-		for (msg = bau_tablesp->va_queue_first, j = 0;
-		     j < DESTINATION_PAYLOAD_QUEUE_SIZE; msg++, j++) {
-			if ((msg->sending_cpu == sender) &&
-			    (!msg->replied_to)) {
-				msp = bau_tablesp->msg_statuses + j;
-				printk(KERN_DEBUG
-				"blade %d: address:%#lx %d of %d, not cpu(s): ",
-					i, msg->address,
-					msg->acknowledge_count,
-					msg->number_of_cpus);
-				for (k = 0; k < msg->number_of_cpus;
-					k++) {
-					if (!((long)1 << k & msp->
-						seen_by.bits)) {
-						count++;
-						printk("%d ", k);
-					}
-				}
-				printk("\n");
-			}
-		}
+		count += uv_examine_destination(uv_bau_table_bases[i], sender);
 	}
 	return count;
 }
@@ -150,7 +149,7 @@ static int uv_examine_destinations(struct bau_target_nodemask *distribution)
  *
  * return COMPLETE, RETRY or GIVEUP
  */
-static int uv_wait_completion(struct bau_activation_descriptor *bau_desc,
+static int uv_wait_completion(struct bau_desc *bau_desc,
 			unsigned long mmr_offset, int right_shift)
 {
 	int exams = 0;
@@ -213,8 +212,8 @@ static int uv_wait_completion(struct bau_activation_descriptor *bau_desc,
  * Returns 0 if some remote flushing remains to be done. The mask is left
  * unchanged.
  */
-int uv_flush_send_and_wait(int cpu, int this_blade,
-	struct bau_activation_descriptor *bau_desc, cpumask_t *cpumaskp)
+int uv_flush_send_and_wait(int cpu, int this_blade, struct bau_desc *bau_desc,
+	cpumask_t *cpumaskp)
 {
 	int completion_status = 0;
 	int right_shift;
@@ -237,8 +236,8 @@ int uv_flush_send_and_wait(int cpu, int this_blade,
 	time1 = get_cycles();
 	do {
 		tries++;
-		index = ((unsigned long)
-			1 << UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT) | cpu;
+		index = (1UL << UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT) |
+			cpu;
 		uv_write_local_mmr(UVH_LB_BAU_SB_ACTIVATION_CONTROL, index);
 		completion_status = uv_wait_completion(bau_desc, mmr_offset,
 			right_shift);
@@ -303,7 +302,7 @@ int uv_flush_tlb_others(cpumask_t *cpumaskp, struct mm_struct *mm,
 	int cpu;
 	int this_blade;
 	int locals = 0;
-	struct bau_activation_descriptor *bau_desc;
+	struct bau_desc *bau_desc;
 
 	cpu = uv_blade_processor_id();
 	this_blade = uv_numa_blade_id();
@@ -315,8 +314,7 @@ int uv_flush_tlb_others(cpumask_t *cpumaskp, struct mm_struct *mm,
 	i = 0;
 	for_each_cpu_mask(bit, *cpumaskp) {
 		blade = uv_cpu_to_blade_id(bit);
-		if (blade > (UV_DISTRIBUTION_SIZE - 1))
-			BUG();
+		BUG_ON(blade > (UV_DISTRIBUTION_SIZE - 1));
 		if (blade == this_blade) {
 			locals++;
 			continue;
@@ -360,6 +358,8 @@ void uv_bau_message_interrupt(struct pt_regs *regs)
 {
 	struct bau_payload_queue_entry *pqp;
 	struct bau_payload_queue_entry *msg;
+	struct bau_payload_queue_entry *va_queue_first;
+	struct bau_payload_queue_entry *va_queue_last;
 	struct pt_regs *old_regs = set_irq_regs(regs);
 	cycles_t time1, time2;
 	int msg_slot;
@@ -376,7 +376,8 @@ void uv_bau_message_interrupt(struct pt_regs *regs)
 
 	local_pnode = uv_blade_to_pnode(uv_numa_blade_id());
 
-	pqp = __get_cpu_var(bau_control).va_queue_first;
+	pqp = va_queue_first = __get_cpu_var(bau_control).va_queue_first;
+	va_queue_last = __get_cpu_var(bau_control).va_queue_last;
 	msg = __get_cpu_var(bau_control).bau_msg_head;
 	while (msg->sw_ack_vector) {
 		count++;
@@ -387,8 +388,8 @@ void uv_bau_message_interrupt(struct pt_regs *regs)
 		uv_bau_process_message(msg, msg_slot, sw_ack_slot);
 
 		msg++;
-		if (msg > __get_cpu_var(bau_control).va_queue_last)
-			msg = __get_cpu_var(bau_control).va_queue_first;
+		if (msg > va_queue_last)
+			msg = va_queue_first;
 		__get_cpu_var(bau_control).bau_msg_head = msg;
 	}
 	if (!count)
@@ -401,7 +402,6 @@ void uv_bau_message_interrupt(struct pt_regs *regs)
 
 	irq_exit();
 	set_irq_regs(old_regs);
-	return;
 }
 
 static void uv_enable_timeouts(void)
@@ -423,7 +423,6 @@ static void uv_enable_timeouts(void)
 		pnode = uv_blade_to_pnode(blade);
 		cur_cpu += uv_blade_nr_possible_cpus(i);
 	}
-	return;
 }
 
 static void *uv_ptc_seq_start(struct seq_file *file, loff_t *offset)
@@ -535,10 +534,10 @@ static ssize_t uv_ptc_proc_write(struct file *file, const char __user *user,
 }
 
 static const struct seq_operations uv_ptc_seq_ops = {
-	.start = uv_ptc_seq_start,
-	.next = uv_ptc_seq_next,
-	.stop = uv_ptc_seq_stop,
-	.show = uv_ptc_seq_show
+	.start		= uv_ptc_seq_start,
+	.next		= uv_ptc_seq_next,
+	.stop		= uv_ptc_seq_stop,
+	.show		= uv_ptc_seq_show
 };
 
 static int uv_ptc_proc_open(struct inode *inode, struct file *file)
@@ -568,6 +567,7 @@ static int __init uv_ptc_init(void)
 	if (!proc_uv_ptc) {
 		printk(KERN_ERR "unable to create %s proc entry\n",
 		       UV_PTC_BASENAME);
+		remove_proc_entry("sgi_uv", NULL);
 		return -EINVAL;
 	}
 	proc_uv_ptc->proc_fops = &proc_uv_ptc_operations;
@@ -582,33 +582,26 @@ static struct bau_control * __init uv_table_bases_init(int blade, int node)
 	int i;
 	int *ip;
 	struct bau_msg_status *msp;
-	struct bau_control *bau_tablesp;
+	struct bau_control *bau_tabp;
 
-	bau_tablesp =
+	bau_tabp =
 	    kmalloc_node(sizeof(struct bau_control), GFP_KERNEL, node);
-	if (!bau_tablesp)
-		BUG();
-	bau_tablesp->msg_statuses =
+	BUG_ON(!bau_tabp);
+	bau_tabp->msg_statuses =
 	    kmalloc_node(sizeof(struct bau_msg_status) *
-			DESTINATION_PAYLOAD_QUEUE_SIZE, GFP_KERNEL, node);
-	if (!bau_tablesp->msg_statuses)
-		BUG();
-	for (i = 0, msp = bau_tablesp->msg_statuses;
-		i < DESTINATION_PAYLOAD_QUEUE_SIZE; i++, msp++) {
+			DEST_Q_SIZE, GFP_KERNEL, node);
+	BUG_ON(!bau_tabp->msg_statuses);
+	for (i = 0, msp = bau_tabp->msg_statuses; i < DEST_Q_SIZE; i++, msp++)
 		bau_cpubits_clear(&msp->seen_by, (int)
 				uv_blade_nr_possible_cpus(blade));
-	}
-	bau_tablesp->watching =
-	    kmalloc_node(sizeof(int) * DESTINATION_NUM_RESOURCES,
-			GFP_KERNEL, node);
-	if (!bau_tablesp->watching)
-		BUG();
-	for (i = 0, ip = bau_tablesp->watching;
-		i < DESTINATION_PAYLOAD_QUEUE_SIZE; i++, ip++) {
+	bau_tabp->watching =
+	    kmalloc_node(sizeof(int) * DEST_NUM_RESOURCES, GFP_KERNEL, node);
+	BUG_ON(!bau_tabp->watching);
+	for (i = 0, ip = bau_tabp->watching; i < DEST_Q_SIZE; i++, ip++) {
 		*ip = 0;
 	}
-	uv_bau_table_bases[blade] = bau_tablesp;
-	return bau_tablesp;
+	uv_bau_table_bases[blade] = bau_tabp;
+	return bau_tabp;
 }
 
 /*
@@ -616,7 +609,7 @@ static struct bau_control * __init uv_table_bases_init(int blade, int node)
  */
 static void __init uv_table_bases_finish(int blade, int node, int cur_cpu,
 		struct bau_control *bau_tablesp,
-		struct bau_activation_descriptor *adp)
+		struct bau_desc *adp)
 {
 	int i;
 	struct bau_control *bcp;
@@ -636,7 +629,7 @@ static void __init uv_table_bases_finish(int blade, int node, int cur_cpu,
 /*
  * initialize the sending side's sending buffers
  */
-static struct bau_activation_descriptor * __init
+static struct bau_desc * __init
 uv_activation_descriptor_init(int node, int pnode)
 {
 	int i;
@@ -644,13 +637,12 @@ uv_activation_descriptor_init(int node, int pnode)
 	unsigned long m;
 	unsigned long n;
 	unsigned long mmr_image;
-	struct bau_activation_descriptor *adp;
-	struct bau_activation_descriptor *ad2;
+	struct bau_desc *adp;
+	struct bau_desc *ad2;
 
-	adp = (struct bau_activation_descriptor *)
+	adp = (struct bau_desc *)
 	    kmalloc_node(16384, GFP_KERNEL, node);
-	if (!adp)
-		BUG();
+	BUG_ON(!adp);
 	pa = __pa((unsigned long)adp);
 	n = pa >> uv_nshift;
 	m = pa & uv_mmask;
@@ -660,7 +652,7 @@ uv_activation_descriptor_init(int node, int pnode)
 			UVH_LB_BAU_SB_DESCRIPTOR_BASE,
 			(n << UV_DESC_BASE_PNODE_SHIFT | m));
 	for (i = 0, ad2 = adp; i < UV_ACTIVATION_DESCRIPTOR_SIZE; i++, ad2++) {
-		memset(ad2, 0, sizeof(struct bau_activation_descriptor));
+		memset(ad2, 0, sizeof(struct bau_desc));
 		ad2->header.sw_ack_flag = 1;
 		ad2->header.base_dest_nodeid =
 		    uv_blade_to_pnode(uv_cpu_to_blade_id(0));
@@ -683,12 +675,10 @@ static struct bau_payload_queue_entry * __init uv_payload_queue_init(int node,
 	char *cp;
 	struct bau_payload_queue_entry *pqp;
 
-	pqp = (struct bau_payload_queue_entry *)
-	    kmalloc_node((DESTINATION_PAYLOAD_QUEUE_SIZE + 1) *
-			sizeof(struct bau_payload_queue_entry),
-			GFP_KERNEL, node);
-	if (!pqp)
-		BUG();
+	pqp = (struct bau_payload_queue_entry *) kmalloc_node(
+		(DEST_Q_SIZE + 1) * sizeof(struct bau_payload_queue_entry),
+		GFP_KERNEL, node);
+	BUG_ON(!pqp);
 	cp = (char *)pqp + 31;
 	pqp = (struct bau_payload_queue_entry *)(((unsigned long)cp >> 5) << 5);
 	bau_tablesp->va_queue_first = pqp;
@@ -699,13 +689,11 @@ static struct bau_payload_queue_entry * __init uv_payload_queue_init(int node,
 			uv_physnodeaddr(pqp));
 	uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL,
 			uv_physnodeaddr(pqp));
-	bau_tablesp->va_queue_last =
-	    pqp + (DESTINATION_PAYLOAD_QUEUE_SIZE - 1);
+	bau_tablesp->va_queue_last = pqp + (DEST_Q_SIZE - 1);
 	uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST,
 			(unsigned long)
 			uv_physnodeaddr(bau_tablesp->va_queue_last));
-	memset(pqp, 0, sizeof(struct bau_payload_queue_entry) *
-			DESTINATION_PAYLOAD_QUEUE_SIZE);
+	memset(pqp, 0, sizeof(struct bau_payload_queue_entry) * DEST_Q_SIZE);
 	return pqp;
 }
 
@@ -717,7 +705,7 @@ static int __init uv_init_blade(int blade, int node, int cur_cpu)
 	int pnode;
 	unsigned long pa;
 	unsigned long apicid;
-	struct bau_activation_descriptor *adp;
+	struct bau_desc *adp;
 	struct bau_payload_queue_entry *pqp;
 	struct bau_control *bau_tablesp;
 
@@ -755,7 +743,7 @@ static int __init uv_bau_init(void)
 
 	uv_bau_retry_limit = 1;
 	uv_nshift = uv_hub_info->n_val;
-	uv_mmask = ((unsigned long)1 << uv_hub_info->n_val) - 1;
+	uv_mmask = (1UL << uv_hub_info->n_val) - 1;
 	nblades = 0;
 	last_blade = -1;
 	for_each_online_node(node) {
@@ -767,8 +755,7 @@ static int __init uv_bau_init(void)
 	}
 	uv_bau_table_bases = (struct bau_control **)
 	    kmalloc(nblades * sizeof(struct bau_control *), GFP_KERNEL);
-	if (!uv_bau_table_bases)
-		BUG();
+	BUG_ON(!uv_bau_table_bases);
 	last_blade = -1;
 	for_each_online_node(node) {
 		blade = uv_node_to_blade_id(node);