path: root/arch/x86/kernel/tlb_uv.c
author     Cliff Wickman <cpw@sgi.com>    2010-04-14 12:35:46 -0400
committer  Ingo Molnar <mingo@elte.hu>    2010-04-14 12:49:53 -0400
commit     b8f7fb13d2d7ff14818fd1d3edd8b834d38b0217 (patch)
tree       48844c12cc443690116abbec7e836f8c08360d56  /arch/x86/kernel/tlb_uv.c
parent     2acebe9ecb2b77876e87a1480729cfb2db4570dd (diff)
x86, UV: Improve BAU performance and error recovery
- increase performance of the interrupt handler
- release timed-out software acknowledge resources
- recover from continuous-busy status due to a hardware issue
- add a 'throttle' to keep a uvhub from sending more than a specified
  number of broadcasts concurrently (work around the hardware issue;
  a sketch of this mechanism follows the diffstat below)
- provide a 'nobau' boot command line option
- rename 'pnode' and 'node' to 'uvhub' (the 'node' terminology is ambiguous)
- add some new statistics about the scope of broadcasts, retries, the
  hardware issue and the 'throttle'
- split off new function uv_bau_retry_msg() from uv_bau_process_message()
  per community coding style feedback
- simplify the argument list to uv_bau_process_message(), per community
  coding style feedback

Signed-off-by: Cliff Wickman <cpw@sgi.com>
Cc: linux-mm@kvack.org
Cc: Jack Steiner <steiner@sgi.com>
Cc: Russ Anderson <rja@sgi.com>
Cc: Mike Travis <travis@sgi.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
LKML-Reference: <E1O25Z4-0004Ur-PB@eag09.americas.sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/tlb_uv.c')
-rw-r--r--   arch/x86/kernel/tlb_uv.c   1270
1 files changed, 900 insertions, 370 deletions
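The per-uvhub 'throttle' named in the commit message is the least obvious of these changes, so here is a minimal user-space sketch of the idea (not the kernel code): a sender may only begin a broadcast while fewer than max_concurrent descriptors are active on its hub, and spins otherwise. The field names (active_descriptor_count, max_concurrent, the s_throttles counter) mirror the patch, but the pthread/stdatomic modelling and the constants are purely illustrative.

/* user-space model of the per-uvhub BAU 'throttle' added by this patch */
#include <pthread.h>
#include <stdatomic.h>

struct hub_throttle {
	pthread_mutex_t lock;               /* stands in for hmaster->uvhub_lock */
	atomic_int active_descriptor_count; /* broadcasts currently in flight */
	int max_concurrent;                 /* the throttle limit */
};

/* add 1 to the count unless it is already >= the limit; 1 on success */
static int inc_unless_ge(struct hub_throttle *h)
{
	int ok = 0;

	pthread_mutex_lock(&h->lock);
	if (atomic_load(&h->active_descriptor_count) < h->max_concurrent) {
		atomic_fetch_add(&h->active_descriptor_count, 1);
		ok = 1;
	}
	pthread_mutex_unlock(&h->lock);
	return ok;
}

static void send_one_broadcast(struct hub_throttle *h)
{
	/* each failed attempt here is what the patch counts as s_throttles */
	while (!inc_unless_ge(h))
		;                           /* cpu_relax() in the kernel */

	/* ... write the activation descriptor, wait for completion ... */

	atomic_fetch_sub(&h->active_descriptor_count, 1);
}

int main(void)
{
	struct hub_throttle h = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.active_descriptor_count = 0,
		.max_concurrent = 2,        /* illustrative value */
	};

	send_one_broadcast(&h);
	return 0;
}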
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c
index ef68ba48564b..414f7c4fe76c 100644
--- a/arch/x86/kernel/tlb_uv.c
+++ b/arch/x86/kernel/tlb_uv.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * SGI UltraViolet TLB flush routines. 2 * SGI UltraViolet TLB flush routines.
3 * 3 *
4 * (c) 2008 Cliff Wickman <cpw@sgi.com>, SGI. 4 * (c) 2008-2010 Cliff Wickman <cpw@sgi.com>, SGI.
5 * 5 *
6 * This code is released under the GNU General Public License version 2 or 6 * This code is released under the GNU General Public License version 2 or
7 * later. 7 * later.
@@ -19,44 +19,67 @@
19#include <asm/idle.h> 19#include <asm/idle.h>
20#include <asm/tsc.h> 20#include <asm/tsc.h>
21#include <asm/irq_vectors.h> 21#include <asm/irq_vectors.h>
22#include <asm/timer.h>
23
24struct msg_desc {
25 struct bau_payload_queue_entry *msg;
26 int msg_slot;
27 int sw_ack_slot;
28 struct bau_payload_queue_entry *va_queue_first;
29 struct bau_payload_queue_entry *va_queue_last;
30};
22 31
23#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD 0x000000000bUL 32#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD 0x000000000bUL
24 33
25static struct bau_control **uv_bau_table_bases __read_mostly; 34static int uv_bau_max_concurrent __read_mostly;
26static int uv_bau_retry_limit __read_mostly;
27 35
28/* base pnode in this partition */ 36static int nobau;
29static int uv_partition_base_pnode __read_mostly; 37static int __init setup_nobau(char *arg)
38{
39 nobau = 1;
40 return 0;
41}
42early_param("nobau", setup_nobau);
30 43
31static unsigned long uv_mmask __read_mostly; 44/* base pnode in this partition */
45static int uv_partition_base_pnode __read_mostly;
46/* position of pnode (which is nasid>>1): */
47static int uv_nshift __read_mostly;
48static unsigned long uv_mmask __read_mostly;
32 49
33static DEFINE_PER_CPU(struct ptc_stats, ptcstats); 50static DEFINE_PER_CPU(struct ptc_stats, ptcstats);
34static DEFINE_PER_CPU(struct bau_control, bau_control); 51static DEFINE_PER_CPU(struct bau_control, bau_control);
52static DEFINE_PER_CPU(cpumask_var_t, uv_flush_tlb_mask);
53
54struct reset_args {
55 int sender;
56};
35 57
36/* 58/*
37 * Determine the first node on a blade. 59 * Determine the first node on a uvhub. 'Nodes' are used for kernel
60 * memory allocation.
38 */ 61 */
39static int __init blade_to_first_node(int blade) 62static int __init uvhub_to_first_node(int uvhub)
40{ 63{
41 int node, b; 64 int node, b;
42 65
43 for_each_online_node(node) { 66 for_each_online_node(node) {
44 b = uv_node_to_blade_id(node); 67 b = uv_node_to_blade_id(node);
45 if (blade == b) 68 if (uvhub == b)
46 return node; 69 return node;
47 } 70 }
48 return -1; /* shouldn't happen */ 71 return -1;
49} 72}
50 73
51/* 74/*
52 * Determine the apicid of the first cpu on a blade. 75 * Determine the apicid of the first cpu on a uvhub.
53 */ 76 */
54static int __init blade_to_first_apicid(int blade) 77static int __init uvhub_to_first_apicid(int uvhub)
55{ 78{
56 int cpu; 79 int cpu;
57 80
58 for_each_present_cpu(cpu) 81 for_each_present_cpu(cpu)
59 if (blade == uv_cpu_to_blade_id(cpu)) 82 if (uvhub == uv_cpu_to_blade_id(cpu))
60 return per_cpu(x86_cpu_to_apicid, cpu); 83 return per_cpu(x86_cpu_to_apicid, cpu);
61 return -1; 84 return -1;
62} 85}
@@ -69,195 +92,459 @@ static int __init blade_to_first_apicid(int blade)
69 * clear of the Timeout bit (as well) will free the resource. No reply will 92 * clear of the Timeout bit (as well) will free the resource. No reply will
70 * be sent (the hardware will only do one reply per message). 93 * be sent (the hardware will only do one reply per message).
71 */ 94 */
72static void uv_reply_to_message(int resource, 95static inline void uv_reply_to_message(struct msg_desc *mdp,
73 struct bau_payload_queue_entry *msg, 96 struct bau_control *bcp)
74 struct bau_msg_status *msp)
75{ 97{
76 unsigned long dw; 98 unsigned long dw;
99 struct bau_payload_queue_entry *msg;
77 100
78 dw = (1 << (resource + UV_SW_ACK_NPENDING)) | (1 << resource); 101 msg = mdp->msg;
102 if (!msg->canceled) {
103 dw = (msg->sw_ack_vector << UV_SW_ACK_NPENDING) |
104 msg->sw_ack_vector;
105 uv_write_local_mmr(
106 UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, dw);
107 }
79 msg->replied_to = 1; 108 msg->replied_to = 1;
80 msg->sw_ack_vector = 0; 109 msg->sw_ack_vector = 0;
81 if (msp)
82 msp->seen_by.bits = 0;
83 uv_write_local_mmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, dw);
84} 110}
85 111
86/* 112/*
87 * Do all the things a cpu should do for a TLB shootdown message. 113 * Process the receipt of a RETRY message
88 * Other cpu's may come here at the same time for this message.
89 */ 114 */
90static void uv_bau_process_message(struct bau_payload_queue_entry *msg, 115static inline void uv_bau_process_retry_msg(struct msg_desc *mdp,
91 int msg_slot, int sw_ack_slot) 116 struct bau_control *bcp)
92{ 117{
93 unsigned long this_cpu_mask; 118 int i;
94 struct bau_msg_status *msp; 119 int cancel_count = 0;
95 int cpu; 120 int slot2;
121 unsigned long msg_res;
122 unsigned long mmr = 0;
123 struct bau_payload_queue_entry *msg;
124 struct bau_payload_queue_entry *msg2;
125 struct ptc_stats *stat;
96 126
97 msp = __get_cpu_var(bau_control).msg_statuses + msg_slot; 127 msg = mdp->msg;
98 cpu = uv_blade_processor_id(); 128 stat = &per_cpu(ptcstats, bcp->cpu);
99 msg->number_of_cpus = 129 stat->d_retries++;
100 uv_blade_nr_online_cpus(uv_node_to_blade_id(numa_node_id())); 130 /*
101 this_cpu_mask = 1UL << cpu; 131 * cancel any message from msg+1 to the retry itself
102 if (msp->seen_by.bits & this_cpu_mask) 132 */
103 return; 133 for (msg2 = msg+1, i = 0; i < DEST_Q_SIZE; msg2++, i++) {
104 atomic_or_long(&msp->seen_by.bits, this_cpu_mask); 134 if (msg2 > mdp->va_queue_last)
135 msg2 = mdp->va_queue_first;
136 if (msg2 == msg)
137 break;
138
139 /* same conditions for cancellation as uv_do_reset */
140 if ((msg2->replied_to == 0) && (msg2->canceled == 0) &&
141 (msg2->sw_ack_vector) && ((msg2->sw_ack_vector &
142 msg->sw_ack_vector) == 0) &&
143 (msg2->sending_cpu == msg->sending_cpu) &&
144 (msg2->msg_type != MSG_NOOP)) {
145 slot2 = msg2 - mdp->va_queue_first;
146 mmr = uv_read_local_mmr
147 (UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
148 msg_res = ((msg2->sw_ack_vector << 8) |
149 msg2->sw_ack_vector);
150 /*
151 * This is a message retry; clear the resources held
152 * by the previous message only if they timed out.
153 * If it has not timed out we have an unexpected
154 * situation to report.
155 */
156 if (mmr & (msg_res << 8)) {
157 /*
158 * is the resource timed out?
159 * make everyone ignore the cancelled message.
160 */
161 msg2->canceled = 1;
162 stat->d_canceled++;
163 cancel_count++;
164 uv_write_local_mmr(
165 UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS,
166 (msg_res << 8) | msg_res);
167 } else
168 printk(KERN_INFO "note bau retry: no effect\n");
169 }
170 }
171 if (!cancel_count)
172 stat->d_nocanceled++;
173}
105 174
106 if (msg->replied_to == 1) 175/*
107 return; 176 * Do all the things a cpu should do for a TLB shootdown message.
177 * Other cpu's may come here at the same time for this message.
178 */
179static void uv_bau_process_message(struct msg_desc *mdp,
180 struct bau_control *bcp)
181{
182 int msg_ack_count;
183 short socket_ack_count = 0;
184 struct ptc_stats *stat;
185 struct bau_payload_queue_entry *msg;
186 struct bau_control *smaster = bcp->socket_master;
108 187
188 /*
189 * This must be a normal message, or retry of a normal message
190 */
191 msg = mdp->msg;
192 stat = &per_cpu(ptcstats, bcp->cpu);
109 if (msg->address == TLB_FLUSH_ALL) { 193 if (msg->address == TLB_FLUSH_ALL) {
110 local_flush_tlb(); 194 local_flush_tlb();
111 __get_cpu_var(ptcstats).alltlb++; 195 stat->d_alltlb++;
112 } else { 196 } else {
113 __flush_tlb_one(msg->address); 197 __flush_tlb_one(msg->address);
114 __get_cpu_var(ptcstats).onetlb++; 198 stat->d_onetlb++;
115 } 199 }
200 stat->d_requestee++;
201
202 /*
203 * One cpu on each uvhub has the additional job on a RETRY
204 * of releasing the resource held by the message that is
205 * being retried. That message is identified by sending
206 * cpu number.
207 */
208 if (msg->msg_type == MSG_RETRY && bcp == bcp->uvhub_master)
209 uv_bau_process_retry_msg(mdp, bcp);
116 210
117 __get_cpu_var(ptcstats).requestee++; 211 /*
212 * This is a sw_ack message, so we have to reply to it.
213 * Count each responding cpu on the socket. This avoids
214 * pinging the count's cache line back and forth between
215 * the sockets.
216 */
217 socket_ack_count = atomic_add_short_return(1, (struct atomic_short *)
218 &smaster->socket_acknowledge_count[mdp->msg_slot]);
219 if (socket_ack_count == bcp->cpus_in_socket) {
220 /*
221 * Both sockets dump their completed count total into
222 * the message's count.
223 */
224 smaster->socket_acknowledge_count[mdp->msg_slot] = 0;
225 msg_ack_count = atomic_add_short_return(socket_ack_count,
226 (struct atomic_short *)&msg->acknowledge_count);
227
228 if (msg_ack_count == bcp->cpus_in_uvhub) {
229 /*
230 * All cpus in uvhub saw it; reply
231 */
232 uv_reply_to_message(mdp, bcp);
233 }
234 }
118 235
119 atomic_inc_short(&msg->acknowledge_count); 236 return;
120 if (msg->number_of_cpus == msg->acknowledge_count)
121 uv_reply_to_message(sw_ack_slot, msg, msp);
122} 237}
123 238
124/* 239/*
125 * Examine the payload queue on one distribution node to see 240 * Determine the first cpu on a uvhub.
126 * which messages have not been seen, and which cpu(s) have not seen them. 241 */
242static int uvhub_to_first_cpu(int uvhub)
243{
244 int cpu;
245 for_each_present_cpu(cpu)
246 if (uvhub == uv_cpu_to_blade_id(cpu))
247 return cpu;
248 return -1;
249}
250
251/*
252 * Last resort when we get a large number of destination timeouts is
253 * to clear resources held by a given cpu.
254 * Do this with IPI so that all messages in the BAU message queue
255 * can be identified by their nonzero sw_ack_vector field.
127 * 256 *
128 * Returns the number of cpu's that have not responded. 257 * This is entered for a single cpu on the uvhub.
258 * The sender want's this uvhub to free a specific message's
259 * sw_ack resources.
129 */ 260 */
130static int uv_examine_destination(struct bau_control *bau_tablesp, int sender) 261static void
262uv_do_reset(void *ptr)
131{ 263{
132 struct bau_payload_queue_entry *msg;
133 struct bau_msg_status *msp;
134 int count = 0;
135 int i; 264 int i;
136 int j; 265 int slot;
266 int count = 0;
267 unsigned long mmr;
268 unsigned long msg_res;
269 struct bau_control *bcp;
270 struct reset_args *rap;
271 struct bau_payload_queue_entry *msg;
272 struct ptc_stats *stat;
137 273
138 for (msg = bau_tablesp->va_queue_first, i = 0; i < DEST_Q_SIZE; 274 bcp = &per_cpu(bau_control, smp_processor_id());
139 msg++, i++) { 275 rap = (struct reset_args *)ptr;
140 if ((msg->sending_cpu == sender) && (!msg->replied_to)) { 276 stat = &per_cpu(ptcstats, bcp->cpu);
141 msp = bau_tablesp->msg_statuses + i; 277 stat->d_resets++;
142 printk(KERN_DEBUG 278
143 "blade %d: address:%#lx %d of %d, not cpu(s): ", 279 /*
144 i, msg->address, msg->acknowledge_count, 280 * We're looking for the given sender, and
145 msg->number_of_cpus); 281 * will free its sw_ack resource.
146 for (j = 0; j < msg->number_of_cpus; j++) { 282 * If all cpu's finally responded after the timeout, its
147 if (!((1L << j) & msp->seen_by.bits)) { 283 * message 'replied_to' was set.
148 count++; 284 */
149 printk("%d ", j); 285 for (msg = bcp->va_queue_first, i = 0; i < DEST_Q_SIZE; msg++, i++) {
150 } 286 /* uv_do_reset: same conditions for cancellation as
287 uv_bau_process_retry_msg() */
288 if ((msg->replied_to == 0) &&
289 (msg->canceled == 0) &&
290 (msg->sending_cpu == rap->sender) &&
291 (msg->sw_ack_vector) &&
292 (msg->msg_type != MSG_NOOP)) {
293 /*
294 * make everyone else ignore this message
295 */
296 msg->canceled = 1;
297 slot = msg - bcp->va_queue_first;
298 count++;
299 /*
300 * only reset the resource if it is still pending
301 */
302 mmr = uv_read_local_mmr
303 (UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
304 msg_res = ((msg->sw_ack_vector << 8) |
305 msg->sw_ack_vector);
306 if (mmr & msg_res) {
307 stat->d_rcanceled++;
308 uv_write_local_mmr(
309 UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS,
310 msg_res);
151 } 311 }
152 printk("\n");
153 } 312 }
154 } 313 }
155 return count; 314 return;
156} 315}
157 316
158/* 317/*
159 * Examine the payload queue on all the distribution nodes to see 318 * Use IPI to get all target uvhubs to release resources held by
160 * which messages have not been seen, and which cpu(s) have not seen them. 319 * a given sending cpu number.
161 *
162 * Returns the number of cpu's that have not responded.
163 */ 320 */
164static int uv_examine_destinations(struct bau_target_nodemask *distribution) 321static void uv_reset_with_ipi(struct bau_target_uvhubmask *distribution,
322 int sender)
165{ 323{
166 int sender; 324 int uvhub;
167 int i; 325 int cpu;
168 int count = 0; 326 cpumask_t mask;
327 struct reset_args reset_args;
169 328
170 sender = smp_processor_id(); 329 reset_args.sender = sender;
171 for (i = 0; i < sizeof(struct bau_target_nodemask) * BITSPERBYTE; i++) { 330
172 if (!bau_node_isset(i, distribution)) 331 cpus_clear(mask);
332 /* find a single cpu for each uvhub in this distribution mask */
333 for (uvhub = 0;
334 uvhub < sizeof(struct bau_target_uvhubmask) * BITSPERBYTE;
335 uvhub++) {
336 if (!bau_uvhub_isset(uvhub, distribution))
173 continue; 337 continue;
174 count += uv_examine_destination(uv_bau_table_bases[i], sender); 338 /* find a cpu for this uvhub */
339 cpu = uvhub_to_first_cpu(uvhub);
340 cpu_set(cpu, mask);
175 } 341 }
176 return count; 342 /* IPI all cpus; Preemption is already disabled */
343 smp_call_function_many(&mask, uv_do_reset, (void *)&reset_args, 1);
344 return;
345}
346
347static inline unsigned long
348cycles_2_us(unsigned long long cyc)
349{
350 unsigned long long ns;
351 unsigned long us;
352 ns = (cyc * per_cpu(cyc2ns, smp_processor_id()))
353 >> CYC2NS_SCALE_FACTOR;
354 us = ns / 1000;
355 return us;
177} 356}
178 357
179/* 358/*
180 * wait for completion of a broadcast message 359 * wait for all cpus on this hub to finish their sends and go quiet
181 * 360 * leaves uvhub_quiesce set so that no new broadcasts are started by
182 * return COMPLETE, RETRY or GIVEUP 361 * bau_flush_send_and_wait()
362 */
363static inline void
364quiesce_local_uvhub(struct bau_control *hmaster)
365{
366 atomic_add_short_return(1, (struct atomic_short *)
367 &hmaster->uvhub_quiesce);
368}
369
370/*
371 * mark this quiet-requestor as done
372 */
373static inline void
374end_uvhub_quiesce(struct bau_control *hmaster)
375{
376 atomic_add_short_return(-1, (struct atomic_short *)
377 &hmaster->uvhub_quiesce);
378}
379
380/*
381 * Wait for completion of a broadcast software ack message
382 * return COMPLETE, RETRY(PLUGGED or TIMEOUT) or GIVEUP
183 */ 383 */
184static int uv_wait_completion(struct bau_desc *bau_desc, 384static int uv_wait_completion(struct bau_desc *bau_desc,
185 unsigned long mmr_offset, int right_shift) 385 unsigned long mmr_offset, int right_shift, int this_cpu,
386 struct bau_control *bcp, struct bau_control *smaster, long try)
186{ 387{
187 int exams = 0; 388 int relaxes = 0;
188 long destination_timeouts = 0;
189 long source_timeouts = 0;
190 unsigned long descriptor_status; 389 unsigned long descriptor_status;
390 unsigned long mmr;
391 unsigned long mask;
392 cycles_t ttime;
393 cycles_t timeout_time;
394 struct ptc_stats *stat = &per_cpu(ptcstats, this_cpu);
395 struct bau_control *hmaster;
191 396
397 hmaster = bcp->uvhub_master;
398 timeout_time = get_cycles() + bcp->timeout_interval;
399
400 /* spin on the status MMR, waiting for it to go idle */
192 while ((descriptor_status = (((unsigned long) 401 while ((descriptor_status = (((unsigned long)
193 uv_read_local_mmr(mmr_offset) >> 402 uv_read_local_mmr(mmr_offset) >>
194 right_shift) & UV_ACT_STATUS_MASK)) != 403 right_shift) & UV_ACT_STATUS_MASK)) !=
195 DESC_STATUS_IDLE) { 404 DESC_STATUS_IDLE) {
196 if (descriptor_status == DESC_STATUS_SOURCE_TIMEOUT) {
197 source_timeouts++;
198 if (source_timeouts > SOURCE_TIMEOUT_LIMIT)
199 source_timeouts = 0;
200 __get_cpu_var(ptcstats).s_retry++;
201 return FLUSH_RETRY;
202 }
203 /* 405 /*
204 * spin here looking for progress at the destinations 406 * Our software ack messages may be blocked because there are
407 * no swack resources available. As long as none of them
408 * has timed out hardware will NACK our message and its
409 * state will stay IDLE.
205 */ 410 */
206 if (descriptor_status == DESC_STATUS_DESTINATION_TIMEOUT) { 411 if (descriptor_status == DESC_STATUS_SOURCE_TIMEOUT) {
207 destination_timeouts++; 412 stat->s_stimeout++;
208 if (destination_timeouts > DESTINATION_TIMEOUT_LIMIT) { 413 return FLUSH_GIVEUP;
209 /* 414 } else if (descriptor_status ==
210 * returns number of cpus not responding 415 DESC_STATUS_DESTINATION_TIMEOUT) {
211 */ 416 stat->s_dtimeout++;
212 if (uv_examine_destinations 417 ttime = get_cycles();
213 (&bau_desc->distribution) == 0) { 418
214 __get_cpu_var(ptcstats).d_retry++; 419 /*
215 return FLUSH_RETRY; 420 * Our retries may be blocked by all destination
216 } 421 * swack resources being consumed, and a timeout
217 exams++; 422 * pending. In that case hardware returns the
218 if (exams >= uv_bau_retry_limit) { 423 * ERROR that looks like a destination timeout.
219 printk(KERN_DEBUG 424 */
220 "uv_flush_tlb_others"); 425 if (cycles_2_us(ttime - bcp->send_message) < BIOS_TO) {
221 printk("giving up on cpu %d\n", 426 bcp->conseccompletes = 0;
222 smp_processor_id()); 427 return FLUSH_RETRY_PLUGGED;
428 }
429
430 bcp->conseccompletes = 0;
431 return FLUSH_RETRY_TIMEOUT;
432 } else {
433 /*
434 * descriptor_status is still BUSY
435 */
436 cpu_relax();
437 relaxes++;
438 if (relaxes >= 10000) {
439 relaxes = 0;
440 if (get_cycles() > timeout_time) {
441 quiesce_local_uvhub(hmaster);
442
443 /* single-thread the register change */
444 spin_lock(&hmaster->masks_lock);
445 mmr = uv_read_local_mmr(mmr_offset);
446 mask = 0UL;
 447 mask |= (3UL << right_shift);
448 mask = ~mask;
449 mmr &= mask;
450 uv_write_local_mmr(mmr_offset, mmr);
451 spin_unlock(&hmaster->masks_lock);
452 end_uvhub_quiesce(hmaster);
453 stat->s_busy++;
223 return FLUSH_GIVEUP; 454 return FLUSH_GIVEUP;
224 } 455 }
225 /*
226 * delays can hang the simulator
227 udelay(1000);
228 */
229 destination_timeouts = 0;
230 } 456 }
231 } 457 }
232 cpu_relax();
233 } 458 }
459 bcp->conseccompletes++;
234 return FLUSH_COMPLETE; 460 return FLUSH_COMPLETE;
235} 461}
236 462
463static inline cycles_t
464sec_2_cycles(unsigned long sec)
465{
466 unsigned long ns;
467 cycles_t cyc;
468
469 ns = sec * 1000000000;
470 cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id()));
471 return cyc;
472}
473
474/*
475 * conditionally add 1 to *v, unless *v is >= u
476 * return 0 if we cannot add 1 to *v because it is >= u
477 * return 1 if we can add 1 to *v because it is < u
478 * the add is atomic
479 *
480 * This is close to atomic_add_unless(), but this allows the 'u' value
481 * to be lowered below the current 'v'. atomic_add_unless can only stop
482 * on equal.
483 */
484static inline int atomic_inc_unless_ge(spinlock_t *lock, atomic_t *v, int u)
485{
486 spin_lock(lock);
487 if (atomic_read(v) >= u) {
488 spin_unlock(lock);
489 return 0;
490 }
491 atomic_inc(v);
492 spin_unlock(lock);
493 return 1;
494}
495
237/** 496/**
238 * uv_flush_send_and_wait 497 * uv_flush_send_and_wait
239 * 498 *
240 * Send a broadcast and wait for a broadcast message to complete. 499 * Send a broadcast and wait for it to complete.
241 * 500 *
242 * The flush_mask contains the cpus the broadcast was sent to. 501 * The flush_mask contains the cpus the broadcast is to be sent to, plus
502 * cpus that are on the local uvhub.
243 * 503 *
244 * Returns NULL if all remote flushing was done. The mask is zeroed. 504 * Returns NULL if all flushing represented in the mask was done. The mask
505 * is zeroed.
245 * Returns @flush_mask if some remote flushing remains to be done. The 506 * Returns @flush_mask if some remote flushing remains to be done. The
246 * mask will have some bits still set. 507 * mask will have some bits still set, representing any cpus on the local
508 * uvhub (not current cpu) and any on remote uvhubs if the broadcast failed.
247 */ 509 */
248const struct cpumask *uv_flush_send_and_wait(int cpu, int this_pnode, 510const struct cpumask *uv_flush_send_and_wait(struct bau_desc *bau_desc,
249 struct bau_desc *bau_desc, 511 struct cpumask *flush_mask,
250 struct cpumask *flush_mask) 512 struct bau_control *bcp)
251{ 513{
252 int completion_status = 0;
253 int right_shift; 514 int right_shift;
254 int tries = 0; 515 int uvhub;
255 int pnode;
256 int bit; 516 int bit;
517 int completion_status = 0;
518 int seq_number = 0;
519 long try = 0;
520 int cpu = bcp->uvhub_cpu;
521 int this_cpu = bcp->cpu;
522 int this_uvhub = bcp->uvhub;
257 unsigned long mmr_offset; 523 unsigned long mmr_offset;
258 unsigned long index; 524 unsigned long index;
259 cycles_t time1; 525 cycles_t time1;
260 cycles_t time2; 526 cycles_t time2;
527 struct ptc_stats *stat = &per_cpu(ptcstats, bcp->cpu);
528 struct bau_control *smaster = bcp->socket_master;
529 struct bau_control *hmaster = bcp->uvhub_master;
530
531 /*
532 * Spin here while there are hmaster->max_concurrent or more active
533 * descriptors. This is the per-uvhub 'throttle'.
534 */
535 if (!atomic_inc_unless_ge(&hmaster->uvhub_lock,
536 &hmaster->active_descriptor_count,
537 hmaster->max_concurrent)) {
538 stat->s_throttles++;
539 do {
540 cpu_relax();
541 } while (!atomic_inc_unless_ge(&hmaster->uvhub_lock,
542 &hmaster->active_descriptor_count,
543 hmaster->max_concurrent));
544 }
545
546 while (hmaster->uvhub_quiesce)
547 cpu_relax();
261 548
262 if (cpu < UV_CPUS_PER_ACT_STATUS) { 549 if (cpu < UV_CPUS_PER_ACT_STATUS) {
263 mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0; 550 mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
@@ -269,24 +556,108 @@ const struct cpumask *uv_flush_send_and_wait(int cpu, int this_pnode,
269 } 556 }
270 time1 = get_cycles(); 557 time1 = get_cycles();
271 do { 558 do {
272 tries++; 559 /*
560 * Every message from any given cpu gets a unique message
561 * sequence number. But retries use that same number.
562 * Our message may have timed out at the destination because
563 * all sw-ack resources are in use and there is a timeout
564 * pending there. In that case, our last send never got
565 * placed into the queue and we need to persist until it
566 * does.
567 *
568 * Make any retry a type MSG_RETRY so that the destination will
569 * free any resource held by a previous message from this cpu.
570 */
571 if (try == 0) {
572 /* use message type set by the caller the first time */
573 seq_number = bcp->message_number++;
574 } else {
575 /* use RETRY type on all the rest; same sequence */
576 bau_desc->header.msg_type = MSG_RETRY;
577 stat->s_retry_messages++;
578 }
579 bau_desc->header.sequence = seq_number;
273 index = (1UL << UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT) | 580 index = (1UL << UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT) |
274 cpu; 581 bcp->uvhub_cpu;
582 bcp->send_message = get_cycles();
583
275 uv_write_local_mmr(UVH_LB_BAU_SB_ACTIVATION_CONTROL, index); 584 uv_write_local_mmr(UVH_LB_BAU_SB_ACTIVATION_CONTROL, index);
585
586 try++;
276 completion_status = uv_wait_completion(bau_desc, mmr_offset, 587 completion_status = uv_wait_completion(bau_desc, mmr_offset,
277 right_shift); 588 right_shift, this_cpu, bcp, smaster, try);
278 } while (completion_status == FLUSH_RETRY); 589
590 if (completion_status == FLUSH_RETRY_PLUGGED) {
591 /*
592 * Our retries may be blocked by all destination swack
593 * resources being consumed, and a timeout pending. In
594 * that case hardware immediately returns the ERROR
595 * that looks like a destination timeout.
596 */
597 udelay(TIMEOUT_DELAY);
598 bcp->plugged_tries++;
599 if (bcp->plugged_tries >= PLUGSB4RESET) {
600 bcp->plugged_tries = 0;
601 quiesce_local_uvhub(hmaster);
602 spin_lock(&hmaster->queue_lock);
603 uv_reset_with_ipi(&bau_desc->distribution,
604 this_cpu);
605 spin_unlock(&hmaster->queue_lock);
606 end_uvhub_quiesce(hmaster);
607 bcp->ipi_attempts++;
608 stat->s_resets_plug++;
609 }
610 } else if (completion_status == FLUSH_RETRY_TIMEOUT) {
611 hmaster->max_concurrent = 1;
612 bcp->timeout_tries++;
613 udelay(TIMEOUT_DELAY);
614 if (bcp->timeout_tries >= TIMEOUTSB4RESET) {
615 bcp->timeout_tries = 0;
616 quiesce_local_uvhub(hmaster);
617 spin_lock(&hmaster->queue_lock);
618 uv_reset_with_ipi(&bau_desc->distribution,
619 this_cpu);
620 spin_unlock(&hmaster->queue_lock);
621 end_uvhub_quiesce(hmaster);
622 bcp->ipi_attempts++;
623 stat->s_resets_timeout++;
624 }
625 }
626 if (bcp->ipi_attempts >= 3) {
627 bcp->ipi_attempts = 0;
628 completion_status = FLUSH_GIVEUP;
629 break;
630 }
631 cpu_relax();
632 } while ((completion_status == FLUSH_RETRY_PLUGGED) ||
633 (completion_status == FLUSH_RETRY_TIMEOUT));
279 time2 = get_cycles(); 634 time2 = get_cycles();
280 __get_cpu_var(ptcstats).sflush += (time2 - time1);
281 if (tries > 1)
282 __get_cpu_var(ptcstats).retriesok++;
283 635
284 if (completion_status == FLUSH_GIVEUP) { 636 if ((completion_status == FLUSH_COMPLETE) && (bcp->conseccompletes > 5)
637 && (hmaster->max_concurrent < hmaster->max_concurrent_constant))
638 hmaster->max_concurrent++;
639
640 /*
641 * hold any cpu not timing out here; no other cpu currently held by
642 * the 'throttle' should enter the activation code
643 */
644 while (hmaster->uvhub_quiesce)
645 cpu_relax();
646 atomic_dec(&hmaster->active_descriptor_count);
647
648 /* guard against cycles wrap */
649 if (time2 > time1)
650 stat->s_time += (time2 - time1);
651 else
652 stat->s_requestor--; /* don't count this one */
653 if (completion_status == FLUSH_COMPLETE && try > 1)
654 stat->s_retriesok++;
655 else if (completion_status == FLUSH_GIVEUP) {
285 /* 656 /*
286 * Cause the caller to do an IPI-style TLB shootdown on 657 * Cause the caller to do an IPI-style TLB shootdown on
287 * the cpu's, all of which are still in the mask. 658 * the target cpu's, all of which are still in the mask.
288 */ 659 */
289 __get_cpu_var(ptcstats).ptc_i++; 660 stat->s_giveup++;
290 return flush_mask; 661 return flush_mask;
291 } 662 }
292 663
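The retry loop in the hunk above implements an escalation ladder that is easy to lose in the detail: a PLUGGED status is retried after a short delay and, after enough plugged tries, the destinations' sw_ack resources are reset by IPI; a TIMEOUT status additionally clamps max_concurrent to 1 before doing the same; after three IPI resets the sender gives up and the caller falls back to IPI-style shootdown. A simplified, self-contained restatement follows; the constants and the reset_destinations_with_ipi() helper are illustrative stand-ins (the patch's PLUGSB4RESET/TIMEOUTSB4RESET and its quiesce + uv_reset_with_ipi() sequence are not reproduced here).

#include <stdio.h>

#define PLUGS_BEFORE_RESET    4     /* illustrative, not the patch's value */
#define TIMEOUTS_BEFORE_RESET 4     /* illustrative, not the patch's value */
#define IPI_RESET_LIMIT       3

enum flush_status { FL_COMPLETE, FL_RETRY_PLUGGED, FL_RETRY_TIMEOUT, FL_GIVEUP };

struct sender_state {
	int plugged_tries;
	int timeout_tries;
	int ipi_attempts;
	int max_concurrent;
};

static void reset_destinations_with_ipi(struct sender_state *s)
{
	/* in the patch: quiesce the hub, uv_reset_with_ipi(), un-quiesce */
	s->ipi_attempts++;
}

/* simplified restatement of the escalation in uv_flush_send_and_wait() */
static enum flush_status escalate(struct sender_state *s, enum flush_status st)
{
	if (st == FL_RETRY_PLUGGED && ++s->plugged_tries >= PLUGS_BEFORE_RESET) {
		s->plugged_tries = 0;
		reset_destinations_with_ipi(s);
	} else if (st == FL_RETRY_TIMEOUT) {
		s->max_concurrent = 1;          /* clamp the throttle */
		if (++s->timeout_tries >= TIMEOUTS_BEFORE_RESET) {
			s->timeout_tries = 0;
			reset_destinations_with_ipi(s);
		}
	}
	if (s->ipi_attempts >= IPI_RESET_LIMIT)
		return FL_GIVEUP;               /* fall back to IPI shootdown */
	return st;
}

int main(void)
{
	struct sender_state s = { .max_concurrent = 16 };
	enum flush_status st = FL_RETRY_TIMEOUT;
	int i;

	for (i = 0; i < 20 && st != FL_GIVEUP; i++)
		st = escalate(&s, FL_RETRY_TIMEOUT);
	printf("gave up after %d retries, max_concurrent=%d\n", i, s.max_concurrent);
	return 0;
}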
@@ -295,18 +666,17 @@ const struct cpumask *uv_flush_send_and_wait(int cpu, int this_pnode,
295 * use the IPI method of shootdown on them. 666 * use the IPI method of shootdown on them.
296 */ 667 */
297 for_each_cpu(bit, flush_mask) { 668 for_each_cpu(bit, flush_mask) {
298 pnode = uv_cpu_to_pnode(bit); 669 uvhub = uv_cpu_to_blade_id(bit);
299 if (pnode == this_pnode) 670 if (uvhub == this_uvhub)
300 continue; 671 continue;
301 cpumask_clear_cpu(bit, flush_mask); 672 cpumask_clear_cpu(bit, flush_mask);
302 } 673 }
303 if (!cpumask_empty(flush_mask)) 674 if (!cpumask_empty(flush_mask))
304 return flush_mask; 675 return flush_mask;
676
305 return NULL; 677 return NULL;
306} 678}
307 679
308static DEFINE_PER_CPU(cpumask_var_t, uv_flush_tlb_mask);
309
310/** 680/**
311 * uv_flush_tlb_others - globally purge translation cache of a virtual 681 * uv_flush_tlb_others - globally purge translation cache of a virtual
312 * address or all TLB's 682 * address or all TLB's
@@ -323,8 +693,8 @@ static DEFINE_PER_CPU(cpumask_var_t, uv_flush_tlb_mask);
323 * The caller has derived the cpumask from the mm_struct. This function 693 * The caller has derived the cpumask from the mm_struct. This function
324 * is called only if there are bits set in the mask. (e.g. flush_tlb_page()) 694 * is called only if there are bits set in the mask. (e.g. flush_tlb_page())
325 * 695 *
326 * The cpumask is converted into a nodemask of the nodes containing 696 * The cpumask is converted into a uvhubmask of the uvhubs containing
327 * the cpus. 697 * those cpus.
328 * 698 *
329 * Note that this function should be called with preemption disabled. 699 * Note that this function should be called with preemption disabled.
330 * 700 *
@@ -336,52 +706,82 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
336 struct mm_struct *mm, 706 struct mm_struct *mm,
337 unsigned long va, unsigned int cpu) 707 unsigned long va, unsigned int cpu)
338{ 708{
339 struct cpumask *flush_mask = __get_cpu_var(uv_flush_tlb_mask); 709 int remotes;
340 int i; 710 int tcpu;
341 int bit; 711 int uvhub;
342 int pnode;
343 int uv_cpu;
344 int this_pnode;
345 int locals = 0; 712 int locals = 0;
346 struct bau_desc *bau_desc; 713 struct bau_desc *bau_desc;
714 struct cpumask *flush_mask;
715 struct ptc_stats *stat;
716 struct bau_control *bcp;
347 717
348 cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu)); 718 if (nobau)
719 return cpumask;
349 720
350 uv_cpu = uv_blade_processor_id(); 721 bcp = &per_cpu(bau_control, cpu);
351 this_pnode = uv_hub_info->pnode; 722 /*
352 bau_desc = __get_cpu_var(bau_control).descriptor_base; 723 * Each sending cpu has a per-cpu mask which it fills from the caller's
353 bau_desc += UV_ITEMS_PER_DESCRIPTOR * uv_cpu; 724 * cpu mask. Only remote cpus are converted to uvhubs and copied.
725 */
726 flush_mask = (struct cpumask *)per_cpu(uv_flush_tlb_mask, cpu);
727 /*
728 * copy cpumask to flush_mask, removing current cpu
729 * (current cpu should already have been flushed by the caller and
730 * should never be returned if we return flush_mask)
731 */
732 cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));
733 if (cpu_isset(cpu, *cpumask))
734 locals++; /* current cpu was targeted */
354 735
355 bau_nodes_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE); 736 bau_desc = bcp->descriptor_base;
737 bau_desc += UV_ITEMS_PER_DESCRIPTOR * bcp->uvhub_cpu;
356 738
357 i = 0; 739 bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
358 for_each_cpu(bit, flush_mask) { 740 remotes = 0;
359 pnode = uv_cpu_to_pnode(bit); 741 for_each_cpu(tcpu, flush_mask) {
360 BUG_ON(pnode > (UV_DISTRIBUTION_SIZE - 1)); 742 uvhub = uv_cpu_to_blade_id(tcpu);
361 if (pnode == this_pnode) { 743 if (uvhub == bcp->uvhub) {
362 locals++; 744 locals++;
363 continue; 745 continue;
364 } 746 }
365 bau_node_set(pnode - uv_partition_base_pnode, 747 bau_uvhub_set(uvhub, &bau_desc->distribution);
366 &bau_desc->distribution); 748 remotes++;
367 i++;
368 } 749 }
369 if (i == 0) { 750 if (remotes == 0) {
370 /* 751 /*
371 * no off_node flushing; return status for local node 752 * No off_hub flushing; return status for local hub.
753 * Return the caller's mask if all were local (the current
754 * cpu may be in that mask).
372 */ 755 */
373 if (locals) 756 if (locals)
374 return flush_mask; 757 return cpumask;
375 else 758 else
376 return NULL; 759 return NULL;
377 } 760 }
378 __get_cpu_var(ptcstats).requestor++; 761 stat = &per_cpu(ptcstats, cpu);
379 __get_cpu_var(ptcstats).ntargeted += i; 762 stat->s_requestor++;
763 stat->s_ntargcpu += remotes;
764 remotes = bau_uvhub_weight(&bau_desc->distribution);
765 stat->s_ntarguvhub += remotes;
766 if (remotes >= 16)
767 stat->s_ntarguvhub16++;
768 else if (remotes >= 8)
769 stat->s_ntarguvhub8++;
770 else if (remotes >= 4)
771 stat->s_ntarguvhub4++;
772 else if (remotes >= 2)
773 stat->s_ntarguvhub2++;
774 else
775 stat->s_ntarguvhub1++;
380 776
381 bau_desc->payload.address = va; 777 bau_desc->payload.address = va;
382 bau_desc->payload.sending_cpu = cpu; 778 bau_desc->payload.sending_cpu = cpu;
383 779
384 return uv_flush_send_and_wait(uv_cpu, this_pnode, bau_desc, flush_mask); 780 /*
781 * uv_flush_send_and_wait returns null if all cpu's were messaged, or
782 * the adjusted flush_mask if any cpu's were not messaged.
783 */
784 return uv_flush_send_and_wait(bau_desc, flush_mask, bcp);
385} 785}
386 786
387/* 787/*
@@ -390,87 +790,70 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
390 * 790 *
391 * We received a broadcast assist message. 791 * We received a broadcast assist message.
392 * 792 *
393 * Interrupts may have been disabled; this interrupt could represent 793 * Interrupts are disabled; this interrupt could represent
394 * the receipt of several messages. 794 * the receipt of several messages.
395 * 795 *
396 * All cores/threads on this node get this interrupt. 796 * All cores/threads on this hub get this interrupt.
397 * The last one to see it does the s/w ack. 797 * The last one to see it does the software ack.
398 * (the resource will not be freed until noninterruptable cpus see this 798 * (the resource will not be freed until noninterruptable cpus see this
399 * interrupt; hardware will timeout the s/w ack and reply ERROR) 799 * interrupt; hardware may timeout the s/w ack and reply ERROR)
400 */ 800 */
401void uv_bau_message_interrupt(struct pt_regs *regs) 801void uv_bau_message_interrupt(struct pt_regs *regs)
402{ 802{
403 struct bau_payload_queue_entry *va_queue_first;
404 struct bau_payload_queue_entry *va_queue_last;
405 struct bau_payload_queue_entry *msg;
406 struct pt_regs *old_regs = set_irq_regs(regs);
407 cycles_t time1;
408 cycles_t time2;
409 int msg_slot;
410 int sw_ack_slot;
411 int fw;
412 int count = 0; 803 int count = 0;
413 unsigned long local_pnode; 804 cycles_t time_start;
414 805 struct bau_payload_queue_entry *msg;
415 ack_APIC_irq(); 806 struct bau_control *bcp;
416 exit_idle(); 807 struct ptc_stats *stat;
417 irq_enter(); 808 struct msg_desc msgdesc;
418 809
419 time1 = get_cycles(); 810 time_start = get_cycles();
420 811 bcp = &per_cpu(bau_control, smp_processor_id());
421 local_pnode = uv_blade_to_pnode(uv_numa_blade_id()); 812 stat = &per_cpu(ptcstats, smp_processor_id());
422 813 msgdesc.va_queue_first = bcp->va_queue_first;
423 va_queue_first = __get_cpu_var(bau_control).va_queue_first; 814 msgdesc.va_queue_last = bcp->va_queue_last;
424 va_queue_last = __get_cpu_var(bau_control).va_queue_last; 815 msg = bcp->bau_msg_head;
425
426 msg = __get_cpu_var(bau_control).bau_msg_head;
427 while (msg->sw_ack_vector) { 816 while (msg->sw_ack_vector) {
428 count++; 817 count++;
429 fw = msg->sw_ack_vector; 818 msgdesc.msg_slot = msg - msgdesc.va_queue_first;
430 msg_slot = msg - va_queue_first; 819 msgdesc.sw_ack_slot = ffs(msg->sw_ack_vector) - 1;
431 sw_ack_slot = ffs(fw) - 1; 820 msgdesc.msg = msg;
432 821 uv_bau_process_message(&msgdesc, bcp);
433 uv_bau_process_message(msg, msg_slot, sw_ack_slot);
434
435 msg++; 822 msg++;
436 if (msg > va_queue_last) 823 if (msg > msgdesc.va_queue_last)
437 msg = va_queue_first; 824 msg = msgdesc.va_queue_first;
438 __get_cpu_var(bau_control).bau_msg_head = msg; 825 bcp->bau_msg_head = msg;
439 } 826 }
827 stat->d_time += (get_cycles() - time_start);
440 if (!count) 828 if (!count)
441 __get_cpu_var(ptcstats).nomsg++; 829 stat->d_nomsg++;
442 else if (count > 1) 830 else if (count > 1)
443 __get_cpu_var(ptcstats).multmsg++; 831 stat->d_multmsg++;
444 832 ack_APIC_irq();
445 time2 = get_cycles();
446 __get_cpu_var(ptcstats).dflush += (time2 - time1);
447
448 irq_exit();
449 set_irq_regs(old_regs);
450} 833}
451 834
452/* 835/*
453 * uv_enable_timeouts 836 * uv_enable_timeouts
454 * 837 *
 455 * Each target blade (i.e. blades that have cpu's) needs to have 838 * Each target uvhub (i.e. a uvhub that has cpu's) needs to have
456 * shootdown message timeouts enabled. The timeout does not cause 839 * shootdown message timeouts enabled. The timeout does not cause
457 * an interrupt, but causes an error message to be returned to 840 * an interrupt, but causes an error message to be returned to
458 * the sender. 841 * the sender.
459 */ 842 */
460static void uv_enable_timeouts(void) 843static void uv_enable_timeouts(void)
461{ 844{
462 int blade; 845 int uvhub;
463 int nblades; 846 int nuvhubs;
464 int pnode; 847 int pnode;
465 unsigned long mmr_image; 848 unsigned long mmr_image;
466 849
467 nblades = uv_num_possible_blades(); 850 nuvhubs = uv_num_possible_blades();
468 851
469 for (blade = 0; blade < nblades; blade++) { 852 for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
470 if (!uv_blade_nr_possible_cpus(blade)) 853 if (!uv_blade_nr_possible_cpus(uvhub))
471 continue; 854 continue;
472 855
473 pnode = uv_blade_to_pnode(blade); 856 pnode = uv_blade_to_pnode(uvhub);
474 mmr_image = 857 mmr_image =
475 uv_read_global_mmr64(pnode, UVH_LB_BAU_MISC_CONTROL); 858 uv_read_global_mmr64(pnode, UVH_LB_BAU_MISC_CONTROL);
476 /* 859 /*
@@ -523,9 +906,20 @@ static void uv_ptc_seq_stop(struct seq_file *file, void *data)
523{ 906{
524} 907}
525 908
909static inline unsigned long long
910millisec_2_cycles(unsigned long millisec)
911{
912 unsigned long ns;
913 unsigned long long cyc;
914
915 ns = millisec * 1000;
916 cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id()));
917 return cyc;
918}
919
526/* 920/*
527 * Display the statistics thru /proc 921 * Display the statistics thru /proc.
528 * data points to the cpu number 922 * 'data' points to the cpu number
529 */ 923 */
530static int uv_ptc_seq_show(struct seq_file *file, void *data) 924static int uv_ptc_seq_show(struct seq_file *file, void *data)
531{ 925{
@@ -536,78 +930,155 @@ static int uv_ptc_seq_show(struct seq_file *file, void *data)
536 930
537 if (!cpu) { 931 if (!cpu) {
538 seq_printf(file, 932 seq_printf(file,
539 "# cpu requestor requestee one all sretry dretry ptc_i "); 933 "# cpu sent stime numuvhubs numuvhubs16 numuvhubs8 ");
540 seq_printf(file, 934 seq_printf(file,
541 "sw_ack sflush dflush sok dnomsg dmult starget\n"); 935 "numuvhubs4 numuvhubs2 numuvhubs1 numcpus dto ");
936 seq_printf(file,
937 "retries rok resetp resett giveup sto bz throt ");
938 seq_printf(file,
939 "sw_ack recv rtime all ");
940 seq_printf(file,
941 "one mult none retry canc nocan reset rcan\n");
542 } 942 }
543 if (cpu < num_possible_cpus() && cpu_online(cpu)) { 943 if (cpu < num_possible_cpus() && cpu_online(cpu)) {
544 stat = &per_cpu(ptcstats, cpu); 944 stat = &per_cpu(ptcstats, cpu);
545 seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld ", 945 /* source side statistics */
546 cpu, stat->requestor, 946 seq_printf(file,
547 stat->requestee, stat->onetlb, stat->alltlb, 947 "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
548 stat->s_retry, stat->d_retry, stat->ptc_i); 948 cpu, stat->s_requestor, cycles_2_us(stat->s_time),
549 seq_printf(file, "%lx %ld %ld %ld %ld %ld %ld\n", 949 stat->s_ntarguvhub, stat->s_ntarguvhub16,
950 stat->s_ntarguvhub8, stat->s_ntarguvhub4,
951 stat->s_ntarguvhub2, stat->s_ntarguvhub1,
952 stat->s_ntargcpu, stat->s_dtimeout);
953 seq_printf(file, "%ld %ld %ld %ld %ld %ld %ld %ld ",
954 stat->s_retry_messages, stat->s_retriesok,
955 stat->s_resets_plug, stat->s_resets_timeout,
956 stat->s_giveup, stat->s_stimeout,
957 stat->s_busy, stat->s_throttles);
958 /* destination side statistics */
959 seq_printf(file,
960 "%lx %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n",
550 uv_read_global_mmr64(uv_cpu_to_pnode(cpu), 961 uv_read_global_mmr64(uv_cpu_to_pnode(cpu),
551 UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE), 962 UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE),
552 stat->sflush, stat->dflush, 963 stat->d_requestee, cycles_2_us(stat->d_time),
553 stat->retriesok, stat->nomsg, 964 stat->d_alltlb, stat->d_onetlb, stat->d_multmsg,
554 stat->multmsg, stat->ntargeted); 965 stat->d_nomsg, stat->d_retries, stat->d_canceled,
966 stat->d_nocanceled, stat->d_resets,
967 stat->d_rcanceled);
555 } 968 }
556 969
557 return 0; 970 return 0;
558} 971}
559 972
560/* 973/*
 974 * -1: reset the statistics
561 * 0: display meaning of the statistics 975 * 0: display meaning of the statistics
562 * >0: retry limit 976 * >0: maximum concurrent active descriptors per uvhub (throttle)
563 */ 977 */
564static ssize_t uv_ptc_proc_write(struct file *file, const char __user *user, 978static ssize_t uv_ptc_proc_write(struct file *file, const char __user *user,
565 size_t count, loff_t *data) 979 size_t count, loff_t *data)
566{ 980{
567 long newmode; 981 int cpu;
982 long input_arg;
568 char optstr[64]; 983 char optstr[64];
984 struct ptc_stats *stat;
985 struct bau_control *bcp;
569 986
570 if (count == 0 || count > sizeof(optstr)) 987 if (count == 0 || count > sizeof(optstr))
571 return -EINVAL; 988 return -EINVAL;
572 if (copy_from_user(optstr, user, count)) 989 if (copy_from_user(optstr, user, count))
573 return -EFAULT; 990 return -EFAULT;
574 optstr[count - 1] = '\0'; 991 optstr[count - 1] = '\0';
575 if (strict_strtoul(optstr, 10, &newmode) < 0) { 992 if (strict_strtol(optstr, 10, &input_arg) < 0) {
576 printk(KERN_DEBUG "%s is invalid\n", optstr); 993 printk(KERN_DEBUG "%s is invalid\n", optstr);
577 return -EINVAL; 994 return -EINVAL;
578 } 995 }
579 996
580 if (newmode == 0) { 997 if (input_arg == 0) {
581 printk(KERN_DEBUG "# cpu: cpu number\n"); 998 printk(KERN_DEBUG "# cpu: cpu number\n");
999 printk(KERN_DEBUG "Sender statistics:\n");
1000 printk(KERN_DEBUG
1001 "sent: number of shootdown messages sent\n");
1002 printk(KERN_DEBUG
1003 "stime: time spent sending messages\n");
1004 printk(KERN_DEBUG
1005 "numuvhubs: number of hubs targeted with shootdown\n");
1006 printk(KERN_DEBUG
1007 "numuvhubs16: number times 16 or more hubs targeted\n");
1008 printk(KERN_DEBUG
1009 "numuvhubs8: number times 8 or more hubs targeted\n");
1010 printk(KERN_DEBUG
1011 "numuvhubs4: number times 4 or more hubs targeted\n");
1012 printk(KERN_DEBUG
1013 "numuvhubs2: number times 2 or more hubs targeted\n");
1014 printk(KERN_DEBUG
1015 "numuvhubs1: number times 1 hub targeted\n");
1016 printk(KERN_DEBUG
1017 "numcpus: number of cpus targeted with shootdown\n");
1018 printk(KERN_DEBUG
1019 "dto: number of destination timeouts\n");
1020 printk(KERN_DEBUG
1021 "retries: destination timeout retries sent\n");
1022 printk(KERN_DEBUG
1023 "rok: : destination timeouts successfully retried\n");
1024 printk(KERN_DEBUG
1025 "resetp: ipi-style resource resets for plugs\n");
1026 printk(KERN_DEBUG
1027 "resett: ipi-style resource resets for timeouts\n");
1028 printk(KERN_DEBUG
1029 "giveup: fall-backs to ipi-style shootdowns\n");
1030 printk(KERN_DEBUG
1031 "sto: number of source timeouts\n");
1032 printk(KERN_DEBUG
1033 "bz: number of stay-busy's\n");
1034 printk(KERN_DEBUG
1035 "throt: number times spun in throttle\n");
1036 printk(KERN_DEBUG "Destination side statistics:\n");
582 printk(KERN_DEBUG 1037 printk(KERN_DEBUG
583 "requestor: times this cpu was the flush requestor\n"); 1038 "sw_ack: image of UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE\n");
584 printk(KERN_DEBUG 1039 printk(KERN_DEBUG
585 "requestee: times this cpu was requested to flush its TLBs\n"); 1040 "recv: shootdown messages received\n");
586 printk(KERN_DEBUG 1041 printk(KERN_DEBUG
587 "one: times requested to flush a single address\n"); 1042 "rtime: time spent processing messages\n");
588 printk(KERN_DEBUG 1043 printk(KERN_DEBUG
589 "all: times requested to flush all TLB's\n"); 1044 "all: shootdown all-tlb messages\n");
590 printk(KERN_DEBUG 1045 printk(KERN_DEBUG
591 "sretry: number of retries of source-side timeouts\n"); 1046 "one: shootdown one-tlb messages\n");
592 printk(KERN_DEBUG 1047 printk(KERN_DEBUG
593 "dretry: number of retries of destination-side timeouts\n"); 1048 "mult: interrupts that found multiple messages\n");
594 printk(KERN_DEBUG 1049 printk(KERN_DEBUG
595 "ptc_i: times UV fell through to IPI-style flushes\n"); 1050 "none: interrupts that found no messages\n");
596 printk(KERN_DEBUG 1051 printk(KERN_DEBUG
597 "sw_ack: image of UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE\n"); 1052 "retry: number of retry messages processed\n");
598 printk(KERN_DEBUG 1053 printk(KERN_DEBUG
599 "sflush_us: cycles spent in uv_flush_tlb_others()\n"); 1054 "canc: number messages canceled by retries\n");
600 printk(KERN_DEBUG 1055 printk(KERN_DEBUG
601 "dflush_us: cycles spent in handling flush requests\n"); 1056 "nocan: number retries that found nothing to cancel\n");
602 printk(KERN_DEBUG "sok: successes on retry\n");
603 printk(KERN_DEBUG "dnomsg: interrupts with no message\n");
604 printk(KERN_DEBUG 1057 printk(KERN_DEBUG
605 "dmult: interrupts with multiple messages\n"); 1058 "reset: number of ipi-style reset requests processed\n");
606 printk(KERN_DEBUG "starget: nodes targeted\n"); 1059 printk(KERN_DEBUG
1060 "rcan: number messages canceled by reset requests\n");
1061 } else if (input_arg == -1) {
1062 for_each_present_cpu(cpu) {
1063 stat = &per_cpu(ptcstats, cpu);
1064 memset(stat, 0, sizeof(struct ptc_stats));
1065 }
607 } else { 1066 } else {
608 uv_bau_retry_limit = newmode; 1067 uv_bau_max_concurrent = input_arg;
609 printk(KERN_DEBUG "timeout retry limit:%d\n", 1068 bcp = &per_cpu(bau_control, smp_processor_id());
610 uv_bau_retry_limit); 1069 if (uv_bau_max_concurrent < 1 ||
1070 uv_bau_max_concurrent > bcp->cpus_in_uvhub) {
1071 printk(KERN_DEBUG
1072 "Error: BAU max concurrent %d; %d is invalid\n",
1073 bcp->max_concurrent, uv_bau_max_concurrent);
1074 return -EINVAL;
1075 }
1076 printk(KERN_DEBUG "Set BAU max concurrent:%d\n",
1077 uv_bau_max_concurrent);
1078 for_each_present_cpu(cpu) {
1079 bcp = &per_cpu(bau_control, cpu);
1080 bcp->max_concurrent = uv_bau_max_concurrent;
1081 }
611 } 1082 }
612 1083
613 return count; 1084 return count;
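For reference, uv_ptc_proc_write() above is reached through the procfs file that uv_ptc_init() registers; that registration is not part of the hunks shown here, so the /proc/sgi_uv/ptc_statistics path used below should be treated as an assumption. A minimal user-space sketch that resets the counters (the new "-1" input) and then dumps the per-cpu statistics lines:

#include <stdio.h>

/* assumption: the path registered by uv_ptc_init(), not shown in this diff */
#define PTC_STATS_PATH "/proc/sgi_uv/ptc_statistics"

int main(void)
{
	char line[1024];
	FILE *f;

	/* "-1" resets the statistics; "0" prints their meanings to the kernel
	 * log; a value > 0 sets the per-uvhub max_concurrent throttle */
	f = fopen(PTC_STATS_PATH, "w");
	if (!f) {
		perror(PTC_STATS_PATH);
		return 1;
	}
	fputs("-1\n", f);
	fclose(f);

	f = fopen(PTC_STATS_PATH, "r");
	if (!f) {
		perror(PTC_STATS_PATH);
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}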
@@ -651,79 +1122,30 @@ static int __init uv_ptc_init(void)
651} 1122}
652 1123
653/* 1124/*
654 * begin the initialization of the per-blade control structures
655 */
656static struct bau_control * __init uv_table_bases_init(int blade, int node)
657{
658 int i;
659 struct bau_msg_status *msp;
660 struct bau_control *bau_tabp;
661
662 bau_tabp =
663 kmalloc_node(sizeof(struct bau_control), GFP_KERNEL, node);
664 BUG_ON(!bau_tabp);
665
666 bau_tabp->msg_statuses =
667 kmalloc_node(sizeof(struct bau_msg_status) *
668 DEST_Q_SIZE, GFP_KERNEL, node);
669 BUG_ON(!bau_tabp->msg_statuses);
670
671 for (i = 0, msp = bau_tabp->msg_statuses; i < DEST_Q_SIZE; i++, msp++)
672 bau_cpubits_clear(&msp->seen_by, (int)
673 uv_blade_nr_possible_cpus(blade));
674
675 uv_bau_table_bases[blade] = bau_tabp;
676
677 return bau_tabp;
678}
679
680/*
681 * finish the initialization of the per-blade control structures
682 */
683static void __init
684uv_table_bases_finish(int blade,
685 struct bau_control *bau_tablesp,
686 struct bau_desc *adp)
687{
688 struct bau_control *bcp;
689 int cpu;
690
691 for_each_present_cpu(cpu) {
692 if (blade != uv_cpu_to_blade_id(cpu))
693 continue;
694
695 bcp = (struct bau_control *)&per_cpu(bau_control, cpu);
696 bcp->bau_msg_head = bau_tablesp->va_queue_first;
697 bcp->va_queue_first = bau_tablesp->va_queue_first;
698 bcp->va_queue_last = bau_tablesp->va_queue_last;
699 bcp->msg_statuses = bau_tablesp->msg_statuses;
700 bcp->descriptor_base = adp;
701 }
702}
703
704/*
705 * initialize the sending side's sending buffers 1125 * initialize the sending side's sending buffers
706 */ 1126 */
707static struct bau_desc * __init 1127static void
708uv_activation_descriptor_init(int node, int pnode) 1128uv_activation_descriptor_init(int node, int pnode)
709{ 1129{
710 int i; 1130 int i;
1131 int cpu;
711 unsigned long pa; 1132 unsigned long pa;
712 unsigned long m; 1133 unsigned long m;
713 unsigned long n; 1134 unsigned long n;
714 struct bau_desc *adp; 1135 struct bau_desc *bau_desc;
715 struct bau_desc *ad2; 1136 struct bau_desc *bd2;
1137 struct bau_control *bcp;
716 1138
717 /* 1139 /*
718 * each bau_desc is 64 bytes; there are 8 (UV_ITEMS_PER_DESCRIPTOR) 1140 * each bau_desc is 64 bytes; there are 8 (UV_ITEMS_PER_DESCRIPTOR)
719 * per cpu; and up to 32 (UV_ADP_SIZE) cpu's per blade 1141 * per cpu; and up to 32 (UV_ADP_SIZE) cpu's per uvhub
720 */ 1142 */
721 adp = (struct bau_desc *)kmalloc_node(sizeof(struct bau_desc)* 1143 bau_desc = (struct bau_desc *)kmalloc_node(sizeof(struct bau_desc)*
722 UV_ADP_SIZE*UV_ITEMS_PER_DESCRIPTOR, GFP_KERNEL, node); 1144 UV_ADP_SIZE*UV_ITEMS_PER_DESCRIPTOR, GFP_KERNEL, node);
723 BUG_ON(!adp); 1145 BUG_ON(!bau_desc);
724 1146
725 pa = uv_gpa(adp); /* need the real nasid*/ 1147 pa = uv_gpa(bau_desc); /* need the real nasid*/
726 n = uv_gpa_to_pnode(pa); 1148 n = pa >> uv_nshift;
727 m = pa & uv_mmask; 1149 m = pa & uv_mmask;
728 1150
729 uv_write_global_mmr64(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE, 1151 uv_write_global_mmr64(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE,
@@ -732,96 +1154,188 @@ uv_activation_descriptor_init(int node, int pnode)
732 /* 1154 /*
733 * initializing all 8 (UV_ITEMS_PER_DESCRIPTOR) descriptors for each 1155 * initializing all 8 (UV_ITEMS_PER_DESCRIPTOR) descriptors for each
734 * cpu even though we only use the first one; one descriptor can 1156 * cpu even though we only use the first one; one descriptor can
735 * describe a broadcast to 256 nodes. 1157 * describe a broadcast to 256 uv hubs.
736 */ 1158 */
737 for (i = 0, ad2 = adp; i < (UV_ADP_SIZE*UV_ITEMS_PER_DESCRIPTOR); 1159 for (i = 0, bd2 = bau_desc; i < (UV_ADP_SIZE*UV_ITEMS_PER_DESCRIPTOR);
738 i++, ad2++) { 1160 i++, bd2++) {
739 memset(ad2, 0, sizeof(struct bau_desc)); 1161 memset(bd2, 0, sizeof(struct bau_desc));
740 ad2->header.sw_ack_flag = 1; 1162 bd2->header.sw_ack_flag = 1;
741 /* 1163 /*
742 * base_dest_nodeid is the first node in the partition, so 1164 * base_dest_nodeid is the nasid (pnode<<1) of the first uvhub
743 * the bit map will indicate partition-relative node numbers. 1165 * in the partition. The bit map will indicate uvhub numbers,
744 * note that base_dest_nodeid is actually a nasid. 1166 * which are 0-N in a partition. Pnodes are unique system-wide.
745 */ 1167 */
746 ad2->header.base_dest_nodeid = uv_partition_base_pnode << 1; 1168 bd2->header.base_dest_nodeid = uv_partition_base_pnode << 1;
747 ad2->header.dest_subnodeid = 0x10; /* the LB */ 1169 bd2->header.dest_subnodeid = 0x10; /* the LB */
748 ad2->header.command = UV_NET_ENDPOINT_INTD; 1170 bd2->header.command = UV_NET_ENDPOINT_INTD;
749 ad2->header.int_both = 1; 1171 bd2->header.int_both = 1;
750 /* 1172 /*
751 * all others need to be set to zero: 1173 * all others need to be set to zero:
752 * fairness chaining multilevel count replied_to 1174 * fairness chaining multilevel count replied_to
753 */ 1175 */
754 } 1176 }
755 return adp; 1177 for_each_present_cpu(cpu) {
1178 if (pnode != uv_blade_to_pnode(uv_cpu_to_blade_id(cpu)))
1179 continue;
1180 bcp = &per_cpu(bau_control, cpu);
1181 bcp->descriptor_base = bau_desc;
1182 }
756} 1183}
757 1184
758/* 1185/*
759 * initialize the destination side's receiving buffers 1186 * initialize the destination side's receiving buffers
1187 * entered for each uvhub in the partition
1188 * - node is first node (kernel memory notion) on the uvhub
1189 * - pnode is the uvhub's physical identifier
760 */ 1190 */
761static struct bau_payload_queue_entry * __init 1191static void
762uv_payload_queue_init(int node, int pnode, struct bau_control *bau_tablesp) 1192uv_payload_queue_init(int node, int pnode)
763{ 1193{
764 struct bau_payload_queue_entry *pqp;
765 unsigned long pa;
766 int pn; 1194 int pn;
1195 int cpu;
767 char *cp; 1196 char *cp;
1197 unsigned long pa;
1198 struct bau_payload_queue_entry *pqp;
1199 struct bau_payload_queue_entry *pqp_malloc;
1200 struct bau_control *bcp;
768 1201
769 pqp = (struct bau_payload_queue_entry *) kmalloc_node( 1202 pqp = (struct bau_payload_queue_entry *) kmalloc_node(
770 (DEST_Q_SIZE + 1) * sizeof(struct bau_payload_queue_entry), 1203 (DEST_Q_SIZE + 1) * sizeof(struct bau_payload_queue_entry),
771 GFP_KERNEL, node); 1204 GFP_KERNEL, node);
772 BUG_ON(!pqp); 1205 BUG_ON(!pqp);
1206 pqp_malloc = pqp;
773 1207
774 cp = (char *)pqp + 31; 1208 cp = (char *)pqp + 31;
775 pqp = (struct bau_payload_queue_entry *)(((unsigned long)cp >> 5) << 5); 1209 pqp = (struct bau_payload_queue_entry *)(((unsigned long)cp >> 5) << 5);
776 bau_tablesp->va_queue_first = pqp; 1210
1211 for_each_present_cpu(cpu) {
1212 if (pnode != uv_cpu_to_pnode(cpu))
1213 continue;
1214 /* for every cpu on this pnode: */
1215 bcp = &per_cpu(bau_control, cpu);
1216 bcp->va_queue_first = pqp;
1217 bcp->bau_msg_head = pqp;
1218 bcp->va_queue_last = pqp + (DEST_Q_SIZE - 1);
1219 }
777 /* 1220 /*
778 * need the pnode of where the memory was really allocated 1221 * need the pnode of where the memory was really allocated
779 */ 1222 */
780 pa = uv_gpa(pqp); 1223 pa = uv_gpa(pqp);
781 pn = uv_gpa_to_pnode(pa); 1224 pn = pa >> uv_nshift;
782 uv_write_global_mmr64(pnode, 1225 uv_write_global_mmr64(pnode,
783 UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST, 1226 UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST,
784 ((unsigned long)pn << UV_PAYLOADQ_PNODE_SHIFT) | 1227 ((unsigned long)pn << UV_PAYLOADQ_PNODE_SHIFT) |
785 uv_physnodeaddr(pqp)); 1228 uv_physnodeaddr(pqp));
786 uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL, 1229 uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL,
787 uv_physnodeaddr(pqp)); 1230 uv_physnodeaddr(pqp));
788 bau_tablesp->va_queue_last = pqp + (DEST_Q_SIZE - 1);
789 uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST, 1231 uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST,
790 (unsigned long) 1232 (unsigned long)
791 uv_physnodeaddr(bau_tablesp->va_queue_last)); 1233 uv_physnodeaddr(pqp + (DEST_Q_SIZE - 1)));
1234 /* in effect, all msg_type's are set to MSG_NOOP */
792 memset(pqp, 0, sizeof(struct bau_payload_queue_entry) * DEST_Q_SIZE); 1235 memset(pqp, 0, sizeof(struct bau_payload_queue_entry) * DEST_Q_SIZE);
793
794 return pqp;
795} 1236}
796 1237
797/* 1238/*
798 * Initialization of each UV blade's structures 1239 * Initialization of each UV hub's structures
799 */ 1240 */
800static int __init uv_init_blade(int blade) 1241static void __init uv_init_uvhub(int uvhub, int vector)
801{ 1242{
802 int node; 1243 int node;
803 int pnode; 1244 int pnode;
804 unsigned long pa;
805 unsigned long apicid; 1245 unsigned long apicid;
806 struct bau_desc *adp; 1246
807 struct bau_payload_queue_entry *pqp; 1247 node = uvhub_to_first_node(uvhub);
808 struct bau_control *bau_tablesp; 1248 pnode = uv_blade_to_pnode(uvhub);
809 1249 uv_activation_descriptor_init(node, pnode);
810 node = blade_to_first_node(blade); 1250 uv_payload_queue_init(node, pnode);
811 bau_tablesp = uv_table_bases_init(blade, node);
812 pnode = uv_blade_to_pnode(blade);
813 adp = uv_activation_descriptor_init(node, pnode);
814 pqp = uv_payload_queue_init(node, pnode, bau_tablesp);
815 uv_table_bases_finish(blade, bau_tablesp, adp);
816 /* 1251 /*
817 * the below initialization can't be in firmware because the 1252 * the below initialization can't be in firmware because the
818 * messaging IRQ will be determined by the OS 1253 * messaging IRQ will be determined by the OS
819 */ 1254 */
820 apicid = blade_to_first_apicid(blade); 1255 apicid = uvhub_to_first_apicid(uvhub);
821 pa = uv_read_global_mmr64(pnode, UVH_BAU_DATA_CONFIG);
822 uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG, 1256 uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG,
823 ((apicid << 32) | UV_BAU_MESSAGE)); 1257 ((apicid << 32) | vector));
824 return 0; 1258}
1259
1260/*
1261 * initialize the bau_control structure for each cpu
1262 */
1263static void uv_init_per_cpu(int nuvhubs)
1264{
1265 int i, j, k;
1266 int cpu;
1267 int pnode;
1268 int uvhub;
1269 short socket = 0;
1270 struct bau_control *bcp;
1271 struct uvhub_desc *bdp;
1272 struct socket_desc *sdp;
1273 struct bau_control *hmaster = NULL;
1274 struct bau_control *smaster = NULL;
1275 struct socket_desc {
1276 short num_cpus;
1277 short cpu_number[16];
1278 };
1279 struct uvhub_desc {
1280 short num_sockets;
1281 short num_cpus;
1282 short uvhub;
1283 short pnode;
1284 struct socket_desc socket[2];
1285 };
1286 struct uvhub_desc *uvhub_descs;
1287
1288 uvhub_descs = (struct uvhub_desc *)
1289 kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL);
1290 memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc));
1291 for_each_present_cpu(cpu) {
1292 bcp = &per_cpu(bau_control, cpu);
1293 memset(bcp, 0, sizeof(struct bau_control));
1294 spin_lock_init(&bcp->masks_lock);
1295 bcp->max_concurrent = uv_bau_max_concurrent;
1296 pnode = uv_cpu_hub_info(cpu)->pnode;
1297 uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
1298 bdp = &uvhub_descs[uvhub];
1299 bdp->num_cpus++;
1300 bdp->uvhub = uvhub;
1301 bdp->pnode = pnode;
1302 /* time interval to catch a hardware stay-busy bug */
1303 bcp->timeout_interval = millisec_2_cycles(3);
1304 /* kludge: assume uv_hub.h is constant */
1305 socket = (cpu_physical_id(cpu)>>5)&1;
1306 if (socket >= bdp->num_sockets)
1307 bdp->num_sockets = socket+1;
1308 sdp = &bdp->socket[socket];
1309 sdp->cpu_number[sdp->num_cpus] = cpu;
1310 sdp->num_cpus++;
1311 }
1312 socket = 0;
1313 for_each_possible_blade(uvhub) {
1314 bdp = &uvhub_descs[uvhub];
1315 for (i = 0; i < bdp->num_sockets; i++) {
1316 sdp = &bdp->socket[i];
1317 for (j = 0; j < sdp->num_cpus; j++) {
1318 cpu = sdp->cpu_number[j];
1319 bcp = &per_cpu(bau_control, cpu);
1320 bcp->cpu = cpu;
1321 if (j == 0) {
1322 smaster = bcp;
1323 if (i == 0)
1324 hmaster = bcp;
1325 }
1326 bcp->cpus_in_uvhub = bdp->num_cpus;
1327 bcp->cpus_in_socket = sdp->num_cpus;
1328 bcp->socket_master = smaster;
1329 bcp->uvhub_master = hmaster;
1330 for (k = 0; k < DEST_Q_SIZE; k++)
1331 bcp->socket_acknowledge_count[k] = 0;
1332 bcp->uvhub_cpu =
1333 uv_cpu_hub_info(cpu)->blade_processor_id;
1334 }
1335 socket++;
1336 }
1337 }
1338 kfree(uvhub_descs);
825} 1339}
826 1340
827/* 1341/*
@@ -829,38 +1343,54 @@ static int __init uv_init_blade(int blade)
829 */ 1343 */
830static int __init uv_bau_init(void) 1344static int __init uv_bau_init(void)
831{ 1345{
832 int blade; 1346 int uvhub;
833 int nblades; 1347 int pnode;
1348 int nuvhubs;
834 int cur_cpu; 1349 int cur_cpu;
1350 int vector;
1351 unsigned long mmr;
835 1352
836 if (!is_uv_system()) 1353 if (!is_uv_system())
837 return 0; 1354 return 0;
838 1355
1356 if (nobau)
1357 return 0;
1358
839 for_each_possible_cpu(cur_cpu) 1359 for_each_possible_cpu(cur_cpu)
840 zalloc_cpumask_var_node(&per_cpu(uv_flush_tlb_mask, cur_cpu), 1360 zalloc_cpumask_var_node(&per_cpu(uv_flush_tlb_mask, cur_cpu),
841 GFP_KERNEL, cpu_to_node(cur_cpu)); 1361 GFP_KERNEL, cpu_to_node(cur_cpu));
842 1362
843 uv_bau_retry_limit = 1; 1363 uv_bau_max_concurrent = MAX_BAU_CONCURRENT;
1364 uv_nshift = uv_hub_info->m_val;
844 uv_mmask = (1UL << uv_hub_info->m_val) - 1; 1365 uv_mmask = (1UL << uv_hub_info->m_val) - 1;
845 nblades = uv_num_possible_blades(); 1366 nuvhubs = uv_num_possible_blades();
846 1367
847 uv_bau_table_bases = (struct bau_control **) 1368 uv_init_per_cpu(nuvhubs);
848 kmalloc(nblades * sizeof(struct bau_control *), GFP_KERNEL);
849 BUG_ON(!uv_bau_table_bases);
850 1369
851 uv_partition_base_pnode = 0x7fffffff; 1370 uv_partition_base_pnode = 0x7fffffff;
852 for (blade = 0; blade < nblades; blade++) 1371 for (uvhub = 0; uvhub < nuvhubs; uvhub++)
853 if (uv_blade_nr_possible_cpus(blade) && 1372 if (uv_blade_nr_possible_cpus(uvhub) &&
854 (uv_blade_to_pnode(blade) < uv_partition_base_pnode)) 1373 (uv_blade_to_pnode(uvhub) < uv_partition_base_pnode))
855 uv_partition_base_pnode = uv_blade_to_pnode(blade); 1374 uv_partition_base_pnode = uv_blade_to_pnode(uvhub);
856 for (blade = 0; blade < nblades; blade++) 1375
857 if (uv_blade_nr_possible_cpus(blade)) 1376 vector = UV_BAU_MESSAGE;
858 uv_init_blade(blade); 1377 for_each_possible_blade(uvhub)
859 1378 if (uv_blade_nr_possible_cpus(uvhub))
860 alloc_intr_gate(UV_BAU_MESSAGE, uv_bau_message_intr1); 1379 uv_init_uvhub(uvhub, vector);
1380
861 uv_enable_timeouts(); 1381 uv_enable_timeouts();
1382 alloc_intr_gate(vector, uv_bau_message_intr1);
1383
1384 for_each_possible_blade(uvhub) {
1385 pnode = uv_blade_to_pnode(uvhub);
1386 /* INIT the bau */
1387 uv_write_global_mmr64(pnode, UVH_LB_BAU_SB_ACTIVATION_CONTROL,
1388 ((unsigned long)1 << 63));
1389 mmr = 1; /* should be 1 to broadcast to both sockets */
1390 uv_write_global_mmr64(pnode, UVH_BAU_DATA_BROADCAST, mmr);
1391 }
862 1392
863 return 0; 1393 return 0;
864} 1394}
865__initcall(uv_bau_init); 1395core_initcall(uv_bau_init);
866__initcall(uv_ptc_init); 1396core_initcall(uv_ptc_init);