author    Linus Torvalds <torvalds@linux-foundation.org>  2010-05-18 12:46:35 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2010-05-18 12:46:35 -0400
commit    537b60d17894b7c19a6060feae40299d7109d6e7 (patch)
tree      11a30267b4ecb7175d02215a995c8b6461304b9c /arch/x86/kernel/tlb_uv.c
parent    3ae684e1c48e6deedc9b9faff8fa1c391ca8a652 (diff)
parent    a289cc7c70da784a2d370b91885cab4f966dcb0f (diff)
Merge branch 'x86-uv-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'x86-uv-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86, UV: uv_irq.c: Fix all sparse warnings
  x86, UV: Improve BAU performance and error recovery
  x86, UV: Delete unneeded boot messages
  x86, UV: Clean up UV headers for MMR definitions
Diffstat (limited to 'arch/x86/kernel/tlb_uv.c')
-rw-r--r--  arch/x86/kernel/tlb_uv.c  1280
1 file changed, 906 insertions, 374 deletions
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c
index 17b03dd3a6b5..7fea555929e2 100644
--- a/arch/x86/kernel/tlb_uv.c
+++ b/arch/x86/kernel/tlb_uv.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * SGI UltraViolet TLB flush routines. 2 * SGI UltraViolet TLB flush routines.
3 * 3 *
4 * (c) 2008 Cliff Wickman <cpw@sgi.com>, SGI. 4 * (c) 2008-2010 Cliff Wickman <cpw@sgi.com>, SGI.
5 * 5 *
6 * This code is released under the GNU General Public License version 2 or 6 * This code is released under the GNU General Public License version 2 or
7 * later. 7 * later.
@@ -20,42 +20,67 @@
20#include <asm/idle.h> 20#include <asm/idle.h>
21#include <asm/tsc.h> 21#include <asm/tsc.h>
22#include <asm/irq_vectors.h> 22#include <asm/irq_vectors.h>
23#include <asm/timer.h>
23 24
24static struct bau_control **uv_bau_table_bases __read_mostly; 25struct msg_desc {
25static int uv_bau_retry_limit __read_mostly; 26 struct bau_payload_queue_entry *msg;
27 int msg_slot;
28 int sw_ack_slot;
29 struct bau_payload_queue_entry *va_queue_first;
30 struct bau_payload_queue_entry *va_queue_last;
31};
26 32
27/* base pnode in this partition */ 33#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD 0x000000000bUL
28static int uv_partition_base_pnode __read_mostly; 34
35static int uv_bau_max_concurrent __read_mostly;
36
37static int nobau;
38static int __init setup_nobau(char *arg)
39{
40 nobau = 1;
41 return 0;
42}
43early_param("nobau", setup_nobau);
29 44
30static unsigned long uv_mmask __read_mostly; 45/* base pnode in this partition */
46static int uv_partition_base_pnode __read_mostly;
47/* position of pnode (which is nasid>>1): */
48static int uv_nshift __read_mostly;
49static unsigned long uv_mmask __read_mostly;
31 50
32static DEFINE_PER_CPU(struct ptc_stats, ptcstats); 51static DEFINE_PER_CPU(struct ptc_stats, ptcstats);
33static DEFINE_PER_CPU(struct bau_control, bau_control); 52static DEFINE_PER_CPU(struct bau_control, bau_control);
53static DEFINE_PER_CPU(cpumask_var_t, uv_flush_tlb_mask);
54
55struct reset_args {
56 int sender;
57};
34 58
35/* 59/*
36 * Determine the first node on a blade. 60 * Determine the first node on a uvhub. 'Nodes' are used for kernel
61 * memory allocation.
37 */ 62 */
38static int __init blade_to_first_node(int blade) 63static int __init uvhub_to_first_node(int uvhub)
39{ 64{
40 int node, b; 65 int node, b;
41 66
42 for_each_online_node(node) { 67 for_each_online_node(node) {
43 b = uv_node_to_blade_id(node); 68 b = uv_node_to_blade_id(node);
44 if (blade == b) 69 if (uvhub == b)
45 return node; 70 return node;
46 } 71 }
47 return -1; /* shouldn't happen */ 72 return -1;
48} 73}
49 74
50/* 75/*
51 * Determine the apicid of the first cpu on a blade. 76 * Determine the apicid of the first cpu on a uvhub.
52 */ 77 */
53static int __init blade_to_first_apicid(int blade) 78static int __init uvhub_to_first_apicid(int uvhub)
54{ 79{
55 int cpu; 80 int cpu;
56 81
57 for_each_present_cpu(cpu) 82 for_each_present_cpu(cpu)
58 if (blade == uv_cpu_to_blade_id(cpu)) 83 if (uvhub == uv_cpu_to_blade_id(cpu))
59 return per_cpu(x86_cpu_to_apicid, cpu); 84 return per_cpu(x86_cpu_to_apicid, cpu);
60 return -1; 85 return -1;
61} 86}
@@ -68,195 +93,459 @@ static int __init blade_to_first_apicid(int blade)
68 * clear of the Timeout bit (as well) will free the resource. No reply will 93 * clear of the Timeout bit (as well) will free the resource. No reply will
69 * be sent (the hardware will only do one reply per message). 94 * be sent (the hardware will only do one reply per message).
70 */ 95 */
71static void uv_reply_to_message(int resource, 96static inline void uv_reply_to_message(struct msg_desc *mdp,
72 struct bau_payload_queue_entry *msg, 97 struct bau_control *bcp)
73 struct bau_msg_status *msp)
74{ 98{
75 unsigned long dw; 99 unsigned long dw;
100 struct bau_payload_queue_entry *msg;
76 101
77 dw = (1 << (resource + UV_SW_ACK_NPENDING)) | (1 << resource); 102 msg = mdp->msg;
103 if (!msg->canceled) {
104 dw = (msg->sw_ack_vector << UV_SW_ACK_NPENDING) |
105 msg->sw_ack_vector;
106 uv_write_local_mmr(
107 UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, dw);
108 }
78 msg->replied_to = 1; 109 msg->replied_to = 1;
79 msg->sw_ack_vector = 0; 110 msg->sw_ack_vector = 0;
80 if (msp)
81 msp->seen_by.bits = 0;
82 uv_write_local_mmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, dw);
83} 111}
84 112
85/* 113/*
86 * Do all the things a cpu should do for a TLB shootdown message. 114 * Process the receipt of a RETRY message
87 * Other cpu's may come here at the same time for this message.
88 */ 115 */
89static void uv_bau_process_message(struct bau_payload_queue_entry *msg, 116static inline void uv_bau_process_retry_msg(struct msg_desc *mdp,
90 int msg_slot, int sw_ack_slot) 117 struct bau_control *bcp)
91{ 118{
92 unsigned long this_cpu_mask; 119 int i;
93 struct bau_msg_status *msp; 120 int cancel_count = 0;
94 int cpu; 121 int slot2;
122 unsigned long msg_res;
123 unsigned long mmr = 0;
124 struct bau_payload_queue_entry *msg;
125 struct bau_payload_queue_entry *msg2;
126 struct ptc_stats *stat;
95 127
96 msp = __get_cpu_var(bau_control).msg_statuses + msg_slot; 128 msg = mdp->msg;
97 cpu = uv_blade_processor_id(); 129 stat = &per_cpu(ptcstats, bcp->cpu);
98 msg->number_of_cpus = 130 stat->d_retries++;
99 uv_blade_nr_online_cpus(uv_node_to_blade_id(numa_node_id())); 131 /*
100 this_cpu_mask = 1UL << cpu; 132 * cancel any message from msg+1 to the retry itself
101 if (msp->seen_by.bits & this_cpu_mask) 133 */
102 return; 134 for (msg2 = msg+1, i = 0; i < DEST_Q_SIZE; msg2++, i++) {
103 atomic_or_long(&msp->seen_by.bits, this_cpu_mask); 135 if (msg2 > mdp->va_queue_last)
136 msg2 = mdp->va_queue_first;
137 if (msg2 == msg)
138 break;
139
140 /* same conditions for cancellation as uv_do_reset */
141 if ((msg2->replied_to == 0) && (msg2->canceled == 0) &&
142 (msg2->sw_ack_vector) && ((msg2->sw_ack_vector &
143 msg->sw_ack_vector) == 0) &&
144 (msg2->sending_cpu == msg->sending_cpu) &&
145 (msg2->msg_type != MSG_NOOP)) {
146 slot2 = msg2 - mdp->va_queue_first;
147 mmr = uv_read_local_mmr
148 (UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
149 msg_res = ((msg2->sw_ack_vector << 8) |
150 msg2->sw_ack_vector);
151 /*
152 * This is a message retry; clear the resources held
153 * by the previous message only if they timed out.
154 * If it has not timed out we have an unexpected
155 * situation to report.
156 */
157 if (mmr & (msg_res << 8)) {
158 /*
159 * is the resource timed out?
160 * make everyone ignore the cancelled message.
161 */
162 msg2->canceled = 1;
163 stat->d_canceled++;
164 cancel_count++;
165 uv_write_local_mmr(
166 UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS,
167 (msg_res << 8) | msg_res);
168 } else
169 printk(KERN_INFO "note bau retry: no effect\n");
170 }
171 }
172 if (!cancel_count)
173 stat->d_nocanceled++;
174}
104 175
105 if (msg->replied_to == 1) 176/*
106 return; 177 * Do all the things a cpu should do for a TLB shootdown message.
178 * Other cpu's may come here at the same time for this message.
179 */
180static void uv_bau_process_message(struct msg_desc *mdp,
181 struct bau_control *bcp)
182{
183 int msg_ack_count;
184 short socket_ack_count = 0;
185 struct ptc_stats *stat;
186 struct bau_payload_queue_entry *msg;
187 struct bau_control *smaster = bcp->socket_master;
107 188
189 /*
190 * This must be a normal message, or retry of a normal message
191 */
192 msg = mdp->msg;
193 stat = &per_cpu(ptcstats, bcp->cpu);
108 if (msg->address == TLB_FLUSH_ALL) { 194 if (msg->address == TLB_FLUSH_ALL) {
109 local_flush_tlb(); 195 local_flush_tlb();
110 __get_cpu_var(ptcstats).alltlb++; 196 stat->d_alltlb++;
111 } else { 197 } else {
112 __flush_tlb_one(msg->address); 198 __flush_tlb_one(msg->address);
113 __get_cpu_var(ptcstats).onetlb++; 199 stat->d_onetlb++;
114 } 200 }
201 stat->d_requestee++;
202
203 /*
204 * One cpu on each uvhub has the additional job on a RETRY
205 * of releasing the resource held by the message that is
206 * being retried. That message is identified by sending
207 * cpu number.
208 */
209 if (msg->msg_type == MSG_RETRY && bcp == bcp->uvhub_master)
210 uv_bau_process_retry_msg(mdp, bcp);
115 211
116 __get_cpu_var(ptcstats).requestee++; 212 /*
213 * This is a sw_ack message, so we have to reply to it.
214 * Count each responding cpu on the socket. This avoids
215 * pinging the count's cache line back and forth between
216 * the sockets.
217 */
218 socket_ack_count = atomic_add_short_return(1, (struct atomic_short *)
219 &smaster->socket_acknowledge_count[mdp->msg_slot]);
220 if (socket_ack_count == bcp->cpus_in_socket) {
221 /*
222 * Both sockets dump their completed count total into
223 * the message's count.
224 */
225 smaster->socket_acknowledge_count[mdp->msg_slot] = 0;
226 msg_ack_count = atomic_add_short_return(socket_ack_count,
227 (struct atomic_short *)&msg->acknowledge_count);
228
229 if (msg_ack_count == bcp->cpus_in_uvhub) {
230 /*
231 * All cpus in uvhub saw it; reply
232 */
233 uv_reply_to_message(mdp, bcp);
234 }
235 }
117 236
118 atomic_inc_short(&msg->acknowledge_count); 237 return;
119 if (msg->number_of_cpus == msg->acknowledge_count)
120 uv_reply_to_message(sw_ack_slot, msg, msp);
121} 238}
122 239
123/* 240/*
124 * Examine the payload queue on one distribution node to see 241 * Determine the first cpu on a uvhub.
125 * which messages have not been seen, and which cpu(s) have not seen them. 242 */
243static int uvhub_to_first_cpu(int uvhub)
244{
245 int cpu;
246 for_each_present_cpu(cpu)
247 if (uvhub == uv_cpu_to_blade_id(cpu))
248 return cpu;
249 return -1;
250}
251
252/*
253 * Last resort when we get a large number of destination timeouts is
254 * to clear resources held by a given cpu.
255 * Do this with IPI so that all messages in the BAU message queue
256 * can be identified by their nonzero sw_ack_vector field.
126 * 257 *
127 * Returns the number of cpu's that have not responded. 258 * This is entered for a single cpu on the uvhub.
259 * The sender want's this uvhub to free a specific message's
260 * sw_ack resources.
128 */ 261 */
129static int uv_examine_destination(struct bau_control *bau_tablesp, int sender) 262static void
263uv_do_reset(void *ptr)
130{ 264{
131 struct bau_payload_queue_entry *msg;
132 struct bau_msg_status *msp;
133 int count = 0;
134 int i; 265 int i;
135 int j; 266 int slot;
267 int count = 0;
268 unsigned long mmr;
269 unsigned long msg_res;
270 struct bau_control *bcp;
271 struct reset_args *rap;
272 struct bau_payload_queue_entry *msg;
273 struct ptc_stats *stat;
136 274
137 for (msg = bau_tablesp->va_queue_first, i = 0; i < DEST_Q_SIZE; 275 bcp = &per_cpu(bau_control, smp_processor_id());
138 msg++, i++) { 276 rap = (struct reset_args *)ptr;
139 if ((msg->sending_cpu == sender) && (!msg->replied_to)) { 277 stat = &per_cpu(ptcstats, bcp->cpu);
140 msp = bau_tablesp->msg_statuses + i; 278 stat->d_resets++;
141 printk(KERN_DEBUG 279
142 "blade %d: address:%#lx %d of %d, not cpu(s): ", 280 /*
143 i, msg->address, msg->acknowledge_count, 281 * We're looking for the given sender, and
144 msg->number_of_cpus); 282 * will free its sw_ack resource.
145 for (j = 0; j < msg->number_of_cpus; j++) { 283 * If all cpu's finally responded after the timeout, its
146 if (!((1L << j) & msp->seen_by.bits)) { 284 * message 'replied_to' was set.
147 count++; 285 */
148 printk("%d ", j); 286 for (msg = bcp->va_queue_first, i = 0; i < DEST_Q_SIZE; msg++, i++) {
149 } 287 /* uv_do_reset: same conditions for cancellation as
288 uv_bau_process_retry_msg() */
289 if ((msg->replied_to == 0) &&
290 (msg->canceled == 0) &&
291 (msg->sending_cpu == rap->sender) &&
292 (msg->sw_ack_vector) &&
293 (msg->msg_type != MSG_NOOP)) {
294 /*
295 * make everyone else ignore this message
296 */
297 msg->canceled = 1;
298 slot = msg - bcp->va_queue_first;
299 count++;
300 /*
301 * only reset the resource if it is still pending
302 */
303 mmr = uv_read_local_mmr
304 (UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
305 msg_res = ((msg->sw_ack_vector << 8) |
306 msg->sw_ack_vector);
307 if (mmr & msg_res) {
308 stat->d_rcanceled++;
309 uv_write_local_mmr(
310 UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS,
311 msg_res);
150 } 312 }
151 printk("\n");
152 } 313 }
153 } 314 }
154 return count; 315 return;
155} 316}
156 317
157/* 318/*
158 * Examine the payload queue on all the distribution nodes to see 319 * Use IPI to get all target uvhubs to release resources held by
159 * which messages have not been seen, and which cpu(s) have not seen them. 320 * a given sending cpu number.
160 *
161 * Returns the number of cpu's that have not responded.
162 */ 321 */
163static int uv_examine_destinations(struct bau_target_nodemask *distribution) 322static void uv_reset_with_ipi(struct bau_target_uvhubmask *distribution,
323 int sender)
164{ 324{
165 int sender; 325 int uvhub;
166 int i; 326 int cpu;
167 int count = 0; 327 cpumask_t mask;
328 struct reset_args reset_args;
329
330 reset_args.sender = sender;
168 331
169 sender = smp_processor_id(); 332 cpus_clear(mask);
170 for (i = 0; i < sizeof(struct bau_target_nodemask) * BITSPERBYTE; i++) { 333 /* find a single cpu for each uvhub in this distribution mask */
171 if (!bau_node_isset(i, distribution)) 334 for (uvhub = 0;
335 uvhub < sizeof(struct bau_target_uvhubmask) * BITSPERBYTE;
336 uvhub++) {
337 if (!bau_uvhub_isset(uvhub, distribution))
172 continue; 338 continue;
173 count += uv_examine_destination(uv_bau_table_bases[i], sender); 339 /* find a cpu for this uvhub */
340 cpu = uvhub_to_first_cpu(uvhub);
341 cpu_set(cpu, mask);
174 } 342 }
175 return count; 343 /* IPI all cpus; Preemption is already disabled */
344 smp_call_function_many(&mask, uv_do_reset, (void *)&reset_args, 1);
345 return;
346}
347
348static inline unsigned long
349cycles_2_us(unsigned long long cyc)
350{
351 unsigned long long ns;
352 unsigned long us;
353 ns = (cyc * per_cpu(cyc2ns, smp_processor_id()))
354 >> CYC2NS_SCALE_FACTOR;
355 us = ns / 1000;
356 return us;
176} 357}
177 358
178/* 359/*
179 * wait for completion of a broadcast message 360 * wait for all cpus on this hub to finish their sends and go quiet
180 * 361 * leaves uvhub_quiesce set so that no new broadcasts are started by
181 * return COMPLETE, RETRY or GIVEUP 362 * bau_flush_send_and_wait()
363 */
364static inline void
365quiesce_local_uvhub(struct bau_control *hmaster)
366{
367 atomic_add_short_return(1, (struct atomic_short *)
368 &hmaster->uvhub_quiesce);
369}
370
371/*
372 * mark this quiet-requestor as done
373 */
374static inline void
375end_uvhub_quiesce(struct bau_control *hmaster)
376{
377 atomic_add_short_return(-1, (struct atomic_short *)
378 &hmaster->uvhub_quiesce);
379}
380
381/*
382 * Wait for completion of a broadcast software ack message
383 * return COMPLETE, RETRY(PLUGGED or TIMEOUT) or GIVEUP
182 */ 384 */
183static int uv_wait_completion(struct bau_desc *bau_desc, 385static int uv_wait_completion(struct bau_desc *bau_desc,
184 unsigned long mmr_offset, int right_shift) 386 unsigned long mmr_offset, int right_shift, int this_cpu,
387 struct bau_control *bcp, struct bau_control *smaster, long try)
185{ 388{
186 int exams = 0; 389 int relaxes = 0;
187 long destination_timeouts = 0;
188 long source_timeouts = 0;
189 unsigned long descriptor_status; 390 unsigned long descriptor_status;
391 unsigned long mmr;
392 unsigned long mask;
393 cycles_t ttime;
394 cycles_t timeout_time;
395 struct ptc_stats *stat = &per_cpu(ptcstats, this_cpu);
396 struct bau_control *hmaster;
397
398 hmaster = bcp->uvhub_master;
399 timeout_time = get_cycles() + bcp->timeout_interval;
190 400
401 /* spin on the status MMR, waiting for it to go idle */
191 while ((descriptor_status = (((unsigned long) 402 while ((descriptor_status = (((unsigned long)
192 uv_read_local_mmr(mmr_offset) >> 403 uv_read_local_mmr(mmr_offset) >>
193 right_shift) & UV_ACT_STATUS_MASK)) != 404 right_shift) & UV_ACT_STATUS_MASK)) !=
194 DESC_STATUS_IDLE) { 405 DESC_STATUS_IDLE) {
195 if (descriptor_status == DESC_STATUS_SOURCE_TIMEOUT) {
196 source_timeouts++;
197 if (source_timeouts > SOURCE_TIMEOUT_LIMIT)
198 source_timeouts = 0;
199 __get_cpu_var(ptcstats).s_retry++;
200 return FLUSH_RETRY;
201 }
202 /* 406 /*
203 * spin here looking for progress at the destinations 407 * Our software ack messages may be blocked because there are
408 * no swack resources available. As long as none of them
409 * has timed out hardware will NACK our message and its
410 * state will stay IDLE.
204 */ 411 */
205 if (descriptor_status == DESC_STATUS_DESTINATION_TIMEOUT) { 412 if (descriptor_status == DESC_STATUS_SOURCE_TIMEOUT) {
206 destination_timeouts++; 413 stat->s_stimeout++;
207 if (destination_timeouts > DESTINATION_TIMEOUT_LIMIT) { 414 return FLUSH_GIVEUP;
208 /* 415 } else if (descriptor_status ==
209 * returns number of cpus not responding 416 DESC_STATUS_DESTINATION_TIMEOUT) {
210 */ 417 stat->s_dtimeout++;
211 if (uv_examine_destinations 418 ttime = get_cycles();
212 (&bau_desc->distribution) == 0) { 419
213 __get_cpu_var(ptcstats).d_retry++; 420 /*
214 return FLUSH_RETRY; 421 * Our retries may be blocked by all destination
215 } 422 * swack resources being consumed, and a timeout
216 exams++; 423 * pending. In that case hardware returns the
217 if (exams >= uv_bau_retry_limit) { 424 * ERROR that looks like a destination timeout.
218 printk(KERN_DEBUG 425 */
219 "uv_flush_tlb_others"); 426 if (cycles_2_us(ttime - bcp->send_message) < BIOS_TO) {
220 printk("giving up on cpu %d\n", 427 bcp->conseccompletes = 0;
221 smp_processor_id()); 428 return FLUSH_RETRY_PLUGGED;
429 }
430
431 bcp->conseccompletes = 0;
432 return FLUSH_RETRY_TIMEOUT;
433 } else {
434 /*
435 * descriptor_status is still BUSY
436 */
437 cpu_relax();
438 relaxes++;
439 if (relaxes >= 10000) {
440 relaxes = 0;
441 if (get_cycles() > timeout_time) {
442 quiesce_local_uvhub(hmaster);
443
444 /* single-thread the register change */
445 spin_lock(&hmaster->masks_lock);
446 mmr = uv_read_local_mmr(mmr_offset);
447 mask = 0UL;
448 mask |= (3UL < right_shift);
449 mask = ~mask;
450 mmr &= mask;
451 uv_write_local_mmr(mmr_offset, mmr);
452 spin_unlock(&hmaster->masks_lock);
453 end_uvhub_quiesce(hmaster);
454 stat->s_busy++;
222 return FLUSH_GIVEUP; 455 return FLUSH_GIVEUP;
223 } 456 }
224 /*
225 * delays can hang the simulator
226 udelay(1000);
227 */
228 destination_timeouts = 0;
229 } 457 }
230 } 458 }
231 cpu_relax();
232 } 459 }
460 bcp->conseccompletes++;
233 return FLUSH_COMPLETE; 461 return FLUSH_COMPLETE;
234} 462}
235 463
464static inline cycles_t
465sec_2_cycles(unsigned long sec)
466{
467 unsigned long ns;
468 cycles_t cyc;
469
470 ns = sec * 1000000000;
471 cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id()));
472 return cyc;
473}
474
475/*
476 * conditionally add 1 to *v, unless *v is >= u
477 * return 0 if we cannot add 1 to *v because it is >= u
478 * return 1 if we can add 1 to *v because it is < u
479 * the add is atomic
480 *
481 * This is close to atomic_add_unless(), but this allows the 'u' value
482 * to be lowered below the current 'v'. atomic_add_unless can only stop
483 * on equal.
484 */
485static inline int atomic_inc_unless_ge(spinlock_t *lock, atomic_t *v, int u)
486{
487 spin_lock(lock);
488 if (atomic_read(v) >= u) {
489 spin_unlock(lock);
490 return 0;
491 }
492 atomic_inc(v);
493 spin_unlock(lock);
494 return 1;
495}
496
236/** 497/**
237 * uv_flush_send_and_wait 498 * uv_flush_send_and_wait
238 * 499 *
239 * Send a broadcast and wait for a broadcast message to complete. 500 * Send a broadcast and wait for it to complete.
240 * 501 *
241 * The flush_mask contains the cpus the broadcast was sent to. 502 * The flush_mask contains the cpus the broadcast is to be sent to, plus
503 * cpus that are on the local uvhub.
242 * 504 *
243 * Returns NULL if all remote flushing was done. The mask is zeroed. 505 * Returns NULL if all flushing represented in the mask was done. The mask
506 * is zeroed.
244 * Returns @flush_mask if some remote flushing remains to be done. The 507 * Returns @flush_mask if some remote flushing remains to be done. The
245 * mask will have some bits still set. 508 * mask will have some bits still set, representing any cpus on the local
509 * uvhub (not current cpu) and any on remote uvhubs if the broadcast failed.
246 */ 510 */
247const struct cpumask *uv_flush_send_and_wait(int cpu, int this_pnode, 511const struct cpumask *uv_flush_send_and_wait(struct bau_desc *bau_desc,
248 struct bau_desc *bau_desc, 512 struct cpumask *flush_mask,
249 struct cpumask *flush_mask) 513 struct bau_control *bcp)
250{ 514{
251 int completion_status = 0;
252 int right_shift; 515 int right_shift;
253 int tries = 0; 516 int uvhub;
254 int pnode;
255 int bit; 517 int bit;
518 int completion_status = 0;
519 int seq_number = 0;
520 long try = 0;
521 int cpu = bcp->uvhub_cpu;
522 int this_cpu = bcp->cpu;
523 int this_uvhub = bcp->uvhub;
256 unsigned long mmr_offset; 524 unsigned long mmr_offset;
257 unsigned long index; 525 unsigned long index;
258 cycles_t time1; 526 cycles_t time1;
259 cycles_t time2; 527 cycles_t time2;
528 struct ptc_stats *stat = &per_cpu(ptcstats, bcp->cpu);
529 struct bau_control *smaster = bcp->socket_master;
530 struct bau_control *hmaster = bcp->uvhub_master;
531
532 /*
533 * Spin here while there are hmaster->max_concurrent or more active
534 * descriptors. This is the per-uvhub 'throttle'.
535 */
536 if (!atomic_inc_unless_ge(&hmaster->uvhub_lock,
537 &hmaster->active_descriptor_count,
538 hmaster->max_concurrent)) {
539 stat->s_throttles++;
540 do {
541 cpu_relax();
542 } while (!atomic_inc_unless_ge(&hmaster->uvhub_lock,
543 &hmaster->active_descriptor_count,
544 hmaster->max_concurrent));
545 }
546
547 while (hmaster->uvhub_quiesce)
548 cpu_relax();
260 549
261 if (cpu < UV_CPUS_PER_ACT_STATUS) { 550 if (cpu < UV_CPUS_PER_ACT_STATUS) {
262 mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0; 551 mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
@@ -268,24 +557,108 @@ const struct cpumask *uv_flush_send_and_wait(int cpu, int this_pnode,
268 } 557 }
269 time1 = get_cycles(); 558 time1 = get_cycles();
270 do { 559 do {
271 tries++; 560 /*
561 * Every message from any given cpu gets a unique message
562 * sequence number. But retries use that same number.
563 * Our message may have timed out at the destination because
564 * all sw-ack resources are in use and there is a timeout
565 * pending there. In that case, our last send never got
566 * placed into the queue and we need to persist until it
567 * does.
568 *
569 * Make any retry a type MSG_RETRY so that the destination will
570 * free any resource held by a previous message from this cpu.
571 */
572 if (try == 0) {
573 /* use message type set by the caller the first time */
574 seq_number = bcp->message_number++;
575 } else {
576 /* use RETRY type on all the rest; same sequence */
577 bau_desc->header.msg_type = MSG_RETRY;
578 stat->s_retry_messages++;
579 }
580 bau_desc->header.sequence = seq_number;
272 index = (1UL << UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT) | 581 index = (1UL << UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT) |
273 cpu; 582 bcp->uvhub_cpu;
583 bcp->send_message = get_cycles();
584
274 uv_write_local_mmr(UVH_LB_BAU_SB_ACTIVATION_CONTROL, index); 585 uv_write_local_mmr(UVH_LB_BAU_SB_ACTIVATION_CONTROL, index);
586
587 try++;
275 completion_status = uv_wait_completion(bau_desc, mmr_offset, 588 completion_status = uv_wait_completion(bau_desc, mmr_offset,
276 right_shift); 589 right_shift, this_cpu, bcp, smaster, try);
277 } while (completion_status == FLUSH_RETRY); 590
591 if (completion_status == FLUSH_RETRY_PLUGGED) {
592 /*
593 * Our retries may be blocked by all destination swack
594 * resources being consumed, and a timeout pending. In
595 * that case hardware immediately returns the ERROR
596 * that looks like a destination timeout.
597 */
598 udelay(TIMEOUT_DELAY);
599 bcp->plugged_tries++;
600 if (bcp->plugged_tries >= PLUGSB4RESET) {
601 bcp->plugged_tries = 0;
602 quiesce_local_uvhub(hmaster);
603 spin_lock(&hmaster->queue_lock);
604 uv_reset_with_ipi(&bau_desc->distribution,
605 this_cpu);
606 spin_unlock(&hmaster->queue_lock);
607 end_uvhub_quiesce(hmaster);
608 bcp->ipi_attempts++;
609 stat->s_resets_plug++;
610 }
611 } else if (completion_status == FLUSH_RETRY_TIMEOUT) {
612 hmaster->max_concurrent = 1;
613 bcp->timeout_tries++;
614 udelay(TIMEOUT_DELAY);
615 if (bcp->timeout_tries >= TIMEOUTSB4RESET) {
616 bcp->timeout_tries = 0;
617 quiesce_local_uvhub(hmaster);
618 spin_lock(&hmaster->queue_lock);
619 uv_reset_with_ipi(&bau_desc->distribution,
620 this_cpu);
621 spin_unlock(&hmaster->queue_lock);
622 end_uvhub_quiesce(hmaster);
623 bcp->ipi_attempts++;
624 stat->s_resets_timeout++;
625 }
626 }
627 if (bcp->ipi_attempts >= 3) {
628 bcp->ipi_attempts = 0;
629 completion_status = FLUSH_GIVEUP;
630 break;
631 }
632 cpu_relax();
633 } while ((completion_status == FLUSH_RETRY_PLUGGED) ||
634 (completion_status == FLUSH_RETRY_TIMEOUT));
278 time2 = get_cycles(); 635 time2 = get_cycles();
279 __get_cpu_var(ptcstats).sflush += (time2 - time1);
280 if (tries > 1)
281 __get_cpu_var(ptcstats).retriesok++;
282 636
283 if (completion_status == FLUSH_GIVEUP) { 637 if ((completion_status == FLUSH_COMPLETE) && (bcp->conseccompletes > 5)
638 && (hmaster->max_concurrent < hmaster->max_concurrent_constant))
639 hmaster->max_concurrent++;
640
641 /*
642 * hold any cpu not timing out here; no other cpu currently held by
643 * the 'throttle' should enter the activation code
644 */
645 while (hmaster->uvhub_quiesce)
646 cpu_relax();
647 atomic_dec(&hmaster->active_descriptor_count);
648
649 /* guard against cycles wrap */
650 if (time2 > time1)
651 stat->s_time += (time2 - time1);
652 else
653 stat->s_requestor--; /* don't count this one */
654 if (completion_status == FLUSH_COMPLETE && try > 1)
655 stat->s_retriesok++;
656 else if (completion_status == FLUSH_GIVEUP) {
284 /* 657 /*
285 * Cause the caller to do an IPI-style TLB shootdown on 658 * Cause the caller to do an IPI-style TLB shootdown on
286 * the cpu's, all of which are still in the mask. 659 * the target cpu's, all of which are still in the mask.
287 */ 660 */
288 __get_cpu_var(ptcstats).ptc_i++; 661 stat->s_giveup++;
289 return flush_mask; 662 return flush_mask;
290 } 663 }
291 664
@@ -294,18 +667,17 @@ const struct cpumask *uv_flush_send_and_wait(int cpu, int this_pnode,
294 * use the IPI method of shootdown on them. 667 * use the IPI method of shootdown on them.
295 */ 668 */
296 for_each_cpu(bit, flush_mask) { 669 for_each_cpu(bit, flush_mask) {
297 pnode = uv_cpu_to_pnode(bit); 670 uvhub = uv_cpu_to_blade_id(bit);
298 if (pnode == this_pnode) 671 if (uvhub == this_uvhub)
299 continue; 672 continue;
300 cpumask_clear_cpu(bit, flush_mask); 673 cpumask_clear_cpu(bit, flush_mask);
301 } 674 }
302 if (!cpumask_empty(flush_mask)) 675 if (!cpumask_empty(flush_mask))
303 return flush_mask; 676 return flush_mask;
677
304 return NULL; 678 return NULL;
305} 679}
306 680
307static DEFINE_PER_CPU(cpumask_var_t, uv_flush_tlb_mask);
308
309/** 681/**
310 * uv_flush_tlb_others - globally purge translation cache of a virtual 682 * uv_flush_tlb_others - globally purge translation cache of a virtual
311 * address or all TLB's 683 * address or all TLB's
@@ -322,8 +694,8 @@ static DEFINE_PER_CPU(cpumask_var_t, uv_flush_tlb_mask);
322 * The caller has derived the cpumask from the mm_struct. This function 694 * The caller has derived the cpumask from the mm_struct. This function
323 * is called only if there are bits set in the mask. (e.g. flush_tlb_page()) 695 * is called only if there are bits set in the mask. (e.g. flush_tlb_page())
324 * 696 *
325 * The cpumask is converted into a nodemask of the nodes containing 697 * The cpumask is converted into a uvhubmask of the uvhubs containing
326 * the cpus. 698 * those cpus.
327 * 699 *
328 * Note that this function should be called with preemption disabled. 700 * Note that this function should be called with preemption disabled.
329 * 701 *
@@ -335,52 +707,82 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
335 struct mm_struct *mm, 707 struct mm_struct *mm,
336 unsigned long va, unsigned int cpu) 708 unsigned long va, unsigned int cpu)
337{ 709{
338 struct cpumask *flush_mask = __get_cpu_var(uv_flush_tlb_mask); 710 int remotes;
339 int i; 711 int tcpu;
340 int bit; 712 int uvhub;
341 int pnode;
342 int uv_cpu;
343 int this_pnode;
344 int locals = 0; 713 int locals = 0;
345 struct bau_desc *bau_desc; 714 struct bau_desc *bau_desc;
715 struct cpumask *flush_mask;
716 struct ptc_stats *stat;
717 struct bau_control *bcp;
346 718
347 cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu)); 719 if (nobau)
720 return cpumask;
348 721
349 uv_cpu = uv_blade_processor_id(); 722 bcp = &per_cpu(bau_control, cpu);
350 this_pnode = uv_hub_info->pnode; 723 /*
351 bau_desc = __get_cpu_var(bau_control).descriptor_base; 724 * Each sending cpu has a per-cpu mask which it fills from the caller's
352 bau_desc += UV_ITEMS_PER_DESCRIPTOR * uv_cpu; 725 * cpu mask. Only remote cpus are converted to uvhubs and copied.
726 */
727 flush_mask = (struct cpumask *)per_cpu(uv_flush_tlb_mask, cpu);
728 /*
729 * copy cpumask to flush_mask, removing current cpu
730 * (current cpu should already have been flushed by the caller and
731 * should never be returned if we return flush_mask)
732 */
733 cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));
734 if (cpu_isset(cpu, *cpumask))
735 locals++; /* current cpu was targeted */
353 736
354 bau_nodes_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE); 737 bau_desc = bcp->descriptor_base;
738 bau_desc += UV_ITEMS_PER_DESCRIPTOR * bcp->uvhub_cpu;
355 739
356 i = 0; 740 bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
357 for_each_cpu(bit, flush_mask) { 741 remotes = 0;
358 pnode = uv_cpu_to_pnode(bit); 742 for_each_cpu(tcpu, flush_mask) {
359 BUG_ON(pnode > (UV_DISTRIBUTION_SIZE - 1)); 743 uvhub = uv_cpu_to_blade_id(tcpu);
360 if (pnode == this_pnode) { 744 if (uvhub == bcp->uvhub) {
361 locals++; 745 locals++;
362 continue; 746 continue;
363 } 747 }
364 bau_node_set(pnode - uv_partition_base_pnode, 748 bau_uvhub_set(uvhub, &bau_desc->distribution);
365 &bau_desc->distribution); 749 remotes++;
366 i++;
367 } 750 }
368 if (i == 0) { 751 if (remotes == 0) {
369 /* 752 /*
370 * no off_node flushing; return status for local node 753 * No off_hub flushing; return status for local hub.
754 * Return the caller's mask if all were local (the current
755 * cpu may be in that mask).
371 */ 756 */
372 if (locals) 757 if (locals)
373 return flush_mask; 758 return cpumask;
374 else 759 else
375 return NULL; 760 return NULL;
376 } 761 }
377 __get_cpu_var(ptcstats).requestor++; 762 stat = &per_cpu(ptcstats, cpu);
378 __get_cpu_var(ptcstats).ntargeted += i; 763 stat->s_requestor++;
764 stat->s_ntargcpu += remotes;
765 remotes = bau_uvhub_weight(&bau_desc->distribution);
766 stat->s_ntarguvhub += remotes;
767 if (remotes >= 16)
768 stat->s_ntarguvhub16++;
769 else if (remotes >= 8)
770 stat->s_ntarguvhub8++;
771 else if (remotes >= 4)
772 stat->s_ntarguvhub4++;
773 else if (remotes >= 2)
774 stat->s_ntarguvhub2++;
775 else
776 stat->s_ntarguvhub1++;
379 777
380 bau_desc->payload.address = va; 778 bau_desc->payload.address = va;
381 bau_desc->payload.sending_cpu = cpu; 779 bau_desc->payload.sending_cpu = cpu;
382 780
383 return uv_flush_send_and_wait(uv_cpu, this_pnode, bau_desc, flush_mask); 781 /*
782 * uv_flush_send_and_wait returns null if all cpu's were messaged, or
783 * the adjusted flush_mask if any cpu's were not messaged.
784 */
785 return uv_flush_send_and_wait(bau_desc, flush_mask, bcp);
384} 786}
385 787
386/* 788/*
@@ -389,87 +791,70 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
389 * 791 *
390 * We received a broadcast assist message. 792 * We received a broadcast assist message.
391 * 793 *
392 * Interrupts may have been disabled; this interrupt could represent 794 * Interrupts are disabled; this interrupt could represent
393 * the receipt of several messages. 795 * the receipt of several messages.
394 * 796 *
395 * All cores/threads on this node get this interrupt. 797 * All cores/threads on this hub get this interrupt.
396 * The last one to see it does the s/w ack. 798 * The last one to see it does the software ack.
397 * (the resource will not be freed until noninterruptable cpus see this 799 * (the resource will not be freed until noninterruptable cpus see this
398 * interrupt; hardware will timeout the s/w ack and reply ERROR) 800 * interrupt; hardware may timeout the s/w ack and reply ERROR)
399 */ 801 */
400void uv_bau_message_interrupt(struct pt_regs *regs) 802void uv_bau_message_interrupt(struct pt_regs *regs)
401{ 803{
402 struct bau_payload_queue_entry *va_queue_first;
403 struct bau_payload_queue_entry *va_queue_last;
404 struct bau_payload_queue_entry *msg;
405 struct pt_regs *old_regs = set_irq_regs(regs);
406 cycles_t time1;
407 cycles_t time2;
408 int msg_slot;
409 int sw_ack_slot;
410 int fw;
411 int count = 0; 804 int count = 0;
412 unsigned long local_pnode; 805 cycles_t time_start;
413 806 struct bau_payload_queue_entry *msg;
414 ack_APIC_irq(); 807 struct bau_control *bcp;
415 exit_idle(); 808 struct ptc_stats *stat;
416 irq_enter(); 809 struct msg_desc msgdesc;
417 810
418 time1 = get_cycles(); 811 time_start = get_cycles();
419 812 bcp = &per_cpu(bau_control, smp_processor_id());
420 local_pnode = uv_blade_to_pnode(uv_numa_blade_id()); 813 stat = &per_cpu(ptcstats, smp_processor_id());
421 814 msgdesc.va_queue_first = bcp->va_queue_first;
422 va_queue_first = __get_cpu_var(bau_control).va_queue_first; 815 msgdesc.va_queue_last = bcp->va_queue_last;
423 va_queue_last = __get_cpu_var(bau_control).va_queue_last; 816 msg = bcp->bau_msg_head;
424
425 msg = __get_cpu_var(bau_control).bau_msg_head;
426 while (msg->sw_ack_vector) { 817 while (msg->sw_ack_vector) {
427 count++; 818 count++;
428 fw = msg->sw_ack_vector; 819 msgdesc.msg_slot = msg - msgdesc.va_queue_first;
429 msg_slot = msg - va_queue_first; 820 msgdesc.sw_ack_slot = ffs(msg->sw_ack_vector) - 1;
430 sw_ack_slot = ffs(fw) - 1; 821 msgdesc.msg = msg;
431 822 uv_bau_process_message(&msgdesc, bcp);
432 uv_bau_process_message(msg, msg_slot, sw_ack_slot);
433
434 msg++; 823 msg++;
435 if (msg > va_queue_last) 824 if (msg > msgdesc.va_queue_last)
436 msg = va_queue_first; 825 msg = msgdesc.va_queue_first;
437 __get_cpu_var(bau_control).bau_msg_head = msg; 826 bcp->bau_msg_head = msg;
438 } 827 }
828 stat->d_time += (get_cycles() - time_start);
439 if (!count) 829 if (!count)
440 __get_cpu_var(ptcstats).nomsg++; 830 stat->d_nomsg++;
441 else if (count > 1) 831 else if (count > 1)
442 __get_cpu_var(ptcstats).multmsg++; 832 stat->d_multmsg++;
443 833 ack_APIC_irq();
444 time2 = get_cycles();
445 __get_cpu_var(ptcstats).dflush += (time2 - time1);
446
447 irq_exit();
448 set_irq_regs(old_regs);
449} 834}
450 835
451/* 836/*
452 * uv_enable_timeouts 837 * uv_enable_timeouts
453 * 838 *
454 * Each target blade (i.e. blades that have cpu's) needs to have 839 * Each target uvhub (i.e. a uvhub that has no cpu's) needs to have
455 * shootdown message timeouts enabled. The timeout does not cause 840 * shootdown message timeouts enabled. The timeout does not cause
456 * an interrupt, but causes an error message to be returned to 841 * an interrupt, but causes an error message to be returned to
457 * the sender. 842 * the sender.
458 */ 843 */
459static void uv_enable_timeouts(void) 844static void uv_enable_timeouts(void)
460{ 845{
461 int blade; 846 int uvhub;
462 int nblades; 847 int nuvhubs;
463 int pnode; 848 int pnode;
464 unsigned long mmr_image; 849 unsigned long mmr_image;
465 850
466 nblades = uv_num_possible_blades(); 851 nuvhubs = uv_num_possible_blades();
467 852
468 for (blade = 0; blade < nblades; blade++) { 853 for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
469 if (!uv_blade_nr_possible_cpus(blade)) 854 if (!uv_blade_nr_possible_cpus(uvhub))
470 continue; 855 continue;
471 856
472 pnode = uv_blade_to_pnode(blade); 857 pnode = uv_blade_to_pnode(uvhub);
473 mmr_image = 858 mmr_image =
474 uv_read_global_mmr64(pnode, UVH_LB_BAU_MISC_CONTROL); 859 uv_read_global_mmr64(pnode, UVH_LB_BAU_MISC_CONTROL);
475 /* 860 /*
@@ -479,16 +864,16 @@ static void uv_enable_timeouts(void)
479 * To program the period, the SOFT_ACK_MODE must be off. 864 * To program the period, the SOFT_ACK_MODE must be off.
480 */ 865 */
481 mmr_image &= ~((unsigned long)1 << 866 mmr_image &= ~((unsigned long)1 <<
482 UV_ENABLE_INTD_SOFT_ACK_MODE_SHIFT); 867 UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT);
483 uv_write_global_mmr64 868 uv_write_global_mmr64
484 (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image); 869 (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
485 /* 870 /*
486 * Set the 4-bit period. 871 * Set the 4-bit period.
487 */ 872 */
488 mmr_image &= ~((unsigned long)0xf << 873 mmr_image &= ~((unsigned long)0xf <<
489 UV_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHIFT); 874 UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT);
490 mmr_image |= (UV_INTD_SOFT_ACK_TIMEOUT_PERIOD << 875 mmr_image |= (UV_INTD_SOFT_ACK_TIMEOUT_PERIOD <<
491 UV_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHIFT); 876 UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT);
492 uv_write_global_mmr64 877 uv_write_global_mmr64
493 (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image); 878 (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
494 /* 879 /*
@@ -497,7 +882,7 @@ static void uv_enable_timeouts(void)
497 * indicated in bits 2:0 (7 causes all of them to timeout). 882 * indicated in bits 2:0 (7 causes all of them to timeout).
498 */ 883 */
499 mmr_image |= ((unsigned long)1 << 884 mmr_image |= ((unsigned long)1 <<
500 UV_ENABLE_INTD_SOFT_ACK_MODE_SHIFT); 885 UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT);
501 uv_write_global_mmr64 886 uv_write_global_mmr64
502 (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image); 887 (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
503 } 888 }
@@ -522,9 +907,20 @@ static void uv_ptc_seq_stop(struct seq_file *file, void *data)
522{ 907{
523} 908}
524 909
910static inline unsigned long long
911millisec_2_cycles(unsigned long millisec)
912{
913 unsigned long ns;
914 unsigned long long cyc;
915
916 ns = millisec * 1000;
917 cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id()));
918 return cyc;
919}
920
525/* 921/*
526 * Display the statistics thru /proc 922 * Display the statistics thru /proc.
527 * data points to the cpu number 923 * 'data' points to the cpu number
528 */ 924 */
529static int uv_ptc_seq_show(struct seq_file *file, void *data) 925static int uv_ptc_seq_show(struct seq_file *file, void *data)
530{ 926{
@@ -535,78 +931,155 @@ static int uv_ptc_seq_show(struct seq_file *file, void *data)
535 931
536 if (!cpu) { 932 if (!cpu) {
537 seq_printf(file, 933 seq_printf(file,
538 "# cpu requestor requestee one all sretry dretry ptc_i "); 934 "# cpu sent stime numuvhubs numuvhubs16 numuvhubs8 ");
539 seq_printf(file, 935 seq_printf(file,
540 "sw_ack sflush dflush sok dnomsg dmult starget\n"); 936 "numuvhubs4 numuvhubs2 numuvhubs1 numcpus dto ");
937 seq_printf(file,
938 "retries rok resetp resett giveup sto bz throt ");
939 seq_printf(file,
940 "sw_ack recv rtime all ");
941 seq_printf(file,
942 "one mult none retry canc nocan reset rcan\n");
541 } 943 }
542 if (cpu < num_possible_cpus() && cpu_online(cpu)) { 944 if (cpu < num_possible_cpus() && cpu_online(cpu)) {
543 stat = &per_cpu(ptcstats, cpu); 945 stat = &per_cpu(ptcstats, cpu);
544 seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld ", 946 /* source side statistics */
545 cpu, stat->requestor, 947 seq_printf(file,
546 stat->requestee, stat->onetlb, stat->alltlb, 948 "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
547 stat->s_retry, stat->d_retry, stat->ptc_i); 949 cpu, stat->s_requestor, cycles_2_us(stat->s_time),
548 seq_printf(file, "%lx %ld %ld %ld %ld %ld %ld\n", 950 stat->s_ntarguvhub, stat->s_ntarguvhub16,
951 stat->s_ntarguvhub8, stat->s_ntarguvhub4,
952 stat->s_ntarguvhub2, stat->s_ntarguvhub1,
953 stat->s_ntargcpu, stat->s_dtimeout);
954 seq_printf(file, "%ld %ld %ld %ld %ld %ld %ld %ld ",
955 stat->s_retry_messages, stat->s_retriesok,
956 stat->s_resets_plug, stat->s_resets_timeout,
957 stat->s_giveup, stat->s_stimeout,
958 stat->s_busy, stat->s_throttles);
959 /* destination side statistics */
960 seq_printf(file,
961 "%lx %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n",
549 uv_read_global_mmr64(uv_cpu_to_pnode(cpu), 962 uv_read_global_mmr64(uv_cpu_to_pnode(cpu),
550 UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE), 963 UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE),
551 stat->sflush, stat->dflush, 964 stat->d_requestee, cycles_2_us(stat->d_time),
552 stat->retriesok, stat->nomsg, 965 stat->d_alltlb, stat->d_onetlb, stat->d_multmsg,
553 stat->multmsg, stat->ntargeted); 966 stat->d_nomsg, stat->d_retries, stat->d_canceled,
967 stat->d_nocanceled, stat->d_resets,
968 stat->d_rcanceled);
554 } 969 }
555 970
556 return 0; 971 return 0;
557} 972}
558 973
559/* 974/*
975 * -1: resetf the statistics
560 * 0: display meaning of the statistics 976 * 0: display meaning of the statistics
561 * >0: retry limit 977 * >0: maximum concurrent active descriptors per uvhub (throttle)
562 */ 978 */
563static ssize_t uv_ptc_proc_write(struct file *file, const char __user *user, 979static ssize_t uv_ptc_proc_write(struct file *file, const char __user *user,
564 size_t count, loff_t *data) 980 size_t count, loff_t *data)
565{ 981{
566 long newmode; 982 int cpu;
983 long input_arg;
567 char optstr[64]; 984 char optstr[64];
985 struct ptc_stats *stat;
986 struct bau_control *bcp;
568 987
569 if (count == 0 || count > sizeof(optstr)) 988 if (count == 0 || count > sizeof(optstr))
570 return -EINVAL; 989 return -EINVAL;
571 if (copy_from_user(optstr, user, count)) 990 if (copy_from_user(optstr, user, count))
572 return -EFAULT; 991 return -EFAULT;
573 optstr[count - 1] = '\0'; 992 optstr[count - 1] = '\0';
574 if (strict_strtoul(optstr, 10, &newmode) < 0) { 993 if (strict_strtol(optstr, 10, &input_arg) < 0) {
575 printk(KERN_DEBUG "%s is invalid\n", optstr); 994 printk(KERN_DEBUG "%s is invalid\n", optstr);
576 return -EINVAL; 995 return -EINVAL;
577 } 996 }
578 997
579 if (newmode == 0) { 998 if (input_arg == 0) {
580 printk(KERN_DEBUG "# cpu: cpu number\n"); 999 printk(KERN_DEBUG "# cpu: cpu number\n");
1000 printk(KERN_DEBUG "Sender statistics:\n");
1001 printk(KERN_DEBUG
1002 "sent: number of shootdown messages sent\n");
1003 printk(KERN_DEBUG
1004 "stime: time spent sending messages\n");
1005 printk(KERN_DEBUG
1006 "numuvhubs: number of hubs targeted with shootdown\n");
1007 printk(KERN_DEBUG
1008 "numuvhubs16: number times 16 or more hubs targeted\n");
1009 printk(KERN_DEBUG
1010 "numuvhubs8: number times 8 or more hubs targeted\n");
1011 printk(KERN_DEBUG
1012 "numuvhubs4: number times 4 or more hubs targeted\n");
1013 printk(KERN_DEBUG
1014 "numuvhubs2: number times 2 or more hubs targeted\n");
1015 printk(KERN_DEBUG
1016 "numuvhubs1: number times 1 hub targeted\n");
1017 printk(KERN_DEBUG
1018 "numcpus: number of cpus targeted with shootdown\n");
1019 printk(KERN_DEBUG
1020 "dto: number of destination timeouts\n");
1021 printk(KERN_DEBUG
1022 "retries: destination timeout retries sent\n");
1023 printk(KERN_DEBUG
1024 "rok: : destination timeouts successfully retried\n");
1025 printk(KERN_DEBUG
1026 "resetp: ipi-style resource resets for plugs\n");
1027 printk(KERN_DEBUG
1028 "resett: ipi-style resource resets for timeouts\n");
1029 printk(KERN_DEBUG
1030 "giveup: fall-backs to ipi-style shootdowns\n");
1031 printk(KERN_DEBUG
1032 "sto: number of source timeouts\n");
1033 printk(KERN_DEBUG
1034 "bz: number of stay-busy's\n");
1035 printk(KERN_DEBUG
1036 "throt: number times spun in throttle\n");
1037 printk(KERN_DEBUG "Destination side statistics:\n");
581 printk(KERN_DEBUG 1038 printk(KERN_DEBUG
582 "requestor: times this cpu was the flush requestor\n"); 1039 "sw_ack: image of UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE\n");
583 printk(KERN_DEBUG 1040 printk(KERN_DEBUG
584 "requestee: times this cpu was requested to flush its TLBs\n"); 1041 "recv: shootdown messages received\n");
585 printk(KERN_DEBUG 1042 printk(KERN_DEBUG
586 "one: times requested to flush a single address\n"); 1043 "rtime: time spent processing messages\n");
587 printk(KERN_DEBUG 1044 printk(KERN_DEBUG
588 "all: times requested to flush all TLB's\n"); 1045 "all: shootdown all-tlb messages\n");
589 printk(KERN_DEBUG 1046 printk(KERN_DEBUG
590 "sretry: number of retries of source-side timeouts\n"); 1047 "one: shootdown one-tlb messages\n");
591 printk(KERN_DEBUG 1048 printk(KERN_DEBUG
592 "dretry: number of retries of destination-side timeouts\n"); 1049 "mult: interrupts that found multiple messages\n");
593 printk(KERN_DEBUG 1050 printk(KERN_DEBUG
594 "ptc_i: times UV fell through to IPI-style flushes\n"); 1051 "none: interrupts that found no messages\n");
595 printk(KERN_DEBUG 1052 printk(KERN_DEBUG
596 "sw_ack: image of UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE\n"); 1053 "retry: number of retry messages processed\n");
597 printk(KERN_DEBUG 1054 printk(KERN_DEBUG
598 "sflush_us: cycles spent in uv_flush_tlb_others()\n"); 1055 "canc: number messages canceled by retries\n");
599 printk(KERN_DEBUG 1056 printk(KERN_DEBUG
600 "dflush_us: cycles spent in handling flush requests\n"); 1057 "nocan: number retries that found nothing to cancel\n");
601 printk(KERN_DEBUG "sok: successes on retry\n");
602 printk(KERN_DEBUG "dnomsg: interrupts with no message\n");
603 printk(KERN_DEBUG 1058 printk(KERN_DEBUG
604 "dmult: interrupts with multiple messages\n"); 1059 "reset: number of ipi-style reset requests processed\n");
605 printk(KERN_DEBUG "starget: nodes targeted\n"); 1060 printk(KERN_DEBUG
1061 "rcan: number messages canceled by reset requests\n");
1062 } else if (input_arg == -1) {
1063 for_each_present_cpu(cpu) {
1064 stat = &per_cpu(ptcstats, cpu);
1065 memset(stat, 0, sizeof(struct ptc_stats));
1066 }
606 } else { 1067 } else {
607 uv_bau_retry_limit = newmode; 1068 uv_bau_max_concurrent = input_arg;
608 printk(KERN_DEBUG "timeout retry limit:%d\n", 1069 bcp = &per_cpu(bau_control, smp_processor_id());
609 uv_bau_retry_limit); 1070 if (uv_bau_max_concurrent < 1 ||
1071 uv_bau_max_concurrent > bcp->cpus_in_uvhub) {
1072 printk(KERN_DEBUG
1073 "Error: BAU max concurrent %d; %d is invalid\n",
1074 bcp->max_concurrent, uv_bau_max_concurrent);
1075 return -EINVAL;
1076 }
1077 printk(KERN_DEBUG "Set BAU max concurrent:%d\n",
1078 uv_bau_max_concurrent);
1079 for_each_present_cpu(cpu) {
1080 bcp = &per_cpu(bau_control, cpu);
1081 bcp->max_concurrent = uv_bau_max_concurrent;
1082 }
610 } 1083 }
611 1084
612 return count; 1085 return count;
@@ -650,79 +1123,30 @@ static int __init uv_ptc_init(void)
650} 1123}
651 1124
652/* 1125/*
653 * begin the initialization of the per-blade control structures
654 */
655static struct bau_control * __init uv_table_bases_init(int blade, int node)
656{
657 int i;
658 struct bau_msg_status *msp;
659 struct bau_control *bau_tabp;
660
661 bau_tabp =
662 kmalloc_node(sizeof(struct bau_control), GFP_KERNEL, node);
663 BUG_ON(!bau_tabp);
664
665 bau_tabp->msg_statuses =
666 kmalloc_node(sizeof(struct bau_msg_status) *
667 DEST_Q_SIZE, GFP_KERNEL, node);
668 BUG_ON(!bau_tabp->msg_statuses);
669
670 for (i = 0, msp = bau_tabp->msg_statuses; i < DEST_Q_SIZE; i++, msp++)
671 bau_cpubits_clear(&msp->seen_by, (int)
672 uv_blade_nr_possible_cpus(blade));
673
674 uv_bau_table_bases[blade] = bau_tabp;
675
676 return bau_tabp;
677}
678
679/*
680 * finish the initialization of the per-blade control structures
681 */
682static void __init
683uv_table_bases_finish(int blade,
684 struct bau_control *bau_tablesp,
685 struct bau_desc *adp)
686{
687 struct bau_control *bcp;
688 int cpu;
689
690 for_each_present_cpu(cpu) {
691 if (blade != uv_cpu_to_blade_id(cpu))
692 continue;
693
694 bcp = (struct bau_control *)&per_cpu(bau_control, cpu);
695 bcp->bau_msg_head = bau_tablesp->va_queue_first;
696 bcp->va_queue_first = bau_tablesp->va_queue_first;
697 bcp->va_queue_last = bau_tablesp->va_queue_last;
698 bcp->msg_statuses = bau_tablesp->msg_statuses;
699 bcp->descriptor_base = adp;
700 }
701}
702
703/*
704 * initialize the sending side's sending buffers 1126 * initialize the sending side's sending buffers
705 */ 1127 */
706static struct bau_desc * __init 1128static void
707uv_activation_descriptor_init(int node, int pnode) 1129uv_activation_descriptor_init(int node, int pnode)
708{ 1130{
709 int i; 1131 int i;
1132 int cpu;
710 unsigned long pa; 1133 unsigned long pa;
711 unsigned long m; 1134 unsigned long m;
712 unsigned long n; 1135 unsigned long n;
713 struct bau_desc *adp; 1136 struct bau_desc *bau_desc;
714 struct bau_desc *ad2; 1137 struct bau_desc *bd2;
1138 struct bau_control *bcp;
715 1139
716 /* 1140 /*
717 * each bau_desc is 64 bytes; there are 8 (UV_ITEMS_PER_DESCRIPTOR) 1141 * each bau_desc is 64 bytes; there are 8 (UV_ITEMS_PER_DESCRIPTOR)
718 * per cpu; and up to 32 (UV_ADP_SIZE) cpu's per blade 1142 * per cpu; and up to 32 (UV_ADP_SIZE) cpu's per uvhub
719 */ 1143 */
720 adp = (struct bau_desc *)kmalloc_node(sizeof(struct bau_desc)* 1144 bau_desc = (struct bau_desc *)kmalloc_node(sizeof(struct bau_desc)*
721 UV_ADP_SIZE*UV_ITEMS_PER_DESCRIPTOR, GFP_KERNEL, node); 1145 UV_ADP_SIZE*UV_ITEMS_PER_DESCRIPTOR, GFP_KERNEL, node);
722 BUG_ON(!adp); 1146 BUG_ON(!bau_desc);
723 1147
724 pa = uv_gpa(adp); /* need the real nasid*/ 1148 pa = uv_gpa(bau_desc); /* need the real nasid*/
725 n = uv_gpa_to_pnode(pa); 1149 n = pa >> uv_nshift;
726 m = pa & uv_mmask; 1150 m = pa & uv_mmask;
727 1151
728 uv_write_global_mmr64(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE, 1152 uv_write_global_mmr64(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE,
@@ -731,96 +1155,188 @@ uv_activation_descriptor_init(int node, int pnode)
731 /* 1155 /*
732 * initializing all 8 (UV_ITEMS_PER_DESCRIPTOR) descriptors for each 1156 * initializing all 8 (UV_ITEMS_PER_DESCRIPTOR) descriptors for each
733 * cpu even though we only use the first one; one descriptor can 1157 * cpu even though we only use the first one; one descriptor can
734 * describe a broadcast to 256 nodes. 1158 * describe a broadcast to 256 uv hubs.
735 */ 1159 */
736 for (i = 0, ad2 = adp; i < (UV_ADP_SIZE*UV_ITEMS_PER_DESCRIPTOR); 1160 for (i = 0, bd2 = bau_desc; i < (UV_ADP_SIZE*UV_ITEMS_PER_DESCRIPTOR);
737 i++, ad2++) { 1161 i++, bd2++) {
738 memset(ad2, 0, sizeof(struct bau_desc)); 1162 memset(bd2, 0, sizeof(struct bau_desc));
739 ad2->header.sw_ack_flag = 1; 1163 bd2->header.sw_ack_flag = 1;
740 /* 1164 /*
741 * base_dest_nodeid is the first node in the partition, so 1165 * base_dest_nodeid is the nasid (pnode<<1) of the first uvhub
742 * the bit map will indicate partition-relative node numbers. 1166 * in the partition. The bit map will indicate uvhub numbers,
743 * note that base_dest_nodeid is actually a nasid. 1167 * which are 0-N in a partition. Pnodes are unique system-wide.
744 */ 1168 */
745 ad2->header.base_dest_nodeid = uv_partition_base_pnode << 1; 1169 bd2->header.base_dest_nodeid = uv_partition_base_pnode << 1;
746 ad2->header.dest_subnodeid = 0x10; /* the LB */ 1170 bd2->header.dest_subnodeid = 0x10; /* the LB */
747 ad2->header.command = UV_NET_ENDPOINT_INTD; 1171 bd2->header.command = UV_NET_ENDPOINT_INTD;
748 ad2->header.int_both = 1; 1172 bd2->header.int_both = 1;
749 /* 1173 /*
750 * all others need to be set to zero: 1174 * all others need to be set to zero:
751 * fairness chaining multilevel count replied_to 1175 * fairness chaining multilevel count replied_to
752 */ 1176 */
753 } 1177 }
754 return adp; 1178 for_each_present_cpu(cpu) {
1179 if (pnode != uv_blade_to_pnode(uv_cpu_to_blade_id(cpu)))
1180 continue;
1181 bcp = &per_cpu(bau_control, cpu);
1182 bcp->descriptor_base = bau_desc;
1183 }
755} 1184}
756 1185
757/* 1186/*
758 * initialize the destination side's receiving buffers 1187 * initialize the destination side's receiving buffers
1188 * entered for each uvhub in the partition
1189 * - node is first node (kernel memory notion) on the uvhub
1190 * - pnode is the uvhub's physical identifier
759 */ 1191 */
760static struct bau_payload_queue_entry * __init 1192static void
761uv_payload_queue_init(int node, int pnode, struct bau_control *bau_tablesp) 1193uv_payload_queue_init(int node, int pnode)
762{ 1194{
763 struct bau_payload_queue_entry *pqp;
764 unsigned long pa;
765 int pn; 1195 int pn;
1196 int cpu;
766 char *cp; 1197 char *cp;
1198 unsigned long pa;
1199 struct bau_payload_queue_entry *pqp;
1200 struct bau_payload_queue_entry *pqp_malloc;
1201 struct bau_control *bcp;
767 1202
768 pqp = (struct bau_payload_queue_entry *) kmalloc_node( 1203 pqp = (struct bau_payload_queue_entry *) kmalloc_node(
769 (DEST_Q_SIZE + 1) * sizeof(struct bau_payload_queue_entry), 1204 (DEST_Q_SIZE + 1) * sizeof(struct bau_payload_queue_entry),
770 GFP_KERNEL, node); 1205 GFP_KERNEL, node);
771 BUG_ON(!pqp); 1206 BUG_ON(!pqp);
1207 pqp_malloc = pqp;
772 1208
773 cp = (char *)pqp + 31; 1209 cp = (char *)pqp + 31;
774 pqp = (struct bau_payload_queue_entry *)(((unsigned long)cp >> 5) << 5); 1210 pqp = (struct bau_payload_queue_entry *)(((unsigned long)cp >> 5) << 5);
775 bau_tablesp->va_queue_first = pqp; 1211
1212 for_each_present_cpu(cpu) {
1213 if (pnode != uv_cpu_to_pnode(cpu))
1214 continue;
1215 /* for every cpu on this pnode: */
1216 bcp = &per_cpu(bau_control, cpu);
1217 bcp->va_queue_first = pqp;
1218 bcp->bau_msg_head = pqp;
1219 bcp->va_queue_last = pqp + (DEST_Q_SIZE - 1);
1220 }
776 /* 1221 /*
777 * need the pnode of where the memory was really allocated 1222 * need the pnode of where the memory was really allocated
778 */ 1223 */
779 pa = uv_gpa(pqp); 1224 pa = uv_gpa(pqp);
780 pn = uv_gpa_to_pnode(pa); 1225 pn = pa >> uv_nshift;
781 uv_write_global_mmr64(pnode, 1226 uv_write_global_mmr64(pnode,
782 UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST, 1227 UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST,
783 ((unsigned long)pn << UV_PAYLOADQ_PNODE_SHIFT) | 1228 ((unsigned long)pn << UV_PAYLOADQ_PNODE_SHIFT) |
784 uv_physnodeaddr(pqp)); 1229 uv_physnodeaddr(pqp));
785 uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL, 1230 uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL,
786 uv_physnodeaddr(pqp)); 1231 uv_physnodeaddr(pqp));
787 bau_tablesp->va_queue_last = pqp + (DEST_Q_SIZE - 1);
788 uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST, 1232 uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST,
789 (unsigned long) 1233 (unsigned long)
790 uv_physnodeaddr(bau_tablesp->va_queue_last)); 1234 uv_physnodeaddr(pqp + (DEST_Q_SIZE - 1)));
1235 /* in effect, all msg_type's are set to MSG_NOOP */
791 memset(pqp, 0, sizeof(struct bau_payload_queue_entry) * DEST_Q_SIZE); 1236 memset(pqp, 0, sizeof(struct bau_payload_queue_entry) * DEST_Q_SIZE);
792
793 return pqp;
794} 1237}
795 1238
796/* 1239/*
797 * Initialization of each UV blade's structures 1240 * Initialization of each UV hub's structures
798 */ 1241 */
799static int __init uv_init_blade(int blade) 1242static void __init uv_init_uvhub(int uvhub, int vector)
800{ 1243{
801 int node; 1244 int node;
802 int pnode; 1245 int pnode;
803 unsigned long pa;
804 unsigned long apicid; 1246 unsigned long apicid;
805 struct bau_desc *adp; 1247
806 struct bau_payload_queue_entry *pqp; 1248 node = uvhub_to_first_node(uvhub);
807 struct bau_control *bau_tablesp; 1249 pnode = uv_blade_to_pnode(uvhub);
808 1250 uv_activation_descriptor_init(node, pnode);
809 node = blade_to_first_node(blade); 1251 uv_payload_queue_init(node, pnode);
810 bau_tablesp = uv_table_bases_init(blade, node);
811 pnode = uv_blade_to_pnode(blade);
812 adp = uv_activation_descriptor_init(node, pnode);
813 pqp = uv_payload_queue_init(node, pnode, bau_tablesp);
814 uv_table_bases_finish(blade, bau_tablesp, adp);
815 /* 1252 /*
816 * the below initialization can't be in firmware because the 1253 * the below initialization can't be in firmware because the
817 * messaging IRQ will be determined by the OS 1254 * messaging IRQ will be determined by the OS
818 */ 1255 */
819 apicid = blade_to_first_apicid(blade); 1256 apicid = uvhub_to_first_apicid(uvhub);
820 pa = uv_read_global_mmr64(pnode, UVH_BAU_DATA_CONFIG);
821 uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG, 1257 uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG,
822 ((apicid << 32) | UV_BAU_MESSAGE)); 1258 ((apicid << 32) | vector));
823 return 0; 1259}
1260
1261/*
1262 * initialize the bau_control structure for each cpu
1263 */
1264static void uv_init_per_cpu(int nuvhubs)
1265{
1266 int i, j, k;
1267 int cpu;
1268 int pnode;
1269 int uvhub;
1270 short socket = 0;
1271 struct bau_control *bcp;
1272 struct uvhub_desc *bdp;
1273 struct socket_desc *sdp;
1274 struct bau_control *hmaster = NULL;
1275 struct bau_control *smaster = NULL;
1276 struct socket_desc {
1277 short num_cpus;
1278 short cpu_number[16];
1279 };
1280 struct uvhub_desc {
1281 short num_sockets;
1282 short num_cpus;
1283 short uvhub;
1284 short pnode;
1285 struct socket_desc socket[2];
1286 };
1287 struct uvhub_desc *uvhub_descs;
1288
1289 uvhub_descs = (struct uvhub_desc *)
1290 kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL);
1291 memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc));
1292 for_each_present_cpu(cpu) {
1293 bcp = &per_cpu(bau_control, cpu);
1294 memset(bcp, 0, sizeof(struct bau_control));
1295 spin_lock_init(&bcp->masks_lock);
1296 bcp->max_concurrent = uv_bau_max_concurrent;
1297 pnode = uv_cpu_hub_info(cpu)->pnode;
1298 uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
1299 bdp = &uvhub_descs[uvhub];
1300 bdp->num_cpus++;
1301 bdp->uvhub = uvhub;
1302 bdp->pnode = pnode;
1303 /* time interval to catch a hardware stay-busy bug */
1304 bcp->timeout_interval = millisec_2_cycles(3);
1305 /* kludge: assume uv_hub.h is constant */
1306 socket = (cpu_physical_id(cpu)>>5)&1;
1307 if (socket >= bdp->num_sockets)
1308 bdp->num_sockets = socket+1;
1309 sdp = &bdp->socket[socket];
1310 sdp->cpu_number[sdp->num_cpus] = cpu;
1311 sdp->num_cpus++;
1312 }
1313 socket = 0;
1314 for_each_possible_blade(uvhub) {
1315 bdp = &uvhub_descs[uvhub];
1316 for (i = 0; i < bdp->num_sockets; i++) {
1317 sdp = &bdp->socket[i];
1318 for (j = 0; j < sdp->num_cpus; j++) {
1319 cpu = sdp->cpu_number[j];
1320 bcp = &per_cpu(bau_control, cpu);
1321 bcp->cpu = cpu;
1322 if (j == 0) {
1323 smaster = bcp;
1324 if (i == 0)
1325 hmaster = bcp;
1326 }
1327 bcp->cpus_in_uvhub = bdp->num_cpus;
1328 bcp->cpus_in_socket = sdp->num_cpus;
1329 bcp->socket_master = smaster;
1330 bcp->uvhub_master = hmaster;
1331 for (k = 0; k < DEST_Q_SIZE; k++)
1332 bcp->socket_acknowledge_count[k] = 0;
1333 bcp->uvhub_cpu =
1334 uv_cpu_hub_info(cpu)->blade_processor_id;
1335 }
1336 socket++;
1337 }
1338 }
1339 kfree(uvhub_descs);
824} 1340}
825 1341
826/* 1342/*
@@ -828,38 +1344,54 @@ static int __init uv_init_blade(int blade)
828 */ 1344 */
829static int __init uv_bau_init(void) 1345static int __init uv_bau_init(void)
830{ 1346{
831 int blade; 1347 int uvhub;
832 int nblades; 1348 int pnode;
1349 int nuvhubs;
833 int cur_cpu; 1350 int cur_cpu;
1351 int vector;
1352 unsigned long mmr;
834 1353
835 if (!is_uv_system()) 1354 if (!is_uv_system())
836 return 0; 1355 return 0;
837 1356
1357 if (nobau)
1358 return 0;
1359
838 for_each_possible_cpu(cur_cpu) 1360 for_each_possible_cpu(cur_cpu)
839 zalloc_cpumask_var_node(&per_cpu(uv_flush_tlb_mask, cur_cpu), 1361 zalloc_cpumask_var_node(&per_cpu(uv_flush_tlb_mask, cur_cpu),
840 GFP_KERNEL, cpu_to_node(cur_cpu)); 1362 GFP_KERNEL, cpu_to_node(cur_cpu));
841 1363
842 uv_bau_retry_limit = 1; 1364 uv_bau_max_concurrent = MAX_BAU_CONCURRENT;
1365 uv_nshift = uv_hub_info->m_val;
843 uv_mmask = (1UL << uv_hub_info->m_val) - 1; 1366 uv_mmask = (1UL << uv_hub_info->m_val) - 1;
844 nblades = uv_num_possible_blades(); 1367 nuvhubs = uv_num_possible_blades();
845 1368
846 uv_bau_table_bases = (struct bau_control **) 1369 uv_init_per_cpu(nuvhubs);
847 kmalloc(nblades * sizeof(struct bau_control *), GFP_KERNEL);
848 BUG_ON(!uv_bau_table_bases);
849 1370
850 uv_partition_base_pnode = 0x7fffffff; 1371 uv_partition_base_pnode = 0x7fffffff;
851 for (blade = 0; blade < nblades; blade++) 1372 for (uvhub = 0; uvhub < nuvhubs; uvhub++)
852 if (uv_blade_nr_possible_cpus(blade) && 1373 if (uv_blade_nr_possible_cpus(uvhub) &&
853 (uv_blade_to_pnode(blade) < uv_partition_base_pnode)) 1374 (uv_blade_to_pnode(uvhub) < uv_partition_base_pnode))
854 uv_partition_base_pnode = uv_blade_to_pnode(blade); 1375 uv_partition_base_pnode = uv_blade_to_pnode(uvhub);
855 for (blade = 0; blade < nblades; blade++) 1376
856 if (uv_blade_nr_possible_cpus(blade)) 1377 vector = UV_BAU_MESSAGE;
857 uv_init_blade(blade); 1378 for_each_possible_blade(uvhub)
858 1379 if (uv_blade_nr_possible_cpus(uvhub))
859 alloc_intr_gate(UV_BAU_MESSAGE, uv_bau_message_intr1); 1380 uv_init_uvhub(uvhub, vector);
1381
860 uv_enable_timeouts(); 1382 uv_enable_timeouts();
1383 alloc_intr_gate(vector, uv_bau_message_intr1);
1384
1385 for_each_possible_blade(uvhub) {
1386 pnode = uv_blade_to_pnode(uvhub);
1387 /* INIT the bau */
1388 uv_write_global_mmr64(pnode, UVH_LB_BAU_SB_ACTIVATION_CONTROL,
1389 ((unsigned long)1 << 63));
1390 mmr = 1; /* should be 1 to broadcast to both sockets */
1391 uv_write_global_mmr64(pnode, UVH_BAU_DATA_BROADCAST, mmr);
1392 }
861 1393
862 return 0; 1394 return 0;
863} 1395}
864__initcall(uv_bau_init); 1396core_initcall(uv_bau_init);
865__initcall(uv_ptc_init); 1397core_initcall(uv_ptc_init);