-rw-r--r--  arch/x86/kernel/Makefile    |   2 +-
-rw-r--r--  arch/x86/kernel/entry_64.S  |   4 +
-rw-r--r--  arch/x86/kernel/tlb_64.c    |   5 +
-rw-r--r--  arch/x86/kernel/tlb_uv.c    | 736 ++++++++++++++++++++++++++++++++
-rw-r--r--  include/asm-x86/uv/uv_bau.h | 331 ++++++++++++++
5 files changed, 1077 insertions(+), 1 deletion(-)
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 605995e11ea7..4078e98f0125 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -94,7 +94,7 @@ obj-$(CONFIG_OLPC) += olpc.o
 ###
 # 64 bit specific files
 ifeq ($(CONFIG_X86_64),y)
-        obj-y				+= genapic_64.o genapic_flat_64.o genx2apic_uv_x.o
+        obj-y				+= genapic_64.o genapic_flat_64.o genx2apic_uv_x.o tlb_uv.o
         obj-$(CONFIG_X86_PM_TIMER)	+= pmtimer_64.o
         obj-$(CONFIG_AUDIT)		+= audit_64.o
 
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 556a8df522a7..6fd1987466eb 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -720,6 +720,10 @@ ENTRY(apic_timer_interrupt)
 	apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
 END(apic_timer_interrupt)
 
+ENTRY(uv_bau_message_intr1)
+	apicinterrupt 220,uv_bau_message_interrupt
+END(uv_bau_message_intr1)
+
 ENTRY(error_interrupt)
 	apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
 END(error_interrupt)
diff --git a/arch/x86/kernel/tlb_64.c b/arch/x86/kernel/tlb_64.c
index a1f07d793202..fc132113bdab 100644
--- a/arch/x86/kernel/tlb_64.c
+++ b/arch/x86/kernel/tlb_64.c
@@ -15,6 +15,8 @@
 #include <asm/proto.h>
 #include <asm/apicdef.h>
 #include <asm/idle.h>
+#include <asm/uv/uv_hub.h>
+#include <asm/uv/uv_bau.h>
 
 #include <mach_ipi.h>
 /*
@@ -162,6 +164,9 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
 	union smp_flush_state *f;
 	cpumask_t cpumask = *cpumaskp;
 
+	if (is_uv_system() && uv_flush_tlb_others(&cpumask, mm, va))
+		return;
+
 	/* Caller has disabled preemption */
 	sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
 	f = &per_cpu(flush_state, sender);
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c
new file mode 100644
index 000000000000..28e7c68d9d78
--- /dev/null
+++ b/arch/x86/kernel/tlb_uv.c
@@ -0,0 +1,736 @@
+/*
+ * SGI UltraViolet TLB flush routines.
+ *
+ * (c) 2008 Cliff Wickman <cpw@sgi.com>, SGI.
+ *
+ * This code is released under the GNU General Public License version 2 or
+ * later.
+ */
+#include <linux/mc146818rtc.h>
+#include <linux/proc_fs.h>
+#include <linux/kernel.h>
+
+#include <asm/mach-bigsmp/mach_apic.h>
+#include <asm/mmu_context.h>
+#include <asm/idle.h>
+#include <asm/genapic.h>
+#include <asm/uv/uv_hub.h>
+#include <asm/uv/uv_mmrs.h>
+#include <asm/uv/uv_bau.h>
+
+struct bau_control **uv_bau_table_bases;
+static int uv_bau_retry_limit;
+static int uv_nshift;		/* position of pnode (which is nasid>>1) */
+static unsigned long uv_mmask;
+
+char *status_table[] = {
+	"IDLE",
+	"ACTIVE",
+	"DESTINATION TIMEOUT",
+	"SOURCE TIMEOUT"
+};
+
+DEFINE_PER_CPU(struct ptc_stats, ptcstats);
+DEFINE_PER_CPU(struct bau_control, bau_control);
+
+/*
+ * Free a software acknowledge hardware resource by clearing its Pending
+ * bit. This will return a reply to the sender.
+ * If the message has timed out, a reply has already been sent by the
+ * hardware but the resource has not been released. In that case our
+ * clear of the Timeout bit (as well) will free the resource. No reply will
+ * be sent (the hardware will only do one reply per message).
+ */
+static void
+uv_reply_to_message(int resource,
+		    struct bau_payload_queue_entry *msg,
+		    struct bau_msg_status *msp)
+{
+	int fw;
+
+	/* clear both the Timeout and the Pending bit of this resource */
+	fw = (1 << (resource + UV_SW_ACK_NPENDING)) | (1 << resource);
+	msg->replied_to = 1;
+	msg->sw_ack_vector = 0;
+	if (msp)
+		msp->seen_by.bits = 0;
+	uv_write_local_mmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, fw);
+	return;
+}
+
+/*
+ * Do all the things a cpu should do for a TLB shootdown message.
+ * Other cpus may come here at the same time for this message.
+ */
+static void
+uv_bau_process_message(struct bau_payload_queue_entry *msg,
+		       int msg_slot, int sw_ack_slot)
+{
+	int cpu;
+	unsigned long this_cpu_mask;
+	struct bau_msg_status *msp;
+
+	msp = __get_cpu_var(bau_control).msg_statuses + msg_slot;
+	cpu = uv_blade_processor_id();
+	msg->number_of_cpus =
+	    uv_blade_nr_online_cpus(uv_node_to_blade_id(numa_node_id()));
+	this_cpu_mask = (unsigned long)1 << cpu;
+	if (msp->seen_by.bits & this_cpu_mask)
+		return;
+	atomic_or_long(&msp->seen_by.bits, this_cpu_mask);
+
+	if (msg->replied_to == 1)
+		return;
+
+	if (msg->address == TLB_FLUSH_ALL) {
+		local_flush_tlb();
+		__get_cpu_var(ptcstats).alltlb++;
+	} else {
+		__flush_tlb_one(msg->address);
+		__get_cpu_var(ptcstats).onetlb++;
+	}
+
+	__get_cpu_var(ptcstats).requestee++;
+
+	atomic_inc_short(&msg->acknowledge_count);
+	if (msg->number_of_cpus == msg->acknowledge_count)
+		uv_reply_to_message(sw_ack_slot, msg, msp);
+	return;
+}
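+
+/*
+ * Note the two levels of acknowledgment above: seen_by keeps a cpu from
+ * processing the same message twice, while acknowledge_count decides
+ * which cpu (the last one needed) frees the resource and replies to the
+ * sender via uv_reply_to_message().
+ */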
+
+/*
+ * Examine the payload queue on all the distribution nodes to see
+ * which messages have not been seen, and which cpu(s) have not seen them.
+ *
+ * Returns the number of cpus that have not responded.
+ */
+static int
+uv_examine_destinations(struct bau_target_nodemask *distribution)
+{
+	int sender;
+	int i;
+	int j;
+	int k;
+	int count = 0;
+	struct bau_control *bau_tablesp;
+	struct bau_payload_queue_entry *msg;
+	struct bau_msg_status *msp;
+
+	sender = smp_processor_id();
+	for (i = 0; i < (sizeof(struct bau_target_nodemask) * BITSPERBYTE);
+	     i++) {
+		if (bau_node_isset(i, distribution)) {
+			bau_tablesp = uv_bau_table_bases[i];
+			for (msg = bau_tablesp->va_queue_first, j = 0;
+			     j < DESTINATION_PAYLOAD_QUEUE_SIZE; msg++, j++) {
+				if ((msg->sending_cpu == sender) &&
+				    (!msg->replied_to)) {
+					msp = bau_tablesp->msg_statuses + j;
+					printk(KERN_DEBUG
+					"blade %d: address:%#lx %d of %d, not cpu(s): ",
+					       i, msg->address,
+					       msg->acknowledge_count,
+					       msg->number_of_cpus);
+					for (k = 0; k < msg->number_of_cpus;
+					     k++) {
+						if (!((long)1 << k & msp->
+						      seen_by.bits)) {
+							count++;
+							printk("%d ", k);
+						}
+					}
+					printk("\n");
+				}
+			}
+		}
+	}
+	return count;
+}
+
+/**
+ * uv_flush_tlb_others - globally purge translation cache of a virtual
+ * address or all TLBs
+ * @cpumaskp: mask of all cpus in which the address is to be removed
+ * @mm: mm_struct containing virtual address range
+ * @va: virtual address to be removed (or TLB_FLUSH_ALL for all TLBs on cpu)
+ *
+ * This is the entry point for initiating any UV global TLB shootdown.
+ *
+ * Purges the translation caches of all specified processors of the given
+ * virtual address, or purges all TLBs on the specified processors.
+ *
+ * The caller has derived the cpumaskp from the mm_struct and has subtracted
+ * the local cpu from the mask. This function is called only if there
+ * are bits set in the mask. (e.g. flush_tlb_page())
+ *
+ * The cpumaskp is converted into a nodemask of the nodes containing
+ * the cpus.
+ *
+ * Returns 1 if all remote flushing was done via the BAU (the mask has
+ * been emptied), 0 if the caller should fall back to IPI-style flushes
+ * for the cpus still set in the mask.
+ */
+int
+uv_flush_tlb_others(cpumask_t *cpumaskp, struct mm_struct *mm, unsigned long va)
+{
+	int i;
+	int blade;
+	int cpu;
+	int bit;
+	int right_shift;
+	int this_blade;
+	int exams = 0;
+	int tries = 0;
+	long source_timeouts = 0;
+	long destination_timeouts = 0;
+	unsigned long index;
+	unsigned long mmr_offset;
+	unsigned long descriptor_status;
+	struct bau_activation_descriptor *bau_desc;
+	ktime_t time1, time2;
+
+	cpu = uv_blade_processor_id();
+	this_blade = uv_numa_blade_id();
+	bau_desc = __get_cpu_var(bau_control).descriptor_base;
+	bau_desc += (UV_ITEMS_PER_DESCRIPTOR * cpu);
+
+	bau_nodes_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
+
+	i = 0;
+	for_each_cpu_mask(bit, *cpumaskp) {
+		blade = uv_cpu_to_blade_id(bit);
+		if (blade > (UV_DISTRIBUTION_SIZE - 1))
+			BUG();
+		if (blade == this_blade)
+			continue;
+		bau_node_set(blade, &bau_desc->distribution);
+		/*
+		 * leave the bits for the remote cpus in the mask until
+		 * success; on failure we fall back to the IPI method
+		 */
+		i++;
+	}
+	if (i == 0)
+		goto none_to_flush;
+	__get_cpu_var(ptcstats).requestor++;
+	__get_cpu_var(ptcstats).ntargeted += i;
+
+	bau_desc->payload.address = va;
+	bau_desc->payload.sending_cpu = smp_processor_id();
+
+	if (cpu < UV_CPUS_PER_ACT_STATUS) {
+		mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
+		right_shift = cpu * UV_ACT_STATUS_SIZE;
+	} else {
+		mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
+		right_shift =
+		    ((cpu - UV_CPUS_PER_ACT_STATUS) * UV_ACT_STATUS_SIZE);
+	}
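+	/*
+	 * Example: statuses are 2 bits wide and 32 fit per register, so
+	 * cpu 35 polls bits 7:6 of UVH_LB_BAU_SB_ACTIVATION_STATUS_1
+	 * ((35 - 32) * UV_ACT_STATUS_SIZE == 6).
+	 */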
+	time1 = ktime_get();
+
+retry:
+	tries++;
+	index = ((unsigned long)
+		 1 << UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT) | cpu;
+	uv_write_local_mmr(UVH_LB_BAU_SB_ACTIVATION_CONTROL, index);
+
+	while ((descriptor_status = (((unsigned long)
+		uv_read_local_mmr(mmr_offset) >>
+		right_shift) & UV_ACT_STATUS_MASK)) !=
+		DESC_STATUS_IDLE) {
+		if (descriptor_status == DESC_STATUS_SOURCE_TIMEOUT) {
+			source_timeouts++;
+			if (source_timeouts > SOURCE_TIMEOUT_LIMIT)
+				source_timeouts = 0;
+			__get_cpu_var(ptcstats).s_retry++;
+			goto retry;
+		}
+		/* spin here looking for progress at the destinations */
+		if (descriptor_status == DESC_STATUS_DESTINATION_TIMEOUT) {
+			destination_timeouts++;
+			if (destination_timeouts > DESTINATION_TIMEOUT_LIMIT) {
+				/* returns the number of cpus not responding */
+				if (uv_examine_destinations
+				    (&bau_desc->distribution) == 0) {
+					__get_cpu_var(ptcstats).d_retry++;
+					goto retry;
+				}
+				exams++;
+				if (exams >= uv_bau_retry_limit) {
+					printk(KERN_DEBUG
+					       "uv_flush_tlb_others: giving up on cpu %d\n",
+					       smp_processor_id());
+					goto unsuccessful;
+				}
+				/* delays can hang up the simulator
+				   udelay(1000);
+				 */
+				destination_timeouts = 0;
+			}
+		}
+	}
+	if (tries > 1)
+		__get_cpu_var(ptcstats).retriesok++;
+	/*
+	 * on success, clear the remote cpus from the mask so we don't
+	 * use the IPI method of shootdown on them
+	 */
+	for_each_cpu_mask(bit, *cpumaskp) {
+		blade = uv_cpu_to_blade_id(bit);
+		if (blade == this_blade)
+			continue;
+		cpu_clear(bit, *cpumaskp);
+	}
+
+unsuccessful:
+	time2 = ktime_get();
+	__get_cpu_var(ptcstats).sflush_ns += (time2.tv64 - time1.tv64);
+
+none_to_flush:
+	if (cpus_empty(*cpumaskp))
+		return 1;
+
+	/*
+	 * Cause the caller to do an IPI-style TLB shootdown on
+	 * the cpus still in the mask
+	 */
+	__get_cpu_var(ptcstats).ptc_i++;
+	return 0;
+}
+
+/*
+ * The BAU message interrupt comes here. (registered by set_intr_gate)
+ * See entry_64.S
+ *
+ * We received a broadcast assist message.
+ *
+ * Interrupts may have been disabled; this interrupt could represent
+ * the receipt of several messages.
+ *
+ * All cores/threads on this node get this interrupt.
+ * The last one to see it does the s/w ack.
+ * (the resource will not be freed until noninterruptible cpus see this
+ *  interrupt; hardware will time out the s/w ack and reply ERROR)
+ */
+void
+uv_bau_message_interrupt(struct pt_regs *regs)
+{
+	struct bau_payload_queue_entry *pqp;
+	struct bau_payload_queue_entry *msg;
+	struct pt_regs *old_regs = set_irq_regs(regs);
+	ktime_t time1, time2;
+	int msg_slot;
+	int sw_ack_slot;
+	int fw;
+	int count = 0;
+	unsigned long local_pnode;
+
+	ack_APIC_irq();
+	exit_idle();
+	irq_enter();
+
+	time1 = ktime_get();
+
+	local_pnode = uv_blade_to_pnode(uv_numa_blade_id());
+
+	pqp = __get_cpu_var(bau_control).va_queue_first;
+	msg = __get_cpu_var(bau_control).bau_msg_head;
+	while (msg->sw_ack_vector) {
+		count++;
+		fw = msg->sw_ack_vector;
+		msg_slot = msg - pqp;
+		sw_ack_slot = ffs(fw) - 1;
+
+		uv_bau_process_message(msg, msg_slot, sw_ack_slot);
+
+		msg++;
+		if (msg > __get_cpu_var(bau_control).va_queue_last)
+			msg = __get_cpu_var(bau_control).va_queue_first;
+		__get_cpu_var(bau_control).bau_msg_head = msg;
+	}
+	if (!count)
+		__get_cpu_var(ptcstats).nomsg++;
+	else if (count > 1)
+		__get_cpu_var(ptcstats).multmsg++;
+
+	time2 = ktime_get();
+	__get_cpu_var(ptcstats).dflush_ns += (time2.tv64 - time1.tv64);
+
+	irq_exit();
+	set_irq_regs(old_regs);
+	return;
+}
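+
+/*
+ * The scan above treats the payload queue as a circular buffer:
+ * bau_msg_head wraps from va_queue_last back to va_queue_first, and the
+ * scan stops at the first entry whose sw_ack_vector is still clear.
+ */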
+
+/*
+ * Visit each online blade once (a cpu-to-blade walk); note that the MMR
+ * writes that would actually arm the BAU timeouts are not made here yet.
+ */
+static void
+uv_enable_timeouts(void)
+{
+	int i;
+	int blade;
+	int last_blade;
+	int pnode;
+	int cur_cpu = 0;
+	unsigned long apicid;
+
+	/* better if we had each_online_blade */
+	last_blade = -1;
+	for_each_online_node(i) {
+		blade = uv_node_to_blade_id(i);
+		if (blade == last_blade)
+			continue;
+		last_blade = blade;
+		apicid = per_cpu(x86_cpu_to_apicid, cur_cpu);
+		pnode = uv_blade_to_pnode(blade);
+		cur_cpu += uv_blade_nr_possible_cpus(i);
+	}
+	return;
+}
+
+static void *
+uv_ptc_seq_start(struct seq_file *file, loff_t *offset)
+{
+	if (*offset < num_possible_cpus())
+		return offset;
+	return NULL;
+}
+
+static void *
+uv_ptc_seq_next(struct seq_file *file, void *data, loff_t *offset)
+{
+	(*offset)++;
+	if (*offset < num_possible_cpus())
+		return offset;
+	return NULL;
+}
+
+static void
+uv_ptc_seq_stop(struct seq_file *file, void *data)
+{
+}
+
+/*
+ * Display the statistics through /proc
+ * data points to the cpu number
+ */
+static int
+uv_ptc_seq_show(struct seq_file *file, void *data)
+{
+	struct ptc_stats *stat;
+	int cpu;
+
+	cpu = *(loff_t *)data;
+
+	if (!cpu) {
+		seq_printf(file,
+		"# cpu requestor requestee one all sretry dretry ptc_i ");
+		seq_printf(file,
+		"sw_ack sflush_us dflush_us sok dnomsg dmult starget\n");
+	}
+	if (cpu < num_possible_cpus() && cpu_online(cpu)) {
+		stat = &per_cpu(ptcstats, cpu);
+		seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld ",
+			   cpu, stat->requestor,
+			   stat->requestee, stat->onetlb, stat->alltlb,
+			   stat->s_retry, stat->d_retry, stat->ptc_i);
+		seq_printf(file, "%lx %ld %ld %ld %ld %ld %ld\n",
+			   uv_read_global_mmr64(uv_blade_to_pnode
+					(uv_cpu_to_blade_id(cpu)),
+					UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE),
+			   stat->sflush_ns / 1000, stat->dflush_ns / 1000,
+			   stat->retriesok, stat->nomsg,
+			   stat->multmsg, stat->ntargeted);
+	}
+
+	return 0;
+}
+
+/*
+ * Handle a write to /proc/sgi_uv/ptc_statistics:
+ *  0: display meaning of the statistics
+ * >0: retry limit
+ */
+static ssize_t
+uv_ptc_proc_write(struct file *file, const char __user *user,
+		  size_t count, loff_t *data)
+{
+	unsigned long newmode;
+	char optstr[64];
+
+	if (count == 0 || count > sizeof(optstr))
+		return -EINVAL;
+	if (copy_from_user(optstr, user, count))
+		return -EFAULT;
+	optstr[count - 1] = '\0';
+	if (strict_strtoul(optstr, 10, &newmode) < 0) {
+		printk(KERN_DEBUG "%s is invalid\n", optstr);
+		return -EINVAL;
+	}
+
+	if (newmode == 0) {
+		printk(KERN_DEBUG "# cpu: cpu number\n");
+		printk(KERN_DEBUG
+		       "requestor: times this cpu was the flush requestor\n");
+		printk(KERN_DEBUG
+		       "requestee: times this cpu was requested to flush its TLBs\n");
+		printk(KERN_DEBUG
+		       "one: times requested to flush a single address\n");
+		printk(KERN_DEBUG
+		       "all: times requested to flush all TLBs\n");
+		printk(KERN_DEBUG
+		       "sretry: number of retries of source-side timeouts\n");
+		printk(KERN_DEBUG
+		       "dretry: number of retries of destination-side timeouts\n");
+		printk(KERN_DEBUG
+		       "ptc_i: times UV fell through to IPI-style flushes\n");
+		printk(KERN_DEBUG
+		       "sw_ack: image of UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE\n");
+		printk(KERN_DEBUG
+		       "sflush_us: microseconds spent in uv_flush_tlb_others()\n");
+		printk(KERN_DEBUG
+		       "dflush_us: microseconds spent handling flush requests\n");
+		printk(KERN_DEBUG "sok: successes on retry\n");
+		printk(KERN_DEBUG "dnomsg: interrupts with no message\n");
+		printk(KERN_DEBUG
+		       "dmult: interrupts with multiple messages\n");
+		printk(KERN_DEBUG "starget: nodes targeted\n");
+	} else {
+		uv_bau_retry_limit = newmode;
+		printk(KERN_DEBUG "timeout retry limit:%d\n",
+		       uv_bau_retry_limit);
+	}
+
+	return count;
+}
+
+static const struct seq_operations uv_ptc_seq_ops = {
+	.start	= uv_ptc_seq_start,
+	.next	= uv_ptc_seq_next,
+	.stop	= uv_ptc_seq_stop,
+	.show	= uv_ptc_seq_show
+};
+
+static int
+uv_ptc_proc_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &uv_ptc_seq_ops);
+}
+
+static const struct file_operations proc_uv_ptc_operations = {
+	.open		= uv_ptc_proc_open,
+	.read		= seq_read,
+	.write		= uv_ptc_proc_write,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
+static struct proc_dir_entry *proc_uv_ptc;
+
+static int __init
+uv_ptc_init(void)
+{
+	static struct proc_dir_entry *sgi_proc_dir;
+
+	sgi_proc_dir = NULL;
+
+	if (!is_uv_system())
+		return 0;
+
+	sgi_proc_dir = proc_mkdir("sgi_uv", NULL);
+	if (!sgi_proc_dir)
+		return -EINVAL;
+
+	proc_uv_ptc = create_proc_entry(UV_PTC_BASENAME, 0444, NULL);
+	if (!proc_uv_ptc) {
+		printk(KERN_ERR "unable to create %s proc entry\n",
+		       UV_PTC_BASENAME);
+		return -EINVAL;
+	}
+	proc_uv_ptc->proc_fops = &proc_uv_ptc_operations;
+	return 0;
+}
+
+static void __exit
+uv_ptc_exit(void)
+{
+	remove_proc_entry(UV_PTC_BASENAME, NULL);
+}
+
+module_init(uv_ptc_init);
+module_exit(uv_ptc_exit);
+
+/*
+ * Initialization of BAU-related structures
+ */
+int __init
+uv_bau_init(void)
+{
+	int i;
+	int j;
+	int blade;
+	int nblades;
+	int *ip;
+	int pnode;
+	int last_blade;
+	int cur_cpu = 0;
+	unsigned long pa;
+	unsigned long n;
+	unsigned long m;
+	unsigned long mmr_image;
+	unsigned long apicid;
+	char *cp;
+	struct bau_control *bau_tablesp;
+	struct bau_activation_descriptor *adp, *ad2;
+	struct bau_payload_queue_entry *pqp;
+	struct bau_msg_status *msp;
+	struct bau_control *bcp;
+
+	if (!is_uv_system())
+		return 0;
+
+	uv_bau_retry_limit = 1;
+
+	if ((sizeof(struct bau_local_cpumask) * BITSPERBYTE) <
+	    MAX_CPUS_PER_NODE) {
+		printk(KERN_ERR
+		       "uv_bau_init: bau_local_cpumask.bits too small\n");
+		BUG();
+	}
+
+	uv_nshift = uv_hub_info->n_val;
+	uv_mmask = ((unsigned long)1 << uv_hub_info->n_val) - 1;
+	nblades = 0;
+	last_blade = -1;
+	for_each_online_node(i) {
+		blade = uv_node_to_blade_id(i);
+		if (blade == last_blade)
+			continue;
+		last_blade = blade;
+		nblades++;
+	}
+
+	uv_bau_table_bases = (struct bau_control **)
+	    kmalloc(nblades * sizeof(struct bau_control *), GFP_KERNEL);
+	if (!uv_bau_table_bases)
+		BUG();
+
+	/* better if we had each_online_blade */
+	last_blade = -1;
+	for_each_online_node(i) {
+		blade = uv_node_to_blade_id(i);
+		if (blade == last_blade)
+			continue;
+		last_blade = blade;
+
+		bau_tablesp =
+		    kmalloc_node(sizeof(struct bau_control), GFP_KERNEL, i);
+		if (!bau_tablesp)
+			BUG();
+
+		bau_tablesp->msg_statuses =
+		    kmalloc_node(sizeof(struct bau_msg_status) *
+				 DESTINATION_PAYLOAD_QUEUE_SIZE, GFP_KERNEL, i);
+		if (!bau_tablesp->msg_statuses)
+			BUG();
+		for (j = 0, msp = bau_tablesp->msg_statuses;
+		     j < DESTINATION_PAYLOAD_QUEUE_SIZE; j++, msp++) {
+			bau_cpubits_clear(&msp->seen_by, (int)
+					  uv_blade_nr_possible_cpus(blade));
+		}
+
+		bau_tablesp->watching =
+		    kmalloc_node(sizeof(int) * DESTINATION_NUM_RESOURCES,
+				 GFP_KERNEL, i);
+		if (!bau_tablesp->watching)
+			BUG();
+		for (j = 0, ip = bau_tablesp->watching;
+		     j < DESTINATION_NUM_RESOURCES; j++, ip++) {
+			*ip = 0;
+		}
+
+		uv_bau_table_bases[i] = bau_tablesp;
+
+		pnode = uv_blade_to_pnode(blade);
+
+		if (sizeof(struct bau_activation_descriptor) != 64)
+			BUG();
+
+		adp = (struct bau_activation_descriptor *)
+		    kmalloc_node(16384, GFP_KERNEL, i);
+		if (!adp)
+			BUG();
+		if ((unsigned long)adp & 0xfff)
+			BUG();
+		pa = __pa((unsigned long)adp);
+		n = pa >> uv_nshift;
+		m = pa & uv_mmask;
+
+		mmr_image = uv_read_global_mmr64(pnode,
+					 UVH_LB_BAU_SB_DESCRIPTOR_BASE);
+		if (mmr_image)
+			uv_write_global_mmr64(pnode, (unsigned long)
+					      UVH_LB_BAU_SB_DESCRIPTOR_BASE,
+					      (n << UV_DESC_BASE_PNODE_SHIFT |
+					       m));
+		for (j = 0, ad2 = adp; j < UV_ACTIVATION_DESCRIPTOR_SIZE;
+		     j++, ad2++) {
+			memset(ad2, 0,
+			       sizeof(struct bau_activation_descriptor));
+			ad2->header.sw_ack_flag = 1;
+			ad2->header.base_dest_nodeid =
+			    uv_blade_to_pnode(uv_cpu_to_blade_id(0));
+			ad2->header.command = UV_NET_ENDPOINT_INTD;
+			ad2->header.int_both = 1;
+			/*
+			 * all others need to be set to zero:
+			 *   fairness chaining multilevel count replied_to
+			 */
+		}
+
+		pqp = (struct bau_payload_queue_entry *)
+		    kmalloc_node((DESTINATION_PAYLOAD_QUEUE_SIZE + 1) *
+				 sizeof(struct bau_payload_queue_entry),
+				 GFP_KERNEL, i);
+		if (!pqp)
+			BUG();
+		if (sizeof(struct bau_payload_queue_entry) != 32)
+			BUG();
+		if (offsetof(struct bau_payload_queue_entry, sw_ack_vector)
+		    != 15)
+			BUG();
+
+		/* align the queue start on a 32-byte (entry size) boundary */
+		cp = (char *)pqp + 31;
+		pqp = (struct bau_payload_queue_entry *)
+		    (((unsigned long)cp >> 5) << 5);
+		bau_tablesp->va_queue_first = pqp;
+		uv_write_global_mmr64(pnode,
+				      UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST,
+				      ((unsigned long)pnode <<
+				       UV_PAYLOADQ_PNODE_SHIFT) |
+				      uv_physnodeaddr(pqp));
+		uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL,
+				      uv_physnodeaddr(pqp));
+		bau_tablesp->va_queue_last =
+		    pqp + (DESTINATION_PAYLOAD_QUEUE_SIZE - 1);
+		uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST,
+				      (unsigned long)
+				      uv_physnodeaddr(bau_tablesp->
+						      va_queue_last));
+		memset(pqp, 0, sizeof(struct bau_payload_queue_entry) *
+		       DESTINATION_PAYLOAD_QUEUE_SIZE);
+
+		/*
+		 * this initialization can't be in firmware because the
+		 * messaging IRQ will be determined by the OS
+		 */
+		apicid = per_cpu(x86_cpu_to_apicid, cur_cpu);
+		pa = uv_read_global_mmr64(pnode, UVH_BAU_DATA_CONFIG);
+		if ((pa & 0xff) != UV_BAU_MESSAGE) {
+			uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG,
+					      ((apicid << 32) |
+					       UV_BAU_MESSAGE));
+		}
+
+		for (j = cur_cpu; j < (cur_cpu + uv_blade_nr_possible_cpus(i));
+		     j++) {
+			bcp = (struct bau_control *)&per_cpu(bau_control, j);
+			bcp->bau_msg_head = bau_tablesp->va_queue_first;
+			bcp->va_queue_first = bau_tablesp->va_queue_first;
+			bcp->va_queue_last = bau_tablesp->va_queue_last;
+			bcp->watching = bau_tablesp->watching;
+			bcp->msg_statuses = bau_tablesp->msg_statuses;
+			bcp->descriptor_base = adp;
+		}
+		cur_cpu += uv_blade_nr_possible_cpus(i);
+	}
+
+	set_intr_gate(UV_BAU_MESSAGE, uv_bau_message_intr1);
+
+	uv_enable_timeouts();
+
+	return 0;
+}
+
+__initcall(uv_bau_init);
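
Usage note: the statistics interface created by uv_ptc_init() above lives at
/proc/sgi_uv/ptc_statistics. A minimal userspace sketch (a hypothetical test
program; assumes a UV system with this patch applied, and root for the write
since the entry is created with mode 0444):

	#include <stdio.h>

	int main(void)
	{
		char line[256];
		FILE *f = fopen("/proc/sgi_uv/ptc_statistics", "r");

		if (!f)
			return 1;
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);	/* header, then one row per cpu */
		fclose(f);

		f = fopen("/proc/sgi_uv/ptc_statistics", "w");
		if (f) {
			fputs("10\n", f);	/* >0 sets the timeout retry limit */
			fclose(f);		/* 0 would printk the column legend */
		}
		return 0;
	}
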
diff --git a/include/asm-x86/uv/uv_bau.h b/include/asm-x86/uv/uv_bau.h
new file mode 100644
index 000000000000..f125f86c89ac
--- /dev/null
+++ b/include/asm-x86/uv/uv_bau.h
@@ -0,0 +1,331 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * SGI UV Broadcast Assist Unit definitions
+ *
+ * Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#ifndef __ASM_X86_UV_BAU__
+#define __ASM_X86_UV_BAU__
+
+#include <linux/bitmap.h>
+#define BITSPERBYTE 8
+
+/* Broadcast Assist Unit messaging structures */
+
+/*
+ * Selective Broadcast activations are induced by software action
+ * specifying a particular 8-descriptor "set" via a 6-bit index written
+ * to an MMR.
+ * Thus there are 64 unique 512-byte sets of SB descriptors - one set for
+ * each 6-bit index value. These descriptor sets are mapped in sequence
+ * starting with set 0 located at the address specified in the
+ * BAU_SB_DESCRIPTOR_BASE register, set 1 is located at BASE + 512,
+ * set 2 is at BASE + 2*512, set 3 at BASE + 3*512, and so on.
+ *
+ * We will use 31 sets, one for sending BAU messages from each of the 32
+ * cpus on the node.
+ *
+ * TLB shootdown will use the first of the 8 descriptors of each set.
+ * Each of the descriptors is 64 bytes in size (8*64 = 512 bytes in a set).
+ */
+
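+/*
+ * A sketch of the arithmetic above (illustrative helper, not used
+ * elsewhere in this patch): the first descriptor of cpu N's set lies
+ * one 512-byte set per cpu past the base.
+ */
+static inline unsigned long uv_sb_descriptor_pa(unsigned long base_pa, int cpu)
+{
+	return base_pa + cpu * 8 * 64;	/* 8 descriptors of 64 bytes each */
+}
+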
+#define UV_ITEMS_PER_DESCRIPTOR		8
+#define UV_CPUS_PER_ACT_STATUS		32
+#define UV_ACT_STATUS_MASK		0x3
+#define UV_ACT_STATUS_SIZE		2
+#define UV_ACTIVATION_DESCRIPTOR_SIZE	32
+#define UV_DISTRIBUTION_SIZE		256
+#define UV_SW_ACK_NPENDING		8
+#define UV_BAU_MESSAGE			200
+/*
+ * Messaging irq; see irq_64.h and include/asm-x86/hw_irq_64.h.
+ * To be dynamically allocated in the future.
+ */
+#define UV_NET_ENDPOINT_INTD		0x38
+#define UV_DESC_BASE_PNODE_SHIFT	49 /* position of pnode (nasid>>1) in MMR */
+#define UV_PAYLOADQ_PNODE_SHIFT		49
+
+#define UV_PTC_BASENAME			"sgi_uv/ptc_statistics"
+#define uv_physnodeaddr(x)		((__pa((unsigned long)(x)) & uv_mmask))
+
+/* bits in UVH_LB_BAU_SB_ACTIVATION_STATUS_0/1 */
+#define DESC_STATUS_IDLE		0
+#define DESC_STATUS_ACTIVE		1
+#define DESC_STATUS_DESTINATION_TIMEOUT	2
+#define DESC_STATUS_SOURCE_TIMEOUT	3
+
+/* source side thresholds at which message retries print a warning */
+#define SOURCE_TIMEOUT_LIMIT		20
+#define DESTINATION_TIMEOUT_LIMIT	20
+
+/* number of entries in the destination side payload queue */
+#define DESTINATION_PAYLOAD_QUEUE_SIZE	17
+/* number of destination side software ack resources */
+#define DESTINATION_NUM_RESOURCES	8
+#define MAX_CPUS_PER_NODE		32
+
+/*
+ * Distribution: 32 bytes (256 bits) (bytes 0-0x1f of descriptor)
+ * If the 'multilevel' flag in the header portion of the descriptor
+ * has been set to 0, then endpoint multi-unicast mode is selected.
+ * The distribution specification (32 bytes) is interpreted as a 256-bit
+ * distribution vector. Adjacent bits correspond to consecutive even numbered
+ * nodeIDs. The result of adding the index of a given bit to the 15-bit
+ * 'base_dest_nodeid' field of the header corresponds to the
+ * destination nodeID associated with that specified bit.
+ */
+struct bau_target_nodemask {
+	unsigned long bits[BITS_TO_LONGS(256)];
+};
+
+/*
+ * mask of cpus on a node
+ * (during initialization we need to check that unsigned long has
+ *  enough bits for the maximum number of cpus per node)
+ */
+struct bau_local_cpumask {
+	unsigned long bits;
+};
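+
+/*
+ * The width check mentioned above is made in uv_bau_init():
+ * sizeof(struct bau_local_cpumask) * BITSPERBYTE must be at least
+ * MAX_CPUS_PER_NODE.
+ */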
+
+/*
+ * Payload: 16 bytes (128 bits) (bytes 0x20-0x2f of descriptor)
+ * only 12 bytes (96 bits) of the payload area are usable.
+ * An additional 3 bytes (bits 27:4) of the header address are carried
+ * to the next bytes of the destination payload queue.
+ * And an additional 2 bytes of the header Suppl_A field are also
+ * carried to the destination payload queue.
+ * But the first byte of the Suppl_A becomes bits 127:120 (the 16th byte)
+ * of the destination payload queue, which is written by the hardware
+ * with the s/w ack resource bit vector.
+ * [ effective message contents (16 bytes (128 bits) maximum), not counting
+ *   the s/w ack bit vector ]
+ */
+
+/* The payload is software-defined for INTD transactions */
+struct bau_msg_payload {
+	unsigned long address;		/* signifies a page or all TLBs
+					   of the cpu */
+					/* 64 bits */
+	unsigned short sending_cpu;	/* filled in by sender */
+					/* 16 bits */
+	unsigned short acknowledge_count; /* filled in by destination */
+					/* 16 bits */
+	unsigned int reserved1:32;	/* not usable */
+};
+
+/*
+ * Message header: 16 bytes (128 bits) (bytes 0x30-0x3f of descriptor)
+ * see table 4.2.3.0.1 in broadcast_assist spec.
+ */
+struct bau_msg_header {
+	int dest_subnodeid:6;	/* must be zero */
+				/* bits 5:0 */
+	int base_dest_nodeid:15; /* nasid>>1 (pnode) of first bit in node_map */
+				/* bits 20:6 */
+	int command:8;		/* message type */
+				/* bits 28:21 */
+				/* 0x38: SN3net EndPoint Message */
+	int rsvd_1:3;		/* must be zero */
+				/* bits 31:29 */
+				/* int will align on 32 bits */
+	int rsvd_2:9;		/* must be zero */
+				/* bits 40:32 */
+				/* Suppl_A is bits 56:41 */
+	int payload_2a:8;	/* becomes byte 16 of msg */
+				/* bits 48:41 */	/* not currently using */
+	int payload_2b:8;	/* becomes byte 17 of msg */
+				/* bits 56:49 */	/* not currently using */
+				/* Address field (96:57) is never used as an
+				   address (these are address bits 42:3) */
+	int rsvd_3:1;		/* must be zero */
+				/* bit 57 */
+				/* address bits 27:4 are payload */
+				/* these 24 bits become bytes 12-14 of msg */
+	int replied_to:1;	/* sent as 0 by the source to byte 12 */
+				/* bit 58 */
+
+	int payload_1a:5;	/* not currently used */
+				/* bits 63:59 */
+	int payload_1b:8;	/* not currently used */
+				/* bits 71:64 */
+	int payload_1c:8;	/* not currently used */
+				/* bits 79:72 */
+	int payload_1d:2;	/* not currently used */
+				/* bits 81:80 */
+
+	int rsvd_4:7;		/* must be zero */
+				/* bits 88:82 */
+	int sw_ack_flag:1;	/* software acknowledge flag */
+				/* bit 89 */
+				/* INTD transactions at the destination are
+				   to wait for software acknowledge */
+	int rsvd_5:6;		/* must be zero */
+				/* bits 95:90 */
+	int rsvd_6:5;		/* must be zero */
+				/* bits 100:96 */
+	int int_both:1;		/* if 1, interrupt both sockets on the blade */
+				/* bit 101 */
+	int fairness:3;		/* usually zero */
+				/* bits 104:102 */
+	int multilevel:1;	/* multi-level multicast format */
+				/* bit 105 */
+				/* 0 for TLB: endpoint multi-unicast messages */
+	int chaining:1;		/* next descriptor is part of this activation */
+				/* bit 106 */
+	int rsvd_7:21;		/* must be zero */
+				/* bits 127:107 */
+};
+
+/*
+ * The format of the message to send, plus all accompanying control.
+ * Should be 64 bytes.
+ */
+struct bau_activation_descriptor {
+	struct bau_target_nodemask distribution;
+	/* message template, consisting of header and payload: */
+	struct bau_msg_header header;
+	struct bau_msg_payload payload;
+};
+
+/*
+ *   -payload--    ---------header------
+ *   bytes 0-11    bits 41-56  bits 58-81
+ *       A            B (2)       C (3)
+ *
+ *            A/B/C are moved to:
+ *       A            C          B
+ *   bytes 0-11  bytes 12-14  bytes 16-17  (byte 15 filled in by hw as vector)
+ *   ------------payload queue-----------
+ */
+
+/*
+ * The payload queue on the destination side is an array of these.
+ * With BAU_MISC_CONTROL set for software acknowledge mode, the messages
+ * are 32 bytes (2 micropackets) (256 bits) in length, but contain only 17
+ * bytes of usable data, including the sw ack vector in byte 15 (bits 127:120)
+ * (12 bytes come from bau_msg_payload, 3 from payload_1, 2 from
+ *  sw_ack_vector and payload_2)
+ * "Enabling Software Acknowledgment mode (see Section 4.3.3 Software
+ *  Acknowledge Processing) also selects 32 byte (17 bytes usable) payload
+ *  operation."
+ */
+struct bau_payload_queue_entry {
+	unsigned long address;		/* signifies a page or all TLBs
+					   of the cpu */
+					/* 64 bits, bytes 0-7 */
+
+	unsigned short sending_cpu;	/* cpu that sent the message */
+					/* 16 bits, bytes 8-9 */
+
+	unsigned short acknowledge_count; /* filled in by destination */
+					/* 16 bits, bytes 10-11 */
+
+	unsigned short replied_to:1;	/* sent as 0 by the source */
+					/* 1 bit */
+	unsigned short unused1:7;	/* not currently using */
+					/* 7 bits: byte 12 */
+
+	unsigned char unused2[2];	/* not currently using */
+					/* bytes 13-14 */
+
+	unsigned char sw_ack_vector;	/* filled in by the hardware */
+					/* byte 15 (bits 127:120) */
+
+	unsigned char unused4[3];	/* not currently using */
+					/* bytes 17-19 */
+
+	int number_of_cpus;		/* filled in at destination */
+					/* 32 bits, bytes 20-23 (aligned) */
+
+	unsigned char unused5[8];	/* not using */
+					/* bytes 24-31 */
+};
+
+/* one for every slot in the destination payload queue */
+struct bau_msg_status {
+	struct bau_local_cpumask seen_by;	/* map of cpus */
+};
+
+/* one for every slot in the destination software ack resources */
+struct bau_sw_ack_status {
+	struct bau_payload_queue_entry *msg;	/* associated message */
+	int watcher;				/* cpu monitoring, or -1 */
+};
+
+/* one on every node and per-cpu; to locate the software tables */
+struct bau_control {
+	struct bau_activation_descriptor *descriptor_base;
+	struct bau_payload_queue_entry *bau_msg_head;
+	struct bau_payload_queue_entry *va_queue_first;
+	struct bau_payload_queue_entry *va_queue_last;
+	struct bau_msg_status *msg_statuses;
+	int *watching; /* pointer to array */
+};
+
+/*
+ * This structure is allocated per_cpu for UV TLB shootdown statistics.
+ */
+struct ptc_stats {
+	unsigned long ptc_i;	/* number of IPI-style flushes */
+	unsigned long requestor; /* number of nodes this cpu sent to */
+	unsigned long requestee; /* times cpu was remotely requested */
+	unsigned long alltlb;	/* times all tlbs on this cpu were flushed */
+	unsigned long onetlb;	/* times just one tlb on this cpu was flushed */
+	unsigned long s_retry;	/* retries on source side timeouts */
+	unsigned long d_retry;	/* retries on destination side timeouts */
+	unsigned long sflush_ns; /* nanoseconds spent in uv_flush_tlb_others */
+	unsigned long dflush_ns; /* nanoseconds spent destination side */
+	unsigned long retriesok; /* successes on retries */
+	unsigned long nomsg;	/* interrupts with no message */
+	unsigned long multmsg;	/* interrupts with multiple messages */
+	unsigned long ntargeted; /* nodes targeted */
+};
+
+static inline int bau_node_isset(int node, struct bau_target_nodemask *dstp)
+{
+	return constant_test_bit(node, &dstp->bits[0]);
+}
+
+static inline void bau_node_set(int node, struct bau_target_nodemask *dstp)
+{
+	__set_bit(node, &dstp->bits[0]);
+}
+
+static inline void bau_nodes_clear(struct bau_target_nodemask *dstp, int nbits)
+{
+	bitmap_zero(&dstp->bits[0], nbits);
+}
+
+static inline void bau_cpubits_clear(struct bau_local_cpumask *dstp, int nbits)
+{
+	bitmap_zero(&dstp->bits, nbits);
+}
+
+/*
+ * atomic increment of a short integer
+ * (rather than using the __sync_add_and_fetch() intrinsic)
+ *
+ * returns the new value of the variable
+ * (note: the return is a fresh load of *v, not the xadd result itself)
+ */
+static inline short int atomic_inc_short(short int *v)
+{
+	asm volatile("movw $1, %%cx\n"
+		     "lock ; xaddw %%cx, %0\n"
+		     : "+m" (*v)		/* outputs */
+		     : : "%cx", "memory");	/* inputs : clobbereds */
+	return *v;
+}
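+
+/*
+ * Usage sketch (see uv_bau_process_message()): every destination cpu
+ * increments msg->acknowledge_count, and the cpu that brings it up to
+ * msg->number_of_cpus replies to the sender.
+ */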
+
+/*
+ * atomically OR an unsigned long into one in memory
+ * (rather than using the __sync_or_and_fetch() intrinsic)
+ */
+static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
+{
+	/* OR v2 directly into the long at *v1 */
+	asm volatile("lock ; orq %1, %0"
+		     : "+m" (*v1)		/* output */
+		     : "r" (v2)			/* input */
+		     : "memory");		/* clobbered */
+}
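+
+/*
+ * Usage sketch (see uv_bau_process_message()): set this cpu's bit in a
+ * bau_msg_status seen_by map without taking a lock:
+ *
+ *	atomic_or_long(&msp->seen_by.bits, 1UL << cpu);
+ */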
+
+#define cpubit_isset(cpu, bau_local_cpumask) \
+	test_bit((cpu), (bau_local_cpumask).bits)
+
+int uv_flush_tlb_others(cpumask_t *, struct mm_struct *, unsigned long);
+void uv_bau_message_intr1(void);
+void uv_bau_timeout_intr1(void);
+
+#endif /* __ASM_X86_UV_BAU__ */