author     Jack Steiner <steiner@sgi.com>     2005-08-11 13:28:00 -0400
committer  Tony Luck <tony.luck@intel.com>    2005-08-17 18:32:24 -0400
commit     470ceb05d9a2b4d61c19fac615a79e56e8401565
tree       eb9e2b0b971bbe287dd4bef0b46c7307e3258bf0 /arch
parent     68b9753f47953930cb94de0223c163f289399091
[IA64-SGI] - New SN hardware support - ptc_fixes
Shub2 provides a much improved mechanism for issuing internode TLB purges.
Add code to support the newer mechanism. There is also some debug code
(disabled) that is useful for testing.

Collect statistics on the number, type & duration of TLB purges. This data
will be useful for making future improvements in the algorithms.

Signed-off-by: Jack Steiner <steiner@sgi.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
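The per-cpu counters collected by this patch are exported through the seq_file interface added in sn2_smp.c, at /proc/sgi_sn/ptc_statistics (PTC_BASENAME). As a rough sketch of how those numbers might be pulled from user space once the patch is applied — the file path and column layout come from the patch itself, while the program below is purely illustrative and not part of the change:

/*
 * Minimal user-space sketch (not part of this patch): dump the per-cpu
 * PTC statistics that sn2_ptc_seq_show() emits.  Assumes a kernel with
 * this patch applied, so /proc/sgi_sn/ptc_statistics exists.
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        FILE *f = fopen("/proc/sgi_sn/ptc_statistics", "r");
        char line[512];

        if (!f) {
                perror("/proc/sgi_sn/ptc_statistics");
                return EXIT_FAILURE;
        }
        while (fgets(line, sizeof(line), f))
                fputs(line, stdout);    /* "# ..." header lines, then one row per online cpu */
        fclose(f);
        return EXIT_SUCCESS;
}

Each data row holds the cpu number followed by ptc_l, change_rid, shub_ptc_flushes, shub_nodes_flushed, deadlocks, and the lock/shub timings converted to nanoseconds.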
Diffstat (limited to 'arch')
-rw-r--r--   arch/ia64/sn/kernel/setup.c              |   1
-rw-r--r--   arch/ia64/sn/kernel/sn2/ptc_deadlock.S   |  13
-rw-r--r--   arch/ia64/sn/kernel/sn2/sn2_smp.c        | 256
3 files changed, 243 insertions, 27 deletions
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
index ed77715393ef..6648deb778a6 100644
--- a/arch/ia64/sn/kernel/setup.c
+++ b/arch/ia64/sn/kernel/setup.c
@@ -403,6 +403,7 @@ static void __init sn_init_pdas(char **cmdline_p)
                 memset(nodepdaindr[cnode], 0, sizeof(nodepda_t));
                 memset(nodepdaindr[cnode]->phys_cpuid, -1,
                     sizeof(nodepdaindr[cnode]->phys_cpuid));
+                spin_lock_init(&nodepdaindr[cnode]->ptc_lock);
         }
 
         /*
diff --git a/arch/ia64/sn/kernel/sn2/ptc_deadlock.S b/arch/ia64/sn/kernel/sn2/ptc_deadlock.S
index 96cb71d15682..3fa95065a446 100644
--- a/arch/ia64/sn/kernel/sn2/ptc_deadlock.S
+++ b/arch/ia64/sn/kernel/sn2/ptc_deadlock.S
@@ -3,7 +3,7 @@
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2000-2005 Silicon Graphics, Inc. All rights reserved.
  */
 
 #include <asm/types.h>
@@ -11,7 +11,7 @@
 
 #define DEADLOCKBIT     SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_SHFT
 #define WRITECOUNTMASK  SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK
-#define ALIAS_OFFSET    (SH1_PIO_WRITE_STATUS_0_ALIAS-SH1_PIO_WRITE_STATUS_0)
+#define ALIAS_OFFSET    8
 
 
         .global sn2_ptc_deadlock_recovery_core
@@ -36,13 +36,15 @@ sn2_ptc_deadlock_recovery_core:
         extr.u  piowcphy=piowc,0,61;;   // Convert piowc to uncached physical address
         dep     piowcphy=-1,piowcphy,63,1
         movl    mask=WRITECOUNTMASK
+        mov     r8=r0
 
 1:
         add     scr2=ALIAS_OFFSET,piowc // Address of WRITE_STATUS alias register
-        mov     scr1=7;;        // Clear DEADLOCK, WRITE_ERROR, MULTI_WRITE_ERROR
-        st8.rel [scr2]=scr1;;
+        ;;
+        ld8.acq scr1=[scr2];;
 
 5:      ld8.acq scr1=[piowc];;  // Wait for PIOs to complete.
+        hint    @pause
         and     scr2=scr1,mask;;        // mask of writecount bits
         cmp.ne  p6,p0=zeroval,scr2
 (p6)    br.cond.sptk 5b
@@ -57,6 +59,7 @@ sn2_ptc_deadlock_recovery_core:
         st8.rel [ptc0]=data0    // Write PTC0 & wait for completion.
 
 5:      ld8.acq scr1=[piowcphy];;       // Wait for PIOs to complete.
+        hint    @pause
         and     scr2=scr1,mask;;        // mask of writecount bits
         cmp.ne  p6,p0=zeroval,scr2
 (p6)    br.cond.sptk 5b;;
@@ -67,6 +70,7 @@ sn2_ptc_deadlock_recovery_core:
 (p7)    st8.rel [ptc1]=data1;;  // Now write PTC1.
 
 5:      ld8.acq scr1=[piowcphy];;       // Wait for PIOs to complete.
+        hint    @pause
         and     scr2=scr1,mask;;        // mask of writecount bits
         cmp.ne  p6,p0=zeroval,scr2
 (p6)    br.cond.sptk 5b
@@ -77,6 +81,7 @@ sn2_ptc_deadlock_recovery_core:
         srlz.i;;
         ////////////// END PHYSICAL MODE ////////////////////
 
+(p8)    add     r8=1,r8
 (p8)    br.cond.spnt 1b;;       // Repeat if DEADLOCK occurred.
 
         br.ret.sptk     rp
diff --git a/arch/ia64/sn/kernel/sn2/sn2_smp.c b/arch/ia64/sn/kernel/sn2/sn2_smp.c
index 7af05a7ac743..0a4ee50c302f 100644
--- a/arch/ia64/sn/kernel/sn2/sn2_smp.c
+++ b/arch/ia64/sn/kernel/sn2/sn2_smp.c
@@ -5,7 +5,7 @@
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2000-2005 Silicon Graphics, Inc. All rights reserved.
  */
 
 #include <linux/init.h>
@@ -20,6 +20,8 @@
 #include <linux/module.h>
 #include <linux/bitops.h>
 #include <linux/nodemask.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
 
 #include <asm/processor.h>
 #include <asm/irq.h>
@@ -39,12 +41,120 @@
 #include <asm/sn/nodepda.h>
 #include <asm/sn/rw_mmr.h>
 
-void sn2_ptc_deadlock_recovery(volatile unsigned long *, unsigned long data0,
-        volatile unsigned long *, unsigned long data1);
+DEFINE_PER_CPU(struct ptc_stats, ptcstats);
+DECLARE_PER_CPU(struct ptc_stats, ptcstats);
 
 static __cacheline_aligned DEFINE_SPINLOCK(sn2_global_ptc_lock);
 
-static unsigned long sn2_ptc_deadlock_count;
+void sn2_ptc_deadlock_recovery(short *, short, int, volatile unsigned long *, unsigned long data0,
+        volatile unsigned long *, unsigned long data1);
+
+#ifdef DEBUG_PTC
+/*
+ * ptctest:
+ *
+ *      xyz - 3 digit hex number:
+ *      x - Force PTC purges to use shub:
+ *              0 - no force
+ *              1 - force
+ *      y - interrupt enable
+ *              0 - disable interrupts
+ *              1 - leave interrupts enabled
+ *      z - type of lock:
+ *              0 - global lock
+ *              1 - node local lock
+ *              2 - no lock
+ *
+ *      Note: on shub1, only ptctest == 0 is supported. Don't try other values!
+ */
+
+static unsigned int sn2_ptctest = 0;
+
+static int __init ptc_test(char *str)
+{
+        get_option(&str, &sn2_ptctest);
+        return 1;
+}
+__setup("ptctest=", ptc_test);
+
+static inline int ptc_lock(unsigned long *flagp)
+{
+        unsigned long opt = sn2_ptctest & 255;
+
+        switch (opt) {
+        case 0x00:
+                spin_lock_irqsave(&sn2_global_ptc_lock, *flagp);
+                break;
+        case 0x01:
+                spin_lock_irqsave(&sn_nodepda->ptc_lock, *flagp);
+                break;
+        case 0x02:
+                local_irq_save(*flagp);
+                break;
+        case 0x10:
+                spin_lock(&sn2_global_ptc_lock);
+                break;
+        case 0x11:
+                spin_lock(&sn_nodepda->ptc_lock);
+                break;
+        case 0x12:
+                break;
+        default:
+                BUG();
+        }
+        return opt;
+}
+
+static inline void ptc_unlock(unsigned long flags, int opt)
+{
+        switch (opt) {
+        case 0x00:
+                spin_unlock_irqrestore(&sn2_global_ptc_lock, flags);
+                break;
+        case 0x01:
+                spin_unlock_irqrestore(&sn_nodepda->ptc_lock, flags);
+                break;
+        case 0x02:
+                local_irq_restore(flags);
+                break;
+        case 0x10:
+                spin_unlock(&sn2_global_ptc_lock);
+                break;
+        case 0x11:
+                spin_unlock(&sn_nodepda->ptc_lock);
+                break;
+        case 0x12:
+                break;
+        default:
+                BUG();
+        }
+}
+#else
+
+#define sn2_ptctest     0
+
+static inline int ptc_lock(unsigned long *flagp)
+{
+        spin_lock_irqsave(&sn2_global_ptc_lock, *flagp);
+        return 0;
+}
+
+static inline void ptc_unlock(unsigned long flags, int opt)
+{
+        spin_unlock_irqrestore(&sn2_global_ptc_lock, flags);
+}
+#endif
+
+struct ptc_stats {
+        unsigned long ptc_l;
+        unsigned long change_rid;
+        unsigned long shub_ptc_flushes;
+        unsigned long nodes_flushed;
+        unsigned long deadlocks;
+        unsigned long lock_itc_clocks;
+        unsigned long shub_itc_clocks;
+        unsigned long shub_itc_clocks_max;
+};
 
 static inline unsigned long wait_piowc(void)
 {
@@ -89,9 +199,9 @@ void
 sn2_global_tlb_purge(unsigned long start, unsigned long end,
                      unsigned long nbits)
 {
-        int i, shub1, cnode, mynasid, cpu, lcpu = 0, nasid, flushed = 0;
+        int i, opt, shub1, cnode, mynasid, cpu, lcpu = 0, nasid, flushed = 0;
         volatile unsigned long *ptc0, *ptc1;
-        unsigned long flags = 0, data0 = 0, data1 = 0;
+        unsigned long itc, itc2, flags, data0 = 0, data1 = 0;
         struct mm_struct *mm = current->active_mm;
         short nasids[MAX_NUMNODES], nix;
         nodemask_t nodes_flushed;
@@ -114,16 +224,19 @@ sn2_global_tlb_purge(unsigned long start, unsigned long end,
                         start += (1UL << nbits);
                 } while (start < end);
                 ia64_srlz_i();
+                __get_cpu_var(ptcstats).ptc_l++;
                 preempt_enable();
                 return;
         }
 
         if (atomic_read(&mm->mm_users) == 1) {
                 flush_tlb_mm(mm);
+                __get_cpu_var(ptcstats).change_rid++;
                 preempt_enable();
                 return;
         }
 
+        itc = ia64_get_itc();
         nix = 0;
         for_each_node_mask(cnode, nodes_flushed)
                 nasids[nix++] = cnodeid_to_nasid(cnode);
@@ -148,7 +261,12 @@ sn2_global_tlb_purge(unsigned long start, unsigned long end,
 
         mynasid = get_nasid();
 
-        spin_lock_irqsave(&sn2_global_ptc_lock, flags);
+        itc = ia64_get_itc();
+        opt = ptc_lock(&flags);
+        itc2 = ia64_get_itc();
+        __get_cpu_var(ptcstats).lock_itc_clocks += itc2 - itc;
+        __get_cpu_var(ptcstats).shub_ptc_flushes++;
+        __get_cpu_var(ptcstats).nodes_flushed += nix;
 
         do {
                 if (shub1)
@@ -157,7 +275,7 @@ sn2_global_tlb_purge(unsigned long start, unsigned long end,
                         data0 = (data0 & ~SH2_PTC_ADDR_MASK) | (start & SH2_PTC_ADDR_MASK);
                 for (i = 0; i < nix; i++) {
                         nasid = nasids[i];
-                        if (unlikely(nasid == mynasid)) {
+                        if ((!(sn2_ptctest & 3)) && unlikely(nasid == mynasid)) {
                                 ia64_ptcga(start, nbits << 2);
                                 ia64_srlz_i();
                         } else {
@@ -169,18 +287,22 @@ sn2_global_tlb_purge(unsigned long start, unsigned long end,
                                 flushed = 1;
                         }
                 }
-
                 if (flushed
                     && (wait_piowc() &
-                        SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_MASK)) {
-                        sn2_ptc_deadlock_recovery(ptc0, data0, ptc1, data1);
+                        (SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_MASK))) {
+                        sn2_ptc_deadlock_recovery(nasids, nix, mynasid, ptc0, data0, ptc1, data1);
                 }
 
                 start += (1UL << nbits);
 
         } while (start < end);
 
-        spin_unlock_irqrestore(&sn2_global_ptc_lock, flags);
+        itc2 = ia64_get_itc() - itc2;
+        __get_cpu_var(ptcstats).shub_itc_clocks += itc2;
+        if (itc2 > __get_cpu_var(ptcstats).shub_itc_clocks_max)
+                __get_cpu_var(ptcstats).shub_itc_clocks_max = itc2;
+
+        ptc_unlock(flags, opt);
 
         preempt_enable();
 }
@@ -192,31 +314,29 @@ sn2_global_tlb_purge(unsigned long start, unsigned long end,
  * TLB flush transaction.  The recovery sequence is somewhat tricky & is
  * coded in assembly language.
  */
-void sn2_ptc_deadlock_recovery(volatile unsigned long *ptc0, unsigned long data0,
+void sn2_ptc_deadlock_recovery(short *nasids, short nix, int mynasid, volatile unsigned long *ptc0, unsigned long data0,
         volatile unsigned long *ptc1, unsigned long data1)
 {
         extern void sn2_ptc_deadlock_recovery_core(volatile unsigned long *, unsigned long,
                 volatile unsigned long *, unsigned long, volatile unsigned long *, unsigned long);
-        int cnode, mycnode, nasid;
-        volatile unsigned long *piows;
-        volatile unsigned long zeroval;
+        short nasid, i;
+        unsigned long *piows, zeroval;
 
-        sn2_ptc_deadlock_count++;
+        __get_cpu_var(ptcstats).deadlocks++;
 
-        piows = pda->pio_write_status_addr;
+        piows = (unsigned long *) pda->pio_write_status_addr;
         zeroval = pda->pio_write_status_val;
 
-        mycnode = numa_node_id();
-
-        for_each_online_node(cnode) {
-                if (is_headless_node(cnode) || cnode == mycnode)
+        for (i=0; i < nix; i++) {
+                nasid = nasids[i];
+                if (!(sn2_ptctest & 3) && nasid == mynasid)
                         continue;
-                nasid = cnodeid_to_nasid(cnode);
                 ptc0 = CHANGE_NASID(nasid, ptc0);
                 if (ptc1)
                         ptc1 = CHANGE_NASID(nasid, ptc1);
                 sn2_ptc_deadlock_recovery_core(ptc0, data0, ptc1, data1, piows, zeroval);
         }
+
 }
 
 /**
@@ -293,3 +413,93 @@ void sn2_send_IPI(int cpuid, int vector, int delivery_mode, int redirect)
 
         sn_send_IPI_phys(nasid, physid, vector, delivery_mode);
 }
+
+#ifdef CONFIG_PROC_FS
+
+#define PTC_BASENAME    "sgi_sn/ptc_statistics"
+
+static void *sn2_ptc_seq_start(struct seq_file *file, loff_t * offset)
+{
+        if (*offset < NR_CPUS)
+                return offset;
+        return NULL;
+}
+
+static void *sn2_ptc_seq_next(struct seq_file *file, void *data, loff_t * offset)
+{
+        (*offset)++;
+        if (*offset < NR_CPUS)
+                return offset;
+        return NULL;
+}
+
+static void sn2_ptc_seq_stop(struct seq_file *file, void *data)
+{
+}
+
+static int sn2_ptc_seq_show(struct seq_file *file, void *data)
+{
+        struct ptc_stats *stat;
+        int cpu;
+
+        cpu = *(loff_t *) data;
+
+        if (!cpu) {
+                seq_printf(file, "# ptc_l change_rid shub_ptc_flushes shub_nodes_flushed deadlocks lock_nsec shub_nsec shub_nsec_max\n");
+                seq_printf(file, "# ptctest %d\n", sn2_ptctest);
+        }
+
+        if (cpu < NR_CPUS && cpu_online(cpu)) {
+                stat = &per_cpu(ptcstats, cpu);
+                seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld\n", cpu, stat->ptc_l,
+                                stat->change_rid, stat->shub_ptc_flushes, stat->nodes_flushed,
+                                stat->deadlocks,
+                                1000 * stat->lock_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec,
+                                1000 * stat->shub_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec,
+                                1000 * stat->shub_itc_clocks_max / per_cpu(cpu_info, cpu).cyc_per_usec);
+        }
+
+        return 0;
+}
+
+static struct seq_operations sn2_ptc_seq_ops = {
+        .start = sn2_ptc_seq_start,
+        .next = sn2_ptc_seq_next,
+        .stop = sn2_ptc_seq_stop,
+        .show = sn2_ptc_seq_show
+};
+
+int sn2_ptc_proc_open(struct inode *inode, struct file *file)
+{
+        return seq_open(file, &sn2_ptc_seq_ops);
+}
+
+static struct file_operations proc_sn2_ptc_operations = {
+        .open = sn2_ptc_proc_open,
+        .read = seq_read,
+        .llseek = seq_lseek,
+        .release = seq_release,
+};
+
+static struct proc_dir_entry *proc_sn2_ptc;
+
+static int __init sn2_ptc_init(void)
+{
+        if (!(proc_sn2_ptc = create_proc_entry(PTC_BASENAME, 0444, NULL))) {
+                printk(KERN_ERR "unable to create %s proc entry", PTC_BASENAME);
+                return -EINVAL;
+        }
+        proc_sn2_ptc->proc_fops = &proc_sn2_ptc_operations;
+        spin_lock_init(&sn2_global_ptc_lock);
+        return 0;
+}
+
+static void __exit sn2_ptc_exit(void)
+{
+        remove_proc_entry(PTC_BASENAME, NULL);
+}
+
+module_init(sn2_ptc_init);
+module_exit(sn2_ptc_exit);
+#endif /* CONFIG_PROC_FS */
+
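A note on the lock_nsec/shub_nsec columns in the statistics output: sn2_ptc_seq_show() converts raw ITC clock counts to nanoseconds as 1000 * clocks / cyc_per_usec. A small worked example of that arithmetic, using a hypothetical cyc_per_usec of 1500 (a 1.5 GHz ITC) rather than the real per_cpu(cpu_info, cpu).cyc_per_usec value read at run time:

/*
 * Illustration only, not part of this patch: the same conversion
 * sn2_ptc_seq_show() performs, with made-up numbers.  300000 ITC clocks
 * at 1500 clocks/usec is 200 usec, i.e. 200000 ns.
 */
#include <stdio.h>

int main(void)
{
        unsigned long lock_itc_clocks = 300000; /* example counter value */
        unsigned long cyc_per_usec = 1500;      /* hypothetical ITC rate */
        unsigned long lock_nsec = 1000 * lock_itc_clocks / cyc_per_usec;

        printf("lock_nsec = %lu\n", lock_nsec); /* prints 200000 */
        return 0;
}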