author    Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
committer Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
commit    1da177e4c3f41524e886b7f1b8a0c1fc7321cac2
tree      0bba044c4ce775e45a88a51686b5d9f90697ea9d
tag       v2.6.12-rc2 (Linux 2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'arch/ia64/sn/kernel')
-rw-r--r--  arch/ia64/sn/kernel/Makefile                  12
-rw-r--r--  arch/ia64/sn/kernel/bte.c                    453
-rw-r--r--  arch/ia64/sn/kernel/bte_error.c              198
-rw-r--r--  arch/ia64/sn/kernel/huberror.c               201
-rw-r--r--  arch/ia64/sn/kernel/idle.c                    30
-rw-r--r--  arch/ia64/sn/kernel/io_init.c                411
-rw-r--r--  arch/ia64/sn/kernel/iomv.c                    70
-rw-r--r--  arch/ia64/sn/kernel/irq.c                    431
-rw-r--r--  arch/ia64/sn/kernel/klconflib.c              108
-rw-r--r--  arch/ia64/sn/kernel/machvec.c                 11
-rw-r--r--  arch/ia64/sn/kernel/mca.c                    135
-rw-r--r--  arch/ia64/sn/kernel/setup.c                  621
-rw-r--r--  arch/ia64/sn/kernel/sn2/Makefile              13
-rw-r--r--  arch/ia64/sn/kernel/sn2/cache.c               34
-rw-r--r--  arch/ia64/sn/kernel/sn2/io.c                 101
-rw-r--r--  arch/ia64/sn/kernel/sn2/prominfo_proc.c      279
-rw-r--r--  arch/ia64/sn/kernel/sn2/ptc_deadlock.S        82
-rw-r--r--  arch/ia64/sn/kernel/sn2/sn2_smp.c            295
-rw-r--r--  arch/ia64/sn/kernel/sn2/sn_hwperf.c          690
-rw-r--r--  arch/ia64/sn/kernel/sn2/sn_proc_fs.c         149
-rw-r--r--  arch/ia64/sn/kernel/sn2/timer.c               36
-rw-r--r--  arch/ia64/sn/kernel/sn2/timer_interrupt.c     63
22 files changed, 4423 insertions(+), 0 deletions(-)
diff --git a/arch/ia64/sn/kernel/Makefile b/arch/ia64/sn/kernel/Makefile
new file mode 100644
index 000000000000..6c7f4d9e8ea0
--- /dev/null
+++ b/arch/ia64/sn/kernel/Makefile
@@ -0,0 +1,12 @@
# arch/ia64/sn/kernel/Makefile
#
# This file is subject to the terms and conditions of the GNU General Public
# License.  See the file "COPYING" in the main directory of this archive
# for more details.
#
# Copyright (C) 1999,2001-2003 Silicon Graphics, Inc.  All Rights Reserved.
#

obj-y += setup.o bte.o bte_error.o irq.o mca.o idle.o \
	 huberror.o io_init.o iomv.o klconflib.o sn2/
obj-$(CONFIG_IA64_GENERIC) += machvec.o
diff --git a/arch/ia64/sn/kernel/bte.c b/arch/ia64/sn/kernel/bte.c
new file mode 100644
index 000000000000..ce0bc4085eae
--- /dev/null
+++ b/arch/ia64/sn/kernel/bte.c
@@ -0,0 +1,453 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
 */

#include <linux/config.h>
#include <linux/module.h>
#include <asm/sn/nodepda.h>
#include <asm/sn/addrs.h>
#include <asm/sn/arch.h>
#include <asm/sn/sn_cpuid.h>
#include <asm/sn/pda.h>
#include <asm/sn/shubio.h>
#include <asm/nodedata.h>
#include <asm/delay.h>

#include <linux/bootmem.h>
#include <linux/string.h>
#include <linux/sched.h>

#include <asm/sn/bte.h>

#ifndef L1_CACHE_MASK
#define L1_CACHE_MASK (L1_CACHE_BYTES - 1)
#endif

/* two interfaces on each of two nodes (destination and local) */
#define MAX_INTERFACES_TO_TRY 4
static struct bteinfo_s *bte_if_on_node(nasid_t nasid, int interface)
{
	nodepda_t *tmp_nodepda;

	tmp_nodepda = NODEPDA(nasid_to_cnodeid(nasid));
	return &tmp_nodepda->bte_if[interface];
}

/************************************************************************
 * Block Transfer Engine copy related functions.
 *
 ***********************************************************************/

/*
 * bte_copy(src, dest, len, mode, notification)
 *
 * Use the block transfer engine to move kernel memory from src to dest
 * using the assigned mode.
 *
 * Parameters:
 *   src - physical address of the transfer source.
 *   dest - physical address of the transfer destination.
 *   len - number of bytes to transfer from source to dest.
 *   mode - hardware defined.  See reference information
 *          for IBCT0/1 in the SHUB Programmer's Reference.
 *   notification - kernel virtual address of the notification cache
 *                  line.  If NULL, the default is used and
 *                  the bte_copy is synchronous.
 *
 * NOTE: This function requires src, dest, and len to
 * be cacheline aligned.
 */
bte_result_t bte_copy(u64 src, u64 dest, u64 len, u64 mode, void *notification)
{
	u64 transfer_size;
	u64 transfer_stat;
	struct bteinfo_s *bte;
	bte_result_t bte_status;
	unsigned long irq_flags;
	unsigned long itc_end = 0;
	struct bteinfo_s *btes_to_try[MAX_INTERFACES_TO_TRY];
	int bte_if_index;
	int bte_pri, bte_sec;

	BTE_PRINTK(("bte_copy(0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%p)\n",
		    src, dest, len, mode, notification));

	if (len == 0) {
		return BTE_SUCCESS;
	}

	BUG_ON((len & L1_CACHE_MASK) ||
	       (src & L1_CACHE_MASK) || (dest & L1_CACHE_MASK));
	BUG_ON(!(len < ((BTE_LEN_MASK + 1) << L1_CACHE_SHIFT)));

	/* CPU 0 (per node) tries bte0 first, CPU 1 tries bte1 first */
	if (cpuid_to_subnode(smp_processor_id()) == 0) {
		bte_pri = 0;
		bte_sec = 1;
	} else {
		bte_pri = 1;
		bte_sec = 0;
	}

	if (mode & BTE_USE_DEST) {
		/* try remote then local */
		btes_to_try[0] = bte_if_on_node(NASID_GET(dest), bte_pri);
		btes_to_try[1] = bte_if_on_node(NASID_GET(dest), bte_sec);
		if (mode & BTE_USE_ANY) {
			btes_to_try[2] = bte_if_on_node(get_nasid(), bte_pri);
			btes_to_try[3] = bte_if_on_node(get_nasid(), bte_sec);
		} else {
			btes_to_try[2] = NULL;
			btes_to_try[3] = NULL;
		}
	} else {
		/* try local then remote */
		btes_to_try[0] = bte_if_on_node(get_nasid(), bte_pri);
		btes_to_try[1] = bte_if_on_node(get_nasid(), bte_sec);
		if (mode & BTE_USE_ANY) {
			btes_to_try[2] = bte_if_on_node(NASID_GET(dest), bte_pri);
			btes_to_try[3] = bte_if_on_node(NASID_GET(dest), bte_sec);
		} else {
			btes_to_try[2] = NULL;
			btes_to_try[3] = NULL;
		}
	}

retry_bteop:
	do {
		local_irq_save(irq_flags);

		bte_if_index = 0;

		/* Attempt to lock one of the BTE interfaces. */
		while (bte_if_index < MAX_INTERFACES_TO_TRY) {
			bte = btes_to_try[bte_if_index++];

			if (bte == NULL) {
				continue;
			}

			if (spin_trylock(&bte->spinlock)) {
				if (!(*bte->most_rcnt_na & BTE_WORD_AVAILABLE) ||
				    (BTE_LNSTAT_LOAD(bte) & BTE_ACTIVE)) {
					/* Got the lock but BTE still busy */
					spin_unlock(&bte->spinlock);
				} else {
					/* we got the lock and it's not busy */
					break;
				}
			}
			bte = NULL;
		}

		if (bte != NULL) {
			break;
		}

		local_irq_restore(irq_flags);

		if (!(mode & BTE_WACQUIRE)) {
			return BTEFAIL_NOTAVAIL;
		}
	} while (1);
159
160 if (notification == NULL) {
161 /* User does not want to be notified. */
162 bte->most_rcnt_na = &bte->notify;
163 } else {
164 bte->most_rcnt_na = notification;
165 }
166
167 /* Calculate the number of cache lines to transfer. */
168 transfer_size = ((len >> L1_CACHE_SHIFT) & BTE_LEN_MASK);
169
170 /* Initialize the notification to a known value. */
171 *bte->most_rcnt_na = BTE_WORD_BUSY;
172
173 /* Set the status reg busy bit and transfer length */
174 BTE_PRINTKV(("IBLS = 0x%lx\n", IBLS_BUSY | transfer_size));
175 BTE_LNSTAT_STORE(bte, IBLS_BUSY | transfer_size);
176
177 /* Set the source and destination registers */
178 BTE_PRINTKV(("IBSA = 0x%lx)\n", (TO_PHYS(src))));
179 BTE_SRC_STORE(bte, TO_PHYS(src));
180 BTE_PRINTKV(("IBDA = 0x%lx)\n", (TO_PHYS(dest))));
181 BTE_DEST_STORE(bte, TO_PHYS(dest));
182
183 /* Set the notification register */
184 BTE_PRINTKV(("IBNA = 0x%lx)\n",
185 TO_PHYS(ia64_tpa((unsigned long)bte->most_rcnt_na))));
186 BTE_NOTIF_STORE(bte,
187 TO_PHYS(ia64_tpa((unsigned long)bte->most_rcnt_na)));
188
189 /* Initiate the transfer */
190 BTE_PRINTK(("IBCT = 0x%lx)\n", BTE_VALID_MODE(mode)));
191 BTE_CTRL_STORE(bte, BTE_VALID_MODE(mode));
192
193 itc_end = ia64_get_itc() + (40000000 * local_cpu_data->cyc_per_usec);
194
195 spin_unlock_irqrestore(&bte->spinlock, irq_flags);
196
197 if (notification != NULL) {
198 return BTE_SUCCESS;
199 }
200
201 while ((transfer_stat = *bte->most_rcnt_na) == BTE_WORD_BUSY) {
202 if (ia64_get_itc() > itc_end) {
203 BTE_PRINTK(("BTE timeout nasid 0x%x bte%d IBLS = 0x%lx na 0x%lx\n",
204 NASID_GET(bte->bte_base_addr), bte->bte_num,
205 BTE_LNSTAT_LOAD(bte), *bte->most_rcnt_na) );
206 bte->bte_error_count++;
207 bte->bh_error = IBLS_ERROR;
208 bte_error_handler((unsigned long)NODEPDA(bte->bte_cnode));
209 *bte->most_rcnt_na = BTE_WORD_AVAILABLE;
210 goto retry_bteop;
211 }
212 }
213
214 BTE_PRINTKV((" Delay Done. IBLS = 0x%lx, most_rcnt_na = 0x%lx\n",
215 BTE_LNSTAT_LOAD(bte), *bte->most_rcnt_na));
216
217 if (transfer_stat & IBLS_ERROR) {
218 bte_status = transfer_stat & ~IBLS_ERROR;
219 } else {
220 bte_status = BTE_SUCCESS;
221 }
222 *bte->most_rcnt_na = BTE_WORD_AVAILABLE;
223
224 BTE_PRINTK(("Returning status is 0x%lx and most_rcnt_na is 0x%lx\n",
225 BTE_LNSTAT_LOAD(bte), *bte->most_rcnt_na));
226
227 return bte_status;
228}
229
230EXPORT_SYMBOL(bte_copy);
231
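/*
 * Illustrative usage sketch (an addition for this write-up, not part
 * of the original commit): a synchronous, cacheline-aligned transfer.
 * The wrapper name and buffers are hypothetical; only bte_copy(),
 * ia64_tpa(), and the BTE_WACQUIRE mode flag used above are assumed.
 */
static bte_result_t __attribute__ ((unused))
bte_copy_example(void *src_va, void *dst_va, u64 nbytes)
{
	/* bte_copy() takes physical addresses and insists that src,
	 * dest, and len all be L1-cacheline aligned. */
	BUG_ON((nbytes & L1_CACHE_MASK) ||
	       (ia64_tpa((unsigned long)src_va) & L1_CACHE_MASK) ||
	       (ia64_tpa((unsigned long)dst_va) & L1_CACHE_MASK));

	/* NULL notification => spin until the transfer completes;
	 * BTE_WACQUIRE => block until an interface is free. */
	return bte_copy(ia64_tpa((unsigned long)src_va),
			ia64_tpa((unsigned long)dst_va),
			nbytes, BTE_WACQUIRE, NULL);
}
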
/*
 * bte_unaligned_copy(src, dest, len, mode)
 *
 * Use the block transfer engine to move kernel
 * memory from src to dest using the assigned mode.
 *
 * Parameters:
 *   src - physical address of the transfer source.
 *   dest - physical address of the transfer destination.
 *   len - number of bytes to transfer from source to dest.
 *   mode - hardware defined.  See reference information
 *          for IBCT0/1 in the SGI documentation.
 *
 * NOTE: If the source, dest, and len are all cache line aligned,
 * then it would be _FAR_ preferable to use bte_copy instead.
 */
bte_result_t bte_unaligned_copy(u64 src, u64 dest, u64 len, u64 mode)
{
	int destFirstCacheOffset;
	u64 headBteSource;
	u64 headBteLen;
	u64 headBcopySrcOffset;
	u64 headBcopyDest;
	u64 headBcopyLen;
	u64 footBteSource;
	u64 footBteLen;
	u64 footBcopyDest;
	u64 footBcopyLen;
	bte_result_t rv;
	char *bteBlock, *bteBlock_unaligned;

	if (len == 0) {
		return BTE_SUCCESS;
	}

	/* temporary buffer used during unaligned transfers */
	bteBlock_unaligned = kmalloc(len + 3 * L1_CACHE_BYTES,
				     GFP_KERNEL | GFP_DMA);
	if (bteBlock_unaligned == NULL) {
		return BTEFAIL_NOTAVAIL;
	}
	bteBlock = (char *)L1_CACHE_ALIGN((u64) bteBlock_unaligned);

	headBcopySrcOffset = src & L1_CACHE_MASK;
	destFirstCacheOffset = dest & L1_CACHE_MASK;

	/*
	 * At this point, the transfer is broken into
	 * (up to) three sections.  The first section is
	 * from the start address to the first physical
	 * cache line, the second is from the first physical
	 * cache line to the last complete cache line,
	 * and the third is from the last cache line to the
	 * end of the buffer.  The first and third sections
	 * are handled by bte copying into a temporary buffer
	 * and then bcopy'ing the necessary section into the
	 * final location.  The middle section is handled with
	 * a standard bte copy.
	 *
	 * One nasty exception to the above rule is when the
	 * source and destination are not symmetrically
	 * mis-aligned.  If the source offset from the first
	 * cache line is different from the destination offset,
	 * we make the first section be the entire transfer
	 * and then bcopy the entire block into place.
	 */
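	/*
	 * Worked example (an illustration added here, assuming
	 * L1_CACHE_BYTES == 128): src = 0x1040, dest = 0x2040,
	 * len = 0x300.  Both offsets are 0x40, so the symmetric path
	 * below is taken: the head covers 0xc0 bytes (up to the next
	 * cache line at src 0x1100), the foot covers the 0x40-byte
	 * partial line ending at src 0x1340, and the remaining 0x200
	 * bytes in the middle move with one aligned bte_copy.
	 */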
	if (headBcopySrcOffset == destFirstCacheOffset) {

		/*
		 * Both the source and destination are the same
		 * distance from a cache line boundary so we can
		 * use the bte to transfer the bulk of the
		 * data.
		 */
		headBteSource = src & ~L1_CACHE_MASK;
		headBcopyDest = dest;
		if (headBcopySrcOffset) {
			headBcopyLen =
			    (len > (L1_CACHE_BYTES - headBcopySrcOffset) ?
			     L1_CACHE_BYTES - headBcopySrcOffset : len);
			headBteLen = L1_CACHE_BYTES;
		} else {
			headBcopyLen = 0;
			headBteLen = 0;
		}

		if (len > headBcopyLen) {
			footBcopyLen = (len - headBcopyLen) & L1_CACHE_MASK;
			footBteLen = L1_CACHE_BYTES;

			footBteSource = src + len - footBcopyLen;
			footBcopyDest = dest + len - footBcopyLen;

			if (footBcopyDest == (headBcopyDest + headBcopyLen)) {
				/*
				 * We have two contiguous bcopy
				 * blocks.  Merge them.
				 */
				headBcopyLen += footBcopyLen;
				headBteLen += footBteLen;
			} else if (footBcopyLen > 0) {
				rv = bte_copy(footBteSource,
					      ia64_tpa((unsigned long)bteBlock),
					      footBteLen, mode, NULL);
				if (rv != BTE_SUCCESS) {
					kfree(bteBlock_unaligned);
					return rv;
				}

				memcpy(__va(footBcopyDest),
				       (char *)bteBlock, footBcopyLen);
			}
		} else {
			footBcopyLen = 0;
			footBteLen = 0;
		}

		if (len > (headBcopyLen + footBcopyLen)) {
			/* now transfer the middle. */
			rv = bte_copy((src + headBcopyLen),
				      (dest + headBcopyLen),
				      (len - headBcopyLen - footBcopyLen),
				      mode, NULL);
			if (rv != BTE_SUCCESS) {
				kfree(bteBlock_unaligned);
				return rv;
			}
		}
	} else {

		/*
		 * The transfer is not symmetric, we will
		 * allocate a buffer large enough for all the
		 * data, bte_copy into that buffer and then
		 * bcopy to the destination.
		 */

		/* Add the leader from source */
		headBteLen = len + (src & L1_CACHE_MASK);
		/* Add the trailing bytes from footer. */
		headBteLen += L1_CACHE_BYTES - (headBteLen & L1_CACHE_MASK);
		headBteSource = src & ~L1_CACHE_MASK;
		headBcopySrcOffset = src & L1_CACHE_MASK;
		headBcopyDest = dest;
		headBcopyLen = len;
	}

	if (headBcopyLen > 0) {
		rv = bte_copy(headBteSource,
			      ia64_tpa((unsigned long)bteBlock), headBteLen,
			      mode, NULL);
		if (rv != BTE_SUCCESS) {
			kfree(bteBlock_unaligned);
			return rv;
		}

		memcpy(__va(headBcopyDest), ((char *)bteBlock +
					     headBcopySrcOffset), headBcopyLen);
	}
	kfree(bteBlock_unaligned);
	return BTE_SUCCESS;
}

EXPORT_SYMBOL(bte_unaligned_copy);

/************************************************************************
 * Block Transfer Engine initialization functions.
 *
 ***********************************************************************/

/*
 * bte_init_node(nodepda, cnode)
 *
 * Initialize the nodepda structure with BTE base addresses and
 * spinlocks.
 */
void bte_init_node(nodepda_t * mynodepda, cnodeid_t cnode)
{
	int i;

	/*
	 * Indicate that all the block transfer engines on this node
	 * are available.
	 */

	/*
	 * Allocate one bte_recover_t structure per node.  It holds
	 * the recovery lock for the node.  All the bte interface
	 * structures will point at this one bte_recover structure
	 * to get the lock.
	 */
	spin_lock_init(&mynodepda->bte_recovery_lock);
	init_timer(&mynodepda->bte_recovery_timer);
	mynodepda->bte_recovery_timer.function = bte_error_handler;
	mynodepda->bte_recovery_timer.data = (unsigned long)mynodepda;

	for (i = 0; i < BTES_PER_NODE; i++) {
		/* Which link status register should we use? */
		unsigned long link_status = (i == 0 ? IIO_IBLS0 : IIO_IBLS1);
		mynodepda->bte_if[i].bte_base_addr = (u64 *)
		    REMOTE_HUB_ADDR(cnodeid_to_nasid(cnode), link_status);

		/*
		 * Initialize the notification and spinlock
		 * so the first transfer can occur.
		 */
		mynodepda->bte_if[i].most_rcnt_na =
		    &(mynodepda->bte_if[i].notify);
		mynodepda->bte_if[i].notify = BTE_WORD_AVAILABLE;
		spin_lock_init(&mynodepda->bte_if[i].spinlock);

		mynodepda->bte_if[i].bte_cnode = cnode;
		mynodepda->bte_if[i].bte_error_count = 0;
		mynodepda->bte_if[i].bte_num = i;
		mynodepda->bte_if[i].cleanup_active = 0;
		mynodepda->bte_if[i].bh_error = 0;
	}
}
diff --git a/arch/ia64/sn/kernel/bte_error.c b/arch/ia64/sn/kernel/bte_error.c
new file mode 100644
index 000000000000..fd104312c6bd
--- /dev/null
+++ b/arch/ia64/sn/kernel/bte_error.c
@@ -0,0 +1,198 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2000-2004 Silicon Graphics, Inc.  All Rights Reserved.
 */

#include <linux/types.h>
#include <asm/sn/sn_sal.h>
#include "ioerror.h"
#include <asm/sn/addrs.h>
#include <asm/sn/shubio.h>
#include <asm/sn/geo.h>
#include "xtalk/xwidgetdev.h"
#include "xtalk/hubdev.h"
#include <asm/sn/bte.h>
#include <asm/param.h>
/*
 * BTE error handling is done in two parts.  The first part captures
 * any CRB related errors.  Since there can be multiple CRBs per
 * interface and multiple interfaces active, we need to wait until
 * all active CRBs are completed.  That is the first job of the
 * second-part error handler: once all BTE related CRBs have cleanly
 * completed, it resets the interfaces and gets them ready for new
 * transfers to be queued.
 */

void bte_error_handler(unsigned long);
31
/*
 * Wait until all BTE related CRBs are completed
 * and then reset the interfaces.
 */
void bte_error_handler(unsigned long _nodepda)
{
	struct nodepda_s *err_nodepda = (struct nodepda_s *)_nodepda;
	spinlock_t *recovery_lock = &err_nodepda->bte_recovery_lock;
	struct timer_list *recovery_timer = &err_nodepda->bte_recovery_timer;
	nasid_t nasid;
	int i;
	int valid_crbs;
	unsigned long irq_flags;
	volatile u64 *notify;
	bte_result_t bh_error;
	ii_imem_u_t imem;	/* II IMEM Register */
	ii_icrb0_d_u_t icrbd;	/* II CRB Register D */
	ii_ibcr_u_t ibcr;
	ii_icmr_u_t icmr;
	ii_ieclr_u_t ieclr;

	BTE_PRINTK(("bte_error_handler(%p) - %d\n", err_nodepda,
		    smp_processor_id()));

	spin_lock_irqsave(recovery_lock, irq_flags);

	if ((err_nodepda->bte_if[0].bh_error == BTE_SUCCESS) &&
	    (err_nodepda->bte_if[1].bh_error == BTE_SUCCESS)) {
		BTE_PRINTK(("eh:%p:%d Nothing to do.\n", err_nodepda,
			    smp_processor_id()));
		spin_unlock_irqrestore(recovery_lock, irq_flags);
		return;
	}
	/*
	 * Lock all interfaces on this node to prevent new transfers
	 * from being queued.
	 */
	for (i = 0; i < BTES_PER_NODE; i++) {
		if (err_nodepda->bte_if[i].cleanup_active) {
			continue;
		}
		spin_lock(&err_nodepda->bte_if[i].spinlock);
		BTE_PRINTK(("eh:%p:%d locked %d\n", err_nodepda,
			    smp_processor_id(), i));
		err_nodepda->bte_if[i].cleanup_active = 1;
	}

	/* Determine information about our hub */
	nasid = cnodeid_to_nasid(err_nodepda->bte_if[0].bte_cnode);

	/*
	 * A BTE transfer can use multiple CRBs.  We need to make sure
	 * that all the BTE CRBs are complete (or timed out) before
	 * attempting to clean up the error.  Resetting the BTE while
	 * there are still BTE CRBs active will hang the BTE.
	 * We should look at all the CRBs to see if they are allocated
	 * to the BTE and see if they are still active.  When none
	 * are active, we can continue with the cleanup.
	 *
	 * We also want to make sure that the local NI port is up.
	 * When a router resets, the NI port can go down while it
	 * goes through the LLP handshake, but then comes back up.
	 */
	icmr.ii_icmr_regval = REMOTE_HUB_L(nasid, IIO_ICMR);
	if (icmr.ii_icmr_fld_s.i_crb_mark != 0) {
		/*
		 * There are errors which still need to be cleaned up by
		 * hubiio_crb_error_handler; retry in ~5 seconds.
		 */
		mod_timer(recovery_timer, jiffies + (HZ * 5));
		BTE_PRINTK(("eh:%p:%d Marked Giving up\n", err_nodepda,
			    smp_processor_id()));
		spin_unlock_irqrestore(recovery_lock, irq_flags);
		return;
	}
	if (icmr.ii_icmr_fld_s.i_crb_vld != 0) {

		valid_crbs = icmr.ii_icmr_fld_s.i_crb_vld;

		for (i = 0; i < IIO_NUM_CRBS; i++) {
			if (!((1 << i) & valid_crbs)) {
				/* This crb was not marked as valid, ignore */
				continue;
			}
			icrbd.ii_icrb0_d_regval =
			    REMOTE_HUB_L(nasid, IIO_ICRB_D(i));
			if (icrbd.d_bteop) {
				mod_timer(recovery_timer, jiffies + (HZ * 5));
				BTE_PRINTK(("eh:%p:%d Valid %d, Giving up\n",
					    err_nodepda, smp_processor_id(),
					    i));
				spin_unlock_irqrestore(recovery_lock,
						       irq_flags);
				return;
			}
		}
	}

	BTE_PRINTK(("eh:%p:%d Cleaning up\n", err_nodepda, smp_processor_id()));
	/* Re-enable both bte interfaces */
	imem.ii_imem_regval = REMOTE_HUB_L(nasid, IIO_IMEM);
	imem.ii_imem_fld_s.i_b0_esd = imem.ii_imem_fld_s.i_b1_esd = 1;
	REMOTE_HUB_S(nasid, IIO_IMEM, imem.ii_imem_regval);

	/* Clear BTE0/1 error bits */
	ieclr.ii_ieclr_regval = 0;
	if (err_nodepda->bte_if[0].bh_error != BTE_SUCCESS)
		ieclr.ii_ieclr_fld_s.i_e_bte_0 = 1;
	if (err_nodepda->bte_if[1].bh_error != BTE_SUCCESS)
		ieclr.ii_ieclr_fld_s.i_e_bte_1 = 1;
	REMOTE_HUB_S(nasid, IIO_IECLR, ieclr.ii_ieclr_regval);

	/* Reinitialize both BTE state machines. */
	ibcr.ii_ibcr_regval = REMOTE_HUB_L(nasid, IIO_IBCR);
	ibcr.ii_ibcr_fld_s.i_soft_reset = 1;
	REMOTE_HUB_S(nasid, IIO_IBCR, ibcr.ii_ibcr_regval);

	for (i = 0; i < BTES_PER_NODE; i++) {
		bh_error = err_nodepda->bte_if[i].bh_error;
		if (bh_error != BTE_SUCCESS) {
			/* There is an error which needs to be notified */
			notify = err_nodepda->bte_if[i].most_rcnt_na;
			BTE_PRINTK(("cnode %d bte %d error=0x%lx\n",
				    err_nodepda->bte_if[i].bte_cnode,
				    err_nodepda->bte_if[i].bte_num,
				    IBLS_ERROR | (u64) bh_error));
			*notify = IBLS_ERROR | bh_error;
			err_nodepda->bte_if[i].bh_error = BTE_SUCCESS;
		}

		err_nodepda->bte_if[i].cleanup_active = 0;
		BTE_PRINTK(("eh:%p:%d Unlocked %d\n", err_nodepda,
			    smp_processor_id(), i));
		spin_unlock(&err_nodepda->bte_if[i].spinlock);
	}

	del_timer(recovery_timer);

	spin_unlock_irqrestore(recovery_lock, irq_flags);
}

/*
 * First part error handler.  This is called whenever any error CRB interrupt
 * is generated by the II.
 */
void
bte_crb_error_handler(cnodeid_t cnode, int btenum,
		      int crbnum, ioerror_t * ioe, int bteop)
{
	struct bteinfo_s *bte;

	bte = &(NODEPDA(cnode)->bte_if[btenum]);

	/*
	 * The caller has already figured out the error type, we save that
	 * in the bte handle structure for the thread exercising the
	 * interface to consume.
	 */
	bte->bh_error = ioe->ie_errortype + BTEFAIL_OFFSET;
	bte->bte_error_count++;

	BTE_PRINTK(("Got an error on cnode %d bte %d: HW error type 0x%x\n",
		    bte->bte_cnode, bte->bte_num, ioe->ie_errortype));
	bte_error_handler((unsigned long)NODEPDA(cnode));
}

diff --git a/arch/ia64/sn/kernel/huberror.c b/arch/ia64/sn/kernel/huberror.c
new file mode 100644
index 000000000000..2bdf684c5066
--- /dev/null
+++ b/arch/ia64/sn/kernel/huberror.c
@@ -0,0 +1,201 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1992 - 1997, 2000,2002-2004 Silicon Graphics, Inc. All rights reserved.
 */

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <asm/delay.h>
#include <asm/sn/sn_sal.h>
#include "ioerror.h"
#include <asm/sn/addrs.h>
#include <asm/sn/shubio.h>
#include <asm/sn/geo.h>
#include "xtalk/xwidgetdev.h"
#include "xtalk/hubdev.h"
#include <asm/sn/bte.h>

void hubiio_crb_error_handler(struct hubdev_info *hubdev_info);
extern void bte_crb_error_handler(cnodeid_t, int, int, ioerror_t *, int);

static irqreturn_t hub_eint_handler(int irq, void *arg, struct pt_regs *ep)
{
	struct hubdev_info *hubdev_info;
	struct ia64_sal_retval ret_stuff;
	nasid_t nasid;

	ret_stuff.status = 0;
	ret_stuff.v0 = 0;
	hubdev_info = (struct hubdev_info *)arg;
	nasid = hubdev_info->hdi_nasid;
	SAL_CALL_NOLOCK(ret_stuff, SN_SAL_HUB_ERROR_INTERRUPT,
			(u64) nasid, 0, 0, 0, 0, 0, 0);

	if ((int)ret_stuff.v0)
		panic("hub_eint_handler(): Fatal TIO Error");

	if (!(nasid & 1))	/* Not a TIO, handle CRB errors */
		hubiio_crb_error_handler(hubdev_info);

	return IRQ_HANDLED;
}
46
/*
 * Free the hub CRB "crbnum" which encountered an error.
 * The assumption is that error handling was successfully done,
 * and we now want to return the CRB back to the hub for normal usage.
 *
 * In order to free the CRB, all that's needed is to de-allocate it.
 *
 * Assumption:
 *	No other processor is mucking around with the hub control register,
 *	so the upper layer has to single-thread this.
 */
void hubiio_crb_free(struct hubdev_info *hubdev_info, int crbnum)
{
	ii_icrb0_b_u_t icrbb;

	/*
	 * The hardware does NOT clear the mark bit, so it must get cleared
	 * here to be sure the error is not processed twice.
	 */
	icrbb.ii_icrb0_b_regval = REMOTE_HUB_L(hubdev_info->hdi_nasid,
					       IIO_ICRB_B(crbnum));
	icrbb.b_mark = 0;
	REMOTE_HUB_S(hubdev_info->hdi_nasid, IIO_ICRB_B(crbnum),
		     icrbb.ii_icrb0_b_regval);
	/*
	 * Deallocate the register and wait until the hub indicates it's done.
	 */
	REMOTE_HUB_S(hubdev_info->hdi_nasid, IIO_ICDR, (IIO_ICDR_PND | crbnum));
	while (REMOTE_HUB_L(hubdev_info->hdi_nasid, IIO_ICDR) & IIO_ICDR_PND)
		udelay(1);
}
79
/*
 * hubiio_crb_error_handler
 *
 * This routine gets invoked when a hub gets an error
 * interrupt.  So, the routine is running in interrupt context
 * at error interrupt level.
 * Action:
 *	It's responsible for identifying ALL the CRBs that are marked
 *	with error, and processing them.
 *
 *	For each CRB that's marked with error, map it to the reason
 *	it caused an error, and invoke the appropriate error handler.
 *
 *	XXX Be aware of the information in the context register.
 *
 * NOTE:
 *	Use the REMOTE_HUB_* macros instead of LOCAL_HUB_* so that the
 *	interrupt handler can be run on any node (not necessarily the
 *	node corresponding to the hub that encountered the error).
 */

void hubiio_crb_error_handler(struct hubdev_info *hubdev_info)
{
	nasid_t nasid;
	ii_icrb0_a_u_t icrba;	/* II CRB Register A */
	ii_icrb0_b_u_t icrbb;	/* II CRB Register B */
	ii_icrb0_c_u_t icrbc;	/* II CRB Register C */
	ii_icrb0_d_u_t icrbd;	/* II CRB Register D */
	ii_icrb0_e_u_t icrbe;	/* II CRB Register E */
	int i;
	int num_errors = 0;	/* Num of errors handled */
	ioerror_t ioerror;

	nasid = hubdev_info->hdi_nasid;

	/*
	 * XXX - Add locking for any recovery actions
	 */
	/*
	 * Scan through all CRBs in the Hub, and handle the errors
	 * in any of the CRBs marked.
	 */
	for (i = 0; i < IIO_NUM_CRBS; i++) {
		/* Check this crb entry to see if it is in error. */
		icrbb.ii_icrb0_b_regval = REMOTE_HUB_L(nasid, IIO_ICRB_B(i));

		if (icrbb.b_mark == 0) {
			continue;
		}

		icrba.ii_icrb0_a_regval = REMOTE_HUB_L(nasid, IIO_ICRB_A(i));

		IOERROR_INIT(&ioerror);

		/* read other CRB error registers. */
		icrbc.ii_icrb0_c_regval = REMOTE_HUB_L(nasid, IIO_ICRB_C(i));
		icrbd.ii_icrb0_d_regval = REMOTE_HUB_L(nasid, IIO_ICRB_D(i));
		icrbe.ii_icrb0_e_regval = REMOTE_HUB_L(nasid, IIO_ICRB_E(i));

		IOERROR_SETVALUE(&ioerror, errortype, icrbb.b_ecode);

		/* Check if this error is due to BTE operation,
		 * and handle it separately.
		 */
		if (icrbd.d_bteop ||
		    ((icrbb.b_initiator == IIO_ICRB_INIT_BTE0 ||
		      icrbb.b_initiator == IIO_ICRB_INIT_BTE1) &&
		     (icrbb.b_imsgtype == IIO_ICRB_IMSGT_BTE ||
		      icrbb.b_imsgtype == IIO_ICRB_IMSGT_SN1NET))) {

			int bte_num;

			if (icrbd.d_bteop)
				bte_num = icrbc.c_btenum;
			else	/* b_initiator bit 2 gives BTE number */
				bte_num = (icrbb.b_initiator & 0x4) >> 2;

			hubiio_crb_free(hubdev_info, i);

			bte_crb_error_handler(nasid_to_cnodeid(nasid), bte_num,
					      i, &ioerror, icrbd.d_bteop);
			num_errors++;
			continue;
		}
	}
}
166
/*
 * Function	: hub_error_init
 * Purpose	: initialize the error handling requirements for a given hub.
 * Parameters	: cnode, the compact nodeid.
 * Assumptions	: Called only once per hub, either by a local cpu or by a
 *		  remote cpu when this hub is headless (cpuless).
 * Returns	: None
 */
void hub_error_init(struct hubdev_info *hubdev_info)
{
	if (request_irq(SGI_II_ERROR, (void *)hub_eint_handler, SA_SHIRQ,
			"SN_hub_error", (void *)hubdev_info))
		printk(KERN_ERR "hub_error_init: Failed to request_irq for 0x%p\n",
		       hubdev_info);
	return;
}

/*
 * Function	: ice_error_init
 * Purpose	: initialize the error handling requirements for a given tio.
 * Parameters	: cnode, the compact nodeid.
 * Assumptions	: Called only once per tio.
 * Returns	: None
 */
void ice_error_init(struct hubdev_info *hubdev_info)
{
	if (request_irq
	    (SGI_TIO_ERROR, (void *)hub_eint_handler, SA_SHIRQ, "SN_TIO_error",
	     (void *)hubdev_info))
		printk(KERN_ERR "ice_error_init: request_irq() error hubdev_info 0x%p\n",
		       hubdev_info);
	return;
}

diff --git a/arch/ia64/sn/kernel/idle.c b/arch/ia64/sn/kernel/idle.c
new file mode 100644
index 000000000000..49d178f022b5
--- /dev/null
+++ b/arch/ia64/sn/kernel/idle.c
@@ -0,0 +1,30 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2001-2004 Silicon Graphics, Inc.  All rights reserved.
 */

#include <asm/sn/leds.h>

void snidle(int state)
{
	if (state) {
		if (pda->idle_flag == 0) {
			/*
			 * Turn the activity LED off.
			 */
			set_led_bits(0, LED_CPU_ACTIVITY);
		}

		pda->idle_flag = 1;
	} else {
		/*
		 * Turn the activity LED on.
		 */
		set_led_bits(LED_CPU_ACTIVITY, LED_CPU_ACTIVITY);

		pda->idle_flag = 0;
	}
}
diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c
new file mode 100644
index 000000000000..001880812b7c
--- /dev/null
+++ b/arch/ia64/sn/kernel/io_init.c
@@ -0,0 +1,411 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved.
 */

#include <linux/bootmem.h>
#include <linux/nodemask.h>
#include <asm/sn/types.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/addrs.h>
#include "pci/pcibus_provider_defs.h"
#include "pci/pcidev.h"
#include "pci/pcibr_provider.h"
#include "xtalk/xwidgetdev.h"
#include <asm/sn/geo.h>
#include "xtalk/hubdev.h"
#include <asm/sn/io.h>
#include <asm/sn/simulator.h>

char master_baseio_wid;
nasid_t master_nasid = INVALID_NASID;	/* Partition Master */

struct slab_info {
	struct hubdev_info hubdev;
};

struct brick {
	moduleid_t id;		/* Module ID of this module */
	struct slab_info slab_info[MAX_SLABS + 1];
};

int sn_ioif_inited = 0;		/* SN I/O infrastructure initialized? */
36
/*
 * Retrieve the DMA Flush List for the given nasid.  This list is needed
 * to implement the WAR - Flush DMA data on PIO Reads.
 */
static inline uint64_t
sal_get_widget_dmaflush_list(u64 nasid, u64 widget_num, u64 address)
{
	struct ia64_sal_retval ret_stuff;
	ret_stuff.status = 0;
	ret_stuff.v0 = 0;

	SAL_CALL_NOLOCK(ret_stuff,
			(u64) SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST,
			(u64) nasid, (u64) widget_num, (u64) address, 0, 0, 0,
			0);
	return ret_stuff.v0;
}

/*
 * Retrieve the hub device info structure for the given nasid.
 */
static inline uint64_t sal_get_hubdev_info(u64 handle, u64 address)
{
	struct ia64_sal_retval ret_stuff;
	ret_stuff.status = 0;
	ret_stuff.v0 = 0;

	SAL_CALL_NOLOCK(ret_stuff,
			(u64) SN_SAL_IOIF_GET_HUBDEV_INFO,
			(u64) handle, (u64) address, 0, 0, 0, 0, 0);
	return ret_stuff.v0;
}

/*
 * Retrieve the pci bus information given the bus number.
 */
static inline uint64_t sal_get_pcibus_info(u64 segment, u64 busnum, u64 address)
{
	struct ia64_sal_retval ret_stuff;
	ret_stuff.status = 0;
	ret_stuff.v0 = 0;

	SAL_CALL_NOLOCK(ret_stuff,
			(u64) SN_SAL_IOIF_GET_PCIBUS_INFO,
			(u64) segment, (u64) busnum, (u64) address, 0, 0, 0, 0);
	return ret_stuff.v0;
}

/*
 * Retrieve the pci device information given the bus and device|function number.
 */
static inline uint64_t
sal_get_pcidev_info(u64 segment, u64 bus_number, u64 devfn, u64 pci_dev,
		    u64 sn_irq_info)
{
	struct ia64_sal_retval ret_stuff;
	ret_stuff.status = 0;
	ret_stuff.v0 = 0;

	SAL_CALL_NOLOCK(ret_stuff,
			(u64) SN_SAL_IOIF_GET_PCIDEV_INFO,
			(u64) segment, (u64) bus_number, (u64) devfn,
			(u64) pci_dev,
			sn_irq_info, 0, 0);
	return ret_stuff.v0;
}
107
/*
 * sn_alloc_pci_sysdata() - This routine allocates a pci controller
 * which is expected as the pci_dev and pci_bus sysdata by the Linux
 * PCI infrastructure.
 */
static inline struct pci_controller *sn_alloc_pci_sysdata(void)
{
	struct pci_controller *pci_sysdata;

	pci_sysdata = kmalloc(sizeof(*pci_sysdata), GFP_KERNEL);
	if (!pci_sysdata)
		BUG();

	memset(pci_sysdata, 0, sizeof(*pci_sysdata));
	return pci_sysdata;
}
124
/*
 * sn_fixup_ionodes() - This routine initializes the HUB data structure for
 * each node in the system.
 */
static void sn_fixup_ionodes(void)
{
	struct sn_flush_device_list *sn_flush_device_list;
	struct hubdev_info *hubdev;
	uint64_t status;
	uint64_t nasid;
	int i, widget;

	for (i = 0; i < numionodes; i++) {
		hubdev = (struct hubdev_info *)(NODEPDA(i)->pdinfo);
		nasid = cnodeid_to_nasid(i);
		status = sal_get_hubdev_info(nasid, (uint64_t) __pa(hubdev));
		if (status)
			continue;

		for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++)
			hubdev->hdi_xwidget_info[widget].xwi_hubinfo = hubdev;

		if (!hubdev->hdi_flush_nasid_list.widget_p)
			continue;

		hubdev->hdi_flush_nasid_list.widget_p =
		    kmalloc((HUB_WIDGET_ID_MAX + 1) *
			    sizeof(struct sn_flush_device_list *), GFP_KERNEL);

		memset(hubdev->hdi_flush_nasid_list.widget_p, 0x0,
		       (HUB_WIDGET_ID_MAX + 1) *
		       sizeof(struct sn_flush_device_list *));

		for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++) {
			sn_flush_device_list =
			    kmalloc(DEV_PER_WIDGET *
				    sizeof(struct sn_flush_device_list),
				    GFP_KERNEL);
			memset(sn_flush_device_list, 0x0,
			       DEV_PER_WIDGET *
			       sizeof(struct sn_flush_device_list));

			status =
			    sal_get_widget_dmaflush_list(nasid, widget,
							 (uint64_t)
							 __pa(sn_flush_device_list));
			if (status) {
				kfree(sn_flush_device_list);
				continue;
			}

			hubdev->hdi_flush_nasid_list.widget_p[widget] =
			    sn_flush_device_list;
		}

		if (!(i & 1))
			hub_error_init(hubdev);
		else
			ice_error_init(hubdev);
	}
}
189
/*
 * sn_pci_fixup_slot() - This routine sets up a slot's resources
 * consistent with the Linux PCI abstraction layer.  Resources acquired
 * from our PCI provider include PIO maps to BAR space and interrupt
 * objects.
 */
static void sn_pci_fixup_slot(struct pci_dev *dev)
{
	int idx;
	int segment = 0;
	uint64_t size;
	struct sn_irq_info *sn_irq_info;
	struct pci_dev *host_pci_dev;
	int status = 0;

	dev->sysdata = kmalloc(sizeof(struct pcidev_info), GFP_KERNEL);
	if (!SN_PCIDEV_INFO(dev))
		BUG();		/* Cannot afford to run out of memory */
	memset(SN_PCIDEV_INFO(dev), 0, sizeof(struct pcidev_info));

	sn_irq_info = kmalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
	if (!sn_irq_info)
		BUG();		/* Cannot afford to run out of memory */
	memset(sn_irq_info, 0, sizeof(struct sn_irq_info));

	/* Call to retrieve pci device information needed by kernel. */
	status = sal_get_pcidev_info((u64) segment, (u64) dev->bus->number,
				     dev->devfn,
				     (u64) __pa(SN_PCIDEV_INFO(dev)),
				     (u64) __pa(sn_irq_info));
	if (status)
		BUG();		/* Cannot get platform pci device information */

	/* Copy over PIO Mapped Addresses */
	for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) {
		unsigned long start, end, addr;

		if (!SN_PCIDEV_INFO(dev)->pdi_pio_mapped_addr[idx])
			continue;

		start = dev->resource[idx].start;
		end = dev->resource[idx].end;
		size = end - start;
		addr = SN_PCIDEV_INFO(dev)->pdi_pio_mapped_addr[idx];
		/* Clear the top nibble and map into the uncached region. */
		addr = ((addr << 4) >> 4) | __IA64_UNCACHED_OFFSET;
		dev->resource[idx].start = addr;
		dev->resource[idx].end = addr + size;
		if (dev->resource[idx].flags & IORESOURCE_IO)
			dev->resource[idx].parent = &ioport_resource;
		else
			dev->resource[idx].parent = &iomem_resource;
	}

	/* set up host bus linkages */
	host_pci_dev =
	    pci_find_slot(SN_PCIDEV_INFO(dev)->pdi_slot_host_handle >> 32,
			  SN_PCIDEV_INFO(dev)->
			  pdi_slot_host_handle & 0xffffffff);
	SN_PCIDEV_INFO(dev)->pdi_host_pcidev_info =
	    SN_PCIDEV_INFO(host_pci_dev);
	SN_PCIDEV_INFO(dev)->pdi_linux_pcidev = dev;
	SN_PCIDEV_INFO(dev)->pdi_pcibus_info = SN_PCIBUS_BUSSOFT(dev->bus);

	/* Only set up IRQ stuff if this device has a host bus context */
	if (SN_PCIDEV_BUSSOFT(dev) && sn_irq_info->irq_irq) {
		SN_PCIDEV_INFO(dev)->pdi_sn_irq_info = sn_irq_info;
		dev->irq = SN_PCIDEV_INFO(dev)->pdi_sn_irq_info->irq_irq;
		sn_irq_fixup(dev, sn_irq_info);
	}
}
260
/*
 * sn_pci_controller_fixup() - This routine sets up a bus's resources
 * consistent with the Linux PCI abstraction layer.
 */
static void sn_pci_controller_fixup(int segment, int busnum)
{
	int status = 0;
	int nasid, cnode;
	struct pci_bus *bus;
	struct pci_controller *controller;
	struct pcibus_bussoft *prom_bussoft_ptr;
	struct hubdev_info *hubdev_info;
	void *provider_soft;

	status =
	    sal_get_pcibus_info((u64) segment, (u64) busnum,
				(u64) ia64_tpa(&prom_bussoft_ptr));
	if (status > 0) {
		return;		/* bus # does not exist */
	}

	prom_bussoft_ptr = __va(prom_bussoft_ptr);
	controller = sn_alloc_pci_sysdata();
	/* a NULL controller is BUG'd in sn_alloc_pci_sysdata */

	bus = pci_scan_bus(busnum, &pci_root_ops, controller);
	if (bus == NULL) {
		return;		/* error, or bus already scanned */
	}

	/*
	 * Per-provider fixup.  Copies the contents from prom to local
	 * area and links SN_PCIBUS_BUSSOFT().
	 *
	 * Note: Provider is responsible for ensuring that prom_bussoft_ptr
	 * represents an asic-type that it can handle.
	 */

	if (prom_bussoft_ptr->bs_asic_type == PCIIO_ASIC_TYPE_PPB) {
		return;		/* no further fixup necessary */
	}

	provider_soft = pcibr_bus_fixup(prom_bussoft_ptr);
	if (provider_soft == NULL) {
		return;		/* fixup failed or not applicable */
	}

	/*
	 * Generic bus fixup goes here.  Don't reference prom_bussoft_ptr
	 * after this point.
	 */

	bus->sysdata = controller;
	PCI_CONTROLLER(bus)->platform_data = provider_soft;

	nasid = NASID_GET(SN_PCIBUS_BUSSOFT(bus)->bs_base);
	cnode = nasid_to_cnodeid(nasid);
	hubdev_info = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo);
	SN_PCIBUS_BUSSOFT(bus)->bs_xwidget_info =
	    &(hubdev_info->hdi_xwidget_info[SN_PCIBUS_BUSSOFT(bus)->bs_xid]);
}
322
/*
 * Ugly hack to get PCI setup until we have a proper ACPI namespace.
 */

#define PCI_BUSES_TO_SCAN 256

static int __init sn_pci_init(void)
{
	int i = 0;
	struct pci_dev *pci_dev = NULL;
	extern void sn_init_cpei_timer(void);
#ifdef CONFIG_PROC_FS
	extern void register_sn_procfs(void);
#endif

	if (!ia64_platform_is("sn2") || IS_RUNNING_ON_SIMULATOR())
		return 0;

	/*
	 * This is needed to avoid bounce limit checks in the blk layer
	 */
	ia64_max_iommu_merge_mask = ~PAGE_MASK;
	sn_fixup_ionodes();
	sn_irq = kmalloc(sizeof(struct sn_irq_info *) * NR_IRQS, GFP_KERNEL);
	if (!sn_irq)
		BUG();		/* Cannot afford to run out of memory. */
	memset(sn_irq, 0, sizeof(struct sn_irq_info *) * NR_IRQS);

	sn_init_cpei_timer();

#ifdef CONFIG_PROC_FS
	register_sn_procfs();
#endif

	for (i = 0; i < PCI_BUSES_TO_SCAN; i++) {
		sn_pci_controller_fixup(0, i);
	}

	/*
	 * Generic Linux PCI Layer has created the pci_bus and pci_dev
	 * structures - time for us to add our SN platform specific
	 * information.
	 */

	while ((pci_dev =
		pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pci_dev)) != NULL) {
		sn_pci_fixup_slot(pci_dev);
	}

	sn_ioif_inited = 1;	/* sn I/O infrastructure now initialized */

	return 0;
}
376
/*
 * hubdev_init_node() - Creates the HUB data structure and links it to its
 * own NODE specific data area.
 */
void hubdev_init_node(nodepda_t * npda, cnodeid_t node)
{
	struct hubdev_info *hubdev_info;

	if (node >= num_online_nodes())	/* Headless/memless IO nodes */
		hubdev_info = (struct hubdev_info *)
		    alloc_bootmem_node(NODE_DATA(0),
				       sizeof(struct hubdev_info));
	else
		hubdev_info = (struct hubdev_info *)
		    alloc_bootmem_node(NODE_DATA(node),
				       sizeof(struct hubdev_info));
	npda->pdinfo = (void *)hubdev_info;
}
399
geoid_t
cnodeid_get_geoid(cnodeid_t cnode)
{
	struct hubdev_info *hubdev;

	hubdev = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo);
	return hubdev->hdi_geoid;
}

subsys_initcall(sn_pci_init);
diff --git a/arch/ia64/sn/kernel/iomv.c b/arch/ia64/sn/kernel/iomv.c
new file mode 100644
index 000000000000..fec6d8b8237b
--- /dev/null
+++ b/arch/ia64/sn/kernel/iomv.c
@@ -0,0 +1,70 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000-2003 Silicon Graphics, Inc.  All rights reserved.
 */

#include <linux/module.h>
#include <asm/io.h>
#include <asm/delay.h>
#include <asm/sn/nodepda.h>
#include <asm/sn/simulator.h>
#include <asm/sn/pda.h>
#include <asm/sn/sn_cpuid.h>
#include <asm/sn/shub_mmr.h>

/**
 * sn_io_addr - convert an in/out port to an i/o address
 * @port: port to convert
 *
 * Legacy in/out instructions are converted to ld/st instructions
 * on IA64.  This routine will convert a port number into a valid
 * SN i/o address.  Used by sn_in*() and sn_out*().
 */
void *sn_io_addr(unsigned long port)
{
	if (!IS_RUNNING_ON_SIMULATOR()) {
		/* On sn2, legacy I/O ports don't point at anything */
		if (port < (64 * 1024))
			return NULL;
		return ((void *)(port | __IA64_UNCACHED_OFFSET));
	} else {
		/* but the simulator uses them... */
		unsigned long addr;

		/*
		 * word align port, but need more than 10 bits
		 * for accessing registers in bedrock local block
		 * (so we don't do port&0xfff)
		 */
		addr = (is_shub2() ? 0xc00000028c000000UL :
			0xc0000087cc000000UL) | ((port >> 2) << 12);
		if ((port >= 0x1f0 && port <= 0x1f7) ||
		    port == 0x3f6 || port == 0x3f7)
			addr |= port;
		return (void *)addr;
	}
}

EXPORT_SYMBOL(sn_io_addr);
50
/**
 * __sn_mmiowb - I/O space memory barrier
 *
 * See include/asm-ia64/io.h and Documentation/DocBook/deviceiobook.tmpl
 * for details.
 *
 * On SN2, we wait for the PIO_WRITE_STATUS SHub register to clear.
 * See PV 871084 for details about the WAR about zero value.
 */
void __sn_mmiowb(void)
{
	volatile unsigned long *adr = pda->pio_write_status_addr;
	unsigned long val = pda->pio_write_status_val;

	while ((*adr & SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK) != val)
		cpu_relax();
}

EXPORT_SYMBOL(__sn_mmiowb);
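
/*
 * Illustrative use (an addition for this write-up, not part of the
 * original commit): a driver issuing MMIO writes under a spinlock
 * calls mmiowb() before unlocking, so writes from two CPUs cannot
 * interleave at the device.  On sn2 the machine vector routes
 * mmiowb() to __sn_mmiowb() above; the lock, doorbell pointer, and
 * tag below are hypothetical, and the usual spinlock/io headers are
 * assumed to be pulled in.
 */
static spinlock_t example_hw_lock = SPIN_LOCK_UNLOCKED;

static void __attribute__ ((unused))
example_ring_doorbell(void __iomem *doorbell, u32 tag)
{
	spin_lock(&example_hw_lock);
	writel(tag, doorbell);	/* posted MMIO write */
	mmiowb();		/* order it before the unlock is visible */
	spin_unlock(&example_hw_lock);
}
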
diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c
new file mode 100644
index 000000000000..3be44724f6c8
--- /dev/null
+++ b/arch/ia64/sn/kernel/irq.c
@@ -0,0 +1,431 @@
/*
 * Platform dependent support for SGI SN
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2000-2004 Silicon Graphics, Inc.  All Rights Reserved.
 */

#include <linux/irq.h>
#include <asm/sn/intr.h>
#include <asm/sn/addrs.h>
#include <asm/sn/arch.h>
#include "xtalk/xwidgetdev.h"
#include "pci/pcibus_provider_defs.h"
#include "pci/pcidev.h"
#include "pci/pcibr_provider.h"
#include <asm/sn/shub_mmr.h>
#include <asm/sn/sn_sal.h>

static void force_interrupt(int irq);
static void register_intr_pda(struct sn_irq_info *sn_irq_info);
static void unregister_intr_pda(struct sn_irq_info *sn_irq_info);

extern int sn_force_interrupt_flag;
extern int sn_ioif_inited;
struct sn_irq_info **sn_irq;
29
static inline uint64_t sn_intr_alloc(nasid_t local_nasid, int local_widget,
				     u64 sn_irq_info,
				     int req_irq, nasid_t req_nasid,
				     int req_slice)
{
	struct ia64_sal_retval ret_stuff;
	ret_stuff.status = 0;
	ret_stuff.v0 = 0;

	SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT,
			(u64) SAL_INTR_ALLOC, (u64) local_nasid,
			(u64) local_widget, (u64) sn_irq_info, (u64) req_irq,
			(u64) req_nasid, (u64) req_slice);
	return ret_stuff.status;
}

static inline void sn_intr_free(nasid_t local_nasid, int local_widget,
				struct sn_irq_info *sn_irq_info)
{
	struct ia64_sal_retval ret_stuff;
	ret_stuff.status = 0;
	ret_stuff.v0 = 0;

	SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT,
			(u64) SAL_INTR_FREE, (u64) local_nasid,
			(u64) local_widget, (u64) sn_irq_info->irq_irq,
			(u64) sn_irq_info->irq_cookie, 0, 0);
}
58
static unsigned int sn_startup_irq(unsigned int irq)
{
	return 0;
}

static void sn_shutdown_irq(unsigned int irq)
{
}

static void sn_disable_irq(unsigned int irq)
{
}

static void sn_enable_irq(unsigned int irq)
{
}
75
static void sn_ack_irq(unsigned int irq)
{
	uint64_t event_occurred, mask = 0;
	int nasid;

	irq = irq & 0xff;
	nasid = get_nasid();
	event_occurred =
	    HUB_L((uint64_t *) GLOBAL_MMR_ADDR(nasid, SH_EVENT_OCCURRED));
	if (event_occurred & SH_EVENT_OCCURRED_UART_INT_MASK) {
		mask |= (1 << SH_EVENT_OCCURRED_UART_INT_SHFT);
	}
	if (event_occurred & SH_EVENT_OCCURRED_IPI_INT_MASK) {
		mask |= (1 << SH_EVENT_OCCURRED_IPI_INT_SHFT);
	}
	if (event_occurred & SH_EVENT_OCCURRED_II_INT0_MASK) {
		mask |= (1 << SH_EVENT_OCCURRED_II_INT0_SHFT);
	}
	if (event_occurred & SH_EVENT_OCCURRED_II_INT1_MASK) {
		mask |= (1 << SH_EVENT_OCCURRED_II_INT1_SHFT);
	}
	HUB_S((uint64_t *) GLOBAL_MMR_ADDR(nasid, SH_EVENT_OCCURRED_ALIAS),
	      mask);
	__set_bit(irq, (volatile void *)pda->sn_in_service_ivecs);

	move_irq(irq);
}
103
static void sn_end_irq(unsigned int irq)
{
	int nasid;
	int ivec;
	uint64_t event_occurred;

	ivec = irq & 0xff;
	if (ivec == SGI_UART_VECTOR) {
		nasid = get_nasid();
		event_occurred = HUB_L((uint64_t *) GLOBAL_MMR_ADDR
				       (nasid, SH_EVENT_OCCURRED));
		/* If the UART bit is set here, we may have received an
		 * interrupt from the UART that the driver missed.  To
		 * make sure, we IPI ourselves to force us to look again.
		 */
		if (event_occurred & SH_EVENT_OCCURRED_UART_INT_MASK) {
			platform_send_ipi(smp_processor_id(), SGI_UART_VECTOR,
					  IA64_IPI_DM_INT, 0);
		}
	}
	__clear_bit(ivec, (volatile void *)pda->sn_in_service_ivecs);
	if (sn_force_interrupt_flag)
		force_interrupt(irq);
}
128
static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask)
{
	struct sn_irq_info *sn_irq_info = sn_irq[irq];
	struct sn_irq_info *tmp_sn_irq_info;
	int cpuid, cpuphys;
	nasid_t t_nasid;	/* nasid to target */
	int t_slice;		/* slice to target */

	/* allocate a temp sn_irq_info struct to get new target info */
	tmp_sn_irq_info = kmalloc(sizeof(*tmp_sn_irq_info), GFP_KERNEL);
	if (!tmp_sn_irq_info)
		return;

	cpuid = first_cpu(mask);
	cpuphys = cpu_physical_id(cpuid);
	t_nasid = cpuid_to_nasid(cpuid);
	t_slice = cpuid_to_slice(cpuid);

	while (sn_irq_info) {
		int status;
		int local_widget;
		uint64_t bridge = (uint64_t) sn_irq_info->irq_bridge;
		nasid_t local_nasid = NASID_GET(bridge);

		if (!bridge)
			break;	/* irq is not a device interrupt */

		if (local_nasid & 1)
			local_widget = TIO_SWIN_WIDGETNUM(bridge);
		else
			local_widget = SWIN_WIDGETNUM(bridge);

		/* Free the old PROM sn_irq_info structure */
		sn_intr_free(local_nasid, local_widget, sn_irq_info);

		/* allocate a new PROM sn_irq_info struct */
		status = sn_intr_alloc(local_nasid, local_widget,
				       __pa(tmp_sn_irq_info), irq, t_nasid,
				       t_slice);

		if (status == 0) {
			/* Update the kernel's sn_irq_info with new target info */
			unregister_intr_pda(sn_irq_info);
			sn_irq_info->irq_cpuid = cpuid;
			sn_irq_info->irq_nasid = t_nasid;
			sn_irq_info->irq_slice = t_slice;
			sn_irq_info->irq_xtalkaddr =
			    tmp_sn_irq_info->irq_xtalkaddr;
			sn_irq_info->irq_cookie = tmp_sn_irq_info->irq_cookie;
			register_intr_pda(sn_irq_info);

			if (IS_PCI_BRIDGE_ASIC(sn_irq_info->irq_bridge_type)) {
				pcibr_change_devices_irq(sn_irq_info);
			}

			sn_irq_info = sn_irq_info->irq_next;

#ifdef CONFIG_SMP
			set_irq_affinity_info((irq & 0xff), cpuphys, 0);
#endif
		} else {
			break;	/* sn_intr_alloc() failed */
		}
	}
	kfree(tmp_sn_irq_info);
}
195
struct hw_interrupt_type irq_type_sn = {
	.typename	= "SN hub",
	.startup	= sn_startup_irq,
	.shutdown	= sn_shutdown_irq,
	.enable		= sn_enable_irq,
	.disable	= sn_disable_irq,
	.ack		= sn_ack_irq,
	.end		= sn_end_irq,
	.set_affinity	= sn_set_affinity_irq
};
206
unsigned int sn_local_vector_to_irq(u8 vector)
{
	return (CPU_VECTOR_TO_IRQ(smp_processor_id(), vector));
}

void sn_irq_init(void)
{
	int i;
	irq_desc_t *base_desc = irq_desc;

	for (i = 0; i < NR_IRQS; i++) {
		if (base_desc[i].handler == &no_irq_type) {
			base_desc[i].handler = &irq_type_sn;
		}
	}
}
223
static void register_intr_pda(struct sn_irq_info *sn_irq_info)
{
	int irq = sn_irq_info->irq_irq;
	int cpu = sn_irq_info->irq_cpuid;

	if (pdacpu(cpu)->sn_last_irq < irq) {
		pdacpu(cpu)->sn_last_irq = irq;
	}

	if (pdacpu(cpu)->sn_first_irq == 0 || pdacpu(cpu)->sn_first_irq > irq) {
		pdacpu(cpu)->sn_first_irq = irq;
	}
}

static void unregister_intr_pda(struct sn_irq_info *sn_irq_info)
{
	int irq = sn_irq_info->irq_irq;
	int cpu = sn_irq_info->irq_cpuid;
	struct sn_irq_info *tmp_irq_info;
	int i, foundmatch;

	if (pdacpu(cpu)->sn_last_irq == irq) {
		foundmatch = 0;
		for (i = pdacpu(cpu)->sn_last_irq - 1; i; i--) {
			tmp_irq_info = sn_irq[i];
			while (tmp_irq_info) {
				if (tmp_irq_info->irq_cpuid == cpu) {
					foundmatch++;
					break;
				}
				tmp_irq_info = tmp_irq_info->irq_next;
			}
			if (foundmatch) {
				break;
			}
		}
		pdacpu(cpu)->sn_last_irq = i;
	}

	if (pdacpu(cpu)->sn_first_irq == irq) {
		foundmatch = 0;
		for (i = pdacpu(cpu)->sn_first_irq + 1; i < NR_IRQS; i++) {
			tmp_irq_info = sn_irq[i];
			while (tmp_irq_info) {
				if (tmp_irq_info->irq_cpuid == cpu) {
					foundmatch++;
					break;
				}
				tmp_irq_info = tmp_irq_info->irq_next;
			}
			if (foundmatch) {
				break;
			}
		}
		pdacpu(cpu)->sn_first_irq = ((i == NR_IRQS) ? 0 : i);
	}
}
281
struct sn_irq_info *sn_irq_alloc(nasid_t local_nasid, int local_widget, int irq,
				 nasid_t nasid, int slice)
{
	struct sn_irq_info *sn_irq_info;
	int status;

	sn_irq_info = kmalloc(sizeof(*sn_irq_info), GFP_KERNEL);
	if (sn_irq_info == NULL)
		return NULL;

	memset(sn_irq_info, 0x0, sizeof(*sn_irq_info));

	status =
	    sn_intr_alloc(local_nasid, local_widget, __pa(sn_irq_info), irq,
			  nasid, slice);

	if (status) {
		kfree(sn_irq_info);
		return NULL;
	} else {
		return sn_irq_info;
	}
}

void sn_irq_free(struct sn_irq_info *sn_irq_info)
{
	uint64_t bridge = (uint64_t) sn_irq_info->irq_bridge;
	nasid_t local_nasid = NASID_GET(bridge);
	int local_widget;

	if (local_nasid & 1)	/* tio check */
		local_widget = TIO_SWIN_WIDGETNUM(bridge);
	else
		local_widget = SWIN_WIDGETNUM(bridge);

	sn_intr_free(local_nasid, local_widget, sn_irq_info);

	kfree(sn_irq_info);
}

void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info)
{
	nasid_t nasid = sn_irq_info->irq_nasid;
	int slice = sn_irq_info->irq_slice;
	int cpu = nasid_slice_to_cpuid(nasid, slice);

	sn_irq_info->irq_cpuid = cpu;
	sn_irq_info->irq_pciioinfo = SN_PCIDEV_INFO(pci_dev);

	/* link it into the sn_irq[irq] list */
	sn_irq_info->irq_next = sn_irq[sn_irq_info->irq_irq];
	sn_irq[sn_irq_info->irq_irq] = sn_irq_info;

	register_intr_pda(sn_irq_info);
}

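/*
 * Illustrative lifecycle sketch (an addition for this write-up, not
 * part of the original commit): a bus provider allocates a PROM
 * interrupt object, links it to a pci_dev, and releases it again with
 * sn_irq_free() on teardown.  Everything except sn_irq_alloc() and
 * sn_irq_fixup() above is hypothetical.
 */
static int __attribute__ ((unused))
example_setup_device_irq(struct pci_dev *dev, nasid_t bridge_nasid,
			 int bridge_widget, int irq, int slice)
{
	struct sn_irq_info *info;

	/* target the interrupt at (bridge_nasid, slice) */
	info = sn_irq_alloc(bridge_nasid, bridge_widget, irq,
			    bridge_nasid, slice);
	if (!info)
		return -ENOMEM;

	sn_irq_fixup(dev, info);	/* link into the sn_irq[irq] list */
	dev->irq = irq;
	return 0;
}
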
static void force_interrupt(int irq)
{
	struct sn_irq_info *sn_irq_info;

	if (!sn_ioif_inited)
		return;
	sn_irq_info = sn_irq[irq];
	while (sn_irq_info) {
		if (IS_PCI_BRIDGE_ASIC(sn_irq_info->irq_bridge_type) &&
		    (sn_irq_info->irq_bridge != NULL)) {
			pcibr_force_interrupt(sn_irq_info);
		}
		sn_irq_info = sn_irq_info->irq_next;
	}
}
353
/*
 * Check for lost interrupts.  If the PIC int_status reg. says that
 * an interrupt has been sent, but not handled, and the interrupt
 * is not pending in either the cpu irr regs or in the soft irr regs,
 * and the interrupt is not in service, then the interrupt may have
 * been lost.  Force an interrupt on that pin.  It is possible that
 * the interrupt is in flight, so we may generate a spurious interrupt,
 * but we should never miss a real lost interrupt.
 */
static void sn_check_intr(int irq, struct sn_irq_info *sn_irq_info)
{
	uint64_t regval;
	int irr_reg_num;
	int irr_bit;
	uint64_t irr_reg;
	struct pcidev_info *pcidev_info;
	struct pcibus_info *pcibus_info;

	pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
	if (!pcidev_info)
		return;

	pcibus_info =
	    (struct pcibus_info *)pcidev_info->pdi_host_pcidev_info->
	    pdi_pcibus_info;
	regval = pcireg_intr_status_get(pcibus_info);

	irr_reg_num = irq_to_vector(irq) / 64;
	irr_bit = irq_to_vector(irq) % 64;
	switch (irr_reg_num) {
	case 0:
		irr_reg = ia64_getreg(_IA64_REG_CR_IRR0);
		break;
	case 1:
		irr_reg = ia64_getreg(_IA64_REG_CR_IRR1);
		break;
	case 2:
		irr_reg = ia64_getreg(_IA64_REG_CR_IRR2);
		break;
	case 3:
		irr_reg = ia64_getreg(_IA64_REG_CR_IRR3);
		break;
	default:
		return;		/* cannot happen: vectors are < 256 */
	}
	if (!test_bit(irr_bit, &irr_reg)) {
		if (!test_bit(irq, pda->sn_soft_irr)) {
			if (!test_bit(irq, pda->sn_in_service_ivecs)) {
				regval &= 0xff;
				if (sn_irq_info->irq_int_bit & regval &
				    sn_irq_info->irq_last_intr) {
					regval &=
					    ~(sn_irq_info->
					      irq_int_bit & regval);
					pcibr_force_interrupt(sn_irq_info);
				}
			}
		}
	}
	sn_irq_info->irq_last_intr = regval;
}
413
void sn_lb_int_war_check(void)
{
	int i;

	if (!sn_ioif_inited || pda->sn_first_irq == 0)
		return;
	for (i = pda->sn_first_irq; i <= pda->sn_last_irq; i++) {
		struct sn_irq_info *sn_irq_info = sn_irq[i];
		while (sn_irq_info) {
			/* Only call for PCI bridges that are fully initialized. */
			if (IS_PCI_BRIDGE_ASIC(sn_irq_info->irq_bridge_type) &&
			    (sn_irq_info->irq_bridge != NULL)) {
				sn_check_intr(i, sn_irq_info);
			}
			sn_irq_info = sn_irq_info->irq_next;
		}
	}
}
diff --git a/arch/ia64/sn/kernel/klconflib.c b/arch/ia64/sn/kernel/klconflib.c
new file mode 100644
index 000000000000..0f11a3299cd2
--- /dev/null
+++ b/arch/ia64/sn/kernel/klconflib.c
@@ -0,0 +1,108 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved.
 */

#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <asm/sn/types.h>
#include <asm/sn/module.h>
#include <asm/sn/l1.h>

char brick_types[MAX_BRICK_TYPES + 1] = "cri.xdpn%#=vo^kjbf890123456789...";

18/*
19 * Format a module id for printing.
20 *
21 * There are three possible formats:
22 *
23 * MODULE_FORMAT_BRIEF is the brief 6-character format, including
24 * the actual brick-type as recorded in the
25 * moduleid_t, eg. 002c15 for a C-brick, or
26 * 101#17 for a PX-brick.
27 *
28 * MODULE_FORMAT_LONG is the hwgraph format, eg. rack/002/bay/15
29 * or rack/101/bay/17 (note that the brick
30 * type does not appear in this format).
31 *
32 * MODULE_FORMAT_LCD is like MODULE_FORMAT_BRIEF, except that it
33 * ensures that the module id provided appears
34 * exactly as it would on the LCD display of
35 * the corresponding brick, eg. still 002c15
36 * for a C-brick, but 101p17 for a PX-brick.
37 *
38 * maule (9/13/04): Removed top-level check for (fmt == MODULE_FORMAT_LCD)
39 * making MODULE_FORMAT_LCD equivalent to MODULE_FORMAT_BRIEF. It was
40 * decided that all callers should assume the returned string is what
41 * is displayed on the brick L1 LCD.
42 */
43void
44format_module_id(char *buffer, moduleid_t m, int fmt)
45{
46 int rack, position;
47 unsigned char brickchar;
48
49 rack = MODULE_GET_RACK(m);
50 brickchar = MODULE_GET_BTCHAR(m);
51
52 /* Be sure we use the same brick type character as displayed
53 * on the brick's LCD
54 */
55 switch (brickchar)
56 {
57 case L1_BRICKTYPE_GA:
58 case L1_BRICKTYPE_OPUS_TIO:
59 brickchar = L1_BRICKTYPE_C;
60 break;
61
62 case L1_BRICKTYPE_PX:
63 case L1_BRICKTYPE_PE:
64 case L1_BRICKTYPE_PA:
65 case L1_BRICKTYPE_SA: /* we can move this to the "I's" later
66 * if that makes more sense
67 */
68 brickchar = L1_BRICKTYPE_P;
69 break;
70
71 case L1_BRICKTYPE_IX:
72 case L1_BRICKTYPE_IA:
73
74 brickchar = L1_BRICKTYPE_I;
75 break;
76 }
77
78 position = MODULE_GET_BPOS(m);
79
80 if ((fmt == MODULE_FORMAT_BRIEF) || (fmt == MODULE_FORMAT_LCD)) {
81 /* Brief module number format, eg. 002c15 */
82
83 /* Decompress the rack number */
84 *buffer++ = '0' + RACK_GET_CLASS(rack);
85 *buffer++ = '0' + RACK_GET_GROUP(rack);
86 *buffer++ = '0' + RACK_GET_NUM(rack);
87
88 /* Add the brick type */
89 *buffer++ = brickchar;
90 }
91 else if (fmt == MODULE_FORMAT_LONG) {
92 /* Fuller hwgraph format, eg. rack/002/bay/15 */
93
94 strcpy(buffer, "rack" "/"); buffer += strlen(buffer);
95
96 *buffer++ = '0' + RACK_GET_CLASS(rack);
97 *buffer++ = '0' + RACK_GET_GROUP(rack);
98 *buffer++ = '0' + RACK_GET_NUM(rack);
99
100 strcpy(buffer, "/" "bay" "/"); buffer += strlen(buffer);
101 }
102
103 /* Add the bay position, using at least two digits */
104 if (position < 10)
105 *buffer++ = '0';
106 sprintf(buffer, "%d", position);
107
108}
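/*
 * A minimal usage sketch (hypothetical, not part of the original file;
 * 'mid' is assumed to be a valid moduleid_t obtained elsewhere). The
 * expected output strings follow the examples in the comment above.
 */
static void format_module_id_example(moduleid_t mid)
{
	char buf[32];	/* comfortably larger than any of the formats */

	format_module_id(buf, mid, MODULE_FORMAT_BRIEF); /* e.g. "002c15" */
	format_module_id(buf, mid, MODULE_FORMAT_LONG);  /* e.g. "rack/002/bay/15" */
	format_module_id(buf, mid, MODULE_FORMAT_LCD);   /* like BRIEF, e.g. "002c15" */
}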
diff --git a/arch/ia64/sn/kernel/machvec.c b/arch/ia64/sn/kernel/machvec.c
new file mode 100644
index 000000000000..02bb9155840c
--- /dev/null
+++ b/arch/ia64/sn/kernel/machvec.c
@@ -0,0 +1,11 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (c) 2002-2003 Silicon Graphics, Inc. All Rights Reserved.
7 */
8
9#define MACHVEC_PLATFORM_NAME sn2
10#define MACHVEC_PLATFORM_HEADER <asm/machvec_sn2.h>
11#include <asm/machvec_init.h>
diff --git a/arch/ia64/sn/kernel/mca.c b/arch/ia64/sn/kernel/mca.c
new file mode 100644
index 000000000000..857774bb2c9a
--- /dev/null
+++ b/arch/ia64/sn/kernel/mca.c
@@ -0,0 +1,135 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved.
7 */
8
9#include <linux/types.h>
10#include <linux/kernel.h>
11#include <linux/timer.h>
12#include <linux/vmalloc.h>
13#include <asm/mca.h>
14#include <asm/sal.h>
15#include <asm/sn/sn_sal.h>
16
17/*
18 * Interval for calling SAL to poll for errors that do NOT cause error
19 * interrupts. SAL will raise a CPEI if any errors are present that
20 * need to be logged.
21 */
22#define CPEI_INTERVAL (5*HZ)
23
24struct timer_list sn_cpei_timer;
25void sn_init_cpei_timer(void);
26
27/* Printing oemdata from mca uses data that is not passed through SAL; it is
28 * global. Only one user at a time.
29 */
30static DECLARE_MUTEX(sn_oemdata_mutex);
31static u8 **sn_oemdata;
32static u64 *sn_oemdata_size, sn_oemdata_bufsize;
33
34/*
35 * print_hook
36 *
37 * This function is the callback routine that SAL calls to log error
38 * info for platform errors. buf is appended to sn_oemdata, resizing as
39 * required.
40 */
41static int print_hook(const char *fmt, ...)
42{
43 char buf[400];
44 int len;
45 va_list args;
46 va_start(args, fmt);
47 vsnprintf(buf, sizeof(buf), fmt, args);
48 va_end(args);
49 len = strlen(buf);
50 while (*sn_oemdata_size + len + 1 > sn_oemdata_bufsize) {
51 u8 *newbuf = vmalloc(sn_oemdata_bufsize += 1000);
52 if (!newbuf) {
53 printk(KERN_ERR "%s: unable to extend sn_oemdata\n",
54 __FUNCTION__);
55 return 0;
56 }
57 memcpy(newbuf, *sn_oemdata, *sn_oemdata_size);
58 vfree(*sn_oemdata);
59 *sn_oemdata = newbuf;
60 }
61 memcpy(*sn_oemdata + *sn_oemdata_size, buf, len + 1);
62 *sn_oemdata_size += len;
63 return 0;
64}
65
66static void sn_cpei_handler(int irq, void *devid, struct pt_regs *regs)
67{
68 /*
69 * this function's sole purpose is to call SAL when we receive
70 * a CE interrupt from SHUB or when the timer routine decides
71 * we need to call SAL to check for CEs.
72 */
73
74 /* CALL SAL_LOG_CE */
75
76 ia64_sn_plat_cpei_handler();
77}
78
79static void sn_cpei_timer_handler(unsigned long dummy)
80{
81 sn_cpei_handler(-1, NULL, NULL);
82 mod_timer(&sn_cpei_timer, jiffies + CPEI_INTERVAL);
83}
84
85void sn_init_cpei_timer(void)
86{
87 init_timer(&sn_cpei_timer);
88 sn_cpei_timer.expires = jiffies + CPEI_INTERVAL;
89 sn_cpei_timer.function = sn_cpei_timer_handler;
90 add_timer(&sn_cpei_timer);
91}
92
93static int
94sn_platform_plat_specific_err_print(const u8 * sect_header, u8 ** oemdata,
95 u64 * oemdata_size)
96{
97 down(&sn_oemdata_mutex);
98 sn_oemdata = oemdata;
99 sn_oemdata_size = oemdata_size;
100 sn_oemdata_bufsize = 0;
101 ia64_sn_plat_specific_err_print(print_hook, (char *)sect_header);
102 up(&sn_oemdata_mutex);
103 return 0;
104}
105
106/* Callback when userspace salinfo wants to decode oem data via the platform
107 * kernel and/or prom.
108 */
109int sn_salinfo_platform_oemdata(const u8 *sect_header, u8 **oemdata, u64 *oemdata_size)
110{
111 efi_guid_t guid = *(efi_guid_t *)sect_header;
112 int valid = 0;
113 *oemdata_size = 0;
114 vfree(*oemdata);
115 *oemdata = NULL;
116 if (efi_guidcmp(guid, SAL_PLAT_SPECIFIC_ERR_SECT_GUID) == 0) {
117 sal_log_plat_specific_err_info_t *psei = (sal_log_plat_specific_err_info_t *)sect_header;
118 valid = psei->valid.oem_data;
119 } else if (efi_guidcmp(guid, SAL_PLAT_MEM_DEV_ERR_SECT_GUID) == 0) {
120 sal_log_mem_dev_err_info_t *mdei = (sal_log_mem_dev_err_info_t *)sect_header;
121 valid = mdei->valid.oem_data;
122 }
123 if (valid)
124 return sn_platform_plat_specific_err_print(sect_header, oemdata, oemdata_size);
125 else
126 return 0;
127}
128
129static int __init sn_salinfo_init(void)
130{
131 salinfo_platform_oemdata = &sn_salinfo_platform_oemdata;
132 return 0;
133}
134
135module_init(sn_salinfo_init)
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
new file mode 100644
index 000000000000..f0306b516afb
--- /dev/null
+++ b/arch/ia64/sn/kernel/setup.c
@@ -0,0 +1,621 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1999,2001-2004 Silicon Graphics, Inc. All rights reserved.
7 */
8
9#include <linux/config.h>
10#include <linux/module.h>
11#include <linux/init.h>
12#include <linux/delay.h>
13#include <linux/kernel.h>
14#include <linux/kdev_t.h>
15#include <linux/string.h>
16#include <linux/tty.h>
17#include <linux/console.h>
18#include <linux/timex.h>
19#include <linux/sched.h>
20#include <linux/ioport.h>
21#include <linux/mm.h>
22#include <linux/serial.h>
23#include <linux/irq.h>
24#include <linux/bootmem.h>
25#include <linux/mmzone.h>
26#include <linux/interrupt.h>
27#include <linux/acpi.h>
28#include <linux/compiler.h>
29#include <linux/sched.h>
30#include <linux/root_dev.h>
31#include <linux/nodemask.h>
32
33#include <asm/io.h>
34#include <asm/sal.h>
35#include <asm/machvec.h>
36#include <asm/system.h>
37#include <asm/processor.h>
38#include <asm/sn/arch.h>
39#include <asm/sn/addrs.h>
40#include <asm/sn/pda.h>
41#include <asm/sn/nodepda.h>
42#include <asm/sn/sn_cpuid.h>
43#include <asm/sn/simulator.h>
44#include <asm/sn/leds.h>
45#include <asm/sn/bte.h>
46#include <asm/sn/shub_mmr.h>
47#include <asm/sn/clksupport.h>
48#include <asm/sn/sn_sal.h>
49#include <asm/sn/geo.h>
50#include "xtalk/xwidgetdev.h"
51#include "xtalk/hubdev.h"
52#include <asm/sn/klconfig.h>
53
54
55DEFINE_PER_CPU(struct pda_s, pda_percpu);
56
57#define MAX_PHYS_MEMORY (1UL << 49) /* 512 TB */
58
59lboard_t *root_lboard[MAX_COMPACT_NODES];
60
61extern void bte_init_node(nodepda_t *, cnodeid_t);
62
63extern void sn_timer_init(void);
64extern unsigned long last_time_offset;
65extern void (*ia64_mark_idle) (int);
66extern void snidle(int);
67extern unsigned char acpi_kbd_controller_present;
68
69unsigned long sn_rtc_cycles_per_second;
70EXPORT_SYMBOL(sn_rtc_cycles_per_second);
71
72DEFINE_PER_CPU(struct sn_hub_info_s, __sn_hub_info);
73EXPORT_PER_CPU_SYMBOL(__sn_hub_info);
74
75partid_t sn_partid = -1;
76EXPORT_SYMBOL(sn_partid);
77char sn_system_serial_number_string[128];
78EXPORT_SYMBOL(sn_system_serial_number_string);
79u64 sn_partition_serial_number;
80EXPORT_SYMBOL(sn_partition_serial_number);
81u8 sn_partition_id;
82EXPORT_SYMBOL(sn_partition_id);
83u8 sn_system_size;
84EXPORT_SYMBOL(sn_system_size);
85u8 sn_sharing_domain_size;
86EXPORT_SYMBOL(sn_sharing_domain_size);
87u8 sn_coherency_id;
88EXPORT_SYMBOL(sn_coherency_id);
89u8 sn_region_size;
90EXPORT_SYMBOL(sn_region_size);
91
92short physical_node_map[MAX_PHYSNODE_ID];
93
94EXPORT_SYMBOL(physical_node_map);
95
96int numionodes;
97
98static void sn_init_pdas(char **);
99static void scan_for_ionodes(void);
100
101static nodepda_t *nodepdaindr[MAX_COMPACT_NODES];
102
103/*
104 * The format of "screen_info" is strange, a legacy of the early i386 setup
105 * code. This is just enough to make the console code think we're on a
106 * VGA color display.
107 */
108struct screen_info sn_screen_info = {
109 .orig_x = 0,
110 .orig_y = 0,
111 .orig_video_mode = 3,
112 .orig_video_cols = 80,
113 .orig_video_ega_bx = 3,
114 .orig_video_lines = 25,
115 .orig_video_isVGA = 1,
116 .orig_video_points = 16
117};
118
119/*
120 * This is here so we can use the CMOS detection in ide-probe.c to
121 * determine what drives are present. In theory, we don't need this
122 * as the auto-detection could be done via ide-probe.c:do_probe() but
123 * in practice that would be much slower, which is painful when
124 * running in the simulator. Note that passing zeroes in DRIVE_INFO
125 * is sufficient (the IDE driver will autodetect the drive geometry).
126 */
127#ifdef CONFIG_IA64_GENERIC
128extern char drive_info[4 * 16];
129#else
130char drive_info[4 * 16];
131#endif
132
133/*
134 * Get nasid of current cpu early in boot before nodepda is initialized
135 */
136static int
137boot_get_nasid(void)
138{
139 int nasid;
140
141 if (ia64_sn_get_sapic_info(get_sapicid(), &nasid, NULL, NULL))
142 BUG();
143 return nasid;
144}
145
146/*
147 * This routine can only be used during init, since
148 * smp_boot_data is an init data structure.
149 * We have to use smp_boot_data.cpu_phys_id to find
150 * the physical id of the processor because the normal
151 * cpu_physical_id() relies on data structures that
152 * may not be initialized yet.
153 */
154
155static int __init pxm_to_nasid(int pxm)
156{
157 int i;
158 int nid;
159
160 nid = pxm_to_nid_map[pxm];
161 for (i = 0; i < num_node_memblks; i++) {
162 if (node_memblk[i].nid == nid) {
163 return NASID_GET(node_memblk[i].start_paddr);
164 }
165 }
166 return -1;
167}
168
169/**
170 * early_sn_setup - early setup routine for SN platforms
171 *
172 * Sets up an initial console to aid debugging. Intended primarily
173 * for bringup. See start_kernel() in init/main.c.
174 */
175
176void __init early_sn_setup(void)
177{
178 efi_system_table_t *efi_systab;
179 efi_config_table_t *config_tables;
180 struct ia64_sal_systab *sal_systab;
181 struct ia64_sal_desc_entry_point *ep;
182 char *p;
183 int i, j;
184
185 /*
186	 * Parse enough of the SAL tables to locate the SAL entry point. Since console
187 * IO on SN2 is done via SAL calls, early_printk won't work without this.
188 *
189 * This code duplicates some of the ACPI table parsing that is in efi.c & sal.c.
190	 * Any changes to those files may have to be made here as well.
191 */
192 efi_systab = (efi_system_table_t *) __va(ia64_boot_param->efi_systab);
193 config_tables = __va(efi_systab->tables);
194 for (i = 0; i < efi_systab->nr_tables; i++) {
195 if (efi_guidcmp(config_tables[i].guid, SAL_SYSTEM_TABLE_GUID) ==
196 0) {
197 sal_systab = __va(config_tables[i].table);
198 p = (char *)(sal_systab + 1);
199 for (j = 0; j < sal_systab->entry_count; j++) {
200 if (*p == SAL_DESC_ENTRY_POINT) {
201 ep = (struct ia64_sal_desc_entry_point
202 *)p;
203 ia64_sal_handler_init(__va
204 (ep->sal_proc),
205 __va(ep->gp));
206 return;
207 }
208 p += SAL_DESC_SIZE(*p);
209 }
210 }
211 }
212 /* Uh-oh, SAL not available?? */
213 printk(KERN_ERR "failed to find SAL entry point\n");
214}
215
216extern int platform_intr_list[];
217extern nasid_t master_nasid;
218static int shub_1_1_found __initdata;
219
220/*
221 * sn_check_for_wars
222 *
223 * Set flag for enabling SHUB-specific WARs (workarounds)
224 */
225
226static inline int __init is_shub_1_1(int nasid)
227{
228 unsigned long id;
229 int rev;
230
231 if (is_shub2())
232 return 0;
233 id = REMOTE_HUB_L(nasid, SH1_SHUB_ID);
234 rev = (id & SH1_SHUB_ID_REVISION_MASK) >> SH1_SHUB_ID_REVISION_SHFT;
235 return rev <= 2;
236}
237
238static void __init sn_check_for_wars(void)
239{
240 int cnode;
241
242 if (is_shub2()) {
243 /* none yet */
244 } else {
245 for_each_online_node(cnode) {
246 if (is_shub_1_1(cnodeid_to_nasid(cnode)))
247 sn_hub_info->shub_1_1_found = 1;
248 }
249 }
250}
251
252/**
253 * sn_setup - SN platform setup routine
254 * @cmdline_p: kernel command line
255 *
256 * Handles platform setup for SN machines. This includes determining
257 * the RTC frequency (via a SAL call), initializing secondary CPUs, and
258 * setting up per-node data areas. The console is also initialized here.
259 */
260void __init sn_setup(char **cmdline_p)
261{
262 long status, ticks_per_sec, drift;
263 int pxm;
264 int major = sn_sal_rev_major(), minor = sn_sal_rev_minor();
265 extern void sn_cpu_init(void);
266
267 /*
268	 * If the generic code has enabled vga console support, let's
269	 * get rid of it again. This is a kludge for the fact that ACPI
270	 * currently has no way of informing us if legacy VGA is available
271 * or not.
272 */
273#if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE)
274 if (conswitchp == &vga_con) {
275 printk(KERN_DEBUG "SGI: Disabling VGA console\n");
276#ifdef CONFIG_DUMMY_CONSOLE
277 conswitchp = &dummy_con;
278#else
279 conswitchp = NULL;
280#endif /* CONFIG_DUMMY_CONSOLE */
281 }
282#endif /* def(CONFIG_VT) && def(CONFIG_VGA_CONSOLE) */
283
284 MAX_DMA_ADDRESS = PAGE_OFFSET + MAX_PHYS_MEMORY;
285
286 memset(physical_node_map, -1, sizeof(physical_node_map));
287 for (pxm = 0; pxm < MAX_PXM_DOMAINS; pxm++)
288 if (pxm_to_nid_map[pxm] != -1)
289 physical_node_map[pxm_to_nasid(pxm)] =
290 pxm_to_nid_map[pxm];
291
292 /*
293 * Old PROMs do not provide an ACPI FADT. Disable legacy keyboard
294 * support here so we don't have to listen to failed keyboard probe
295 * messages.
296 */
297 if ((major < 2 || (major == 2 && minor <= 9)) &&
298 acpi_kbd_controller_present) {
299 printk(KERN_INFO "Disabling legacy keyboard support as prom "
300 "is too old and doesn't provide FADT\n");
301 acpi_kbd_controller_present = 0;
302 }
303
304 printk("SGI SAL version %x.%02x\n", major, minor);
305
306 /*
307 * Confirm the SAL we're running on is recent enough...
308 */
309 if ((major < SN_SAL_MIN_MAJOR) || (major == SN_SAL_MIN_MAJOR &&
310 minor < SN_SAL_MIN_MINOR)) {
311 printk(KERN_ERR "This kernel needs SGI SAL version >= "
312 "%x.%02x\n", SN_SAL_MIN_MAJOR, SN_SAL_MIN_MINOR);
313 panic("PROM version too old\n");
314 }
315
316 master_nasid = boot_get_nasid();
317
318 status =
319 ia64_sal_freq_base(SAL_FREQ_BASE_REALTIME_CLOCK, &ticks_per_sec,
320 &drift);
321 if (status != 0 || ticks_per_sec < 100000) {
322 printk(KERN_WARNING
323 "unable to determine platform RTC clock frequency, guessing.\n");
324 /* PROM gives wrong value for clock freq. so guess */
325 sn_rtc_cycles_per_second = 1000000000000UL / 30000UL;
326 } else
327 sn_rtc_cycles_per_second = ticks_per_sec;
328
329 platform_intr_list[ACPI_INTERRUPT_CPEI] = IA64_CPE_VECTOR;
330
331 /*
332 * we set the default root device to /dev/hda
333 * to make simulation easy
334 */
335 ROOT_DEV = Root_HDA1;
336
337 /*
338 * Create the PDAs and NODEPDAs for all the cpus.
339 */
340 sn_init_pdas(cmdline_p);
341
342 ia64_mark_idle = &snidle;
343
344 /*
345 * For the bootcpu, we do this here. All other cpus will make the
346 * call as part of cpu_init in slave cpu initialization.
347 */
348 sn_cpu_init();
349
350#ifdef CONFIG_SMP
351 init_smp_config();
352#endif
353 screen_info = sn_screen_info;
354
355 sn_timer_init();
356}
357
358/**
359 * sn_init_pdas - setup node data areas
360 *
361 * One time setup for Node Data Area. Called by sn_setup().
362 */
363static void __init sn_init_pdas(char **cmdline_p)
364{
365 cnodeid_t cnode;
366
367 memset(pda->cnodeid_to_nasid_table, -1,
368 sizeof(pda->cnodeid_to_nasid_table));
369 for_each_online_node(cnode)
370 pda->cnodeid_to_nasid_table[cnode] =
371 pxm_to_nasid(nid_to_pxm_map[cnode]);
372
373 numionodes = num_online_nodes();
374 scan_for_ionodes();
375
376 /*
377	 * Allocate & initialize the nodepda for each node.
378 */
379 for_each_online_node(cnode) {
380 nodepdaindr[cnode] =
381 alloc_bootmem_node(NODE_DATA(cnode), sizeof(nodepda_t));
382 memset(nodepdaindr[cnode], 0, sizeof(nodepda_t));
383 memset(nodepdaindr[cnode]->phys_cpuid, -1,
384 sizeof(nodepdaindr[cnode]->phys_cpuid));
385 }
386
387 /*
388 * Allocate & initialize nodepda for TIOs. For now, put them on node 0.
389 */
390 for (cnode = num_online_nodes(); cnode < numionodes; cnode++) {
391 nodepdaindr[cnode] =
392 alloc_bootmem_node(NODE_DATA(0), sizeof(nodepda_t));
393 memset(nodepdaindr[cnode], 0, sizeof(nodepda_t));
394 }
395
396 /*
397 * Now copy the array of nodepda pointers to each nodepda.
398 */
399 for (cnode = 0; cnode < numionodes; cnode++)
400 memcpy(nodepdaindr[cnode]->pernode_pdaindr, nodepdaindr,
401 sizeof(nodepdaindr));
402
403 /*
404 * Set up IO related platform-dependent nodepda fields.
405 * The following routine actually sets up the hubinfo struct
406 * in nodepda.
407 */
408 for_each_online_node(cnode) {
409 bte_init_node(nodepdaindr[cnode], cnode);
410 }
411
412 /*
413 * Initialize the per node hubdev. This includes IO Nodes and
414 * headless/memless nodes.
415 */
416 for (cnode = 0; cnode < numionodes; cnode++) {
417 hubdev_init_node(nodepdaindr[cnode], cnode);
418 }
419}
420
421/**
422 * sn_cpu_init - initialize per-cpu data areas
423 * @cpuid: cpuid of the caller
424 *
425 * Called during cpu initialization on each cpu as it starts.
426 * Currently, initializes the per-cpu data area for SNIA.
427 * Also sets up a few fields in the nodepda. Also known as
428 * platform_cpu_init() by the ia64 machvec code.
429 */
430void __init sn_cpu_init(void)
431{
432 int cpuid;
433 int cpuphyid;
434 int nasid;
435 int subnode;
436 int slice;
437 int cnode;
438 int i;
439 static int wars_have_been_checked;
440
441 memset(pda, 0, sizeof(pda));
442 if (ia64_sn_get_sn_info(0, &sn_hub_info->shub2, &sn_hub_info->nasid_bitmask, &sn_hub_info->nasid_shift,
443 &sn_system_size, &sn_sharing_domain_size, &sn_partition_id,
444 &sn_coherency_id, &sn_region_size))
445 BUG();
446 sn_hub_info->as_shift = sn_hub_info->nasid_shift - 2;
447
448 /*
449 * The boot cpu makes this call again after platform initialization is
450 * complete.
451 */
452 if (nodepdaindr[0] == NULL)
453 return;
454
455 cpuid = smp_processor_id();
456 cpuphyid = get_sapicid();
457
458 if (ia64_sn_get_sapic_info(cpuphyid, &nasid, &subnode, &slice))
459 BUG();
460
461 for (i=0; i < MAX_NUMNODES; i++) {
462 if (nodepdaindr[i]) {
463 nodepdaindr[i]->phys_cpuid[cpuid].nasid = nasid;
464 nodepdaindr[i]->phys_cpuid[cpuid].slice = slice;
465 nodepdaindr[i]->phys_cpuid[cpuid].subnode = subnode;
466 }
467 }
468
469 cnode = nasid_to_cnodeid(nasid);
470
471 pda->p_nodepda = nodepdaindr[cnode];
472 pda->led_address =
473 (typeof(pda->led_address)) (LED0 + (slice << LED_CPU_SHIFT));
474 pda->led_state = LED_ALWAYS_SET;
475 pda->hb_count = HZ / 2;
476 pda->hb_state = 0;
477 pda->idle_flag = 0;
478
479 if (cpuid != 0) {
480 memcpy(pda->cnodeid_to_nasid_table,
481 pdacpu(0)->cnodeid_to_nasid_table,
482 sizeof(pda->cnodeid_to_nasid_table));
483 }
484
485 /*
486 * Check for WARs.
487 * Only needs to be done once, on BSP.
488	 * Has to be done after the loop above, because it uses pda->cnodeid_to_nasid_table.
489 * Has to be done before assignment below.
490 */
491 if (!wars_have_been_checked) {
492 sn_check_for_wars();
493 wars_have_been_checked = 1;
494 }
495 sn_hub_info->shub_1_1_found = shub_1_1_found;
496
497 /*
498 * Set up addresses of PIO/MEM write status registers.
499 */
500 {
501 u64 pio1[] = {SH1_PIO_WRITE_STATUS_0, 0, SH1_PIO_WRITE_STATUS_1, 0};
502 u64 pio2[] = {SH2_PIO_WRITE_STATUS_0, SH2_PIO_WRITE_STATUS_1,
503 SH2_PIO_WRITE_STATUS_2, SH2_PIO_WRITE_STATUS_3};
504 u64 *pio;
505 pio = is_shub1() ? pio1 : pio2;
506 pda->pio_write_status_addr = (volatile unsigned long *) LOCAL_MMR_ADDR(pio[slice]);
507 pda->pio_write_status_val = is_shub1() ? SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK : 0;
508 }
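	/*
	 * As the pio1 table above implies, SHUB1 populates only the even
	 * slices: slice 0 maps to SH1_PIO_WRITE_STATUS_0 and slice 2 to
	 * SH1_PIO_WRITE_STATUS_1, while SHUB2 gives each of the four
	 * slices its own SH2_PIO_WRITE_STATUS_n register.
	 */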
509
510 /*
511 * WAR addresses for SHUB 1.x.
512 */
513 if (local_node_data->active_cpu_count++ == 0 && is_shub1()) {
514 int buddy_nasid;
515 buddy_nasid =
516 cnodeid_to_nasid(numa_node_id() ==
517 num_online_nodes() - 1 ? 0 : numa_node_id() + 1);
518 pda->pio_shub_war_cam_addr =
519 (volatile unsigned long *)GLOBAL_MMR_ADDR(nasid,
520 SH1_PI_CAM_CONTROL);
521 }
522}
523
524/*
525 * Scan klconfig for ionodes. Add the nasids to the
526 * physical_node_map and the pda and increment numionodes.
527 */
528
529static void __init scan_for_ionodes(void)
530{
531 int nasid = 0;
532 lboard_t *brd;
533
534 /* Setup ionodes with memory */
535 for (nasid = 0; nasid < MAX_PHYSNODE_ID; nasid += 2) {
536 char *klgraph_header;
537 cnodeid_t cnodeid;
538
539 if (physical_node_map[nasid] == -1)
540 continue;
541
542 cnodeid = -1;
543 klgraph_header = __va(ia64_sn_get_klconfig_addr(nasid));
544 if (!klgraph_header) {
545 if (IS_RUNNING_ON_SIMULATOR())
546 continue;
547 BUG(); /* All nodes must have klconfig tables! */
548 }
549 cnodeid = nasid_to_cnodeid(nasid);
550 root_lboard[cnodeid] = (lboard_t *)
551 NODE_OFFSET_TO_LBOARD((nasid),
552 ((kl_config_hdr_t
553 *) (klgraph_header))->
554 ch_board_info);
555 }
556
557 /* Scan headless/memless IO Nodes. */
558 for (nasid = 0; nasid < MAX_PHYSNODE_ID; nasid += 2) {
559 /* if there's no nasid, don't try to read the klconfig on the node */
560 if (physical_node_map[nasid] == -1)
561 continue;
562 brd = find_lboard_any((lboard_t *)
563 root_lboard[nasid_to_cnodeid(nasid)],
564 KLTYPE_SNIA);
565 if (brd) {
566 brd = KLCF_NEXT_ANY(brd); /* Skip this node's lboard */
567 if (!brd)
568 continue;
569 }
570
571 brd = find_lboard_any(brd, KLTYPE_SNIA);
572
573 while (brd) {
574 pda->cnodeid_to_nasid_table[numionodes] =
575 brd->brd_nasid;
576 physical_node_map[brd->brd_nasid] = numionodes;
577 root_lboard[numionodes] = brd;
578 numionodes++;
579 brd = KLCF_NEXT_ANY(brd);
580 if (!brd)
581 break;
582
583 brd = find_lboard_any(brd, KLTYPE_SNIA);
584 }
585 }
586
587 /* Scan for TIO nodes. */
588 for (nasid = 0; nasid < MAX_PHYSNODE_ID; nasid += 2) {
589 /* if there's no nasid, don't try to read the klconfig on the node */
590 if (physical_node_map[nasid] == -1)
591 continue;
592 brd = find_lboard_any((lboard_t *)
593 root_lboard[nasid_to_cnodeid(nasid)],
594 KLTYPE_TIO);
595 while (brd) {
596 pda->cnodeid_to_nasid_table[numionodes] =
597 brd->brd_nasid;
598 physical_node_map[brd->brd_nasid] = numionodes;
599 root_lboard[numionodes] = brd;
600 numionodes++;
601 brd = KLCF_NEXT_ANY(brd);
602 if (!brd)
603 break;
604
605 brd = find_lboard_any(brd, KLTYPE_TIO);
606 }
607 }
608
609}
610
611int
612nasid_slice_to_cpuid(int nasid, int slice)
613{
614 long cpu;
615
616 for (cpu=0; cpu < NR_CPUS; cpu++)
617 if (nodepda->phys_cpuid[cpu].nasid == nasid && nodepda->phys_cpuid[cpu].slice == slice)
618 return cpu;
619
620 return -1;
621}
diff --git a/arch/ia64/sn/kernel/sn2/Makefile b/arch/ia64/sn/kernel/sn2/Makefile
new file mode 100644
index 000000000000..170bde4549da
--- /dev/null
+++ b/arch/ia64/sn/kernel/sn2/Makefile
@@ -0,0 +1,13 @@
1# arch/ia64/sn/kernel/sn2/Makefile
2#
3# This file is subject to the terms and conditions of the GNU General Public
4# License. See the file "COPYING" in the main directory of this archive
5# for more details.
6#
7# Copyright (C) 1999,2001-2002 Silicon Graphics, Inc. All rights reserved.
8#
9# sn2 specific kernel files
10#
11
12obj-y += cache.o io.o ptc_deadlock.o sn2_smp.o sn_proc_fs.o \
13 prominfo_proc.o timer.o timer_interrupt.o sn_hwperf.o
diff --git a/arch/ia64/sn/kernel/sn2/cache.c b/arch/ia64/sn/kernel/sn2/cache.c
new file mode 100644
index 000000000000..bc3cfa17cd0f
--- /dev/null
+++ b/arch/ia64/sn/kernel/sn2/cache.c
@@ -0,0 +1,34 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2001-2003 Silicon Graphics, Inc. All rights reserved.
7 *
8 */
9#include <linux/module.h>
10#include <asm/pgalloc.h>
11
12/**
13 * sn_flush_all_caches - flush a range of address from all caches (incl. L4)
14 * @flush_addr: identity mapped region 7 address to start flushing
15 * @bytes: number of bytes to flush
16 *
17 * Flush a range of addresses from all caches including L4.
18 * All addresses fully or partially contained within
19 * @flush_addr to @flush_addr + @bytes are flushed
20 * from all caches.
21 */
22void
23sn_flush_all_caches(long flush_addr, long bytes)
24{
25 flush_icache_range(flush_addr, flush_addr+bytes);
26 /*
27 * The last call may have returned before the caches
28 * were actually flushed, so we call it again to make
29 * sure.
30 */
31 flush_icache_range(flush_addr, flush_addr+bytes);
32 mb();
33}
34EXPORT_SYMBOL(sn_flush_all_caches);
diff --git a/arch/ia64/sn/kernel/sn2/io.c b/arch/ia64/sn/kernel/sn2/io.c
new file mode 100644
index 000000000000..a12c0586de38
--- /dev/null
+++ b/arch/ia64/sn/kernel/sn2/io.c
@@ -0,0 +1,101 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2003 Silicon Graphics, Inc. All rights reserved.
7 *
8 * The generic kernel requires function pointers to these routines, so
9 * we wrap the inlines from <asm/sn/io.h> here.
10 */
11
12#include <asm/sn/io.h>
13
14#ifdef CONFIG_IA64_GENERIC
15
16#undef __sn_inb
17#undef __sn_inw
18#undef __sn_inl
19#undef __sn_outb
20#undef __sn_outw
21#undef __sn_outl
22#undef __sn_readb
23#undef __sn_readw
24#undef __sn_readl
25#undef __sn_readq
26#undef __sn_readb_relaxed
27#undef __sn_readw_relaxed
28#undef __sn_readl_relaxed
29#undef __sn_readq_relaxed
30
31unsigned int __sn_inb(unsigned long port)
32{
33 return ___sn_inb(port);
34}
35
36unsigned int __sn_inw(unsigned long port)
37{
38 return ___sn_inw(port);
39}
40
41unsigned int __sn_inl(unsigned long port)
42{
43 return ___sn_inl(port);
44}
45
46void __sn_outb(unsigned char val, unsigned long port)
47{
48 ___sn_outb(val, port);
49}
50
51void __sn_outw(unsigned short val, unsigned long port)
52{
53 ___sn_outw(val, port);
54}
55
56void __sn_outl(unsigned int val, unsigned long port)
57{
58 ___sn_outl(val, port);
59}
60
61unsigned char __sn_readb(void __iomem *addr)
62{
63 return ___sn_readb(addr);
64}
65
66unsigned short __sn_readw(void __iomem *addr)
67{
68 return ___sn_readw(addr);
69}
70
71unsigned int __sn_readl(void __iomem *addr)
72{
73 return ___sn_readl(addr);
74}
75
76unsigned long __sn_readq(void __iomem *addr)
77{
78 return ___sn_readq(addr);
79}
80
81unsigned char __sn_readb_relaxed(void __iomem *addr)
82{
83 return ___sn_readb_relaxed(addr);
84}
85
86unsigned short __sn_readw_relaxed(void __iomem *addr)
87{
88 return ___sn_readw_relaxed(addr);
89}
90
91unsigned int __sn_readl_relaxed(void __iomem *addr)
92{
93 return ___sn_readl_relaxed(addr);
94}
95
96unsigned long __sn_readq_relaxed(void __iomem *addr)
97{
98 return ___sn_readq_relaxed(addr);
99}
100
101#endif
diff --git a/arch/ia64/sn/kernel/sn2/prominfo_proc.c b/arch/ia64/sn/kernel/sn2/prominfo_proc.c
new file mode 100644
index 000000000000..81c63b2f8ae9
--- /dev/null
+++ b/arch/ia64/sn/kernel/sn2/prominfo_proc.c
@@ -0,0 +1,279 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1999,2001-2004 Silicon Graphics, Inc. All Rights Reserved.
7 *
8 * Module to export the system's Firmware Interface Tables, including
9 * PROM revision numbers and banners, in /proc
10 */
11#include <linux/config.h>
12#include <linux/module.h>
13#include <linux/slab.h>
14#include <linux/proc_fs.h>
15#include <linux/nodemask.h>
16#include <asm/system.h>
17#include <asm/io.h>
18#include <asm/sn/sn_sal.h>
19#include <asm/sn/sn_cpuid.h>
20#include <asm/sn/addrs.h>
21
22MODULE_DESCRIPTION("PROM version reporting for /proc");
23MODULE_AUTHOR("Chad Talbott");
24MODULE_LICENSE("GPL");
25
26/* Standard Intel FIT entry types */
27#define FIT_ENTRY_FIT_HEADER 0x00 /* FIT header entry */
28#define FIT_ENTRY_PAL_B 0x01 /* PAL_B entry */
29/* Entries 0x02 through 0x0D reserved by Intel */
30#define FIT_ENTRY_PAL_A_PROC 0x0E /* Processor-specific PAL_A entry */
31#define FIT_ENTRY_PAL_A 0x0F /* PAL_A entry, same as... */
32#define FIT_ENTRY_PAL_A_GEN 0x0F /* ...Generic PAL_A entry */
33#define FIT_ENTRY_UNUSED 0x7F /* Unused (reserved by Intel?) */
34/* OEM-defined entries range from 0x10 to 0x7E. */
35#define FIT_ENTRY_SAL_A 0x10 /* SAL_A entry */
36#define FIT_ENTRY_SAL_B 0x11 /* SAL_B entry */
37#define FIT_ENTRY_SALRUNTIME 0x12 /* SAL runtime entry */
38#define FIT_ENTRY_EFI 0x1F /* EFI entry */
39#define FIT_ENTRY_FPSWA 0x20 /* embedded fpswa entry */
40#define FIT_ENTRY_VMLINUX 0x21 /* embedded vmlinux entry */
41
42#define FIT_MAJOR_SHIFT (32 + 8)
43#define FIT_MAJOR_MASK ((1 << 8) - 1)
44#define FIT_MINOR_SHIFT 32
45#define FIT_MINOR_MASK ((1 << 8) - 1)
46
47#define FIT_MAJOR(q) \
48 ((unsigned) ((q) >> FIT_MAJOR_SHIFT) & FIT_MAJOR_MASK)
49#define FIT_MINOR(q) \
50 ((unsigned) ((q) >> FIT_MINOR_SHIFT) & FIT_MINOR_MASK)
51
52#define FIT_TYPE_SHIFT (32 + 16)
53#define FIT_TYPE_MASK ((1 << 7) - 1)
54
55#define FIT_TYPE(q) \
56 ((unsigned) ((q) >> FIT_TYPE_SHIFT) & FIT_TYPE_MASK)
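/*
 * Worked example (hypothetical FIT word): for q = 0x0010010200000000,
 * FIT_TYPE(q) = (q >> 48) & 0x7f = 0x10 (a SAL_A entry),
 * FIT_MAJOR(q) = (q >> 40) & 0xff = 0x01, and
 * FIT_MINOR(q) = (q >> 32) & 0xff = 0x02, i.e. version 1.02.
 */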
57
58struct fit_type_map_t {
59 unsigned char type;
60 const char *name;
61};
62
63static const struct fit_type_map_t fit_entry_types[] = {
64 {FIT_ENTRY_FIT_HEADER, "FIT Header"},
65 {FIT_ENTRY_PAL_A_GEN, "Generic PAL_A"},
66 {FIT_ENTRY_PAL_A_PROC, "Processor-specific PAL_A"},
67 {FIT_ENTRY_PAL_A, "PAL_A"},
68 {FIT_ENTRY_PAL_B, "PAL_B"},
69 {FIT_ENTRY_SAL_A, "SAL_A"},
70 {FIT_ENTRY_SAL_B, "SAL_B"},
71 {FIT_ENTRY_SALRUNTIME, "SAL runtime"},
72 {FIT_ENTRY_EFI, "EFI"},
73 {FIT_ENTRY_VMLINUX, "Embedded Linux"},
74 {FIT_ENTRY_FPSWA, "Embedded FPSWA"},
75 {FIT_ENTRY_UNUSED, "Unused"},
76 {0xff, "Error"},
77};
78
79static const char *fit_type_name(unsigned char type)
80{
81 struct fit_type_map_t const *mapp;
82
83 for (mapp = fit_entry_types; mapp->type != 0xff; mapp++)
84 if (type == mapp->type)
85 return mapp->name;
86
87 if ((type > FIT_ENTRY_PAL_A) && (type < FIT_ENTRY_UNUSED))
88 return "OEM type";
89 if ((type > FIT_ENTRY_PAL_B) && (type < FIT_ENTRY_PAL_A))
90 return "Reserved";
91
92 return "Unknown type";
93}
94
95static int
96get_fit_entry(unsigned long nasid, int index, unsigned long *fentry,
97 char *banner, int banlen)
98{
99 return ia64_sn_get_fit_compt(nasid, index, fentry, banner, banlen);
100}
101
102
103/*
104 * These two routines display the FIT table for each node.
105 */
106static int dump_fit_entry(char *page, unsigned long *fentry)
107{
108 unsigned type;
109
110 type = FIT_TYPE(fentry[1]);
111 return sprintf(page, "%02x %-25s %x.%02x %016lx %u\n",
112 type,
113 fit_type_name(type),
114 FIT_MAJOR(fentry[1]), FIT_MINOR(fentry[1]),
115 fentry[0],
116 /* mult by sixteen to get size in bytes */
117 (unsigned)(fentry[1] & 0xffffff) * 16);
118}
119
120
121/*
122 * We assume that the fit table will be small enough that we can print
123 * the whole thing into one page. (This is true for our default 16kB
124 * pages -- each entry is about 60 chars wide when printed.) I read
125 * somewhere that the maximum size of the FIT is 128 entries, so we're
126 * OK except for 4kB pages (and no one is going to do that on SN
127 * anyway).
128 */
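/*
 * The arithmetic behind the assumption above: at most 128 entries of
 * roughly 60 characters each comes to about 7680 bytes, which fits a
 * 16kB page with room to spare but would overflow a 4kB page.
 */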
129static int
130dump_fit(char *page, unsigned long nasid)
131{
132 unsigned long fentry[2];
133 int index;
134 char *p;
135
136 p = page;
137 for (index=0;;index++) {
138 BUG_ON(index * 60 > PAGE_SIZE);
139 if (get_fit_entry(nasid, index, fentry, NULL, 0))
140 break;
141 p += dump_fit_entry(p, fentry);
142 }
143
144 return p - page;
145}
146
147static int
148dump_version(char *page, unsigned long nasid)
149{
150 unsigned long fentry[2];
151 char banner[128];
152 int index;
153 int len;
154
155 for (index = 0; ; index++) {
156 if (get_fit_entry(nasid, index, fentry, banner,
157 sizeof(banner)))
158 return 0;
159 if (FIT_TYPE(fentry[1]) == FIT_ENTRY_SAL_A)
160 break;
161 }
162
163 len = sprintf(page, "%x.%02x\n", FIT_MAJOR(fentry[1]),
164 FIT_MINOR(fentry[1]));
165 page += len;
166
167 if (banner[0])
168 len += snprintf(page, PAGE_SIZE-len, "%s\n", banner);
169
170 return len;
171}
172
173/* same as in proc_misc.c */
174static int
175proc_calc_metrics(char *page, char **start, off_t off, int count, int *eof,
176 int len)
177{
178 if (len <= off + count)
179 *eof = 1;
180 *start = page + off;
181 len -= off;
182 if (len > count)
183 len = count;
184 if (len < 0)
185 len = 0;
186 return len;
187}
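/*
 * Worked example (hypothetical values): for a 100-byte dump read with
 * off = 40 and count = 50, len <= off + count is false (100 > 90) so
 * *eof stays clear, *start = page + 40, and the 60 remaining bytes
 * are clipped to the 50 requested.
 */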
188
189static int
190read_version_entry(char *page, char **start, off_t off, int count, int *eof,
191 void *data)
192{
193 int len = 0;
194
195 /* data holds the NASID of the node */
196 len = dump_version(page, (unsigned long)data);
197 len = proc_calc_metrics(page, start, off, count, eof, len);
198 return len;
199}
200
201static int
202read_fit_entry(char *page, char **start, off_t off, int count, int *eof,
203 void *data)
204{
205 int len = 0;
206
207 /* data holds the NASID of the node */
208 len = dump_fit(page, (unsigned long)data);
209 len = proc_calc_metrics(page, start, off, count, eof, len);
210
211 return len;
212}
213
214/* module entry points */
215int __init prominfo_init(void);
216void __exit prominfo_exit(void);
217
218module_init(prominfo_init);
219module_exit(prominfo_exit);
220
221static struct proc_dir_entry **proc_entries;
222static struct proc_dir_entry *sgi_prominfo_entry;
223
224#define NODE_NAME_LEN 11
225
226int __init prominfo_init(void)
227{
228 struct proc_dir_entry **entp;
229 struct proc_dir_entry *p;
230 cnodeid_t cnodeid;
231 unsigned long nasid;
232 char name[NODE_NAME_LEN];
233
234 if (!ia64_platform_is("sn2"))
235 return 0;
236
237 proc_entries = kmalloc(num_online_nodes() * sizeof(struct proc_dir_entry *),
238 GFP_KERNEL);
239
240 sgi_prominfo_entry = proc_mkdir("sgi_prominfo", NULL);
241
242 entp = proc_entries;
243 for_each_online_node(cnodeid) {
244 sprintf(name, "node%d", cnodeid);
245 *entp = proc_mkdir(name, sgi_prominfo_entry);
246 nasid = cnodeid_to_nasid(cnodeid);
247 p = create_proc_read_entry(
248 "fit", 0, *entp, read_fit_entry,
249 (void *)nasid);
250 if (p)
251 p->owner = THIS_MODULE;
252 p = create_proc_read_entry(
253 "version", 0, *entp, read_version_entry,
254 (void *)nasid);
255 if (p)
256 p->owner = THIS_MODULE;
257 entp++;
258 }
259
260 return 0;
261}
262
263void __exit prominfo_exit(void)
264{
265 struct proc_dir_entry **entp;
266 unsigned cnodeid;
267 char name[NODE_NAME_LEN];
268
269 entp = proc_entries;
270 for_each_online_node(cnodeid) {
271 remove_proc_entry("fit", *entp);
272 remove_proc_entry("version", *entp);
273 sprintf(name, "node%d", cnodeid);
274 remove_proc_entry(name, sgi_prominfo_entry);
275 entp++;
276 }
277 remove_proc_entry("sgi_prominfo", NULL);
278 kfree(proc_entries);
279}
diff --git a/arch/ia64/sn/kernel/sn2/ptc_deadlock.S b/arch/ia64/sn/kernel/sn2/ptc_deadlock.S
new file mode 100644
index 000000000000..7947312801ec
--- /dev/null
+++ b/arch/ia64/sn/kernel/sn2/ptc_deadlock.S
@@ -0,0 +1,82 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
7 */
8
9#include <asm/sn/shub_mmr.h>
10
11#define DEADLOCKBIT SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_SHFT
12#define WRITECOUNTMASK SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK
13#define ALIAS_OFFSET (SH1_PIO_WRITE_STATUS_0_ALIAS-SH1_PIO_WRITE_STATUS_0)
14
15
16 .global sn2_ptc_deadlock_recovery_core
17 .proc sn2_ptc_deadlock_recovery_core
18
19sn2_ptc_deadlock_recovery_core:
20 .regstk 6,0,0,0
21
22 ptc0 = in0
23 data0 = in1
24 ptc1 = in2
25 data1 = in3
26 piowc = in4
27 zeroval = in5
28 piowcphy = r30
29 psrsave = r2
30 scr1 = r16
31 scr2 = r17
32 mask = r18
33
34
35 extr.u piowcphy=piowc,0,61;; // Convert piowc to uncached physical address
36 dep piowcphy=-1,piowcphy,63,1
37 movl mask=WRITECOUNTMASK
38
391:
40 add scr2=ALIAS_OFFSET,piowc // Address of WRITE_STATUS alias register
41 mov scr1=7;; // Clear DEADLOCK, WRITE_ERROR, MULTI_WRITE_ERROR
42 st8.rel [scr2]=scr1;;
43
445: ld8.acq scr1=[piowc];; // Wait for PIOs to complete.
45 and scr2=scr1,mask;; // mask of writecount bits
46 cmp.ne p6,p0=zeroval,scr2
47(p6) br.cond.sptk 5b
48
49
50
51 ////////////// BEGIN PHYSICAL MODE ////////////////////
52 mov psrsave=psr // Disable IC (no PMIs)
53 rsm psr.i | psr.dt | psr.ic;;
54 srlz.i;;
55
56 st8.rel [ptc0]=data0 // Write PTC0 & wait for completion.
57
585: ld8.acq scr1=[piowcphy];; // Wait for PIOs to complete.
59 and scr2=scr1,mask;; // mask of writecount bits
60 cmp.ne p6,p0=zeroval,scr2
61(p6) br.cond.sptk 5b;;
62
63 tbit.nz p8,p7=scr1,DEADLOCKBIT;;// Test for DEADLOCK
64(p7) cmp.ne p7,p0=r0,ptc1;; // Test for non-null ptc1
65
66(p7) st8.rel [ptc1]=data1;; // Now write PTC1.
67
685: ld8.acq scr1=[piowcphy];; // Wait for PIOs to complete.
69 and scr2=scr1,mask;; // mask of writecount bits
70 cmp.ne p6,p0=zeroval,scr2
71(p6) br.cond.sptk 5b
72
73 tbit.nz p8,p0=scr1,DEADLOCKBIT;;// Test for DEADLOCK
74
75 mov psr.l=psrsave;; // Reenable IC
76 srlz.i;;
77 ////////////// END PHYSICAL MODE ////////////////////
78
79(p8) br.cond.spnt 1b;; // Repeat if DEADLOCK occurred.
80
81 br.ret.sptk rp
82 .endp sn2_ptc_deadlock_recovery_core
diff --git a/arch/ia64/sn/kernel/sn2/sn2_smp.c b/arch/ia64/sn/kernel/sn2/sn2_smp.c
new file mode 100644
index 000000000000..7af05a7ac743
--- /dev/null
+++ b/arch/ia64/sn/kernel/sn2/sn2_smp.c
@@ -0,0 +1,295 @@
1/*
2 * SN2 Platform specific SMP Support
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
9 */
10
11#include <linux/init.h>
12#include <linux/kernel.h>
13#include <linux/spinlock.h>
14#include <linux/threads.h>
15#include <linux/sched.h>
16#include <linux/smp.h>
17#include <linux/interrupt.h>
18#include <linux/irq.h>
19#include <linux/mmzone.h>
20#include <linux/module.h>
21#include <linux/bitops.h>
22#include <linux/nodemask.h>
23
24#include <asm/processor.h>
25#include <asm/irq.h>
26#include <asm/sal.h>
27#include <asm/system.h>
28#include <asm/delay.h>
29#include <asm/io.h>
30#include <asm/smp.h>
31#include <asm/tlb.h>
32#include <asm/numa.h>
33#include <asm/hw_irq.h>
34#include <asm/current.h>
35#include <asm/sn/sn_cpuid.h>
36#include <asm/sn/sn_sal.h>
37#include <asm/sn/addrs.h>
38#include <asm/sn/shub_mmr.h>
39#include <asm/sn/nodepda.h>
40#include <asm/sn/rw_mmr.h>
41
42void sn2_ptc_deadlock_recovery(volatile unsigned long *, unsigned long data0,
43 volatile unsigned long *, unsigned long data1);
44
45static __cacheline_aligned DEFINE_SPINLOCK(sn2_global_ptc_lock);
46
47static unsigned long sn2_ptc_deadlock_count;
48
49static inline unsigned long wait_piowc(void)
50{
51 volatile unsigned long *piows, zeroval;
52 unsigned long ws;
53
54 piows = pda->pio_write_status_addr;
55 zeroval = pda->pio_write_status_val;
56 do {
57 cpu_relax();
58 } while (((ws = *piows) & SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK) != zeroval);
59 return ws;
60}
61
62void sn_tlb_migrate_finish(struct mm_struct *mm)
63{
64 if (mm == current->mm)
65 flush_tlb_mm(mm);
66}
67
68/**
69 * sn2_global_tlb_purge - globally purge translation cache of virtual address range
70 * @start: start of virtual address range
71 * @end: end of virtual address range
72 * @nbits: specifies number of bytes to purge per instruction (num = 1<<(nbits & 0xfc))
73 *
74 * Purges the translation caches of all processors of the given virtual address
75 * range.
76 *
77 * Note:
78 * - cpu_vm_mask is a bit mask that indicates which cpus have loaded the context.
79 * - cpu_vm_mask is converted into a nodemask of the nodes containing the
80 * cpus in cpu_vm_mask.
81 * - if only one bit is set in cpu_vm_mask & it is the current cpu,
82 * then only the local TLB needs to be flushed. This flushing can be done
83 * using ptc.l. This is the common case & avoids the global spinlock.
84 * - if multiple cpus have loaded the context, then flushing has to be
85 * done with ptc.g/MMRs under protection of the global ptc_lock.
86 */
87
88void
89sn2_global_tlb_purge(unsigned long start, unsigned long end,
90 unsigned long nbits)
91{
92 int i, shub1, cnode, mynasid, cpu, lcpu = 0, nasid, flushed = 0;
93 volatile unsigned long *ptc0, *ptc1;
94 unsigned long flags = 0, data0 = 0, data1 = 0;
95 struct mm_struct *mm = current->active_mm;
96 short nasids[MAX_NUMNODES], nix;
97 nodemask_t nodes_flushed;
98
99 nodes_clear(nodes_flushed);
100 i = 0;
101
102 for_each_cpu_mask(cpu, mm->cpu_vm_mask) {
103 cnode = cpu_to_node(cpu);
104 node_set(cnode, nodes_flushed);
105 lcpu = cpu;
106 i++;
107 }
108
109 preempt_disable();
110
111 if (likely(i == 1 && lcpu == smp_processor_id())) {
112 do {
113 ia64_ptcl(start, nbits << 2);
114 start += (1UL << nbits);
115 } while (start < end);
116 ia64_srlz_i();
117 preempt_enable();
118 return;
119 }
120
121 if (atomic_read(&mm->mm_users) == 1) {
122 flush_tlb_mm(mm);
123 preempt_enable();
124 return;
125 }
126
127 nix = 0;
128 for_each_node_mask(cnode, nodes_flushed)
129 nasids[nix++] = cnodeid_to_nasid(cnode);
130
131 shub1 = is_shub1();
132 if (shub1) {
133 data0 = (1UL << SH1_PTC_0_A_SHFT) |
134 (nbits << SH1_PTC_0_PS_SHFT) |
135 ((ia64_get_rr(start) >> 8) << SH1_PTC_0_RID_SHFT) |
136 (1UL << SH1_PTC_0_START_SHFT);
137 ptc0 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH1_PTC_0);
138 ptc1 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH1_PTC_1);
139 } else {
140 data0 = (1UL << SH2_PTC_A_SHFT) |
141 (nbits << SH2_PTC_PS_SHFT) |
142 (1UL << SH2_PTC_START_SHFT);
143 ptc0 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH2_PTC +
144 ((ia64_get_rr(start) >> 8) << SH2_PTC_RID_SHFT) );
145 ptc1 = NULL;
146 }
147
148
149 mynasid = get_nasid();
150
151 spin_lock_irqsave(&sn2_global_ptc_lock, flags);
152
153 do {
154 if (shub1)
155 data1 = start | (1UL << SH1_PTC_1_START_SHFT);
156 else
157 data0 = (data0 & ~SH2_PTC_ADDR_MASK) | (start & SH2_PTC_ADDR_MASK);
158 for (i = 0; i < nix; i++) {
159 nasid = nasids[i];
160 if (unlikely(nasid == mynasid)) {
161 ia64_ptcga(start, nbits << 2);
162 ia64_srlz_i();
163 } else {
164 ptc0 = CHANGE_NASID(nasid, ptc0);
165 if (ptc1)
166 ptc1 = CHANGE_NASID(nasid, ptc1);
167 pio_atomic_phys_write_mmrs(ptc0, data0, ptc1,
168 data1);
169 flushed = 1;
170 }
171 }
172
173 if (flushed
174 && (wait_piowc() &
175 SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_MASK)) {
176 sn2_ptc_deadlock_recovery(ptc0, data0, ptc1, data1);
177 }
178
179 start += (1UL << nbits);
180
181 } while (start < end);
182
183 spin_unlock_irqrestore(&sn2_global_ptc_lock, flags);
184
185 preempt_enable();
186}
187
188/*
189 * sn2_ptc_deadlock_recovery
190 *
191 * Recover from PTC deadlock conditions. Recovery requires stepping through each
192 * TLB flush transaction. The recovery sequence is somewhat tricky & is
193 * coded in assembly language.
194 */
195void sn2_ptc_deadlock_recovery(volatile unsigned long *ptc0, unsigned long data0,
196 volatile unsigned long *ptc1, unsigned long data1)
197{
198 extern void sn2_ptc_deadlock_recovery_core(volatile unsigned long *, unsigned long,
199 volatile unsigned long *, unsigned long, volatile unsigned long *, unsigned long);
200 int cnode, mycnode, nasid;
201 volatile unsigned long *piows;
202 volatile unsigned long zeroval;
203
204 sn2_ptc_deadlock_count++;
205
206 piows = pda->pio_write_status_addr;
207 zeroval = pda->pio_write_status_val;
208
209 mycnode = numa_node_id();
210
211 for_each_online_node(cnode) {
212 if (is_headless_node(cnode) || cnode == mycnode)
213 continue;
214 nasid = cnodeid_to_nasid(cnode);
215 ptc0 = CHANGE_NASID(nasid, ptc0);
216 if (ptc1)
217 ptc1 = CHANGE_NASID(nasid, ptc1);
218 sn2_ptc_deadlock_recovery_core(ptc0, data0, ptc1, data1, piows, zeroval);
219 }
220}
221
222/**
223 * sn_send_IPI_phys - send an IPI to a Nasid and slice
224 * @nasid: nasid to receive the interrupt (may be outside partition)
225 * @physid: physical cpuid to receive the interrupt.
226 * @vector: command to send
227 * @delivery_mode: delivery mechanism
228 *
229 * Sends an IPI (interprocessor interrupt) to the processor specified by
230 * @physid
231 *
232 * @delivery_mode can be one of the following
233 *
234 * %IA64_IPI_DM_INT - pend an interrupt
235 * %IA64_IPI_DM_PMI - pend a PMI
236 * %IA64_IPI_DM_NMI - pend an NMI
237 * %IA64_IPI_DM_INIT - pend an INIT interrupt
238 */
239void sn_send_IPI_phys(int nasid, long physid, int vector, int delivery_mode)
240{
241 long val;
242 unsigned long flags = 0;
243 volatile long *p;
244
245 p = (long *)GLOBAL_MMR_PHYS_ADDR(nasid, SH_IPI_INT);
246 val = (1UL << SH_IPI_INT_SEND_SHFT) |
247 (physid << SH_IPI_INT_PID_SHFT) |
248 ((long)delivery_mode << SH_IPI_INT_TYPE_SHFT) |
249 ((long)vector << SH_IPI_INT_IDX_SHFT) |
250 (0x000feeUL << SH_IPI_INT_BASE_SHFT);
251
252 mb();
253 if (enable_shub_wars_1_1()) {
254 spin_lock_irqsave(&sn2_global_ptc_lock, flags);
255 }
256 pio_phys_write_mmr(p, val);
257 if (enable_shub_wars_1_1()) {
258 wait_piowc();
259 spin_unlock_irqrestore(&sn2_global_ptc_lock, flags);
260 }
261
262}
263
264EXPORT_SYMBOL(sn_send_IPI_phys);
265
266/**
267 * sn2_send_IPI - send an IPI to a processor
268 * @cpuid: target of the IPI
269 * @vector: command to send
270 * @delivery_mode: delivery mechanism
271 * @redirect: redirect the IPI?
272 *
273 * Sends an IPI (InterProcessor Interrupt) to the processor specified by
274 * @cpuid. @vector specifies the command to send, while @delivery_mode can
275 * be one of the following
276 *
277 * %IA64_IPI_DM_INT - pend an interrupt
278 * %IA64_IPI_DM_PMI - pend a PMI
279 * %IA64_IPI_DM_NMI - pend an NMI
280 * %IA64_IPI_DM_INIT - pend an INIT interrupt
281 */
282void sn2_send_IPI(int cpuid, int vector, int delivery_mode, int redirect)
283{
284 long physid;
285 int nasid;
286
287 physid = cpu_physical_id(cpuid);
288 nasid = cpuid_to_nasid(cpuid);
289
290 /* the following is used only when starting cpus at boot time */
291 if (unlikely(nasid == -1))
292 ia64_sn_get_sapic_info(physid, &nasid, NULL, NULL);
293
294 sn_send_IPI_phys(nasid, physid, vector, delivery_mode);
295}
diff --git a/arch/ia64/sn/kernel/sn2/sn_hwperf.c b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
new file mode 100644
index 000000000000..197356460ee1
--- /dev/null
+++ b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
@@ -0,0 +1,690 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2004-2005 Silicon Graphics, Inc. All rights reserved.
7 *
8 * SGI Altix topology and hardware performance monitoring API.
9 * Mark Goodwin <markgw@sgi.com>.
10 *
11 * Creates /proc/sgi_sn/sn_topology (read-only) to export
12 * info about Altix nodes, routers, CPUs and NumaLink
13 * interconnection/topology.
14 *
15 * Also creates a dynamic misc device named "sn_hwperf"
16 * that supports an ioctl interface to call down into SAL
17 * to discover hw objects, topology and to read/write
18 * memory mapped registers, e.g. for performance monitoring.
19 * The "sn_hwperf" device is registered only after the procfs
20 * file is first opened, i.e. only if/when it's needed.
21 *
22 * This API is used by SGI Performance Co-Pilot and other
23 * tools; see http://oss.sgi.com/projects/pcp
24 */
25
26#include <linux/fs.h>
27#include <linux/slab.h>
28#include <linux/vmalloc.h>
29#include <linux/seq_file.h>
30#include <linux/miscdevice.h>
31#include <linux/cpumask.h>
32#include <linux/smp_lock.h>
33#include <linux/nodemask.h>
34#include <asm/processor.h>
35#include <asm/topology.h>
36#include <asm/smp.h>
37#include <asm/semaphore.h>
38#include <asm/segment.h>
39#include <asm/uaccess.h>
40#include <asm/sal.h>
41#include <asm/sn/io.h>
42#include <asm/sn/sn_sal.h>
43#include <asm/sn/module.h>
44#include <asm/sn/geo.h>
45#include <asm/sn/sn2/sn_hwperf.h>
46
47static void *sn_hwperf_salheap = NULL;
48static int sn_hwperf_obj_cnt = 0;
49static nasid_t sn_hwperf_master_nasid = INVALID_NASID;
50static int sn_hwperf_init(void);
51static DECLARE_MUTEX(sn_hwperf_init_mutex);
52
53static int sn_hwperf_enum_objects(int *nobj, struct sn_hwperf_object_info **ret)
54{
55 int e;
56 u64 sz;
57 struct sn_hwperf_object_info *objbuf = NULL;
58
59 if ((e = sn_hwperf_init()) < 0) {
60 printk("sn_hwperf_init failed: err %d\n", e);
61 goto out;
62 }
63
64 sz = sn_hwperf_obj_cnt * sizeof(struct sn_hwperf_object_info);
65 if ((objbuf = (struct sn_hwperf_object_info *) vmalloc(sz)) == NULL) {
66 printk("sn_hwperf_enum_objects: vmalloc(%d) failed\n", (int)sz);
67 e = -ENOMEM;
68 goto out;
69 }
70
71 e = ia64_sn_hwperf_op(sn_hwperf_master_nasid, SN_HWPERF_ENUM_OBJECTS,
72 0, sz, (u64) objbuf, 0, 0, NULL);
73 if (e != SN_HWPERF_OP_OK) {
74 e = -EINVAL;
75 vfree(objbuf);
76 }
77
78out:
79 *nobj = sn_hwperf_obj_cnt;
80 *ret = objbuf;
81 return e;
82}
83
84static int sn_hwperf_geoid_to_cnode(char *location)
85{
86 int cnode;
87 geoid_t geoid;
88 moduleid_t module_id;
89 char type;
90 int rack, slot, slab;
91 int this_rack, this_slot, this_slab;
92
93 if (sscanf(location, "%03d%c%02d#%d", &rack, &type, &slot, &slab) != 4)
94 return -1;
95
96 for (cnode = 0; cnode < numionodes; cnode++) {
97 geoid = cnodeid_get_geoid(cnode);
98 module_id = geo_module(geoid);
99 this_rack = MODULE_GET_RACK(module_id);
100 this_slot = MODULE_GET_BPOS(module_id);
101 this_slab = geo_slab(geoid);
102 if (rack == this_rack && slot == this_slot && slab == this_slab)
103 break;
104 }
105
106 return cnode < numionodes ? cnode : -1;
107}
108
109static int sn_hwperf_obj_to_cnode(struct sn_hwperf_object_info * obj)
110{
111 if (!obj->sn_hwp_this_part)
112 return -1;
113 return sn_hwperf_geoid_to_cnode(obj->location);
114}
115
116static int sn_hwperf_generic_ordinal(struct sn_hwperf_object_info *obj,
117 struct sn_hwperf_object_info *objs)
118{
119 int ordinal;
120 struct sn_hwperf_object_info *p;
121
122 for (ordinal=0, p=objs; p != obj; p++) {
123 if (SN_HWPERF_FOREIGN(p))
124 continue;
125 if (SN_HWPERF_SAME_OBJTYPE(p, obj))
126 ordinal++;
127 }
128
129 return ordinal;
130}
131
132static const char *slabname_node = "node"; /* SHub asic */
133static const char *slabname_ionode = "ionode"; /* TIO asic */
134static const char *slabname_router = "router"; /* NL3R or NL4R */
135static const char *slabname_other = "other"; /* unknown asic */
136
137static const char *sn_hwperf_get_slabname(struct sn_hwperf_object_info *obj,
138 struct sn_hwperf_object_info *objs, int *ordinal)
139{
140 int isnode;
141 const char *slabname = slabname_other;
142
143 if ((isnode = SN_HWPERF_IS_NODE(obj)) || SN_HWPERF_IS_IONODE(obj)) {
144 slabname = isnode ? slabname_node : slabname_ionode;
145 *ordinal = sn_hwperf_obj_to_cnode(obj);
146 }
147 else {
148 *ordinal = sn_hwperf_generic_ordinal(obj, objs);
149 if (SN_HWPERF_IS_ROUTER(obj))
150 slabname = slabname_router;
151 }
152
153 return slabname;
154}
155
156static int sn_topology_show(struct seq_file *s, void *d)
157{
158 int sz;
159 int pt;
160 int e;
161 int i;
162 int j;
163 const char *slabname;
164 int ordinal;
165 cpumask_t cpumask;
166 char slice;
167 struct cpuinfo_ia64 *c;
168 struct sn_hwperf_port_info *ptdata;
169 struct sn_hwperf_object_info *p;
170 struct sn_hwperf_object_info *obj = d; /* this object */
171 struct sn_hwperf_object_info *objs = s->private; /* all objects */
172
173 if (obj == objs) {
174 seq_printf(s, "# sn_topology version 1\n");
175 seq_printf(s, "# objtype ordinal location partition"
176 " [attribute value [, ...]]\n");
177 }
178
179 if (SN_HWPERF_FOREIGN(obj)) {
180 /* private in another partition: not interesting */
181 return 0;
182 }
183
184 for (i = 0; obj->name[i]; i++) {
185 if (obj->name[i] == ' ')
186 obj->name[i] = '_';
187 }
188
189 slabname = sn_hwperf_get_slabname(obj, objs, &ordinal);
190 seq_printf(s, "%s %d %s %s asic %s", slabname, ordinal, obj->location,
191 obj->sn_hwp_this_part ? "local" : "shared", obj->name);
192
193 if (!SN_HWPERF_IS_NODE(obj) && !SN_HWPERF_IS_IONODE(obj))
194 seq_putc(s, '\n');
195 else {
196 seq_printf(s, ", nasid 0x%x", cnodeid_to_nasid(ordinal));
197 for (i=0; i < numionodes; i++) {
198 seq_printf(s, i ? ":%d" : ", dist %d",
199 node_distance(ordinal, i));
200 }
201 seq_putc(s, '\n');
202
203 /*
204 * CPUs on this node, if any
205 */
206 cpumask = node_to_cpumask(ordinal);
207 for_each_online_cpu(i) {
208 if (cpu_isset(i, cpumask)) {
209 slice = 'a' + cpuid_to_slice(i);
210 c = cpu_data(i);
211 seq_printf(s, "cpu %d %s%c local"
212 " freq %luMHz, arch ia64",
213 i, obj->location, slice,
214 c->proc_freq / 1000000);
215 for_each_online_cpu(j) {
216 seq_printf(s, j ? ":%d" : ", dist %d",
217 node_distance(
218 cpuid_to_cnodeid(i),
219 cpuid_to_cnodeid(j)));
220 }
221 seq_putc(s, '\n');
222 }
223 }
224 }
225
226 if (obj->ports) {
227 /*
228 * numalink ports
229 */
230 sz = obj->ports * sizeof(struct sn_hwperf_port_info);
231 if ((ptdata = vmalloc(sz)) == NULL)
232 return -ENOMEM;
233 e = ia64_sn_hwperf_op(sn_hwperf_master_nasid,
234 SN_HWPERF_ENUM_PORTS, obj->id, sz,
235 (u64) ptdata, 0, 0, NULL);
236 if (e != SN_HWPERF_OP_OK)
237 return -EINVAL;
238 for (ordinal=0, p=objs; p != obj; p++) {
239 if (!SN_HWPERF_FOREIGN(p))
240 ordinal += p->ports;
241 }
242 for (pt = 0; pt < obj->ports; pt++) {
243 for (p = objs, i = 0; i < sn_hwperf_obj_cnt; i++, p++) {
244 if (ptdata[pt].conn_id == p->id) {
245 break;
246 }
247 }
248 seq_printf(s, "numalink %d %s-%d",
249 ordinal+pt, obj->location, ptdata[pt].port);
250
251 if (i >= sn_hwperf_obj_cnt) {
252 /* no connection */
253 seq_puts(s, " local endpoint disconnected"
254 ", protocol unknown\n");
255 continue;
256 }
257
258 if (obj->sn_hwp_this_part && p->sn_hwp_this_part)
259 /* both ends local to this partition */
260 seq_puts(s, " local");
261 else if (!obj->sn_hwp_this_part && !p->sn_hwp_this_part)
262				/* both ends of the link in foreign partition */
263 seq_puts(s, " foreign");
264 else
265 /* link straddles a partition */
266 seq_puts(s, " shared");
267
268 /*
269			 * Unlikely, but strictly speaking we should query the LLP config
270 * registers because an NL4R can be configured to run
271 * NL3 protocol, even when not talking to an NL3 router.
272 * Ditto for node-node.
273 */
274 seq_printf(s, " endpoint %s-%d, protocol %s\n",
275 p->location, ptdata[pt].conn_port,
276 (SN_HWPERF_IS_NL3ROUTER(obj) ||
277 SN_HWPERF_IS_NL3ROUTER(p)) ? "LLP3" : "LLP4");
278 }
279 vfree(ptdata);
280 }
281
282 return 0;
283}
284
285static void *sn_topology_start(struct seq_file *s, loff_t * pos)
286{
287 struct sn_hwperf_object_info *objs = s->private;
288
289 if (*pos < sn_hwperf_obj_cnt)
290 return (void *)(objs + *pos);
291
292 return NULL;
293}
294
295static void *sn_topology_next(struct seq_file *s, void *v, loff_t * pos)
296{
297 ++*pos;
298 return sn_topology_start(s, pos);
299}
300
301static void sn_topology_stop(struct seq_file *m, void *v)
302{
303 /* nothing to tear down; seq private data is freed in sn_topology_release() */
304}
305
306/*
307 * /proc/sgi_sn/sn_topology, read-only using seq_file
308 */
309static struct seq_operations sn_topology_seq_ops = {
310 .start = sn_topology_start,
311 .next = sn_topology_next,
312 .stop = sn_topology_stop,
313 .show = sn_topology_show
314};
315
316struct sn_hwperf_op_info {
317 u64 op;
318 struct sn_hwperf_ioctl_args *a;
319 void *p;
320 int *v0;
321 int ret;
322};
323
324static void sn_hwperf_call_sal(void *info)
325{
326 struct sn_hwperf_op_info *op_info = info;
327 int r;
328
329 r = ia64_sn_hwperf_op(sn_hwperf_master_nasid, op_info->op,
330 op_info->a->arg, op_info->a->sz,
331 (u64) op_info->p, 0, 0, op_info->v0);
332 op_info->ret = r;
333}
334
335static int sn_hwperf_op_cpu(struct sn_hwperf_op_info *op_info)
336{
337 u32 cpu;
338 u32 use_ipi;
339 int r = 0;
340 cpumask_t save_allowed;
341
342 cpu = (op_info->a->arg & SN_HWPERF_ARG_CPU_MASK) >> 32;
343 use_ipi = op_info->a->arg & SN_HWPERF_ARG_USE_IPI_MASK;
344 op_info->a->arg &= SN_HWPERF_ARG_OBJID_MASK;
345
346 if (cpu != SN_HWPERF_ARG_ANY_CPU) {
347 if (cpu >= NR_CPUS || !cpu_online(cpu)) { /* cpu ids may be sparse */
348 r = -EINVAL;
349 goto out;
350 }
351 }
352
353 if (cpu == SN_HWPERF_ARG_ANY_CPU || cpu == get_cpu()) {
354 /* don't care, or already on correct cpu */
355 sn_hwperf_call_sal(op_info);
356 }
357 else {
358 if (use_ipi) {
359 /* use an interprocessor interrupt to call SAL */
360 smp_call_function_single(cpu, sn_hwperf_call_sal,
361 op_info, 1, 1);
362 }
363 else {
364 /* migrate the task before calling SAL */
365 save_allowed = current->cpus_allowed;
366 set_cpus_allowed(current, cpumask_of_cpu(cpu));
367 sn_hwperf_call_sal(op_info);
368 set_cpus_allowed(current, save_allowed);
369 }
370 }
371 r = op_info->ret;
372
373out:
374 return r;
375}
376
377/* map SAL hwperf error code to system error code */
378static int sn_hwperf_map_err(int hwperf_err)
379{
380 int e;
381
382 switch(hwperf_err) {
383 case SN_HWPERF_OP_OK:
384 e = 0;
385 break;
386
387 case SN_HWPERF_OP_NOMEM:
388 e = -ENOMEM;
389 break;
390
391 case SN_HWPERF_OP_NO_PERM:
392 e = -EPERM;
393 break;
394
395 case SN_HWPERF_OP_IO_ERROR:
396 e = -EIO;
397 break;
398
399 case SN_HWPERF_OP_BUSY:
400 case SN_HWPERF_OP_RECONFIGURE:
401 e = -EAGAIN;
402 break;
403
404 case SN_HWPERF_OP_INVAL:
405 default:
406 e = -EINVAL;
407 break;
408 }
409
410 return e;
411}
412
413/*
414 * ioctl for "sn_hwperf" misc device
415 */
416static int
417sn_hwperf_ioctl(struct inode *in, struct file *fp, u32 op, u64 arg)
418{
419 struct sn_hwperf_ioctl_args a;
420 struct cpuinfo_ia64 *cdata;
421 struct sn_hwperf_object_info *objs;
422 struct sn_hwperf_object_info *cpuobj;
423 struct sn_hwperf_op_info op_info;
424 void *p = NULL;
425 int nobj;
426 char slice;
427 int node;
428 int r;
429 int v0;
430 int i;
431 int j;
432
433 unlock_kernel();
434
435 /* only user requests are allowed here; ops below 10 are kernel-internal */
436 if ((op & SN_HWPERF_OP_MASK) < 10) {
437 r = -EINVAL;
438 goto error;
439 }
440 r = copy_from_user(&a, (const void __user *)arg,
441 sizeof(struct sn_hwperf_ioctl_args));
442 if (r != 0) {
443 r = -EFAULT;
444 goto error;
445 }
446
447 /*
448 * Allocate memory to hold a kernel copy of the user buffer. The
449 * buffer contents are either copied in or out (or both) of user
450 * space depending on the flags encoded in the requested operation.
451 */
452 if (a.ptr) {
453 p = vmalloc(a.sz);
454 if (!p) {
455 r = -ENOMEM;
456 goto error;
457 }
458 }
459
460 if (op & SN_HWPERF_OP_MEM_COPYIN) {
461 r = copy_from_user(p, (const void __user *)a.ptr, a.sz);
462 if (r != 0) {
463 r = -EFAULT;
464 goto error;
465 }
466 }
467
468 switch (op) {
469 case SN_HWPERF_GET_CPU_INFO:
470 if (a.sz == sizeof(u64)) {
471 /* special case to get size needed */
472 *(u64 *) p = (u64) num_online_cpus() *
473 sizeof(struct sn_hwperf_object_info);
474 } else
475 if (a.sz < num_online_cpus() * sizeof(struct sn_hwperf_object_info)) {
476 r = -ENOMEM;
477 goto error;
478 } else
479 if ((r = sn_hwperf_enum_objects(&nobj, &objs)) == 0) {
480 memset(p, 0, a.sz);
481 for (i = 0; i < nobj; i++) {
482 node = sn_hwperf_obj_to_cnode(objs + i);
483 for_each_online_cpu(j) {
484 if (node != cpu_to_node(j))
485 continue;
486 cpuobj = (struct sn_hwperf_object_info *) p + j;
487 slice = 'a' + cpuid_to_slice(j);
488 cdata = cpu_data(j);
489 cpuobj->id = j;
490 snprintf(cpuobj->name,
491 sizeof(cpuobj->name),
492 "CPU %luMHz %s",
493 cdata->proc_freq / 1000000,
494 cdata->vendor);
495 snprintf(cpuobj->location,
496 sizeof(cpuobj->location),
497 "%s%c", objs[i].location,
498 slice);
499 }
500 }
501
502 vfree(objs);
503 }
504 break;
505
506 case SN_HWPERF_GET_NODE_NASID:
507 if (a.sz != sizeof(u64) ||
508 (node = a.arg) < 0 || node >= numionodes) {
509 r = -EINVAL;
510 goto error;
511 }
512 *(u64 *)p = (u64)cnodeid_to_nasid(node);
513 break;
514
515 case SN_HWPERF_GET_OBJ_NODE:
516 if (a.sz != sizeof(u64) || a.arg < 0) {
517 r = -EINVAL;
518 goto error;
519 }
520 if ((r = sn_hwperf_enum_objects(&nobj, &objs)) == 0) {
521 if (a.arg >= nobj) {
522 r = -EINVAL;
523 vfree(objs);
524 goto error;
525 }
526 if (objs[(i = a.arg)].id != a.arg) {
527 for (i = 0; i < nobj; i++) {
528 if (objs[i].id == a.arg)
529 break;
530 }
531 }
532 if (i == nobj) {
533 r = -EINVAL;
534 vfree(objs);
535 goto error;
536 }
537 *(u64 *)p = (u64)sn_hwperf_obj_to_cnode(objs + i);
538 vfree(objs);
539 }
540 break;
541
542 case SN_HWPERF_GET_MMRS:
543 case SN_HWPERF_SET_MMRS:
544 case SN_HWPERF_OBJECT_DISTANCE:
545 op_info.p = p;
546 op_info.a = &a;
547 op_info.v0 = &v0;
548 op_info.op = op;
549 r = sn_hwperf_op_cpu(&op_info);
550 if (r) {
551 r = sn_hwperf_map_err(r);
552 goto error;
553 }
554 break;
555
556 default:
557 /* all other ops are a direct SAL call */
558 r = ia64_sn_hwperf_op(sn_hwperf_master_nasid, op,
559 a.arg, a.sz, (u64) p, 0, 0, &v0);
560 if (r) {
561 r = sn_hwperf_map_err(r);
562 goto error;
563 }
564 a.v0 = v0;
565 break;
566 }
567
568 if (op & SN_HWPERF_OP_MEM_COPYOUT) {
569 r = copy_to_user((void __user *)a.ptr, p, a.sz);
570 if (r != 0) {
571 r = -EFAULT;
572 goto error;
573 }
574 }
575
576error:
577 vfree(p);
578
579 lock_kernel();
580 return r;
581}
582
583static struct file_operations sn_hwperf_fops = {
584 .ioctl = sn_hwperf_ioctl,
585};
586
587static struct miscdevice sn_hwperf_dev = {
588 MISC_DYNAMIC_MINOR,
589 "sn_hwperf",
590 &sn_hwperf_fops
591};
592
593static int sn_hwperf_init(void)
594{
595 u64 v;
596 int salr;
597 int e = 0;
598
599 /* single threaded, once-only initialization */
600 down(&sn_hwperf_init_mutex);
601 if (sn_hwperf_salheap) {
602 up(&sn_hwperf_init_mutex);
603 return e;
604 }
605
606 /*
607 * The PROM code needs a fixed reference node. For convenience the
608 * same node as the console I/O is used.
609 */
610 sn_hwperf_master_nasid = (nasid_t) ia64_sn_get_console_nasid();
611
612 /*
613 * Request the needed size and install the PROM scratch area.
614 * The PROM keeps various tracking bits in this memory area.
615 */
616 salr = ia64_sn_hwperf_op(sn_hwperf_master_nasid,
617 (u64) SN_HWPERF_GET_HEAPSIZE, 0,
618 (u64) sizeof(u64), (u64) &v, 0, 0, NULL);
619 if (salr != SN_HWPERF_OP_OK) {
620 e = -EINVAL;
621 goto out;
622 }
623
624 if ((sn_hwperf_salheap = vmalloc(v)) == NULL) {
625 e = -ENOMEM;
626 goto out;
627 }
628 salr = ia64_sn_hwperf_op(sn_hwperf_master_nasid,
629 SN_HWPERF_INSTALL_HEAP, 0, v,
630 (u64) sn_hwperf_salheap, 0, 0, NULL);
631 if (salr != SN_HWPERF_OP_OK) {
632 e = -EINVAL;
633 goto out;
634 }
635
636 salr = ia64_sn_hwperf_op(sn_hwperf_master_nasid,
637 SN_HWPERF_OBJECT_COUNT, 0,
638 sizeof(u64), (u64) &v, 0, 0, NULL);
639 if (salr != SN_HWPERF_OP_OK) {
640 e = -EINVAL;
641 goto out;
642 }
643 sn_hwperf_obj_cnt = (int)v;
644
645out:
646 if (e < 0 && sn_hwperf_salheap) {
647 vfree(sn_hwperf_salheap);
648 sn_hwperf_salheap = NULL;
649 sn_hwperf_obj_cnt = 0;
650 }
651
652 if (!e) {
653 /*
654 * Register a dynamic misc device for ioctl. Platforms
655 * supporting hotplug will create /dev/sn_hwperf; otherwise
656 * the user can look up the minor number in /proc/misc.
657 */
658 if ((e = misc_register(&sn_hwperf_dev)) != 0) {
659 printk(KERN_ERR "sn_hwperf_init: misc register "
660 "for \"sn_hwperf\" failed, err %d\n", e);
661 }
662 }
663
664 up(&sn_hwperf_init_mutex);
665 return e;
666}
667
668int sn_topology_open(struct inode *inode, struct file *file)
669{
670 int e;
671 struct seq_file *seq;
672 struct sn_hwperf_object_info *objbuf;
673 int nobj;
674
675 if ((e = sn_hwperf_enum_objects(&nobj, &objbuf)) == 0) {
676 if ((e = seq_open(file, &sn_topology_seq_ops)) == 0) {
677 seq = file->private_data;
678 seq->private = objbuf;
679 } else vfree(objbuf); } /* don't leak objbuf if seq_open fails */
680
681 return e;
682}
683
684int sn_topology_release(struct inode *inode, struct file *file)
685{
686 struct seq_file *seq = file->private_data;
687
688 vfree(seq->private);
689 return seq_release(inode, file);
690}
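
The ioctl above is reached through the "sn_hwperf" misc device registered in sn_hwperf_init(); for CPU-targeted operations, sn_hwperf_op_cpu() shows that the target cpu id is packed into the upper 32 bits of a.arg, with SN_HWPERF_ARG_USE_IPI_MASK selecting an IPI instead of task migration. A minimal user-space sketch of the call sequence follows. It is hypothetical, not part of this commit: the header path <sn/sn_hwperf.h> is an assumption, as is the presumption that SN_HWPERF_GET_NODE_NASID carries SN_HWPERF_OP_MEM_COPYOUT so the kernel copies the u64 answer back to a.ptr, as the handler above implies.

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sn/sn_hwperf.h>	/* assumed header exporting the args struct and ops */

int main(void)
{
	struct sn_hwperf_ioctl_args a;
	uint64_t nasid = 0;
	/* on systems without hotplug /dev nodes, the minor number
	 * can be looked up in /proc/misc */
	int fd = open("/dev/sn_hwperf", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/sn_hwperf");
		return 1;
	}
	memset(&a, 0, sizeof(a));
	a.arg = 0;			/* compact node id to query */
	a.sz = sizeof(uint64_t);	/* handler insists on sizeof(u64) */
	a.ptr = (uint64_t)&nasid;	/* kernel vmallocs a copy, then copies out */
	if (ioctl(fd, SN_HWPERF_GET_NODE_NASID, &a) < 0)
		perror("SN_HWPERF_GET_NODE_NASID");
	else
		printf("node 0 is nasid 0x%lx\n", (unsigned long)nasid);
	close(fd);
	return 0;
}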
diff --git a/arch/ia64/sn/kernel/sn2/sn_proc_fs.c b/arch/ia64/sn/kernel/sn2/sn_proc_fs.c
new file mode 100644
index 000000000000..6a80fca807b9
--- /dev/null
+++ b/arch/ia64/sn/kernel/sn2/sn_proc_fs.c
@@ -0,0 +1,149 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
7 */
8#include <linux/config.h>
9#include <asm/uaccess.h>
10
11#ifdef CONFIG_PROC_FS
12#include <linux/proc_fs.h>
13#include <linux/seq_file.h>
14#include <asm/sn/sn_sal.h>
15
16static int partition_id_show(struct seq_file *s, void *p)
17{
18 seq_printf(s, "%d\n", sn_local_partid());
19 return 0;
20}
21
22static int partition_id_open(struct inode *inode, struct file *file)
23{
24 return single_open(file, partition_id_show, NULL);
25}
26
27static int system_serial_number_show(struct seq_file *s, void *p)
28{
29 seq_printf(s, "%s\n", sn_system_serial_number());
30 return 0;
31}
32
33static int system_serial_number_open(struct inode *inode, struct file *file)
34{
35 return single_open(file, system_serial_number_show, NULL);
36}
37
38static int licenseID_show(struct seq_file *s, void *p)
39{
40 seq_printf(s, "0x%lx\n", sn_partition_serial_number_val());
41 return 0;
42}
43
44static int licenseID_open(struct inode *inode, struct file *file)
45{
46 return single_open(file, licenseID_show, NULL);
47}
48
49/*
50 * Enable forced interrupt by default.
51 * When set, the sn interrupt handler writes the force interrupt register on
52 * the bridge chip. The hardware will then send an interrupt message if the
53 * interrupt line is active. This mimics a level sensitive interrupt.
54 */
55int sn_force_interrupt_flag = 1;
56
57static int sn_force_interrupt_show(struct seq_file *s, void *p)
58{
59 seq_printf(s, "Force interrupt is %s\n",
60 sn_force_interrupt_flag ? "enabled" : "disabled");
61 return 0;
62}
63
64static ssize_t sn_force_interrupt_write_proc(struct file *file,
65 const char __user *buffer, size_t count, loff_t *data)
66{
67 char val;
68
69 if (copy_from_user(&val, buffer, 1))
70 return -EFAULT;
71
72 sn_force_interrupt_flag = (val == '0') ? 0 : 1;
73 return count;
74}
75
76static int sn_force_interrupt_open(struct inode *inode, struct file *file)
77{
78 return single_open(file, sn_force_interrupt_show, NULL);
79}
80
81static int coherence_id_show(struct seq_file *s, void *p)
82{
83 seq_printf(s, "%d\n", partition_coherence_id());
84
85 return 0;
86}
87
88static int coherence_id_open(struct inode *inode, struct file *file)
89{
90 return single_open(file, coherence_id_show, NULL);
91}
92
93static struct proc_dir_entry *sn_procfs_create_entry(
94 const char *name, struct proc_dir_entry *parent,
95 int (*openfunc)(struct inode *, struct file *),
96 int (*releasefunc)(struct inode *, struct file *))
97{
98 struct proc_dir_entry *e = create_proc_entry(name, 0444, parent);
99
100 if (e) {
101 e->proc_fops = kmalloc(
102 sizeof(struct file_operations), GFP_KERNEL);
103 if (e->proc_fops) {
104 memset(e->proc_fops, 0, sizeof(struct file_operations));
105 e->proc_fops->open = openfunc;
106 e->proc_fops->read = seq_read;
107 e->proc_fops->llseek = seq_lseek;
108 e->proc_fops->release = releasefunc;
109 }
110 }
111
112 return e;
113}
114
115/* /proc/sgi_sn/sn_topology uses seq_file, see sn_hwperf.c */
116extern int sn_topology_open(struct inode *, struct file *);
117extern int sn_topology_release(struct inode *, struct file *);
118
119void register_sn_procfs(void)
120{
121 static struct proc_dir_entry *sgi_proc_dir = NULL;
122 struct proc_dir_entry *e;
123
124 BUG_ON(sgi_proc_dir != NULL);
125 if (!(sgi_proc_dir = proc_mkdir("sgi_sn", NULL)))
126 return;
127
128 sn_procfs_create_entry("partition_id", sgi_proc_dir,
129 partition_id_open, single_release);
130
131 sn_procfs_create_entry("system_serial_number", sgi_proc_dir,
132 system_serial_number_open, single_release);
133
134 sn_procfs_create_entry("licenseID", sgi_proc_dir,
135 licenseID_open, single_release);
136
137 e = sn_procfs_create_entry("sn_force_interrupt", sgi_proc_dir,
138 sn_force_interrupt_open, single_release);
139 if (e)
140 e->proc_fops->write = sn_force_interrupt_write_proc;
141
142 sn_procfs_create_entry("coherence_id", sgi_proc_dir,
143 coherence_id_open, single_release);
144
145 sn_procfs_create_entry("sn_topology", sgi_proc_dir,
146 sn_topology_open, sn_topology_release);
147}
148
149#endif /* CONFIG_PROC_FS */
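
Every read-only file above follows the same recipe: a show function that prints one record, an open that binds it with single_open(), and single_release() at close; sn_procfs_create_entry() then wires those into a freshly allocated file_operations. As a hypothetical illustration (not part of this commit), one more entry would need only:

static int example_show(struct seq_file *s, void *p)
{
	seq_printf(s, "%d\n", 42);	/* placeholder record */
	return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_show, NULL);
}

/* and, inside register_sn_procfs():
 *	sn_procfs_create_entry("example", sgi_proc_dir,
 *			       example_open, single_release);
 */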
diff --git a/arch/ia64/sn/kernel/sn2/timer.c b/arch/ia64/sn/kernel/sn2/timer.c
new file mode 100644
index 000000000000..deb9baf4d473
--- /dev/null
+++ b/arch/ia64/sn/kernel/sn2/timer.c
@@ -0,0 +1,36 @@
1/*
2 * linux/arch/ia64/sn/kernel/sn2/timer.c
3 *
4 * Copyright (C) 2003 Silicon Graphics, Inc.
5 * Copyright (C) 2003 Hewlett-Packard Co
6 * David Mosberger <davidm@hpl.hp.com>: updated for new timer-interpolation infrastructure
7 */
8
9#include <linux/init.h>
10#include <linux/kernel.h>
11#include <linux/sched.h>
12#include <linux/time.h>
13#include <linux/interrupt.h>
14
15#include <asm/hw_irq.h>
16#include <asm/system.h>
17
18#include <asm/sn/leds.h>
19#include <asm/sn/shub_mmr.h>
20#include <asm/sn/clksupport.h>
21
22extern unsigned long sn_rtc_cycles_per_second;
23
24static struct time_interpolator sn2_interpolator = {
25 .drift = -1,
26 .shift = 10,
27 .mask = (1LL << 55) - 1,
28 .source = TIME_SOURCE_MMIO64
29};
30
31void __init sn_timer_init(void)
32{
33 sn2_interpolator.frequency = sn_rtc_cycles_per_second;
34 sn2_interpolator.addr = RTC_COUNTER_ADDR;
35 register_time_interpolator(&sn2_interpolator);
36}
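
Registration hands the generic ia64 timer-interpolation code a free-running MMIO counter (the SN RTC), its frequency, a 55-bit wraparound mask, and a fixed-point shift. Between ticks, elapsed time is derived from the masked cycle delta scaled by a precomputed ns-per-cycle factor. The stand-alone sketch below models that arithmetic; it is an illustration of the scheme, not code lifted from kernel/timer.c, and the 50 MHz figure is only an assumed example rate.

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC	1000000000ULL

/* nanoseconds represented by a masked RTC cycle delta: the scale
 * factor is ns-per-cycle in fixed point, with 'shift' fractional bits */
static uint64_t rtc_delta_to_ns(uint64_t now, uint64_t last,
				uint64_t mask, uint64_t freq,
				unsigned int shift)
{
	uint64_t nsec_per_cyc = (NSEC_PER_SEC << shift) / freq;

	return (((now - last) & mask) * nsec_per_cyc) >> shift;
}

int main(void)
{
	uint64_t mask = (1ULL << 55) - 1;	/* as in sn2_interpolator */

	/* 50,000,000 cycles at an assumed 50 MHz is exactly one second */
	printf("%llu ns\n", (unsigned long long)
	       rtc_delta_to_ns(150000000, 100000000, mask, 50000000, 10));
	return 0;
}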
diff --git a/arch/ia64/sn/kernel/sn2/timer_interrupt.c b/arch/ia64/sn/kernel/sn2/timer_interrupt.c
new file mode 100644
index 000000000000..cde7375390b0
--- /dev/null
+++ b/arch/ia64/sn/kernel/sn2/timer_interrupt.c
@@ -0,0 +1,63 @@
1/*
2 *
3 *
4 * Copyright (c) 2003 Silicon Graphics, Inc. All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of version 2 of the GNU General Public License
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it would be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
13 *
14 * Further, this software is distributed without any warranty that it is
15 * free of the rightful claim of any third person regarding infringement
16 * or the like. Any license provided herein, whether implied or
17 * otherwise, applies only to this software file. Patent licenses, if
18 * any, provided herein do not apply to combinations of this program with
19 * other software, or any other product whatsoever.
20 *
21 * You should have received a copy of the GNU General Public
22 * License along with this program; if not, write the Free Software
23 * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
24 *
25 * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
26 * Mountain View, CA 94043, or:
27 *
28 * http://www.sgi.com
29 *
30 * For further information regarding this notice, see:
31 *
32 * http://oss.sgi.com/projects/GenInfo/NoticeExplan
33 */
34
35#include <linux/interrupt.h>
36#include <asm/sn/pda.h>
37#include <asm/sn/leds.h>
38
39extern void sn_lb_int_war_check(void);
40extern irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs);
41
42#define SN_LB_INT_WAR_INTERVAL 100
43
44void sn_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
45{
46 /* LED blinking */
47 if (!pda->hb_count--) {
48 pda->hb_count = HZ / 2;
49 set_led_bits(pda->hb_state ^=
50 LED_CPU_HEARTBEAT, LED_CPU_HEARTBEAT);
51 }
52
53 if (enable_shub_wars_1_1()) {
54 /* Bugfix code for SHUB 1.1 */
55 if (pda->pio_shub_war_cam_addr)
56 *pda->pio_shub_war_cam_addr = 0x8000000000000010UL;
57 }
58 if (pda->sn_lb_int_war_ticks == 0)
59 sn_lb_int_war_check();
60 pda->sn_lb_int_war_ticks++;
61 if (pda->sn_lb_int_war_ticks >= SN_LB_INT_WAR_INTERVAL)
62 pda->sn_lb_int_war_ticks = 0;
63}
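
Because hb_count is reloaded with HZ/2, the heartbeat branch fires twice per second, and the XOR gives the LED one full on/off blink per second regardless of tick rate. The stand-alone model below replays that cadence; the HZ value is an assumption (ia64 kernels of this era commonly used 1024), and the pda fields are replaced by locals.

#include <stdio.h>

#define HZ 1024				/* assumed tick rate */
#define LED_CPU_HEARTBEAT 0x1		/* stand-in for the real LED bit */

int main(void)
{
	int hb_count = 0, hb_state = 0, t;

	for (t = 0; t < 3 * HZ; t++) {	/* simulate three seconds of ticks */
		if (!hb_count--) {	/* same countdown as sn_timer_interrupt */
			hb_count = HZ / 2;
			hb_state ^= LED_CPU_HEARTBEAT;
			printf("tick %4d: heartbeat led %s\n",
			       t, hb_state ? "on" : "off");
		}
	}
	return 0;
}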