author    Linus Torvalds <torvalds@ppc970.osdl.org>    2005-04-16 18:20:36 -0400
committer Linus Torvalds <torvalds@ppc970.osdl.org>    2005-04-16 18:20:36 -0400
commit    1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree      0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/mips/sgi-ip27
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'arch/mips/sgi-ip27')
-rw-r--r--  arch/mips/sgi-ip27/Makefile         12
-rw-r--r--  arch/mips/sgi-ip27/TODO             23
-rw-r--r--  arch/mips/sgi-ip27/ip27-berr.c      94
-rw-r--r--  arch/mips/sgi-ip27/ip27-console.c   76
-rw-r--r--  arch/mips/sgi-ip27/ip27-dbgio.c     60
-rw-r--r--  arch/mips/sgi-ip27/ip27-hubio.c     186
-rw-r--r--  arch/mips/sgi-ip27/ip27-init.c      252
-rw-r--r--  arch/mips/sgi-ip27/ip27-irq-glue.S  45
-rw-r--r--  arch/mips/sgi-ip27/ip27-irq.c       457
-rw-r--r--  arch/mips/sgi-ip27/ip27-klconfig.c  135
-rw-r--r--  arch/mips/sgi-ip27/ip27-klnuma.c    135
-rw-r--r--  arch/mips/sgi-ip27/ip27-memory.c    586
-rw-r--r--  arch/mips/sgi-ip27/ip27-nmi.c       249
-rw-r--r--  arch/mips/sgi-ip27/ip27-reset.c     81
-rw-r--r--  arch/mips/sgi-ip27/ip27-smp.c       225
-rw-r--r--  arch/mips/sgi-ip27/ip27-timer.c     243
-rw-r--r--  arch/mips/sgi-ip27/ip27-xtalk.c     135
17 files changed, 2994 insertions, 0 deletions
diff --git a/arch/mips/sgi-ip27/Makefile b/arch/mips/sgi-ip27/Makefile
new file mode 100644
index 000000000000..4ba340780c35
--- /dev/null
+++ b/arch/mips/sgi-ip27/Makefile
@@ -0,0 +1,12 @@
1#
2# Makefile for the IP27 specific kernel interface routines under Linux.
3#
4
5obj-y := ip27-berr.o ip27-console.o ip27-irq.o ip27-init.o ip27-irq-glue.o \
6 ip27-klconfig.o ip27-klnuma.o ip27-memory.o ip27-nmi.o ip27-reset.o \
7 ip27-timer.o ip27-hubio.o ip27-xtalk.o
8
9obj-$(CONFIG_KGDB) += ip27-dbgio.o
10obj-$(CONFIG_SMP) += ip27-smp.o
11
12EXTRA_AFLAGS := $(CFLAGS)
diff --git a/arch/mips/sgi-ip27/TODO b/arch/mips/sgi-ip27/TODO
new file mode 100644
index 000000000000..32106131b0d0
--- /dev/null
+++ b/arch/mips/sgi-ip27/TODO
@@ -0,0 +1,23 @@
11. Need to figure out why PCI writes to the IOC3 hang, and if it is okay
2not to write to the IOC3 ever.
32. Need to figure out RRB allocation in bridge_startup().
43. Need to figure out why address swizzling is needed in inw/outw for
5Qlogic scsi controllers.
64. Need to integrate ip27-klconfig.c:find_lboard and
7ip27-init.c:find_lbaord_real. DONE
85. Is it okay to set calias space on all nodes as 0, instead of 8k as
9in irix?
106. Investigate why things do not work without the setup_test() call
11being invoked on all nodes in ip27-memory.c.
127. Too many CLIs in the locore handlers:
13For the low level handlers set up by set_except_vector(),
14__tlb_refill_debug_tramp, __xtlb_refill_debug_tramp and cacheerror,
15investigate whether the code should do CLI, STI or KMODE.
168. Too many do_page_faults invoked - investigate.
179. start_thread must turn off UX64 ... and define tlb_refill_debug.
1810. Need a bad pmd table, bad pte table. __bad_pmd_table/__bad_pagetable
19does not agree with pgd_bad/pmd_bad.
2011. All intrs (ip27_do_irq handlers) are targeted at cpu A on the node.
21This might need to change later. Only the timer intr is set up to be
22received on both Cpu A and B. (ip27_do_irq()/bridge_startup())
2313. Cache flushing (especially the SMP version) has to be investigated.
diff --git a/arch/mips/sgi-ip27/ip27-berr.c b/arch/mips/sgi-ip27/ip27-berr.c
new file mode 100644
index 000000000000..e1829a5d3b19
--- /dev/null
+++ b/arch/mips/sgi-ip27/ip27-berr.c
@@ -0,0 +1,94 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1994, 1995, 1996, 1999, 2000 by Ralf Baechle
7 * Copyright (C) 1999, 2000 by Silicon Graphics
8 * Copyright (C) 2002 Maciej W. Rozycki
9 */
10#include <linux/init.h>
11#include <linux/kernel.h>
12#include <linux/module.h>
13
14#include <asm/module.h>
15#include <asm/sn/addrs.h>
16#include <asm/sn/arch.h>
17#include <asm/sn/sn0/hub.h>
18#include <asm/tlbdebug.h>
19#include <asm/traps.h>
20#include <asm/uaccess.h>
21
22extern void dump_tlb_addr(unsigned long addr);
23extern void dump_tlb_all(void);
24
25static void dump_hub_information(unsigned long errst0, unsigned long errst1)
26{
27 static char *err_type[2][8] = {
28 { NULL, "Uncached Partial Read PRERR", "DERR", "Read Timeout",
29 NULL, NULL, NULL, NULL },
30 { "WERR", "Uncached Partial Write", "PWERR", "Write Timeout",
31 NULL, NULL, NULL, NULL }
32 };
33 int wrb = errst1 & PI_ERR_ST1_WRBRRB_MASK;
34
35 if (!(errst0 & PI_ERR_ST0_VALID_MASK)) {
36 printk("Hub does not contain valid error information\n");
37 return;
38 }
39
40
41 printk("Hub has valid error information:\n");
42 if (errst0 & PI_ERR_ST0_OVERRUN_MASK)
43 printk("Overrun is set. Error stack may contain additional "
44 "information.\n");
45 printk("Hub error address is %08lx\n",
46 (errst0 & PI_ERR_ST0_ADDR_MASK) >> (PI_ERR_ST0_ADDR_SHFT - 3));
47 printk("Incoming message command 0x%lx\n",
48 (errst0 & PI_ERR_ST0_CMD_MASK) >> PI_ERR_ST0_CMD_SHFT);
49 printk("Supplemental field of incoming message is 0x%lx\n",
50 (errst0 & PI_ERR_ST0_SUPPL_MASK) >> PI_ERR_ST0_SUPPL_SHFT);
51 printk("T5 Rn (for RRB only) is 0x%lx\n",
52 (errst0 & PI_ERR_ST0_REQNUM_MASK) >> PI_ERR_ST0_REQNUM_SHFT);
53 printk("Error type is %s\n", err_type[wrb]
54 [(errst0 & PI_ERR_ST0_TYPE_MASK) >> PI_ERR_ST0_TYPE_SHFT]
55 ? : "invalid");
56}
57
58int ip27_be_handler(struct pt_regs *regs, int is_fixup)
59{
60 unsigned long errst0, errst1;
61 int data = regs->cp0_cause & 4;
62 int cpu = LOCAL_HUB_L(PI_CPU_NUM);
63
64 if (is_fixup)
65 return MIPS_BE_FIXUP;
66
67 printk("Slice %c got %cbe at 0x%lx\n", 'A' + cpu, data ? 'd' : 'i',
68 regs->cp0_epc);
69 printk("Hub information:\n");
70 printk("ERR_INT_PEND = 0x%06lx\n", LOCAL_HUB_L(PI_ERR_INT_PEND));
71 errst0 = LOCAL_HUB_L(cpu ? PI_ERR_STATUS0_B : PI_ERR_STATUS0_A);
72 errst1 = LOCAL_HUB_L(cpu ? PI_ERR_STATUS1_B : PI_ERR_STATUS1_A);
73 dump_hub_information(errst0, errst1);
74 show_regs(regs);
75 dump_tlb_all();
76 while(1);
77 force_sig(SIGBUS, current);
78}
79
80void __init ip27_be_init(void)
81{
82 /* XXX Initialize all the Hub & Bridge error handling here. */
83 int cpu = LOCAL_HUB_L(PI_CPU_NUM);
84 int cpuoff = cpu << 8;
85
86 board_be_handler = ip27_be_handler;
87
88 LOCAL_HUB_S(PI_ERR_INT_PEND,
89 cpu ? PI_ERR_CLEAR_ALL_B : PI_ERR_CLEAR_ALL_A);
90 LOCAL_HUB_S(PI_ERR_INT_MASK_A + cpuoff, 0);
91 LOCAL_HUB_S(PI_ERR_STACK_ADDR_A + cpuoff, 0);
92 LOCAL_HUB_S(PI_ERR_STACK_SIZE, 0); /* Disable error stack */
93 LOCAL_HUB_S(PI_SYSAD_ERRCHK_EN, PI_SYSAD_CHECK_ALL);
94}
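The error dump in dump_hub_information() above is a series of mask-and-shift field extractions on the hub's PI_ERR_STATUS0 value. The standalone sketch below shows the same decode pattern outside the kernel; the EXAMPLE_* mask and shift constants are hypothetical stand-ins for illustration, not the real definitions from <asm/sn/sn0/hub.h>.

#include <stdio.h>

#define EXAMPLE_VALID_MASK 0x80000000UL	/* hypothetical "valid" bit     */
#define EXAMPLE_CMD_MASK   0x0000ff00UL	/* hypothetical "command" field */
#define EXAMPLE_CMD_SHFT   8

static unsigned long get_field(unsigned long reg, unsigned long mask, int shift)
{
	return (reg & mask) >> shift;		/* isolate, then right-justify */
}

int main(void)
{
	unsigned long errst0 = 0x80004a00UL;	/* example register snapshot */

	if (!(errst0 & EXAMPLE_VALID_MASK)) {
		printf("no valid error information\n");
		return 0;
	}
	printf("command field = 0x%lx\n",
	       get_field(errst0, EXAMPLE_CMD_MASK, EXAMPLE_CMD_SHFT));
	return 0;
}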
diff --git a/arch/mips/sgi-ip27/ip27-console.c b/arch/mips/sgi-ip27/ip27-console.c
new file mode 100644
index 000000000000..d97f5b5ef844
--- /dev/null
+++ b/arch/mips/sgi-ip27/ip27-console.c
@@ -0,0 +1,76 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2001, 2002 Ralf Baechle
7 */
8#include <linux/init.h>
9#include <linux/console.h>
10#include <linux/kdev_t.h>
11#include <linux/major.h>
12#include <linux/termios.h>
13#include <linux/sched.h>
14#include <linux/tty.h>
15
16#include <asm/page.h>
17#include <asm/semaphore.h>
18#include <asm/sn/addrs.h>
19#include <asm/sn/sn0/hub.h>
20#include <asm/sn/klconfig.h>
21#include <asm/sn/ioc3.h>
22#include <asm/sn/sn_private.h>
23
24#include <linux/serial.h>
25#include <linux/serial_core.h>
26
27#define IOC3_CLK (22000000 / 3)
28#define IOC3_FLAGS (0)
29
30static inline struct ioc3_uartregs *console_uart(void)
31{
32 struct ioc3 *ioc3;
33
34 ioc3 = (struct ioc3 *)KL_CONFIG_CH_CONS_INFO(get_nasid())->memory_base;
35
36 return &ioc3->sregs.uarta;
37}
38
39void prom_putchar(char c)
40{
41 struct ioc3_uartregs *uart = console_uart();
42
43 while ((uart->iu_lsr & 0x20) == 0);
44 uart->iu_thr = c;
45}
46
47char __init prom_getchar(void)
48{
49 return 0;
50}
51
52static void inline ioc3_console_probe(void)
53{
54 struct uart_port up;
55
56 /*
57 * Register to interrupt zero because we share the interrupt with
58 * the serial driver which we don't properly support yet.
59 */
60 memset(&up, 0, sizeof(up));
61 up.membase = (unsigned char *) console_uart();
62 up.irq = 0;
63 up.uartclk = IOC3_CLK;
64 up.regshift = 0;
65 up.iotype = UPIO_MEM;
66 up.flags = IOC3_FLAGS;
67 up.line = 0;
68
69 if (early_serial_setup(&up))
70 printk(KERN_ERR "Early serial init of port 0 failed\n");
71}
72
73__init void ip27_setup_console(void)
74{
75 ioc3_console_probe();
76}
diff --git a/arch/mips/sgi-ip27/ip27-dbgio.c b/arch/mips/sgi-ip27/ip27-dbgio.c
new file mode 100644
index 000000000000..08fd88b36f80
--- /dev/null
+++ b/arch/mips/sgi-ip27/ip27-dbgio.c
@@ -0,0 +1,60 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License as published by the
4 * Free Software Foundation; either version 2 of the License, or (at your
5 * option) any later version.
6 *
7 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
8 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
9 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
10 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
11 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
12 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
13 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
14 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
15 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
16 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
17 *
18 * You should have received a copy of the GNU General Public License along
19 * with this program; if not, write to the Free Software Foundation, Inc.,
20 * 675 Mass Ave, Cambridge, MA 02139, USA.
21 *
22 * Copyright 2004 Ralf Baechle <ralf@linux-mips.org>
23 */
24#include <asm/sn/addrs.h>
25#include <asm/sn/sn0/hub.h>
26#include <asm/sn/klconfig.h>
27#include <asm/sn/ioc3.h>
28#include <asm/sn/sn_private.h>
29
30#include <linux/serial.h>
31#include <linux/serial_core.h>
32#include <linux/serial_reg.h>
33
34#define IOC3_CLK (22000000 / 3)
35#define IOC3_FLAGS (0)
36
37static inline struct ioc3_uartregs *console_uart(void)
38{
39 struct ioc3 *ioc3;
40
41 ioc3 = (struct ioc3 *)KL_CONFIG_CH_CONS_INFO(get_nasid())->memory_base;
42
43 return &ioc3->sregs.uarta;
44}
45
46unsigned char getDebugChar(void)
47{
48 struct ioc3_uartregs *uart = console_uart();
49
50 while ((uart->iu_lsr & UART_LSR_DR) == 0);
51 return uart->iu_rbr;
52}
53
54void putDebugChar(unsigned char c)
55{
56 struct ioc3_uartregs *uart = console_uart();
57
58 while ((uart->iu_lsr & UART_LSR_THRE) == 0);
59 uart->iu_thr = c;
60}
diff --git a/arch/mips/sgi-ip27/ip27-hubio.c b/arch/mips/sgi-ip27/ip27-hubio.c
new file mode 100644
index 000000000000..524b371f9397
--- /dev/null
+++ b/arch/mips/sgi-ip27/ip27-hubio.c
@@ -0,0 +1,186 @@
1/*
2 * Copyright (C) 1992-1997, 2000-2003 Silicon Graphics, Inc.
3 * Copyright (C) 2004 Christoph Hellwig.
4 * Released under GPL v2.
5 *
6 * Support functions for the HUB ASIC - mostly PIO mapping related.
7 */
8
9#include <linux/bitops.h>
10#include <linux/string.h>
11#include <linux/mmzone.h>
12#include <asm/sn/addrs.h>
13#include <asm/sn/arch.h>
14#include <asm/sn/hub.h>
15
16
17static int force_fire_and_forget = 1;
18
19/**
20 * hub_pio_map - establish a HUB PIO mapping
21 *
22 * @cnode: compact node ID of the hub to perform PIO mapping on
23 * @widget: widget ID to perform PIO mapping for
24 * @xtalk_addr: xtalk_address that needs to be mapped
25 * @size: size of the PIO mapping
26 *
27 **/
28unsigned long hub_pio_map(cnodeid_t cnode, xwidgetnum_t widget,
29 unsigned long xtalk_addr, size_t size)
30{
31 nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode);
32 volatile hubreg_t junk;
33 unsigned i;
34
35 /* use small-window mapping if possible */
36 if ((xtalk_addr % SWIN_SIZE) + size <= SWIN_SIZE)
37 return NODE_SWIN_BASE(nasid, widget) + (xtalk_addr % SWIN_SIZE);
38
39 if ((xtalk_addr % BWIN_SIZE) + size > BWIN_SIZE) {
40 printk(KERN_WARNING "PIO mapping at hub %d widget %d addr 0x%lx"
41 " too big (%ld)\n",
42 nasid, widget, xtalk_addr, size);
43 return 0;
44 }
45
46 xtalk_addr &= ~(BWIN_SIZE-1);
47 for (i = 0; i < HUB_NUM_BIG_WINDOW; i++) {
48 if (test_and_set_bit(i, hub_data(cnode)->h_bigwin_used))
49 continue;
50
51 /*
52 * The code below does a PIO write to setup an ITTE entry.
53 *
54 * We need to prevent other CPUs from seeing our updated
55 * memory shadow of the ITTE (in the piomap) until the ITTE
56 * entry is actually set up; otherwise, another CPU might
57 * attempt a PIO prematurely.
58 *
59 * Also, the only way we can know that an entry has been
60 * received by the hub and can be used by future PIO reads/
61 * writes is by reading back the ITTE entry after writing it.
62 *
63 * For these two reasons, we PIO read back the ITTE entry
64 * after we write it.
65 */
66 IIO_ITTE_PUT(nasid, i, HUB_PIO_MAP_TO_MEM, widget, xtalk_addr);
67 junk = HUB_L(IIO_ITTE_GET(nasid, i));
68
69 return NODE_BWIN_BASE(nasid, widget) + (xtalk_addr % BWIN_SIZE);
70 }
71
72 printk(KERN_WARNING "unable to establish PIO mapping at"
73 " hub %d widget %d addr 0x%lx\n",
74 nasid, widget, xtalk_addr);
75 return 0;
76}
77
78
79/*
80 * hub_setup_prb(nasid, prbnum, credits, conveyor)
81 *
82 * Put a PRB into fire-and-forget mode if conveyor isn't set. Otherwise,
83 * put it into conveyor belt mode with the specified number of credits.
84 */
85static void hub_setup_prb(nasid_t nasid, int prbnum, int credits)
86{
87 iprb_t prb;
88 int prb_offset;
89
90 /*
91 * Get the current register value.
92 */
93 prb_offset = IIO_IOPRB(prbnum);
94 prb.iprb_regval = REMOTE_HUB_L(nasid, prb_offset);
95
96 /*
97 * Clear out some fields.
98 */
99 prb.iprb_ovflow = 1;
100 prb.iprb_bnakctr = 0;
101 prb.iprb_anakctr = 0;
102
103 /*
104 * Enable or disable fire-and-forget mode.
105 */
106 prb.iprb_ff = force_fire_and_forget ? 1 : 0;
107
108 /*
109 * Set the appropriate number of PIO credits for the widget.
110 */
111 prb.iprb_xtalkctr = credits;
112
113 /*
114 * Store the new value to the register.
115 */
116 REMOTE_HUB_S(nasid, prb_offset, prb.iprb_regval);
117}
118
119/**
120 * hub_set_piomode - set pio mode for a given hub
121 *
122 * @nasid: physical node ID for the hub in question
123 *
124 * Put the hub into either "PIO conveyor belt" mode or "fire-and-forget" mode.
125 * To do this, we have to make absolutely sure that no PIOs are in progress
126 * so we turn off access to all widgets for the duration of the function.
127 *
128 * XXX - This code should really check what kind of widget we're talking
129 * to. Bridges can only handle three requests, but XG will do more.
130 * How many can crossbow handle to widget 0? We're assuming 1.
131 *
132 * XXX - There is a bug in the crossbow that link reset PIOs do not
133 * return write responses. The easiest solution to this problem is to
134 * leave widget 0 (xbow) in fire-and-forget mode at all times. This
135 * only affects pio's to xbow registers, which should be rare.
136 **/
137static void hub_set_piomode(nasid_t nasid)
138{
139 hubreg_t ii_iowa;
140 hubii_wcr_t ii_wcr;
141 unsigned i;
142
143 ii_iowa = REMOTE_HUB_L(nasid, IIO_OUTWIDGET_ACCESS);
144 REMOTE_HUB_S(nasid, IIO_OUTWIDGET_ACCESS, 0);
145
146 ii_wcr.wcr_reg_value = REMOTE_HUB_L(nasid, IIO_WCR);
147
148 if (ii_wcr.iwcr_dir_con) {
149 /*
150 * Assume a bridge here.
151 */
152 hub_setup_prb(nasid, 0, 3);
153 } else {
154 /*
155 * Assume a crossbow here.
156 */
157 hub_setup_prb(nasid, 0, 1);
158 }
159
160 /*
161 * XXX - Here's where we should take the widget type into
162 * account when assigning credits.
163 */
164 for (i = HUB_WIDGET_ID_MIN; i <= HUB_WIDGET_ID_MAX; i++)
165 hub_setup_prb(nasid, i, 3);
166
167 REMOTE_HUB_S(nasid, IIO_OUTWIDGET_ACCESS, ii_iowa);
168}
169
170/*
171 * hub_pio_init - PIO-related hub initialization
172 *
173 * @cnode: compact node ID of the hub being initialized
174 */
175void hub_pio_init(cnodeid_t cnode)
176{
177 nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode);
178 unsigned i;
179
180 /* initialize big window piomaps for this hub */
181 bitmap_zero(hub_data(cnode)->h_bigwin_used, HUB_NUM_BIG_WINDOW);
182 for (i = 0; i < HUB_NUM_BIG_WINDOW; i++)
183 IIO_ITTE_DISABLE(nasid, i);
184
185 hub_set_piomode(nasid);
186}
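hub_pio_map() above chooses between a small and a big window purely by arithmetic: a request fits in a window exactly when its offset within that window plus its length does not spill past the window size. Below is a minimal standalone sketch of that check with made-up window sizes; the real SWIN_SIZE/BWIN_SIZE values come from the SN0 address-map headers.

#include <stdio.h>
#include <stddef.h>

#define EXAMPLE_SWIN_SIZE 0x100000UL	/* hypothetical  1 MB small window */
#define EXAMPLE_BWIN_SIZE 0x1000000UL	/* hypothetical 16 MB big window   */

static int fits(unsigned long xtalk_addr, size_t size, unsigned long win_size)
{
	return (xtalk_addr % win_size) + size <= win_size;
}

int main(void)
{
	unsigned long addr = 0xff000UL;		/* 4 KB below a 1 MB boundary */

	printf("2 KB in small window: %d\n", fits(addr, 0x800, EXAMPLE_SWIN_SIZE));
	printf("8 KB in small window: %d\n", fits(addr, 0x2000, EXAMPLE_SWIN_SIZE));
	printf("8 KB in big window:   %d\n", fits(addr, 0x2000, EXAMPLE_BWIN_SIZE));
	return 0;
}

With these example numbers the 2 KB request stays inside one small window, while the 8 KB request crosses the 1 MB boundary and has to fall back to a big window, mirroring the two branches in hub_pio_map().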
diff --git a/arch/mips/sgi-ip27/ip27-init.c b/arch/mips/sgi-ip27/ip27-init.c
new file mode 100644
index 000000000000..6dcee5c46c74
--- /dev/null
+++ b/arch/mips/sgi-ip27/ip27-init.c
@@ -0,0 +1,252 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General
3 * Public License. See the file "COPYING" in the main directory of this
4 * archive for more details.
5 *
6 * Copyright (C) 2000 - 2001 by Kanoj Sarcar (kanoj@sgi.com)
7 * Copyright (C) 2000 - 2001 by Silicon Graphics, Inc.
8 */
9#include <linux/config.h>
10#include <linux/kernel.h>
11#include <linux/init.h>
12#include <linux/sched.h>
13#include <linux/mm.h>
14#include <linux/module.h>
15#include <linux/cpumask.h>
16#include <asm/cpu.h>
17#include <asm/io.h>
18#include <asm/pgtable.h>
19#include <asm/time.h>
20#include <asm/sn/types.h>
21#include <asm/sn/sn0/addrs.h>
22#include <asm/sn/sn0/hubni.h>
23#include <asm/sn/sn0/hubio.h>
24#include <asm/sn/klconfig.h>
25#include <asm/sn/ioc3.h>
26#include <asm/mipsregs.h>
27#include <asm/sn/gda.h>
28#include <asm/sn/hub.h>
29#include <asm/sn/intr.h>
30#include <asm/current.h>
31#include <asm/smp.h>
32#include <asm/processor.h>
33#include <asm/mmu_context.h>
34#include <asm/thread_info.h>
35#include <asm/sn/launch.h>
36#include <asm/sn/sn_private.h>
37#include <asm/sn/sn0/ip27.h>
38#include <asm/sn/mapped_kernel.h>
39
40#define CPU_NONE (cpuid_t)-1
41
42static DECLARE_BITMAP(hub_init_mask, MAX_COMPACT_NODES);
43nasid_t master_nasid = INVALID_NASID;
44
45cnodeid_t nasid_to_compact_node[MAX_NASIDS];
46nasid_t compact_to_nasid_node[MAX_COMPACT_NODES];
47cnodeid_t cpuid_to_compact_node[MAXCPUS];
48
49EXPORT_SYMBOL(nasid_to_compact_node);
50
51extern void pcibr_setup(cnodeid_t);
52
53extern void xtalk_probe_node(cnodeid_t nid);
54
55static void __init per_hub_init(cnodeid_t cnode)
56{
57 struct hub_data *hub = hub_data(cnode);
58 nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode);
59
60 cpu_set(smp_processor_id(), hub->h_cpus);
61
62 if (test_and_set_bit(cnode, hub_init_mask))
63 return;
64
65 /*
66 * Set CRB timeout at 5ms, (< PI timeout of 10ms)
67 */
68 REMOTE_HUB_S(nasid, IIO_ICTP, 0x800);
69 REMOTE_HUB_S(nasid, IIO_ICTO, 0xff);
70
71 hub_rtc_init(cnode);
72 xtalk_probe_node(cnode);
73
74#ifdef CONFIG_REPLICATE_EXHANDLERS
75 /*
76 * If this is not a headless node initialization,
77 * copy over the caliased exception handlers.
78 */
79 if (get_compact_nodeid() == cnode) {
80 extern char except_vec2_generic, except_vec3_generic;
81 extern void build_tlb_refill_handler(void);
82
83 memcpy((void *)(CKSEG0 + 0x100), &except_vec2_generic, 0x80);
84 memcpy((void *)(CKSEG0 + 0x180), &except_vec3_generic, 0x80);
85 build_tlb_refill_handler();
86 memcpy((void *)(CKSEG0 + 0x100), (void *) CKSEG0, 0x80);
87 memcpy((void *)(CKSEG0 + 0x180), &except_vec3_generic, 0x100);
88 __flush_cache_all();
89 }
90#endif
91}
92
93void __init per_cpu_init(void)
94{
95 int cpu = smp_processor_id();
96 int slice = LOCAL_HUB_L(PI_CPU_NUM);
97 cnodeid_t cnode = get_compact_nodeid();
98 struct hub_data *hub = hub_data(cnode);
99 struct slice_data *si = hub->slice + slice;
100 int i;
101
102 if (test_and_set_bit(slice, &hub->slice_map))
103 return;
104
105 clear_c0_status(ST0_IM);
106
107 for (i = 0; i < LEVELS_PER_SLICE; i++)
108 si->level_to_irq[i] = -1;
109
110 /*
111 * Some interrupts are reserved by hardware or by software convention.
112 * Mark these as reserved right away so they won't be used accidentally
113 * later.
114 */
115 for (i = 0; i <= BASE_PCI_IRQ; i++) {
116 __set_bit(i, si->irq_alloc_mask);
117 LOCAL_HUB_S(PI_INT_PEND_MOD, i);
118 }
119
120 __set_bit(IP_PEND0_6_63, si->irq_alloc_mask);
121 LOCAL_HUB_S(PI_INT_PEND_MOD, IP_PEND0_6_63);
122
123 for (i = NI_BRDCAST_ERR_A; i <= MSC_PANIC_INTR; i++) {
124 __set_bit(i, si->irq_alloc_mask + 1);
125 LOCAL_HUB_S(PI_INT_PEND_MOD, i);
126 }
127
128 LOCAL_HUB_L(PI_INT_PEND0);
129
130 /*
131 * We use this so we can find the local hub's data as fast as
132 * possible.
133 */
134 cpu_data[cpu].data = si;
135
136 cpu_time_init();
137 install_ipi();
138
139 /* Install our NMI handler if symmon hasn't installed one. */
140 install_cpu_nmi_handler(cputoslice(cpu));
141
142 set_c0_status(SRB_DEV0 | SRB_DEV1);
143
144 per_hub_init(cnode);
145}
146
147/*
148 * get_nasid() returns the physical node id number of the caller.
149 */
150nasid_t
151get_nasid(void)
152{
153 return (nasid_t)((LOCAL_HUB_L(NI_STATUS_REV_ID) & NSRI_NODEID_MASK)
154 >> NSRI_NODEID_SHFT);
155}
156
157/*
158 * Map the physical node id to a virtual node id (virtual node ids are contiguous).
159 */
160cnodeid_t get_compact_nodeid(void)
161{
162 return NASID_TO_COMPACT_NODEID(get_nasid());
163}
164
165/* Extracted from the IOC3 meta driver. FIXME. */
166static inline void ioc3_sio_init(void)
167{
168 struct ioc3 *ioc3;
169 nasid_t nid;
170 long loops;
171
172 nid = get_nasid();
173 ioc3 = (struct ioc3 *) KL_CONFIG_CH_CONS_INFO(nid)->memory_base;
174
175 ioc3->sscr_a = 0; /* PIO mode for uarta. */
176 ioc3->sscr_b = 0; /* PIO mode for uartb. */
177 ioc3->sio_iec = ~0;
178 ioc3->sio_ies = (SIO_IR_SA_INT | SIO_IR_SB_INT);
179
180 loops=1000000; while(loops--);
181 ioc3->sregs.uarta.iu_fcr = 0;
182 ioc3->sregs.uartb.iu_fcr = 0;
183 loops=1000000; while(loops--);
184}
185
186static inline void ioc3_eth_init(void)
187{
188 struct ioc3 *ioc3;
189 nasid_t nid;
190
191 nid = get_nasid();
192 ioc3 = (struct ioc3 *) KL_CONFIG_CH_CONS_INFO(nid)->memory_base;
193
194 ioc3->eier = 0;
195}
196
197extern void ip27_setup_console(void);
198extern void ip27_time_init(void);
199extern void ip27_reboot_setup(void);
200
201static int __init ip27_setup(void)
202{
203 hubreg_t p, e, n_mode;
204 nasid_t nid;
205
206 ip27_setup_console();
207 ip27_reboot_setup();
208
209 /*
210 * hub_rtc init and cpu clock intr enabled for later calibrate_delay.
211 */
212 nid = get_nasid();
213 printk("IP27: Running on node %d.\n", nid);
214
215 p = LOCAL_HUB_L(PI_CPU_PRESENT_A) & 1;
216 e = LOCAL_HUB_L(PI_CPU_ENABLE_A) & 1;
217 printk("Node %d has %s primary CPU%s.\n", nid,
218 p ? "a" : "no",
219 e ? ", CPU is running" : "");
220
221 p = LOCAL_HUB_L(PI_CPU_PRESENT_B) & 1;
222 e = LOCAL_HUB_L(PI_CPU_ENABLE_B) & 1;
223 printk("Node %d has %s secondary CPU%s.\n", nid,
224 p ? "a" : "no",
225 e ? ", CPU is running" : "");
226
227 /*
227 * Try to catch kernel misconfigurations and give the user an
229 * indication what option to select.
230 */
231 n_mode = LOCAL_HUB_L(NI_STATUS_REV_ID) & NSRI_MORENODES_MASK;
232 printk("Machine is in %c mode.\n", n_mode ? 'N' : 'M');
233#ifdef CONFIG_SGI_SN0_N_MODE
234 if (!n_mode)
235 panic("Kernel compiled for M mode.");
236#else
237 if (n_mode)
238 panic("Kernel compiled for N mode.");
239#endif
240
241 ioc3_sio_init();
242 ioc3_eth_init();
243 per_cpu_init();
244
245 set_io_port_base(IO_BASE);
246
247 board_time_init = ip27_time_init;
248
249 return 0;
250}
251
252early_initcall(ip27_setup);
diff --git a/arch/mips/sgi-ip27/ip27-irq-glue.S b/arch/mips/sgi-ip27/ip27-irq-glue.S
new file mode 100644
index 000000000000..c304df715e0a
--- /dev/null
+++ b/arch/mips/sgi-ip27/ip27-irq-glue.S
@@ -0,0 +1,45 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1999 Ralf Baechle
7 * Copyright (C) 1999 Silicon Graphics, Inc.
8 */
9#include <asm/asm.h>
10#include <asm/mipsregs.h>
11#include <asm/regdef.h>
12#include <asm/stackframe.h>
13
14 .text
15 .align 5
16NESTED(ip27_irq, PT_SIZE, sp)
17 SAVE_ALL
18 CLI
19
20 mfc0 s0, CP0_CAUSE
21 mfc0 t0, CP0_STATUS
22 and s0, t0
23 move a0, sp
24 PTR_LA ra, ret_from_irq
25
26 /* First check for RT interrupt. */
27 andi t0, s0, CAUSEF_IP4
28 bnez t0, ip4
29 andi t0, s0, CAUSEF_IP2
30 bnez t0, ip2
31 andi t0, s0, CAUSEF_IP3
32 bnez t0, ip3
33 andi t0, s0, CAUSEF_IP5
34 bnez t0, ip5
35 andi t0, s0, CAUSEF_IP6
36 bnez t0, ip6
37 j ra
38
39ip2: j ip27_do_irq_mask0 # PI_INT_PEND_0 or CC_PEND_{A|B}
40ip3: j ip27_do_irq_mask1 # PI_INT_PEND_1
41ip4: j ip27_rt_timer_interrupt
42ip5: j ip27_prof_timer
43ip6: j ip27_hub_error
44
45 END(ip27_irq)
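The handler above masks CP0 Cause against Status and then dispatches in a fixed priority order, giving the IP4 real-time timer precedence over the two PI pending registers (IP2/IP3), the profiling timer (IP5) and hub errors (IP6). The following C rendition of that dispatch is an illustration only; it returns the target handler's name as a string rather than calling it, and uses the usual MIPS Cause-register bit positions (IP2 is bit 10).

#include <stdio.h>

#define CAUSEF_IP2 (1 << 10)
#define CAUSEF_IP3 (1 << 11)
#define CAUSEF_IP4 (1 << 12)
#define CAUSEF_IP5 (1 << 13)
#define CAUSEF_IP6 (1 << 14)

static const char *dispatch(unsigned int cause, unsigned int status)
{
	unsigned int pending = cause & status;	/* only unmasked, pending lines */

	if (pending & CAUSEF_IP4)
		return "ip27_rt_timer_interrupt";
	if (pending & CAUSEF_IP2)
		return "ip27_do_irq_mask0";
	if (pending & CAUSEF_IP3)
		return "ip27_do_irq_mask1";
	if (pending & CAUSEF_IP5)
		return "ip27_prof_timer";
	if (pending & CAUSEF_IP6)
		return "ip27_hub_error";
	return "none";
}

int main(void)
{
	/* IP2 and IP4 both pending and unmasked: IP4 wins, as in the assembler */
	printf("%s\n", dispatch(CAUSEF_IP2 | CAUSEF_IP4, CAUSEF_IP2 | CAUSEF_IP4));
	return 0;
}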
diff --git a/arch/mips/sgi-ip27/ip27-irq.c b/arch/mips/sgi-ip27/ip27-irq.c
new file mode 100644
index 000000000000..61817a18aed2
--- /dev/null
+++ b/arch/mips/sgi-ip27/ip27-irq.c
@@ -0,0 +1,457 @@
1/*
2 * ip27-irq.c: Highlevel interrupt handling for IP27 architecture.
3 *
4 * Copyright (C) 1999, 2000 Ralf Baechle (ralf@gnu.org)
5 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
6 * Copyright (C) 1999 - 2001 Kanoj Sarcar
7 */
8#include <linux/config.h>
9#include <linux/init.h>
10#include <linux/irq.h>
11#include <linux/errno.h>
12#include <linux/signal.h>
13#include <linux/sched.h>
14#include <linux/types.h>
15#include <linux/interrupt.h>
16#include <linux/ioport.h>
17#include <linux/irq.h>
18#include <linux/timex.h>
19#include <linux/slab.h>
20#include <linux/random.h>
21#include <linux/smp_lock.h>
22#include <linux/kernel_stat.h>
23#include <linux/delay.h>
24#include <linux/bitops.h>
25
26#include <asm/bootinfo.h>
27#include <asm/io.h>
28#include <asm/mipsregs.h>
29#include <asm/system.h>
30
31#include <asm/ptrace.h>
32#include <asm/processor.h>
33#include <asm/pci/bridge.h>
34#include <asm/sn/addrs.h>
35#include <asm/sn/agent.h>
36#include <asm/sn/arch.h>
37#include <asm/sn/hub.h>
38#include <asm/sn/intr.h>
39
40#undef DEBUG_IRQ
41#ifdef DEBUG_IRQ
42#define DBG(x...) printk(x)
43#else
44#define DBG(x...)
45#endif
46
47/*
48 * Linux has a controller-independent x86 interrupt architecture.
49 * Every controller has a 'controller-template', that is used
50 * by the main code to do the right thing. Each driver-visible
51 * interrupt source is transparently wired to the appropriate
52 * controller. Thus drivers need not be aware of the
53 * interrupt-controller.
54 *
55 * Various interrupt controllers we handle: 8259 PIC, SMP IO-APIC,
56 * PIIX4's internal 8259 PIC and SGI's Visual Workstation Cobalt (IO-)APIC.
57 * (IO-APICs assumed to be messaging to Pentium local-APICs)
58 *
59 * the code is designed to be easily extended with new/different
60 * interrupt controllers, without having to do assembly magic.
61 */
62
63extern asmlinkage void ip27_irq(void);
64
65extern struct bridge_controller *irq_to_bridge[];
66extern int irq_to_slot[];
67
68/*
69 * use these macros to get the encoded nasid and widget id
70 * from the irq value
71 */
72#define IRQ_TO_BRIDGE(i) irq_to_bridge[(i)]
73#define SLOT_FROM_PCI_IRQ(i) irq_to_slot[i]
74
75static inline int alloc_level(int cpu, int irq)
76{
77 struct slice_data *si = cpu_data[cpu].data;
78 int level; /* pre-allocated entries */
79
80 level = find_first_zero_bit(si->irq_alloc_mask, LEVELS_PER_SLICE);
81 if (level >= LEVELS_PER_SLICE)
82 panic("Cpu %d flooded with devices\n", cpu);
83
84 __set_bit(level, si->irq_alloc_mask);
85 si->level_to_irq[level] = irq;
86
87 return level;
88}
89
90static inline int find_level(cpuid_t *cpunum, int irq)
91{
92 int cpu, i;
93
94 for (cpu = 0; cpu < NR_CPUS; cpu++) {
95 struct slice_data *si = cpu_data[cpu].data;
96
97 if (!cpu_online(cpu))
98 continue;
99
100 for (i = BASE_PCI_IRQ; i < LEVELS_PER_SLICE; i++)
101 if (si->level_to_irq[i] == irq) {
102 *cpunum = cpu;
103
104 return i;
105 }
106 }
107
108 panic("Could not identify cpu/level for irq %d\n", irq);
109}
110
111/*
112 * Find the most significant bit set
113 */
114static int ms1bit(unsigned long x)
115{
116 int b = 0, s;
117
118 s = 16; if (x >> 16 == 0) s = 0; b += s; x >>= s;
119 s = 8; if (x >> 8 == 0) s = 0; b += s; x >>= s;
120 s = 4; if (x >> 4 == 0) s = 0; b += s; x >>= s;
121 s = 2; if (x >> 2 == 0) s = 0; b += s; x >>= s;
122 s = 1; if (x >> 1 == 0) s = 0; b += s;
123
124 return b;
125}
126
127/*
128 * This code is unnecessarily complex, because we do SA_INTERRUPT
129 * intr enabling. Basically, once we grab the set of intrs we need
130 * to service, we must mask _all_ these interrupts; firstly, to make
131 * sure the same intr does not intr again, causing recursion that
132 * can lead to stack overflow. Secondly, we can not just mask the
133 * one intr we are do_IRQing, because the non-masked intrs in the
134 * first set might intr again, causing multiple servicings of the
135 * same intr. This effect is mostly seen for intercpu intrs.
136 * Kanoj 05.13.00
137 */
138
139void ip27_do_irq_mask0(struct pt_regs *regs)
140{
141 int irq, swlevel;
142 hubreg_t pend0, mask0;
143 cpuid_t cpu = smp_processor_id();
144 int pi_int_mask0 =
145 (cputoslice(cpu) == 0) ? PI_INT_MASK0_A : PI_INT_MASK0_B;
146
147 /* copied from Irix intpend0() */
148 pend0 = LOCAL_HUB_L(PI_INT_PEND0);
149 mask0 = LOCAL_HUB_L(pi_int_mask0);
150
151 pend0 &= mask0; /* Pick intrs we should look at */
152 if (!pend0)
153 return;
154
155 swlevel = ms1bit(pend0);
156#ifdef CONFIG_SMP
157 if (pend0 & (1UL << CPU_RESCHED_A_IRQ)) {
158 LOCAL_HUB_CLR_INTR(CPU_RESCHED_A_IRQ);
159 } else if (pend0 & (1UL << CPU_RESCHED_B_IRQ)) {
160 LOCAL_HUB_CLR_INTR(CPU_RESCHED_B_IRQ);
161 } else if (pend0 & (1UL << CPU_CALL_A_IRQ)) {
162 LOCAL_HUB_CLR_INTR(CPU_CALL_A_IRQ);
163 smp_call_function_interrupt();
164 } else if (pend0 & (1UL << CPU_CALL_B_IRQ)) {
165 LOCAL_HUB_CLR_INTR(CPU_CALL_B_IRQ);
166 smp_call_function_interrupt();
167 } else
168#endif
169 {
170 /* "map" swlevel to irq */
171 struct slice_data *si = cpu_data[cpu].data;
172
173 irq = si->level_to_irq[swlevel];
174 do_IRQ(irq, regs);
175 }
176
177 LOCAL_HUB_L(PI_INT_PEND0);
178}
179
180void ip27_do_irq_mask1(struct pt_regs *regs)
181{
182 int irq, swlevel;
183 hubreg_t pend1, mask1;
184 cpuid_t cpu = smp_processor_id();
185 int pi_int_mask1 = (cputoslice(cpu) == 0) ? PI_INT_MASK1_A : PI_INT_MASK1_B;
186 struct slice_data *si = cpu_data[cpu].data;
187
188 /* copied from Irix intpend0() */
189 pend1 = LOCAL_HUB_L(PI_INT_PEND1);
190 mask1 = LOCAL_HUB_L(pi_int_mask1);
191
192 pend1 &= mask1; /* Pick intrs we should look at */
193 if (!pend1)
194 return;
195
196 swlevel = ms1bit(pend1);
197 /* "map" swlevel to irq */
198 irq = si->level_to_irq[swlevel];
199 LOCAL_HUB_CLR_INTR(swlevel);
200 do_IRQ(irq, regs);
201
202 LOCAL_HUB_L(PI_INT_PEND1);
203}
204
205void ip27_prof_timer(struct pt_regs *regs)
206{
207 panic("CPU %d got a profiling interrupt", smp_processor_id());
208}
209
210void ip27_hub_error(struct pt_regs *regs)
211{
212 panic("CPU %d got a hub error interrupt", smp_processor_id());
213}
214
215static int intr_connect_level(int cpu, int bit)
216{
217 nasid_t nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu));
218 struct slice_data *si = cpu_data[cpu].data;
219
220 __set_bit(bit, si->irq_enable_mask);
221
222 if (!cputoslice(cpu)) {
223 REMOTE_HUB_S(nasid, PI_INT_MASK0_A, si->irq_enable_mask[0]);
224 REMOTE_HUB_S(nasid, PI_INT_MASK1_A, si->irq_enable_mask[1]);
225 } else {
226 REMOTE_HUB_S(nasid, PI_INT_MASK0_B, si->irq_enable_mask[0]);
227 REMOTE_HUB_S(nasid, PI_INT_MASK1_B, si->irq_enable_mask[1]);
228 }
229
230 return 0;
231}
232
233static int intr_disconnect_level(int cpu, int bit)
234{
235 nasid_t nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu));
236 struct slice_data *si = cpu_data[cpu].data;
237
238 __clear_bit(bit, si->irq_enable_mask);
239
240 if (!cputoslice(cpu)) {
241 REMOTE_HUB_S(nasid, PI_INT_MASK0_A, si->irq_enable_mask[0]);
242 REMOTE_HUB_S(nasid, PI_INT_MASK1_A, si->irq_enable_mask[1]);
243 } else {
244 REMOTE_HUB_S(nasid, PI_INT_MASK0_B, si->irq_enable_mask[0]);
245 REMOTE_HUB_S(nasid, PI_INT_MASK1_B, si->irq_enable_mask[1]);
246 }
247
248 return 0;
249}
250
251/* Startup one of the (PCI ...) IRQs routes over a bridge. */
252static unsigned int startup_bridge_irq(unsigned int irq)
253{
254 struct bridge_controller *bc;
255 bridgereg_t device;
256 bridge_t *bridge;
257 int pin, swlevel;
258 cpuid_t cpu;
259
260 pin = SLOT_FROM_PCI_IRQ(irq);
261 bc = IRQ_TO_BRIDGE(irq);
262 bridge = bc->base;
263
264 DBG("bridge_startup(): irq= 0x%x pin=%d\n", irq, pin);
265 /*
266 * "map" irq to a swlevel greater than 6 since the first 6 bits
267 * of INT_PEND0 are taken
268 */
269 swlevel = find_level(&cpu, irq);
270 bridge->b_int_addr[pin].addr = (0x20000 | swlevel | (bc->nasid << 8));
271 bridge->b_int_enable |= (1 << pin);
272 bridge->b_int_enable |= 0x7ffffe00; /* more stuff in int_enable */
273
274 /*
275 * Enable sending of an interrupt clear packet to the hub on a high to
276 * low transition of the interrupt pin.
277 *
278 * IRIX sets additional bits in the address which are documented as
279 * reserved in the bridge docs.
280 */
281 bridge->b_int_mode |= (1UL << pin);
282
283 /*
284 * We assume the bridge to have a 1:1 mapping between devices
285 * (slots) and intr pins.
286 */
287 device = bridge->b_int_device;
288 device &= ~(7 << (pin*3));
289 device |= (pin << (pin*3));
290 bridge->b_int_device = device;
291
292 bridge->b_wid_tflush;
293
294 return 0; /* Never anything pending. */
295}
296
297/* Shutdown one of the (PCI ...) IRQs routes over a bridge. */
298static void shutdown_bridge_irq(unsigned int irq)
299{
300 struct bridge_controller *bc = IRQ_TO_BRIDGE(irq);
301 bridge_t *bridge = bc->base;
302 struct slice_data *si = cpu_data[bc->irq_cpu].data;
303 int pin, swlevel;
304 cpuid_t cpu;
305
306 DBG("bridge_shutdown: irq 0x%x\n", irq);
307 pin = SLOT_FROM_PCI_IRQ(irq);
308
309 /*
310 * map irq to a swlevel greater than 6 since the first 6 bits
311 * of INT_PEND0 are taken
312 */
313 swlevel = find_level(&cpu, irq);
314 intr_disconnect_level(cpu, swlevel);
315
316 __clear_bit(swlevel, si->irq_alloc_mask);
317 si->level_to_irq[swlevel] = -1;
318
319 bridge->b_int_enable &= ~(1 << pin);
320 bridge->b_wid_tflush;
321}
322
323static inline void enable_bridge_irq(unsigned int irq)
324{
325 cpuid_t cpu;
326 int swlevel;
327
328 swlevel = find_level(&cpu, irq); /* Criminal offence */
329 intr_connect_level(cpu, swlevel);
330}
331
332static inline void disable_bridge_irq(unsigned int irq)
333{
334 cpuid_t cpu;
335 int swlevel;
336
337 swlevel = find_level(&cpu, irq); /* Criminal offence */
338 intr_disconnect_level(cpu, swlevel);
339}
340
341static void mask_and_ack_bridge_irq(unsigned int irq)
342{
343 disable_bridge_irq(irq);
344}
345
346static void end_bridge_irq(unsigned int irq)
347{
348 if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)) &&
349 irq_desc[irq].action)
350 enable_bridge_irq(irq);
351}
352
353static struct hw_interrupt_type bridge_irq_type = {
354 .typename = "bridge",
355 .startup = startup_bridge_irq,
356 .shutdown = shutdown_bridge_irq,
357 .enable = enable_bridge_irq,
358 .disable = disable_bridge_irq,
359 .ack = mask_and_ack_bridge_irq,
360 .end = end_bridge_irq,
361};
362
363static unsigned long irq_map[NR_IRQS / BITS_PER_LONG];
364
365static int allocate_irqno(void)
366{
367 int irq;
368
369again:
370 irq = find_first_zero_bit(irq_map, NR_IRQS);
371
372 if (irq >= NR_IRQS)
373 return -ENOSPC;
374
375 if (test_and_set_bit(irq, irq_map))
376 goto again;
377
378 return irq;
379}
380
381void free_irqno(unsigned int irq)
382{
383 clear_bit(irq, irq_map);
384}
385
386void __devinit register_bridge_irq(unsigned int irq)
387{
388 irq_desc[irq].status = IRQ_DISABLED;
389 irq_desc[irq].action = 0;
390 irq_desc[irq].depth = 1;
391 irq_desc[irq].handler = &bridge_irq_type;
392}
393
394int __devinit request_bridge_irq(struct bridge_controller *bc)
395{
396 int irq = allocate_irqno();
397 int swlevel, cpu;
398 nasid_t nasid;
399
400 if (irq < 0)
401 return irq;
402
403 /*
404 * "map" irq to a swlevel greater than 6 since the first 6 bits
405 * of INT_PEND0 are taken
406 */
407 cpu = bc->irq_cpu;
408 swlevel = alloc_level(cpu, irq);
409 if (unlikely(swlevel < 0)) {
410 free_irqno(irq);
411
412 return -EAGAIN;
413 }
414
415 /* Make sure it's not already pending when we connect it. */
416 nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu));
417 REMOTE_HUB_CLR_INTR(nasid, swlevel);
418
419 intr_connect_level(cpu, swlevel);
420
421 register_bridge_irq(irq);
422
423 return irq;
424}
425
426void __init arch_init_irq(void)
427{
428 set_except_vector(0, ip27_irq);
429}
430
431void install_ipi(void)
432{
433 int slice = LOCAL_HUB_L(PI_CPU_NUM);
434 int cpu = smp_processor_id();
435 struct slice_data *si = cpu_data[cpu].data;
436 hubreg_t mask, set;
437
438 if (slice == 0) {
439 LOCAL_HUB_CLR_INTR(CPU_RESCHED_A_IRQ);
440 LOCAL_HUB_CLR_INTR(CPU_CALL_A_IRQ);
441 mask = LOCAL_HUB_L(PI_INT_MASK0_A); /* Slice A */
442 set = (1UL << CPU_RESCHED_A_IRQ) | (1UL << CPU_CALL_A_IRQ);
443 mask |= set;
444 si->irq_enable_mask[0] |= set;
445 si->irq_alloc_mask[0] |= set;
446 LOCAL_HUB_S(PI_INT_MASK0_A, mask);
447 } else {
448 LOCAL_HUB_CLR_INTR(CPU_RESCHED_B_IRQ);
449 LOCAL_HUB_CLR_INTR(CPU_CALL_B_IRQ);
450 mask = LOCAL_HUB_L(PI_INT_MASK0_B); /* Slice B */
451 set = (1UL << CPU_RESCHED_B_IRQ) | (1UL << CPU_CALL_B_IRQ);
452 mask |= set;
453 si->irq_enable_mask[1] |= set;
454 si->irq_alloc_mask[1] |= set;
455 LOCAL_HUB_S(PI_INT_MASK0_B, mask);
456 }
457}
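ms1bit() above finds the index of the most significant set bit with a fixed sequence of conditional shifts instead of a loop. The standalone sketch below replays the same function against a naive shift loop to show what it computes; it is an illustration, not kernel code.

#include <assert.h>
#include <stdio.h>

static int ms1bit(unsigned long x)		/* same algorithm as above */
{
	int b = 0, s;

	s = 16; if (x >> 16 == 0) s = 0; b += s; x >>= s;
	s =  8; if (x >>  8 == 0) s = 0; b += s; x >>= s;
	s =  4; if (x >>  4 == 0) s = 0; b += s; x >>= s;
	s =  2; if (x >>  2 == 0) s = 0; b += s; x >>= s;
	s =  1; if (x >>  1 == 0) s = 0; b += s;

	return b;
}

static int naive_msb(unsigned long x)		/* reference implementation */
{
	int b = 0;

	while (x >>= 1)
		b++;
	return b;
}

int main(void)
{
	unsigned long x;

	for (x = 1; x < 0x10000; x++)
		assert(ms1bit(x) == naive_msb(x));
	printf("ms1bit(0x40) = %d\n", ms1bit(0x40));	/* prints 6 */
	return 0;
}

On the interrupt path this bit index is the "swlevel" that si->level_to_irq[] translates back into a Linux irq number.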
diff --git a/arch/mips/sgi-ip27/ip27-klconfig.c b/arch/mips/sgi-ip27/ip27-klconfig.c
new file mode 100644
index 000000000000..dd830b3670d1
--- /dev/null
+++ b/arch/mips/sgi-ip27/ip27-klconfig.c
@@ -0,0 +1,135 @@
1/*
2 * Copyright (C) 1999, 2000 Ralf Baechle (ralf@gnu.org)
3 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
4 */
5#include <linux/init.h>
6#include <linux/kernel.h>
7#include <linux/sched.h>
8#include <linux/interrupt.h>
9#include <linux/kernel_stat.h>
10#include <linux/param.h>
11#include <linux/timex.h>
12#include <linux/mm.h>
13
14#include <asm/sn/klconfig.h>
15#include <asm/sn/arch.h>
16#include <asm/sn/gda.h>
17
18klinfo_t *find_component(lboard_t *brd, klinfo_t *kli, unsigned char struct_type)
19{
20 int index, j;
21
22 if (kli == (klinfo_t *)NULL) {
23 index = 0;
24 } else {
25 for (j = 0; j < KLCF_NUM_COMPS(brd); j++)
26 if (kli == KLCF_COMP(brd, j))
27 break;
28 index = j;
29 if (index == KLCF_NUM_COMPS(brd)) {
30 printk("find_component: Bad pointer: 0x%p\n", kli);
31 return (klinfo_t *)NULL;
32 }
33 index++; /* next component */
34 }
35
36 for (; index < KLCF_NUM_COMPS(brd); index++) {
37 kli = KLCF_COMP(brd, index);
38 if (KLCF_COMP_TYPE(kli) == struct_type)
39 return kli;
40 }
41
42 /* Didn't find it. */
43 return (klinfo_t *)NULL;
44}
45
46klinfo_t *find_first_component(lboard_t *brd, unsigned char struct_type)
47{
48 return find_component(brd, (klinfo_t *)NULL, struct_type);
49}
50
51lboard_t * find_lboard(lboard_t *start, unsigned char brd_type)
52{
53 /* Search all boards stored on this node. */
54 while (start) {
55 if (start->brd_type == brd_type)
56 return start;
57 start = KLCF_NEXT(start);
58 }
59 /* Didn't find it. */
60 return (lboard_t *)NULL;
61}
62
63lboard_t * find_lboard_class(lboard_t *start, unsigned char brd_type)
64{
65 /* Search all boards stored on this node. */
66 while (start) {
67 if (KLCLASS(start->brd_type) == KLCLASS(brd_type))
68 return start;
69 start = KLCF_NEXT(start);
70 }
71
72 /* Didn't find it. */
73 return (lboard_t *)NULL;
74}
75
76cnodeid_t get_cpu_cnode(cpuid_t cpu)
77{
78 return CPUID_TO_COMPACT_NODEID(cpu);
79}
80
81klcpu_t * nasid_slice_to_cpuinfo(nasid_t nasid, int slice)
82{
83 lboard_t *brd;
84 klcpu_t *acpu;
85
86 if (!(brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_IP27)))
87 return (klcpu_t *)NULL;
88
89 if (!(acpu = (klcpu_t *)find_first_component(brd, KLSTRUCT_CPU)))
90 return (klcpu_t *)NULL;
91
92 do {
93 if ((acpu->cpu_info.physid) == slice)
94 return acpu;
95 } while ((acpu = (klcpu_t *)find_component(brd, (klinfo_t *)acpu,
96 KLSTRUCT_CPU)));
97 return (klcpu_t *)NULL;
98}
99
100klcpu_t * sn_get_cpuinfo(cpuid_t cpu)
101{
102 nasid_t nasid;
103 int slice;
104 klcpu_t *acpu;
105 gda_t *gdap = GDA;
106 cnodeid_t cnode;
107
108 if (!(cpu < MAXCPUS)) {
109 printk("sn_get_cpuinfo: illegal cpuid 0x%lx\n", cpu);
110 return NULL;
111 }
112
113 cnode = get_cpu_cnode(cpu);
114 if (cnode == INVALID_CNODEID)
115 return NULL;
116
117 if ((nasid = gdap->g_nasidtable[cnode]) == INVALID_NASID)
118 return NULL;
119
120 for (slice = 0; slice < CPUS_PER_NODE; slice++) {
121 acpu = nasid_slice_to_cpuinfo(nasid, slice);
122 if (acpu && acpu->cpu_info.virtid == cpu)
123 return acpu;
124 }
125 return NULL;
126}
127
128int get_cpu_slice(cpuid_t cpu)
129{
130 klcpu_t *acpu;
131
132 if ((acpu = sn_get_cpuinfo(cpu)) == NULL)
133 return -1;
134 return acpu->cpu_info.physid;
135}
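find_component()/find_first_component() above implement a cursor-style scan: passing NULL starts at a board's first component, while passing a previously returned component resumes just past it, which is how nasid_slice_to_cpuinfo() walks every KLSTRUCT_CPU entry on a board. A generic, self-contained sketch of that pattern follows; the item/table types are hypothetical.

#include <stdio.h>
#include <stddef.h>

struct item {
	int type;
	const char *name;
};

static struct item table[] = {
	{ 1, "cpu0" }, { 2, "membank" }, { 1, "cpu1" }, { 3, "ioc3" },
};
#define NITEMS (sizeof(table) / sizeof(table[0]))

/* NULL starts the scan; a previous result resumes it just past that entry */
static struct item *find_item(struct item *prev, int type)
{
	size_t i = prev ? (size_t)(prev - table) + 1 : 0;

	for (; i < NITEMS; i++)
		if (table[i].type == type)
			return &table[i];
	return NULL;
}

int main(void)
{
	struct item *it = NULL;

	while ((it = find_item(it, 1)))
		printf("%s\n", it->name);	/* prints cpu0, then cpu1 */
	return 0;
}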
diff --git a/arch/mips/sgi-ip27/ip27-klnuma.c b/arch/mips/sgi-ip27/ip27-klnuma.c
new file mode 100644
index 000000000000..41c3f405e00c
--- /dev/null
+++ b/arch/mips/sgi-ip27/ip27-klnuma.c
@@ -0,0 +1,135 @@
1/*
2 * Ported from IRIX to Linux by Kanoj Sarcar, 06/08/00.
3 * Copyright 2000 - 2001 Silicon Graphics, Inc.
4 * Copyright 2000 - 2001 Kanoj Sarcar (kanoj@sgi.com)
5 */
6#include <linux/config.h>
7#include <linux/init.h>
8#include <linux/mmzone.h>
9#include <linux/kernel.h>
10#include <linux/nodemask.h>
11#include <linux/string.h>
12
13#include <asm/page.h>
14#include <asm/sections.h>
15#include <asm/smp.h>
16#include <asm/sn/types.h>
17#include <asm/sn/arch.h>
18#include <asm/sn/gda.h>
19#include <asm/sn/hub.h>
20#include <asm/sn/mapped_kernel.h>
21#include <asm/sn/sn_private.h>
22
23static cpumask_t ktext_repmask;
24
25/*
26 * XXX - This needs to be much smarter about where it puts copies of the
27 * kernel. For example, we should never put a copy on a headless node,
28 * and we should respect the topology of the machine.
29 */
30void __init setup_replication_mask()
31{
32 cnodeid_t cnode;
33
34 /* Set only the master cnode's bit. The master cnode is always 0. */
35 cpus_clear(ktext_repmask);
36 cpu_set(0, ktext_repmask);
37
38#ifdef CONFIG_REPLICATE_KTEXT
39#ifndef CONFIG_MAPPED_KERNEL
40#error Kernel replication works with mapped kernel support. No calias support.
41#endif
42 for_each_online_node(cnode) {
43 if (cnode == 0)
44 continue;
45 /* Advertise that we have a copy of the kernel */
46 cpu_set(cnode, ktext_repmask);
47 }
48#endif
49 /* Set up a GDA pointer to the replication mask. */
50 GDA->g_ktext_repmask = &ktext_repmask;
51}
52
53
54static __init void set_ktext_source(nasid_t client_nasid, nasid_t server_nasid)
55{
56 cnodeid_t client_cnode;
57 kern_vars_t *kvp;
58
59 client_cnode = NASID_TO_COMPACT_NODEID(client_nasid);
60
61 kvp = &hub_data(client_nasid)->kern_vars;
62
63 KERN_VARS_ADDR(client_nasid) = (unsigned long)kvp;
64
65 kvp->kv_magic = KV_MAGIC;
66 kvp->kv_ro_nasid = server_nasid;
67 kvp->kv_rw_nasid = master_nasid;
68 kvp->kv_ro_baseaddr = NODE_CAC_BASE(server_nasid);
69 kvp->kv_rw_baseaddr = NODE_CAC_BASE(master_nasid);
70 printk("REPLICATION: ON nasid %d, ktext from nasid %d, kdata from nasid %d\n", client_nasid, server_nasid, master_nasid);
71}
72
73/* XXX - When the BTE works, we should use it instead of this. */
74static __init void copy_kernel(nasid_t dest_nasid)
75{
76 unsigned long dest_kern_start, source_start, source_end, kern_size;
77
78 source_start = (unsigned long) _stext;
79 source_end = (unsigned long) _etext;
80 kern_size = source_end - source_start;
81
82 dest_kern_start = CHANGE_ADDR_NASID(MAPPED_KERN_RO_TO_K0(source_start),
83 dest_nasid);
84 memcpy((void *)dest_kern_start, (void *)source_start, kern_size);
85}
86
87void __init replicate_kernel_text()
88{
89 cnodeid_t cnode;
90 nasid_t client_nasid;
91 nasid_t server_nasid;
92
93 server_nasid = master_nasid;
94
95 /* Record where the master node should get its kernel text */
96 set_ktext_source(master_nasid, master_nasid);
97
98 for_each_online_node(cnode) {
99 if (cnode == 0)
100 continue;
101 client_nasid = COMPACT_TO_NASID_NODEID(cnode);
102
103 /* Check if this node should get a copy of the kernel */
104 if (cpu_isset(cnode, ktext_repmask)) {
105 server_nasid = client_nasid;
106 copy_kernel(server_nasid);
107 }
108
109 /* Record where this node should get its kernel text */
110 set_ktext_source(client_nasid, server_nasid);
111 }
112}
113
114/*
115 * Return pfn of first free page of memory on a node. PROM may allocate
116 * data structures on the first couple of pages of the first slot of each
117 * node. If this is the case, getfirstfree(node) > getslotstart(node, 0).
118 */
119pfn_t node_getfirstfree(cnodeid_t cnode)
120{
121 unsigned long loadbase = REP_BASE;
122 nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode);
123 unsigned long offset;
124
125#ifdef CONFIG_MAPPED_KERNEL
126 loadbase += 16777216;
127#endif
128 offset = PAGE_ALIGN((unsigned long)(&_end)) - loadbase;
129 if ((cnode == 0) || (cpu_isset(cnode, ktext_repmask)))
130 return (TO_NODE(nasid, offset) >> PAGE_SHIFT);
131 else
132 return (KDM_TO_PHYS(PAGE_ALIGN(SYMMON_STK_ADDR(nasid, 0))) >>
133 PAGE_SHIFT);
134}
135
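replicate_kernel_text() above records, for every node, where its kernel text should come from: a node listed in the replication mask copies the text and serves itself, and each following node without a copy keeps reading from the most recently replicated node (initially the master). A small standalone sketch of that source-selection logic with a made-up replication set:

#include <stdio.h>

#define MASTER 0
#define NNODES 6

int main(void)
{
	/* hypothetical replication mask: nodes 0, 2 and 4 hold a copy */
	int has_copy[NNODES] = { 1, 0, 1, 0, 1, 0 };
	int node, server = MASTER;

	for (node = 0; node < NNODES; node++) {
		if (node != MASTER && has_copy[node])
			server = node;	/* this node now serves itself */
		printf("node %d reads kernel text from node %d\n", node, server);
	}
	return 0;
}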
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c
new file mode 100644
index 000000000000..0a44a98d7adc
--- /dev/null
+++ b/arch/mips/sgi-ip27/ip27-memory.c
@@ -0,0 +1,586 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2000, 05 by Ralf Baechle (ralf@linux-mips.org)
7 * Copyright (C) 2000 by Silicon Graphics, Inc.
8 * Copyright (C) 2004 by Christoph Hellwig
9 *
10 * On SGI IP27 the ARC memory configuration data is completely bogus but
11 * alternate easier to use mechanisms are available.
12 */
13#include <linux/config.h>
14#include <linux/init.h>
15#include <linux/kernel.h>
16#include <linux/mm.h>
17#include <linux/mmzone.h>
18#include <linux/module.h>
19#include <linux/nodemask.h>
20#include <linux/swap.h>
21#include <linux/bootmem.h>
22#include <asm/page.h>
23#include <asm/sections.h>
24
25#include <asm/sn/arch.h>
26#include <asm/sn/hub.h>
27#include <asm/sn/klconfig.h>
28#include <asm/sn/sn_private.h>
29
30
31#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
32
33#define SLOT_PFNSHIFT (SLOT_SHIFT - PAGE_SHIFT)
34#define PFN_NASIDSHFT (NASID_SHFT - PAGE_SHIFT)
35
36#define SLOT_IGNORED 0xffff
37
38static short __initdata slot_lastfilled_cache[MAX_COMPACT_NODES];
39static unsigned short __initdata slot_psize_cache[MAX_COMPACT_NODES][MAX_MEM_SLOTS];
40static struct bootmem_data __initdata plat_node_bdata[MAX_COMPACT_NODES];
41
42struct node_data *__node_data[MAX_COMPACT_NODES];
43
44EXPORT_SYMBOL(__node_data);
45
46static int fine_mode;
47
48static int is_fine_dirmode(void)
49{
50 return (((LOCAL_HUB_L(NI_STATUS_REV_ID) & NSRI_REGIONSIZE_MASK)
51 >> NSRI_REGIONSIZE_SHFT) & REGIONSIZE_FINE);
52}
53
54static hubreg_t get_region(cnodeid_t cnode)
55{
56 if (fine_mode)
57 return COMPACT_TO_NASID_NODEID(cnode) >> NASID_TO_FINEREG_SHFT;
58 else
59 return COMPACT_TO_NASID_NODEID(cnode) >> NASID_TO_COARSEREG_SHFT;
60}
61
62static hubreg_t region_mask;
63
64static void gen_region_mask(hubreg_t *region_mask)
65{
66 cnodeid_t cnode;
67
68 (*region_mask) = 0;
69 for_each_online_node(cnode) {
70 (*region_mask) |= 1ULL << get_region(cnode);
71 }
72}
73
74#define rou_rflag rou_flags
75
76static int router_distance;
77
78static void router_recurse(klrou_t *router_a, klrou_t *router_b, int depth)
79{
80 klrou_t *router;
81 lboard_t *brd;
82 int port;
83
84 if (router_a->rou_rflag == 1)
85 return;
86
87 if (depth >= router_distance)
88 return;
89
90 router_a->rou_rflag = 1;
91
92 for (port = 1; port <= MAX_ROUTER_PORTS; port++) {
93 if (router_a->rou_port[port].port_nasid == INVALID_NASID)
94 continue;
95
96 brd = (lboard_t *)NODE_OFFSET_TO_K0(
97 router_a->rou_port[port].port_nasid,
98 router_a->rou_port[port].port_offset);
99
100 if (brd->brd_type == KLTYPE_ROUTER) {
101 router = (klrou_t *)NODE_OFFSET_TO_K0(NASID_GET(brd), brd->brd_compts[0]);
102 if (router == router_b) {
103 if (depth < router_distance)
104 router_distance = depth;
105 }
106 else
107 router_recurse(router, router_b, depth + 1);
108 }
109 }
110
111 router_a->rou_rflag = 0;
112}
113
114unsigned char __node_distances[MAX_COMPACT_NODES][MAX_COMPACT_NODES];
115
116static int __init compute_node_distance(nasid_t nasid_a, nasid_t nasid_b)
117{
118 klrou_t *router, *router_a = NULL, *router_b = NULL;
119 lboard_t *brd, *dest_brd;
120 cnodeid_t cnode;
121 nasid_t nasid;
122 int port;
123
124 /* Figure out which routers nodes in question are connected to */
125 for_each_online_node(cnode) {
126 nasid = COMPACT_TO_NASID_NODEID(cnode);
127
128 if (nasid == -1) continue;
129
130 brd = find_lboard_class((lboard_t *)KL_CONFIG_INFO(nasid),
131 KLTYPE_ROUTER);
132
133 if (!brd)
134 continue;
135
136 do {
137 if (brd->brd_flags & DUPLICATE_BOARD)
138 continue;
139
140 router = (klrou_t *)NODE_OFFSET_TO_K0(NASID_GET(brd), brd->brd_compts[0]);
141 router->rou_rflag = 0;
142
143 for (port = 1; port <= MAX_ROUTER_PORTS; port++) {
144 if (router->rou_port[port].port_nasid == INVALID_NASID)
145 continue;
146
147 dest_brd = (lboard_t *)NODE_OFFSET_TO_K0(
148 router->rou_port[port].port_nasid,
149 router->rou_port[port].port_offset);
150
151 if (dest_brd->brd_type == KLTYPE_IP27) {
152 if (dest_brd->brd_nasid == nasid_a)
153 router_a = router;
154 if (dest_brd->brd_nasid == nasid_b)
155 router_b = router;
156 }
157 }
158
159 } while ((brd = find_lboard_class(KLCF_NEXT(brd), KLTYPE_ROUTER)));
160 }
161
162 if (router_a == NULL) {
163 printk("node_distance: router_a NULL\n");
164 return -1;
165 }
166 if (router_b == NULL) {
167 printk("node_distance: router_b NULL\n");
168 return -1;
169 }
170
171 if (nasid_a == nasid_b)
172 return 0;
173
174 if (router_a == router_b)
175 return 1;
176
177 router_distance = 100;
178 router_recurse(router_a, router_b, 2);
179
180 return router_distance;
181}
182
183static void __init init_topology_matrix(void)
184{
185 nasid_t nasid, nasid2;
186 cnodeid_t row, col;
187
188 for (row = 0; row < MAX_COMPACT_NODES; row++)
189 for (col = 0; col < MAX_COMPACT_NODES; col++)
190 __node_distances[row][col] = -1;
191
192 for_each_online_node(row) {
193 nasid = COMPACT_TO_NASID_NODEID(row);
194 for_each_online_node(col) {
195 nasid2 = COMPACT_TO_NASID_NODEID(col);
196 __node_distances[row][col] =
197 compute_node_distance(nasid, nasid2);
198 }
199 }
200}
201
202static void __init dump_topology(void)
203{
204 nasid_t nasid;
205 cnodeid_t cnode;
206 lboard_t *brd, *dest_brd;
207 int port;
208 int router_num = 0;
209 klrou_t *router;
210 cnodeid_t row, col;
211
212 printk("************** Topology ********************\n");
213
214 printk(" ");
215 for_each_online_node(col)
216 printk("%02d ", col);
217 printk("\n");
218 for_each_online_node(row) {
219 printk("%02d ", row);
220 for_each_online_node(col)
221 printk("%2d ", node_distance(row, col));
222 printk("\n");
223 }
224
225 for_each_online_node(cnode) {
226 nasid = COMPACT_TO_NASID_NODEID(cnode);
227
228 if (nasid == -1) continue;
229
230 brd = find_lboard_class((lboard_t *)KL_CONFIG_INFO(nasid),
231 KLTYPE_ROUTER);
232
233 if (!brd)
234 continue;
235
236 do {
237 if (brd->brd_flags & DUPLICATE_BOARD)
238 continue;
239 printk("Router %d:", router_num);
240 router_num++;
241
242 router = (klrou_t *)NODE_OFFSET_TO_K0(NASID_GET(brd), brd->brd_compts[0]);
243
244 for (port = 1; port <= MAX_ROUTER_PORTS; port++) {
245 if (router->rou_port[port].port_nasid == INVALID_NASID)
246 continue;
247
248 dest_brd = (lboard_t *)NODE_OFFSET_TO_K0(
249 router->rou_port[port].port_nasid,
250 router->rou_port[port].port_offset);
251
252 if (dest_brd->brd_type == KLTYPE_IP27)
253 printk(" %d", dest_brd->brd_nasid);
254 if (dest_brd->brd_type == KLTYPE_ROUTER)
255 printk(" r");
256 }
257 printk("\n");
258
259 } while ( (brd = find_lboard_class(KLCF_NEXT(brd), KLTYPE_ROUTER)) );
260 }
261}
262
263static pfn_t __init slot_getbasepfn(cnodeid_t cnode, int slot)
264{
265 nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode);
266
267 return ((pfn_t)nasid << PFN_NASIDSHFT) | (slot << SLOT_PFNSHIFT);
268}
269
270/*
271 * Return the number of pages of memory provided by the given slot
272 * on the specified node.
273 */
274static pfn_t __init slot_getsize(cnodeid_t node, int slot)
275{
276 return (pfn_t) slot_psize_cache[node][slot];
277}
278
279/*
280 * Return highest slot filled
281 */
282static int __init node_getlastslot(cnodeid_t node)
283{
284 return (int) slot_lastfilled_cache[node];
285}
286
287/*
288 * Return the pfn of the last free page of memory on a node.
289 */
290static pfn_t __init node_getmaxclick(cnodeid_t node)
291{
292 pfn_t slot_psize;
293 int slot;
294
295 /*
296 * Start at the top slot. When we find a slot with memory in it,
297 * that's the winner.
298 */
299 for (slot = (MAX_MEM_SLOTS - 1); slot >= 0; slot--) {
300 if ((slot_psize = slot_getsize(node, slot))) {
301 if (slot_psize == SLOT_IGNORED)
302 continue;
303 /* Return the basepfn + the slot size, minus 1. */
304 return slot_getbasepfn(node, slot) + slot_psize - 1;
305 }
306 }
307
308 /*
309 * If there's no memory on the node, return 0. This is likely
310 * to cause problems.
311 */
312 return 0;
313}
314
315static pfn_t __init slot_psize_compute(cnodeid_t node, int slot)
316{
317 nasid_t nasid;
318 lboard_t *brd;
319 klmembnk_t *banks;
320 unsigned long size;
321
322 nasid = COMPACT_TO_NASID_NODEID(node);
323 /* Find the node board */
324 brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_IP27);
325 if (!brd)
326 return 0;
327
328 /* Get the memory bank structure */
329 banks = (klmembnk_t *) find_first_component(brd, KLSTRUCT_MEMBNK);
330 if (!banks)
331 return 0;
332
333 /* Size in _Megabytes_ */
334 size = (unsigned long)banks->membnk_bnksz[slot/4];
335
336 /* hack for 128 dimm banks */
337 if (size <= 128) {
338 if (slot % 4 == 0) {
339 size <<= 20; /* size in bytes */
340 return(size >> PAGE_SHIFT);
341 } else
342 return 0;
343 } else {
344 size /= 4;
345 size <<= 20;
346 return size >> PAGE_SHIFT;
347 }
348}
349
350static void __init mlreset(void)
351{
352 int i;
353
354 master_nasid = get_nasid();
355 fine_mode = is_fine_dirmode();
356
357 /*
358 * Probe for all CPUs - this creates the cpumask and sets up the
359 * mapping tables. We need to do this as early as possible.
360 */
361#ifdef CONFIG_SMP
362 cpu_node_probe();
363#endif
364
365 init_topology_matrix();
366 dump_topology();
367
368 gen_region_mask(&region_mask);
369
370 setup_replication_mask();
371
372 /*
373 * Set all nodes' calias sizes to 8k
374 */
375 for_each_online_node(i) {
376 nasid_t nasid;
377
378 nasid = COMPACT_TO_NASID_NODEID(i);
379
380 /*
381 * Always have node 0 in the region mask, otherwise
382 * CALIAS accesses get exceptions since the hub
383 * thinks it is a node 0 address.
384 */
385 REMOTE_HUB_S(nasid, PI_REGION_PRESENT, (region_mask | 1));
386#ifdef CONFIG_REPLICATE_EXHANDLERS
387 REMOTE_HUB_S(nasid, PI_CALIAS_SIZE, PI_CALIAS_SIZE_8K);
388#else
389 REMOTE_HUB_S(nasid, PI_CALIAS_SIZE, PI_CALIAS_SIZE_0);
390#endif
391
392#ifdef LATER
393 /*
394 * Set up all hubs to have a big window pointing at
395 * widget 0. Memory mode, widget 0, offset 0
396 */
397 REMOTE_HUB_S(nasid, IIO_ITTE(SWIN0_BIGWIN),
398 ((HUB_PIO_MAP_TO_MEM << IIO_ITTE_IOSP_SHIFT) |
399 (0 << IIO_ITTE_WIDGET_SHIFT)));
400#endif
401 }
402}
403
404static void __init szmem(void)
405{
406 pfn_t slot_psize, slot0sz = 0, nodebytes; /* Hack to detect problem configs */
407 int slot, ignore;
408 cnodeid_t node;
409
410 num_physpages = 0;
411
412 for_each_online_node(node) {
413 ignore = nodebytes = 0;
414 for (slot = 0; slot < MAX_MEM_SLOTS; slot++) {
415 slot_psize = slot_psize_compute(node, slot);
416 if (slot == 0)
417 slot0sz = slot_psize;
418 /*
419 * We need to refine the hack when we have replicated
420 * kernel text.
421 */
422 nodebytes += (1LL << SLOT_SHIFT);
423 if ((nodebytes >> PAGE_SHIFT) * (sizeof(struct page)) >
424 (slot0sz << PAGE_SHIFT))
425 ignore = 1;
426 if (ignore && slot_psize) {
427 printk("Ignoring slot %d onwards on node %d\n",
428 slot, node);
429 slot_psize_cache[node][slot] = SLOT_IGNORED;
430 slot = MAX_MEM_SLOTS;
431 continue;
432 }
433 num_physpages += slot_psize;
434 slot_psize_cache[node][slot] =
435 (unsigned short) slot_psize;
436 if (slot_psize)
437 slot_lastfilled_cache[node] = slot;
438 }
439 }
440}
441
442static void __init node_mem_init(cnodeid_t node)
443{
444 pfn_t slot_firstpfn = slot_getbasepfn(node, 0);
445 pfn_t slot_lastpfn = slot_firstpfn + slot_getsize(node, 0);
446 pfn_t slot_freepfn = node_getfirstfree(node);
447 struct pglist_data *pd;
448 unsigned long bootmap_size;
449
450 /*
451 * Allocate the node data structures on the node first.
452 */
453 __node_data[node] = __va(slot_freepfn << PAGE_SHIFT);
454
455 pd = NODE_DATA(node);
456 pd->bdata = &plat_node_bdata[node];
457
458 cpus_clear(hub_data(node)->h_cpus);
459
460 slot_freepfn += PFN_UP(sizeof(struct pglist_data) +
461 sizeof(struct hub_data));
462
463 bootmap_size = init_bootmem_node(NODE_DATA(node), slot_freepfn,
464 slot_firstpfn, slot_lastpfn);
465 free_bootmem_node(NODE_DATA(node), slot_firstpfn << PAGE_SHIFT,
466 (slot_lastpfn - slot_firstpfn) << PAGE_SHIFT);
467 reserve_bootmem_node(NODE_DATA(node), slot_firstpfn << PAGE_SHIFT,
468 ((slot_freepfn - slot_firstpfn) << PAGE_SHIFT) + bootmap_size);
469}
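/*
 * Layout of slot 0 after node_mem_init(): the pglist_data and hub_data
 * for the node sit at the first free pfn, the bootmem bitmap returned by
 * init_bootmem_node() follows them, and reserve_bootmem_node() then keeps
 * everything from the slot base up to and including that bitmap out of
 * the free pool, while the rest of slot 0 stays available to the bootmem
 * allocator.
 */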
470
471/*
472 * A node with nothing. We use it to avoid any special casing in
473 * node_to_cpumask
474 */
475static struct node_data null_node = {
476 .hub = {
477 .h_cpus = CPU_MASK_NONE
478 }
479};
480
481/*
482 * Currently, the intranode memory hole support assumes that each slot
483 * contains at least 32 MBytes of memory. We assume all bootmem data
484 * fits on the first slot.
485 */
486void __init prom_meminit(void)
487{
488 cnodeid_t node;
489
490 mlreset();
491 szmem();
492
493 for (node = 0; node < MAX_COMPACT_NODES; node++) {
494 if (node_online(node)) {
495 node_mem_init(node);
496 continue;
497 }
498 __node_data[node] = &null_node;
499 }
500}
501
502unsigned long __init prom_free_prom_memory(void)
503{
504 /* We got nothing to free here ... */
505 return 0;
506}
507
508extern void pagetable_init(void);
509extern unsigned long setup_zero_pages(void);
510
511void __init paging_init(void)
512{
513 unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
514 unsigned node;
515
516 pagetable_init();
517
518 for_each_online_node(node) {
519 pfn_t start_pfn = slot_getbasepfn(node, 0);
520 pfn_t end_pfn = node_getmaxclick(node) + 1;
521
522 zones_size[ZONE_DMA] = end_pfn - start_pfn;
523 free_area_init_node(node, NODE_DATA(node),
524 zones_size, start_pfn, NULL);
525
526 if (end_pfn > max_low_pfn)
527 max_low_pfn = end_pfn;
528 }
529}
530
531void __init mem_init(void)
532{
533 unsigned long codesize, datasize, initsize, tmp;
534 unsigned node;
535
536 high_memory = (void *) __va(num_physpages << PAGE_SHIFT);
537
538 for_each_online_node(node) {
539 unsigned slot, numslots;
540 struct page *end, *p;
541
542 /*
543		 * This will free up the bootmem, i.e., slot 0 memory.
544 */
545 totalram_pages += free_all_bootmem_node(NODE_DATA(node));
546
547 /*
548 * We need to manually do the other slots.
549 */
550 numslots = node_getlastslot(node);
551 for (slot = 1; slot <= numslots; slot++) {
552 p = NODE_DATA(node)->node_mem_map +
553 (slot_getbasepfn(node, slot) -
554 slot_getbasepfn(node, 0));
555
556 /*
557 * Free valid memory in current slot.
558 */
559 for (end = p + slot_getsize(node, slot); p < end; p++) {
560 /* if (!page_is_ram(pgnr)) continue; */
561 /* commented out until page_is_ram works */
562 ClearPageReserved(p);
563 set_page_count(p, 1);
564 __free_page(p);
565 totalram_pages++;
566 }
567 }
568 }
569
570 totalram_pages -= setup_zero_pages(); /* This comes from node 0 */
571
572 codesize = (unsigned long) &_etext - (unsigned long) &_text;
573 datasize = (unsigned long) &_edata - (unsigned long) &_etext;
574 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
575
576 tmp = nr_free_pages();
577 printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
578 "%ldk reserved, %ldk data, %ldk init, %ldk highmem)\n",
579 tmp << (PAGE_SHIFT-10),
580 num_physpages << (PAGE_SHIFT-10),
581 codesize >> 10,
582 (num_physpages - tmp) << (PAGE_SHIFT-10),
583 datasize >> 10,
584 initsize >> 10,
585 (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10)));
586}
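/*
 * Note on the loop above: free_all_bootmem_node() only returns the
 * memory bootmem managed, which is slot 0 of each node.  Pages in the
 * higher slots were never handed to bootmem, so they are released by
 * hand - their struct pages are found relative to the node's mem_map
 * using the slot base pfns, then freed one by one with __free_page().
 */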
diff --git a/arch/mips/sgi-ip27/ip27-nmi.c b/arch/mips/sgi-ip27/ip27-nmi.c
new file mode 100644
index 000000000000..b0a25e1ee8b7
--- /dev/null
+++ b/arch/mips/sgi-ip27/ip27-nmi.c
@@ -0,0 +1,249 @@
1#include <linux/kallsyms.h>
2#include <linux/kernel.h>
3#include <linux/mmzone.h>
4#include <linux/nodemask.h>
5#include <linux/spinlock.h>
6#include <linux/smp.h>
7#include <asm/atomic.h>
8#include <asm/sn/types.h>
9#include <asm/sn/addrs.h>
10#include <asm/sn/nmi.h>
11#include <asm/sn/arch.h>
12#include <asm/sn/sn0/hub.h>
13
14#if 0
15#define NODE_NUM_CPUS(n) CNODE_NUM_CPUS(n)
16#else
17#define NODE_NUM_CPUS(n) CPUS_PER_NODE
18#endif
19
20#define CNODEID_NONE (cnodeid_t)-1
21#define enter_panic_mode() spin_lock(&nmi_lock)
22
23typedef unsigned long machreg_t;
24
25DEFINE_SPINLOCK(nmi_lock);
26
27/*
28 * Let's see what else we need to do here.  Set up sp, gp?
29 */
30void nmi_dump(void)
31{
32 void cont_nmi_dump(void);
33
34 cont_nmi_dump();
35}
36
37void install_cpu_nmi_handler(int slice)
38{
39 nmi_t *nmi_addr;
40
41 nmi_addr = (nmi_t *)NMI_ADDR(get_nasid(), slice);
42 if (nmi_addr->call_addr)
43 return;
44 nmi_addr->magic = NMI_MAGIC;
45 nmi_addr->call_addr = (void *)nmi_dump;
46 nmi_addr->call_addr_c =
47 (void *)(~((unsigned long)(nmi_addr->call_addr)));
48 nmi_addr->call_parm = 0;
49}
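/*
 * call_addr_c holds the one's complement of call_addr; the PROM
 * presumably checks the pair against each other before jumping, so a
 * half-written or corrupted NMI vector is not taken.  If call_addr is
 * already set we keep the existing handler (early return above).
 */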
50
51/*
52 * Copy the cpu registers which have been saved in the IP27prom format
53 * into the eframe format for the node under consideration.
54 */
55
56void nmi_cpu_eframe_save(nasid_t nasid, int slice)
57{
58 struct reg_struct *nr;
59 int i;
60
61 /* Get the pointer to the current cpu's register set. */
62 nr = (struct reg_struct *)
63 (TO_UNCAC(TO_NODE(nasid, IP27_NMI_KREGS_OFFSET)) +
64 slice * IP27_NMI_KREGS_CPU_SIZE);
65
66 printk("NMI nasid %d: slice %d\n", nasid, slice);
67
68 /*
69 * Saved main processor registers
70 */
71 for (i = 0; i < 32; ) {
72 if ((i % 4) == 0)
73 printk("$%2d :", i);
74 printk(" %016lx", nr->gpr[i]);
75
76 i++;
77 if ((i % 4) == 0)
78 printk("\n");
79 }
80
81 printk("Hi : (value lost)\n");
82 printk("Lo : (value lost)\n");
83
84 /*
85 * Saved cp0 registers
86 */
87 printk("epc : %016lx ", nr->epc);
88 print_symbol("%s ", nr->epc);
89 printk("%s\n", print_tainted());
90 printk("ErrEPC: %016lx ", nr->error_epc);
91 print_symbol("%s\n", nr->error_epc);
92 printk("ra : %016lx ", nr->gpr[31]);
93 print_symbol("%s\n", nr->gpr[31]);
94 printk("Status: %08lx ", nr->sr);
95
96 if (nr->sr & ST0_KX)
97 printk("KX ");
98 if (nr->sr & ST0_SX)
99 printk("SX ");
100 if (nr->sr & ST0_UX)
101 printk("UX ");
102
103 switch (nr->sr & ST0_KSU) {
104 case KSU_USER:
105 printk("USER ");
106 break;
107 case KSU_SUPERVISOR:
108 printk("SUPERVISOR ");
109 break;
110 case KSU_KERNEL:
111 printk("KERNEL ");
112 break;
113 default:
114 printk("BAD_MODE ");
115 break;
116 }
117
118 if (nr->sr & ST0_ERL)
119 printk("ERL ");
120 if (nr->sr & ST0_EXL)
121 printk("EXL ");
122 if (nr->sr & ST0_IE)
123 printk("IE ");
124 printk("\n");
125
126 printk("Cause : %08lx\n", nr->cause);
127 printk("PrId : %08x\n", read_c0_prid());
128 printk("BadVA : %016lx\n", nr->badva);
129 printk("CErr : %016lx\n", nr->cache_err);
130 printk("NMI_SR: %016lx\n", nr->nmi_sr);
131
132 printk("\n");
133}
134
135void nmi_dump_hub_irq(nasid_t nasid, int slice)
136{
137 hubreg_t mask0, mask1, pend0, pend1;
138
139 if (slice == 0) { /* Slice A */
140 mask0 = REMOTE_HUB_L(nasid, PI_INT_MASK0_A);
141 mask1 = REMOTE_HUB_L(nasid, PI_INT_MASK1_A);
142 } else { /* Slice B */
143 mask0 = REMOTE_HUB_L(nasid, PI_INT_MASK0_B);
144 mask1 = REMOTE_HUB_L(nasid, PI_INT_MASK1_B);
145 }
146
147 pend0 = REMOTE_HUB_L(nasid, PI_INT_PEND0);
148 pend1 = REMOTE_HUB_L(nasid, PI_INT_PEND1);
149
150 printk("PI_INT_MASK0: %16lx PI_INT_MASK1: %16lx\n", mask0, mask1);
151 printk("PI_INT_PEND0: %16lx PI_INT_PEND1: %16lx\n", pend0, pend1);
152 printk("\n\n");
153}
154
155/*
156 * Copy the cpu registers saved in the IP27prom format into the eframe
157 * format for every cpu slice on the node under consideration.
158 */
159void nmi_node_eframe_save(cnodeid_t cnode)
160{
161 nasid_t nasid;
162 int slice;
163
164 /* Make sure that we have a valid node */
165 if (cnode == CNODEID_NONE)
166 return;
167
168 nasid = COMPACT_TO_NASID_NODEID(cnode);
169 if (nasid == INVALID_NASID)
170 return;
171
172 /* Save the registers into eframe for each cpu */
173	for (slice = 0; slice < NODE_NUM_CPUS(cnode); slice++) {
174 nmi_cpu_eframe_save(nasid, slice);
175 nmi_dump_hub_irq(nasid, slice);
176 }
177}
178
179/*
180 * Save the nmi cpu registers for all cpus in the system.
181 */
182void
183nmi_eframes_save(void)
184{
185 cnodeid_t cnode;
186
187 for_each_online_node(cnode)
188 nmi_node_eframe_save(cnode);
189}
190
191void
192cont_nmi_dump(void)
193{
194#ifndef REAL_NMI_SIGNAL
195 static atomic_t nmied_cpus = ATOMIC_INIT(0);
196
197 atomic_inc(&nmied_cpus);
198#endif
199 /*
200 * Use enter_panic_mode to allow only 1 cpu to proceed
201 */
202 enter_panic_mode();
203
204#ifdef REAL_NMI_SIGNAL
205 /*
206 * Wait up to 15 seconds for the other cpus to respond to the NMI.
207 * If a cpu has not responded after 10 sec, send it 1 additional NMI.
208 * This is for 2 reasons:
209	 *	- sometimes an MMSC fails to NMI all cpus.
210	 *	- on a 512p SN0 system, the MMSC will only send NMIs to
211 * half the cpus. Unfortunately, we don't know which cpus may be
212 * NMIed - it depends on how the site chooses to configure.
213 *
214	 * Note: it has been measured that it takes the MMSC up to 2.3 secs to
215 * send NMIs to all cpus on a 256p system.
216 */
217 for (i=0; i < 1500; i++) {
218 for_each_online_node(node)
219 if (NODEPDA(node)->dump_count == 0)
220 break;
221 if (node == MAX_NUMNODES)
222 break;
223 if (i == 1000) {
224 for_each_online_node(node)
225 if (NODEPDA(node)->dump_count == 0) {
226 cpu = node_to_first_cpu(node);
227 for (n=0; n < CNODE_NUM_CPUS(node); cpu++, n++) {
228 CPUMASK_SETB(nmied_cpus, cpu);
229 /*
230 * cputonasid, cputoslice
231 * needs kernel cpuid
232 */
233 SEND_NMI((cputonasid(cpu)), (cputoslice(cpu)));
234 }
235 }
236
237 }
238 udelay(10000);
239 }
240#else
241 while (atomic_read(&nmied_cpus) != num_online_cpus());
242#endif
243
244 /*
245	 * Save the nmi cpu registers for all cpus in the eframe format.
246 */
247 nmi_eframes_save();
248 LOCAL_HUB_S(NI_PORT_RESET, NPR_PORTRESET | NPR_LOCALRESET);
249}
diff --git a/arch/mips/sgi-ip27/ip27-reset.c b/arch/mips/sgi-ip27/ip27-reset.c
new file mode 100644
index 000000000000..2e16be94c78b
--- /dev/null
+++ b/arch/mips/sgi-ip27/ip27-reset.c
@@ -0,0 +1,81 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Reset an IP27.
7 *
8 * Copyright (C) 1997, 1998, 1999, 2000 by Ralf Baechle
9 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
10 */
11#include <linux/config.h>
12#include <linux/kernel.h>
13#include <linux/sched.h>
14#include <linux/timer.h>
15#include <linux/smp.h>
16#include <linux/mmzone.h>
17#include <linux/nodemask.h>
18
19#include <asm/io.h>
20#include <asm/irq.h>
21#include <asm/reboot.h>
22#include <asm/system.h>
23#include <asm/sgialib.h>
24#include <asm/sn/addrs.h>
25#include <asm/sn/arch.h>
26#include <asm/sn/gda.h>
27#include <asm/sn/sn0/hub.h>
28
29void machine_restart(char *command) __attribute__((noreturn));
30void machine_halt(void) __attribute__((noreturn));
31void machine_power_off(void) __attribute__((noreturn));
32
33#define noreturn while(1); /* Silence gcc. */
34
35/* XXX How to pass the reboot command to the firmware??? */
36static void ip27_machine_restart(char *command)
37{
38#if 0
39 int i;
40#endif
41
42 printk("Reboot started from CPU %d\n", smp_processor_id());
43#ifdef CONFIG_SMP
44 smp_send_stop();
45#endif
46#if 0
47 for_each_online_node(i)
48 REMOTE_HUB_S(COMPACT_TO_NASID_NODEID(i), PROMOP_REG,
49 PROMOP_REBOOT);
50#else
51 LOCAL_HUB_S(NI_PORT_RESET, NPR_PORTRESET | NPR_LOCALRESET);
52#endif
53 noreturn;
54}
55
56static void ip27_machine_halt(void)
57{
58 int i;
59
60#ifdef CONFIG_SMP
61 smp_send_stop();
62#endif
63 for_each_online_node(i)
64 REMOTE_HUB_S(COMPACT_TO_NASID_NODEID(i), PROMOP_REG,
65 PROMOP_RESTART);
66 LOCAL_HUB_S(NI_PORT_RESET, NPR_PORTRESET | NPR_LOCALRESET);
67 noreturn;
68}
69
70static void ip27_machine_power_off(void)
71{
72 /* To do ... */
73 noreturn;
74}
75
76void ip27_reboot_setup(void)
77{
78 _machine_restart = ip27_machine_restart;
79 _machine_halt = ip27_machine_halt;
80 _machine_power_off = ip27_machine_power_off;
81}
diff --git a/arch/mips/sgi-ip27/ip27-smp.c b/arch/mips/sgi-ip27/ip27-smp.c
new file mode 100644
index 000000000000..17f768cba94f
--- /dev/null
+++ b/arch/mips/sgi-ip27/ip27-smp.c
@@ -0,0 +1,225 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General
3 * Public License. See the file "COPYING" in the main directory of this
4 * archive for more details.
5 *
6 * Copyright (C) 2000 - 2001 by Kanoj Sarcar (kanoj@sgi.com)
7 * Copyright (C) 2000 - 2001 by Silicon Graphics, Inc.
8 */
9#include <linux/init.h>
10#include <linux/sched.h>
11#include <linux/nodemask.h>
12#include <asm/page.h>
13#include <asm/processor.h>
14#include <asm/sn/arch.h>
15#include <asm/sn/gda.h>
16#include <asm/sn/intr.h>
17#include <asm/sn/klconfig.h>
18#include <asm/sn/launch.h>
19#include <asm/sn/mapped_kernel.h>
20#include <asm/sn/sn_private.h>
21#include <asm/sn/types.h>
22#include <asm/sn/sn0/hubpi.h>
23#include <asm/sn/sn0/hubio.h>
24#include <asm/sn/sn0/ip27.h>
25
26/*
27 * Takes the PROM-assigned cpu id as its first argument and the
28 * kernel-assigned cpu id as its second.
29 */
30static void alloc_cpupda(cpuid_t cpu, int cpunum)
31{
32 cnodeid_t node = get_cpu_cnode(cpu);
33 nasid_t nasid = COMPACT_TO_NASID_NODEID(node);
34
35 cputonasid(cpunum) = nasid;
36 cpu_data[cpunum].p_nodeid = node;
37 cputoslice(cpunum) = get_cpu_slice(cpu);
38}
39
40static nasid_t get_actual_nasid(lboard_t *brd)
41{
42 klhub_t *hub;
43
44 if (!brd)
45 return INVALID_NASID;
46
47 /* find out if we are a completely disabled brd. */
48 hub = (klhub_t *)find_first_component(brd, KLSTRUCT_HUB);
49 if (!hub)
50 return INVALID_NASID;
51 if (!(hub->hub_info.flags & KLINFO_ENABLE)) /* disabled node brd */
52 return hub->hub_info.physid;
53 else
54 return brd->brd_nasid;
55}
56
57static int do_cpumask(cnodeid_t cnode, nasid_t nasid, int highest)
58{
59 static int tot_cpus_found = 0;
60 lboard_t *brd;
61 klcpu_t *acpu;
62 int cpus_found = 0;
63 cpuid_t cpuid;
64
65 brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_IP27);
66
67 do {
68 acpu = (klcpu_t *)find_first_component(brd, KLSTRUCT_CPU);
69 while (acpu) {
70 cpuid = acpu->cpu_info.virtid;
71 /* cnode is not valid for completely disabled brds */
72 if (get_actual_nasid(brd) == brd->brd_nasid)
73 cpuid_to_compact_node[cpuid] = cnode;
74 if (cpuid > highest)
75 highest = cpuid;
76 /* Only let it join in if it's marked enabled */
77 if ((acpu->cpu_info.flags & KLINFO_ENABLE) &&
78 (tot_cpus_found != NR_CPUS)) {
79 cpu_set(cpuid, phys_cpu_present_map);
80 alloc_cpupda(cpuid, tot_cpus_found);
81 cpus_found++;
82 tot_cpus_found++;
83 }
84 acpu = (klcpu_t *)find_component(brd, (klinfo_t *)acpu,
85 KLSTRUCT_CPU);
86 }
87 brd = KLCF_NEXT(brd);
88 if (!brd)
89 break;
90
91 brd = find_lboard(brd, KLTYPE_IP27);
92 } while (brd);
93
94 return highest;
95}
96
97void cpu_node_probe(void)
98{
99 int i, highest = 0;
100 gda_t *gdap = GDA;
101
102 /*
103 * Initialize the arrays to invalid nodeid (-1)
104 */
105 for (i = 0; i < MAX_COMPACT_NODES; i++)
106 compact_to_nasid_node[i] = INVALID_NASID;
107 for (i = 0; i < MAX_NASIDS; i++)
108 nasid_to_compact_node[i] = INVALID_CNODEID;
109 for (i = 0; i < MAXCPUS; i++)
110 cpuid_to_compact_node[i] = INVALID_CNODEID;
111
112 /*
113 * MCD - this whole "compact node" stuff can probably be dropped,
114 * as we can handle sparse numbering now
115 */
116 nodes_clear(node_online_map);
117 for (i = 0; i < MAX_COMPACT_NODES; i++) {
118 nasid_t nasid = gdap->g_nasidtable[i];
119 if (nasid == INVALID_NASID)
120 break;
121 compact_to_nasid_node[i] = nasid;
122 nasid_to_compact_node[nasid] = i;
123 node_set_online(num_online_nodes());
124 highest = do_cpumask(i, nasid, highest);
125 }
126
127 printk("Discovered %d cpus on %d nodes\n", highest + 1, num_online_nodes());
128}
129
130static void intr_clear_bits(nasid_t nasid, volatile hubreg_t *pend,
131 int base_level)
132{
133 volatile hubreg_t bits;
134 int i;
135
136 /* Check pending interrupts */
137 if ((bits = HUB_L(pend)) != 0)
138 for (i = 0; i < N_INTPEND_BITS; i++)
139 if (bits & (1 << i))
140 LOCAL_HUB_CLR_INTR(base_level + i);
141}
142
143static void intr_clear_all(nasid_t nasid)
144{
145 REMOTE_HUB_S(nasid, PI_INT_MASK0_A, 0);
146 REMOTE_HUB_S(nasid, PI_INT_MASK0_B, 0);
147 REMOTE_HUB_S(nasid, PI_INT_MASK1_A, 0);
148 REMOTE_HUB_S(nasid, PI_INT_MASK1_B, 0);
149 intr_clear_bits(nasid, REMOTE_HUB_ADDR(nasid, PI_INT_PEND0),
150 INT_PEND0_BASELVL);
151 intr_clear_bits(nasid, REMOTE_HUB_ADDR(nasid, PI_INT_PEND1),
152 INT_PEND1_BASELVL);
153}
154
155void __init prom_prepare_cpus(unsigned int max_cpus)
156{
157 cnodeid_t cnode;
158
159 for_each_online_node(cnode)
160 intr_clear_all(COMPACT_TO_NASID_NODEID(cnode));
161
162 replicate_kernel_text();
163
164 /*
165 * Assumption to be fixed: we're always booted on logical / physical
166 * processor 0. While we're always running on logical processor 0
167 * this still means this is physical processor zero; it might for
168 * example be disabled in the firwware.
169 */
170 alloc_cpupda(0, 0);
171}
172
173/*
174 * Launch a slave into smp_bootstrap().  It doesn't take an argument, and we
175 * set sp to the top of the kernel stack of the newly created idle process and
176 * gp to its thread_info so that current_thread_info() will work.
177 */
178void __init prom_boot_secondary(int cpu, struct task_struct *idle)
179{
180 unsigned long gp = (unsigned long) idle->thread_info;
181 unsigned long sp = gp + THREAD_SIZE - 32;
182
183 LAUNCH_SLAVE(cputonasid(cpu),cputoslice(cpu),
184 (launch_proc_t)MAPPED_KERN_RW_TO_K0(smp_bootstrap),
185 0, (void *) sp, (void *) gp);
186}
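/*
 * gp is handed the new idle task's thread_info so current_thread_info()
 * works as soon as the slave enters smp_bootstrap(); sp points just
 * below the top of that task's kernel stack, the 32 bytes apparently
 * being left as the usual MIPS argument save area / frame padding for
 * the first call the slave makes.
 */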
187
188void prom_init_secondary(void)
189{
190 per_cpu_init();
191 local_irq_enable();
192}
193
194void __init prom_cpus_done(void)
195{
196}
197
198void prom_smp_finish(void)
199{
200}
201
202void core_send_ipi(int destid, unsigned int action)
203{
204 int irq;
205
206 switch (action) {
207 case SMP_RESCHEDULE_YOURSELF:
208 irq = CPU_RESCHED_A_IRQ;
209 break;
210 case SMP_CALL_FUNCTION:
211 irq = CPU_CALL_A_IRQ;
212 break;
213 default:
214 panic("sendintr");
215 }
216
217 irq += cputoslice(destid);
218
219 /*
220 * Convert the compact hub number to the NASID to get the correct
221 * part of the address space. Then set the interrupt bit associated
222 * with the CPU we want to send the interrupt to.
223 */
224 REMOTE_HUB_SEND_INTR(COMPACT_TO_NASID_NODEID(cpu_to_node(destid)), irq);
225}
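/*
 * The irq += cputoslice(destid) step assumes the B-slice reschedule and
 * call interrupts are numbered directly after their A-slice versions,
 * so slice 0 keeps the *_A_IRQ value and slice 1 gets the next level on
 * the target hub.
 */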
diff --git a/arch/mips/sgi-ip27/ip27-timer.c b/arch/mips/sgi-ip27/ip27-timer.c
new file mode 100644
index 000000000000..8c1b96fffa76
--- /dev/null
+++ b/arch/mips/sgi-ip27/ip27-timer.c
@@ -0,0 +1,243 @@
1/*
2 * Copyright (C) 1999, 2000, 05 Ralf Baechle (ralf@linux-mips.org)
3 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
4 */
5#include <linux/bcd.h>
6#include <linux/init.h>
7#include <linux/kernel.h>
8#include <linux/sched.h>
9#include <linux/interrupt.h>
10#include <linux/kernel_stat.h>
11#include <linux/param.h>
12#include <linux/time.h>
13#include <linux/timex.h>
14#include <linux/mm.h>
15
16#include <asm/time.h>
17#include <asm/pgtable.h>
18#include <asm/sgialib.h>
19#include <asm/sn/ioc3.h>
20#include <asm/m48t35.h>
21#include <asm/sn/klconfig.h>
22#include <asm/sn/arch.h>
23#include <asm/sn/addrs.h>
24#include <asm/sn/sn_private.h>
25#include <asm/sn/sn0/ip27.h>
26#include <asm/sn/sn0/hub.h>
27
28/*
29 * This is a hack; we really need to figure these values out dynamically
30 *
31 * Since 800 ns works very well with various HUB frequencies, such as
32 * 360, 380, 390 and 400 MHz, we use an 800 ns rtc cycle time.
33 *
34 * Ralf: which clock rate is used to feed the counter?
35 */
36#define NSEC_PER_CYCLE 800
37#define CYCLES_PER_SEC (NSEC_PER_SEC/NSEC_PER_CYCLE)
38#define CYCLES_PER_JIFFY (CYCLES_PER_SEC/HZ)
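/*
 * Worked example of the 800 ns cycle, assuming HZ == 100 purely for
 * illustration (HZ really comes from the kernel configuration):
 *   CYCLES_PER_SEC   = 1,000,000,000 / 800 = 1,250,000
 *   CYCLES_PER_JIFFY = 1,250,000 / 100     = 12,500
 * i.e. each timer tick advances the RT compare register by 12,500 counts.
 */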
39
40#define TICK_SIZE (tick_nsec / 1000)
41
42static unsigned long ct_cur[NR_CPUS]; /* What counter should be at next timer irq */
43static long last_rtc_update; /* Last time the rtc clock got updated */
44
45extern volatile unsigned long wall_jiffies;
46
47#if 0
48static int set_rtc_mmss(unsigned long nowtime)
49{
50 int retval = 0;
51 int real_seconds, real_minutes, cmos_minutes;
52 struct m48t35_rtc *rtc;
53 nasid_t nid;
54
55 nid = get_nasid();
56 rtc = (struct m48t35_rtc *)(KL_CONFIG_CH_CONS_INFO(nid)->memory_base +
57 IOC3_BYTEBUS_DEV0);
58
59 rtc->control |= M48T35_RTC_READ;
60 cmos_minutes = BCD2BIN(rtc->min);
61 rtc->control &= ~M48T35_RTC_READ;
62
63 /*
64 * Since we're only adjusting minutes and seconds, don't interfere with
65 * hour overflow. This avoids messing with unknown time zones but
66 * requires your RTC not to be off by more than 15 minutes
67 */
68 real_seconds = nowtime % 60;
69 real_minutes = nowtime / 60;
70 if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1)
71 real_minutes += 30; /* correct for half hour time zone */
72 real_minutes %= 60;
73
74 if (abs(real_minutes - cmos_minutes) < 30) {
75 real_seconds = BIN2BCD(real_seconds);
76 real_minutes = BIN2BCD(real_minutes);
77 rtc->control |= M48T35_RTC_SET;
78 rtc->sec = real_seconds;
79 rtc->min = real_minutes;
80 rtc->control &= ~M48T35_RTC_SET;
81 } else {
82 printk(KERN_WARNING
83 "set_rtc_mmss: can't update from %d to %d\n",
84 cmos_minutes, real_minutes);
85 retval = -1;
86 }
87
88 return retval;
89}
90#endif
91
92void ip27_rt_timer_interrupt(struct pt_regs *regs)
93{
94 int cpu = smp_processor_id();
95 int cpuA = cputoslice(cpu) == 0;
96 int irq = 9; /* XXX Assign number */
97
98 irq_enter();
99 write_seqlock(&xtime_lock);
100
101again:
102 LOCAL_HUB_S(cpuA ? PI_RT_PEND_A : PI_RT_PEND_B, 0); /* Ack */
103 ct_cur[cpu] += CYCLES_PER_JIFFY;
104 LOCAL_HUB_S(cpuA ? PI_RT_COMPARE_A : PI_RT_COMPARE_B, ct_cur[cpu]);
105
106 if (LOCAL_HUB_L(PI_RT_COUNT) >= ct_cur[cpu])
107 goto again;
108
109 kstat_this_cpu.irqs[irq]++; /* kstat only for bootcpu? */
110
111 if (cpu == 0)
112 do_timer(regs);
113
114 update_process_times(user_mode(regs));
115
116 /*
117 * If we have an externally synchronized Linux clock, then update
118	 * the RTC clock accordingly every ~11 minutes.  rtc_set_time() has to
119	 * be called as close as possible to when a second starts.
120 */
121 if ((time_status & STA_UNSYNC) == 0 &&
122 xtime.tv_sec > last_rtc_update + 660 &&
123 (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
124 (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
125 if (rtc_set_time(xtime.tv_sec) == 0) {
126 last_rtc_update = xtime.tv_sec;
127 } else {
128 last_rtc_update = xtime.tv_sec - 600;
129 /* do it again in 60 s */
130 }
131 }
132
133 write_sequnlock(&xtime_lock);
134 irq_exit();
135}
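/*
 * The "again" loop above guards against falling behind: after the
 * compare register is rewritten, PI_RT_COUNT is reread, and if the
 * counter has already passed the new compare value the compare point is
 * pushed forward by another jiffy straight away, so the hub is never
 * left armed with a compare value that lies in the past.
 */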
136
137unsigned long ip27_do_gettimeoffset(void)
138{
139 unsigned long ct_cur1;
140 ct_cur1 = REMOTE_HUB_L(cputonasid(0), PI_RT_COUNT) + CYCLES_PER_JIFFY;
141 return (ct_cur1 - ct_cur[0]) * NSEC_PER_CYCLE / 1000;
142}
143
144/* Includes for ioc3_init(). */
145#include <asm/sn/types.h>
146#include <asm/sn/sn0/addrs.h>
147#include <asm/sn/sn0/hubni.h>
148#include <asm/sn/sn0/hubio.h>
149#include <asm/pci/bridge.h>
150
151static __init unsigned long get_m48t35_time(void)
152{
153 unsigned int year, month, date, hour, min, sec;
154 struct m48t35_rtc *rtc;
155 nasid_t nid;
156
157 nid = get_nasid();
158 rtc = (struct m48t35_rtc *)(KL_CONFIG_CH_CONS_INFO(nid)->memory_base +
159 IOC3_BYTEBUS_DEV0);
160
161 rtc->control |= M48T35_RTC_READ;
162 sec = rtc->sec;
163 min = rtc->min;
164 hour = rtc->hour;
165 date = rtc->date;
166 month = rtc->month;
167 year = rtc->year;
168 rtc->control &= ~M48T35_RTC_READ;
169
170 sec = BCD2BIN(sec);
171 min = BCD2BIN(min);
172 hour = BCD2BIN(hour);
173 date = BCD2BIN(date);
174 month = BCD2BIN(month);
175 year = BCD2BIN(year);
176
177 year += 1970;
178
179 return mktime(year, month, date, hour, min, sec);
180}
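/*
 * The M48T35 fields are BCD with a two-digit year, and the year += 1970
 * above means the code expects the PROM to store the year as an offset
 * from 1970: e.g. a raw year byte of 0x30 decodes to 30 and ends up as
 * calendar year 2000.
 */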
181
182static void ip27_timer_setup(struct irqaction *irq)
183{
184	/* Overwrite the handler; we use our own way. */
185 irq->handler = no_action;
186
187 /* setup irqaction */
188// setup_irq(IP27_TIMER_IRQ, irq); /* XXX Can't do this yet. */
189}
190
191void __init ip27_time_init(void)
192{
193 xtime.tv_sec = get_m48t35_time();
194 xtime.tv_nsec = 0;
195
196 do_gettimeoffset = ip27_do_gettimeoffset;
197
198 board_timer_setup = ip27_timer_setup;
199}
200
201void __init cpu_time_init(void)
202{
203 lboard_t *board;
204 klcpu_t *cpu;
205 int cpuid;
206
207 /* Don't use ARCS. ARCS is fragile. Klconfig is simple and sane. */
208 board = find_lboard(KL_CONFIG_INFO(get_nasid()), KLTYPE_IP27);
209 if (!board)
210 panic("Can't find board info for myself.");
211
212 cpuid = LOCAL_HUB_L(PI_CPU_NUM) ? IP27_CPU0_INDEX : IP27_CPU1_INDEX;
213 cpu = (klcpu_t *) KLCF_COMP(board, cpuid);
214 if (!cpu)
215 panic("No information about myself?");
216
217 printk("CPU %d clock is %dMHz.\n", smp_processor_id(), cpu->cpu_speed);
218
219 set_c0_status(SRB_TIMOCLK);
220}
221
222void __init hub_rtc_init(cnodeid_t cnode)
223{
224 /*
225 * We only need to initialize the current node.
226 * If this is not the current node then it is a cpuless
227 * node and timeouts will not happen there.
228 */
229 if (get_compact_nodeid() == cnode) {
230 int cpu = smp_processor_id();
231 LOCAL_HUB_S(PI_RT_EN_A, 1);
232 LOCAL_HUB_S(PI_RT_EN_B, 1);
233 LOCAL_HUB_S(PI_PROF_EN_A, 0);
234 LOCAL_HUB_S(PI_PROF_EN_B, 0);
235 ct_cur[cpu] = CYCLES_PER_JIFFY;
236 LOCAL_HUB_S(PI_RT_COMPARE_A, ct_cur[cpu]);
237 LOCAL_HUB_S(PI_RT_COUNT, 0);
238 LOCAL_HUB_S(PI_RT_PEND_A, 0);
239 LOCAL_HUB_S(PI_RT_COMPARE_B, ct_cur[cpu]);
240 LOCAL_HUB_S(PI_RT_COUNT, 0);
241 LOCAL_HUB_S(PI_RT_PEND_B, 0);
242 }
243}
diff --git a/arch/mips/sgi-ip27/ip27-xtalk.c b/arch/mips/sgi-ip27/ip27-xtalk.c
new file mode 100644
index 000000000000..fc82f34a32ce
--- /dev/null
+++ b/arch/mips/sgi-ip27/ip27-xtalk.c
@@ -0,0 +1,135 @@
1/*
2 * Copyright (C) 1999, 2000 Ralf Baechle (ralf@gnu.org)
3 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
4 * Copyright (C) 2004 Christoph Hellwig.
5 * Released under GPL v2.
6 *
7 * Generic XTALK initialization code
8 */
9
10#include <linux/init.h>
11#include <linux/kernel.h>
12#include <asm/sn/types.h>
13#include <asm/sn/klconfig.h>
14#include <asm/sn/hub.h>
15#include <asm/pci/bridge.h>
16#include <asm/xtalk/xtalk.h>
17
18
19#define XBOW_WIDGET_PART_NUM 0x0
20#define XXBOW_WIDGET_PART_NUM 0xd000 /* Xbow in Xbridge */
21#define BASE_XBOW_PORT 8 /* Lowest external port */
22
23extern int bridge_probe(nasid_t nasid, int widget, int masterwid);
24
25static int __init probe_one_port(nasid_t nasid, int widget, int masterwid)
26{
27 widgetreg_t widget_id;
28 xwidget_part_num_t partnum;
29
30 widget_id = *(volatile widgetreg_t *)
31 (RAW_NODE_SWIN_BASE(nasid, widget) + WIDGET_ID);
32 partnum = XWIDGET_PART_NUM(widget_id);
33
34 printk(KERN_INFO "Cpu %d, Nasid 0x%x, widget 0x%x (partnum 0x%x) is ",
35 smp_processor_id(), nasid, widget, partnum);
36
37 switch (partnum) {
38 case BRIDGE_WIDGET_PART_NUM:
39 case XBRIDGE_WIDGET_PART_NUM:
40 bridge_probe(nasid, widget, masterwid);
41 break;
42 default:
43 break;
44 }
45
46 return 0;
47}
48
49static int __init xbow_probe(nasid_t nasid)
50{
51 lboard_t *brd;
52 klxbow_t *xbow_p;
53 unsigned masterwid, i;
54
55 printk("is xbow\n");
56
57 /*
58	 * Found an xbow, so there may be multiple bridges
59	 * behind it; we need to probe the xbow itself.
60 */
61 brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_MIDPLANE8);
62 if (!brd)
63 return -ENODEV;
64
65 xbow_p = (klxbow_t *)find_component(brd, NULL, KLSTRUCT_XBOW);
66 if (!xbow_p)
67 return -ENODEV;
68
69 /*
70	 * Okay, here's an xbow.  Let's arbitrate and find
71	 * out whether we should initialize it.  The enabled
72	 * hub connected at the highest (or lowest) widget
73	 * becomes the master.
74 */
75#ifdef WIDGET_A
76 i = HUB_WIDGET_ID_MAX + 1;
77 do {
78 i--;
79 } while ((!XBOW_PORT_TYPE_HUB(xbow_p, i)) ||
80 (!XBOW_PORT_IS_ENABLED(xbow_p, i)));
81#else
82 i = HUB_WIDGET_ID_MIN - 1;
83 do {
84 i++;
85 } while ((!XBOW_PORT_TYPE_HUB(xbow_p, i)) ||
86 (!XBOW_PORT_IS_ENABLED(xbow_p, i)));
87#endif
88
89 masterwid = i;
90 if (nasid != XBOW_PORT_NASID(xbow_p, i))
91 return 1;
92
93 for (i = HUB_WIDGET_ID_MIN; i <= HUB_WIDGET_ID_MAX; i++) {
94 if (XBOW_PORT_IS_ENABLED(xbow_p, i) &&
95 XBOW_PORT_TYPE_IO(xbow_p, i))
96 probe_one_port(nasid, i, masterwid);
97 }
98
99 return 0;
100}
101
102void __init xtalk_probe_node(cnodeid_t nid)
103{
104 volatile u64 hubreg;
105 nasid_t nasid;
106 xwidget_part_num_t partnum;
107 widgetreg_t widget_id;
108
109 nasid = COMPACT_TO_NASID_NODEID(nid);
110 hubreg = REMOTE_HUB_L(nasid, IIO_LLP_CSR);
111
112 /* check whether the link is up */
113 if (!(hubreg & IIO_LLP_CSR_IS_UP))
114 return;
115
116 widget_id = *(volatile widgetreg_t *)
117 (RAW_NODE_SWIN_BASE(nasid, 0x0) + WIDGET_ID);
118 partnum = XWIDGET_PART_NUM(widget_id);
119
120 printk(KERN_INFO "Cpu %d, Nasid 0x%x: partnum 0x%x is ",
121 smp_processor_id(), nasid, partnum);
122
123 switch (partnum) {
124 case BRIDGE_WIDGET_PART_NUM:
125 bridge_probe(nasid, 0x8, 0xa);
126 break;
127 case XBOW_WIDGET_PART_NUM:
128 case XXBOW_WIDGET_PART_NUM:
129 xbow_probe(nasid);
130 break;
131 default:
132 printk(" unknown widget??\n");
133 break;
134 }
135}