aboutsummaryrefslogtreecommitdiffstats
path: root/arch/sparc64/kernel
diff options
context:
space:
mode:
Diffstat (limited to 'arch/sparc64/kernel')
-rw-r--r--arch/sparc64/kernel/Makefile44
-rw-r--r--arch/sparc64/kernel/auxio.c152
-rw-r--r--arch/sparc64/kernel/binfmt_aout32.c424
-rw-r--r--arch/sparc64/kernel/binfmt_elf32.c159
-rw-r--r--arch/sparc64/kernel/central.c457
-rw-r--r--arch/sparc64/kernel/chmc.c458
-rw-r--r--arch/sparc64/kernel/cpu.c124
-rw-r--r--arch/sparc64/kernel/devices.c144
-rw-r--r--arch/sparc64/kernel/dtlb_backend.S181
-rw-r--r--arch/sparc64/kernel/dtlb_base.S113
-rw-r--r--arch/sparc64/kernel/dtlb_prot.S54
-rw-r--r--arch/sparc64/kernel/ebus.c644
-rw-r--r--arch/sparc64/kernel/entry.S1919
-rw-r--r--arch/sparc64/kernel/etrap.S301
-rw-r--r--arch/sparc64/kernel/head.S782
-rw-r--r--arch/sparc64/kernel/idprom.c49
-rw-r--r--arch/sparc64/kernel/init_task.c35
-rw-r--r--arch/sparc64/kernel/ioctl32.c597
-rw-r--r--arch/sparc64/kernel/iommu_common.c231
-rw-r--r--arch/sparc64/kernel/iommu_common.h48
-rw-r--r--arch/sparc64/kernel/irq.c1269
-rw-r--r--arch/sparc64/kernel/isa.c329
-rw-r--r--arch/sparc64/kernel/itlb_base.S83
-rw-r--r--arch/sparc64/kernel/kprobes.c394
-rw-r--r--arch/sparc64/kernel/module.c209
-rw-r--r--arch/sparc64/kernel/pci.c805
-rw-r--r--arch/sparc64/kernel/pci_common.c1040
-rw-r--r--arch/sparc64/kernel/pci_impl.h49
-rw-r--r--arch/sparc64/kernel/pci_iommu.c855
-rw-r--r--arch/sparc64/kernel/pci_psycho.c1560
-rw-r--r--arch/sparc64/kernel/pci_sabre.c1702
-rw-r--r--arch/sparc64/kernel/pci_schizo.c2187
-rw-r--r--arch/sparc64/kernel/power.c150
-rw-r--r--arch/sparc64/kernel/process.c869
-rw-r--r--arch/sparc64/kernel/ptrace.c646
-rw-r--r--arch/sparc64/kernel/rtrap.S362
-rw-r--r--arch/sparc64/kernel/sbus.c1243
-rw-r--r--arch/sparc64/kernel/semaphore.c251
-rw-r--r--arch/sparc64/kernel/setup.c731
-rw-r--r--arch/sparc64/kernel/signal.c688
-rw-r--r--arch/sparc64/kernel/signal32.c1469
-rw-r--r--arch/sparc64/kernel/smp.c1244
-rw-r--r--arch/sparc64/kernel/sparc64_ksyms.c432
-rw-r--r--arch/sparc64/kernel/starfire.c123
-rw-r--r--arch/sparc64/kernel/sunos_ioctl32.c275
-rw-r--r--arch/sparc64/kernel/sys32.S327
-rw-r--r--arch/sparc64/kernel/sys_sparc.c723
-rw-r--r--arch/sparc64/kernel/sys_sparc32.c1118
-rw-r--r--arch/sparc64/kernel/sys_sunos32.c1343
-rw-r--r--arch/sparc64/kernel/systbls.S251
-rw-r--r--arch/sparc64/kernel/time.c1195
-rw-r--r--arch/sparc64/kernel/trampoline.S368
-rw-r--r--arch/sparc64/kernel/traps.c2118
-rw-r--r--arch/sparc64/kernel/ttable.S280
-rw-r--r--arch/sparc64/kernel/unaligned.c729
-rw-r--r--arch/sparc64/kernel/us2e_cpufreq.c400
-rw-r--r--arch/sparc64/kernel/us3_cpufreq.c255
-rw-r--r--arch/sparc64/kernel/vmlinux.lds.S106
-rw-r--r--arch/sparc64/kernel/winfixup.S417
59 files changed, 35511 insertions, 0 deletions
diff --git a/arch/sparc64/kernel/Makefile b/arch/sparc64/kernel/Makefile
new file mode 100644
index 000000000000..093281bdf85f
--- /dev/null
+++ b/arch/sparc64/kernel/Makefile
@@ -0,0 +1,44 @@
1# $Id: Makefile,v 1.70 2002/02/09 19:49:30 davem Exp $
2# Makefile for the linux kernel.
3#
4
5EXTRA_AFLAGS := -ansi
6EXTRA_CFLAGS := -Werror
7
8extra-y := head.o init_task.o vmlinux.lds
9
10obj-y := process.o setup.o cpu.o idprom.o \
11 traps.o devices.o auxio.o \
12 irq.o ptrace.o time.o sys_sparc.o signal.o \
13 unaligned.o central.o pci.o starfire.o semaphore.o \
14 power.o sbus.o iommu_common.o sparc64_ksyms.o chmc.o
15
16obj-$(CONFIG_PCI) += ebus.o isa.o pci_common.o pci_iommu.o \
17 pci_psycho.o pci_sabre.o pci_schizo.o
18obj-$(CONFIG_SMP) += smp.o trampoline.o
19obj-$(CONFIG_SPARC32_COMPAT) += sys32.o sys_sparc32.o signal32.o ioctl32.o
20obj-$(CONFIG_BINFMT_ELF32) += binfmt_elf32.o
21obj-$(CONFIG_BINFMT_AOUT32) += binfmt_aout32.o
22obj-$(CONFIG_MODULES) += module.o
23obj-$(CONFIG_US3_FREQ) += us3_cpufreq.o
24obj-$(CONFIG_US2E_FREQ) += us2e_cpufreq.o
25obj-$(CONFIG_KPROBES) += kprobes.o
26
27ifdef CONFIG_SUNOS_EMUL
28 obj-y += sys_sunos32.o sunos_ioctl32.o
29else
30 ifdef CONFIG_SOLARIS_EMUL
31 obj-y += sys_sunos32.o sunos_ioctl32.o
32 endif
33endif
34
35ifneq ($(NEW_GCC),y)
36 CMODEL_CFLAG := -mmedlow
37else
38 CMODEL_CFLAG := -m64 -mcmodel=medlow
39endif
40
41head.o: head.S ttable.S itlb_base.S dtlb_base.S dtlb_backend.S dtlb_prot.S \
42 etrap.S rtrap.S winfixup.S entry.S
43
44CFLAGS_ioctl32.o += -Ifs/
diff --git a/arch/sparc64/kernel/auxio.c b/arch/sparc64/kernel/auxio.c
new file mode 100644
index 000000000000..a0716ccc2f4a
--- /dev/null
+++ b/arch/sparc64/kernel/auxio.c
@@ -0,0 +1,152 @@
1/* auxio.c: Probing for the Sparc AUXIO register at boot time.
2 *
3 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
4 *
5 * Refactoring for unified NCR/PCIO support 2002 Eric Brower (ebrower@usa.net)
6 */
7
8#include <linux/config.h>
9#include <linux/kernel.h>
10#include <linux/init.h>
11#include <linux/ioport.h>
12
13#include <asm/oplib.h>
14#include <asm/io.h>
15#include <asm/sbus.h>
16#include <asm/ebus.h>
17#include <asm/auxio.h>
18
19/* This cannot be static, as it is referenced in entry.S */
20void __iomem *auxio_register = NULL;
21
22enum auxio_type {
23 AUXIO_TYPE_NODEV,
24 AUXIO_TYPE_SBUS,
25 AUXIO_TYPE_EBUS
26};
27
28static enum auxio_type auxio_devtype = AUXIO_TYPE_NODEV;
29static DEFINE_SPINLOCK(auxio_lock);
30
31static void __auxio_sbus_set(u8 bits_on, u8 bits_off)
32{
33 if (auxio_register) {
34 unsigned char regval;
35 unsigned long flags;
36 unsigned char newval;
37
38 spin_lock_irqsave(&auxio_lock, flags);
39
40 regval = sbus_readb(auxio_register);
41 newval = regval | bits_on;
42 newval &= ~bits_off;
43 newval &= ~AUXIO_AUX1_MASK;
44 sbus_writeb(newval, auxio_register);
45
46 spin_unlock_irqrestore(&auxio_lock, flags);
47 }
48}
49
50static void __auxio_ebus_set(u8 bits_on, u8 bits_off)
51{
52 if (auxio_register) {
53 unsigned char regval;
54 unsigned long flags;
55 unsigned char newval;
56
57 spin_lock_irqsave(&auxio_lock, flags);
58
59 regval = (u8)readl(auxio_register);
60 newval = regval | bits_on;
61 newval &= ~bits_off;
62 writel((u32)newval, auxio_register);
63
64 spin_unlock_irqrestore(&auxio_lock, flags);
65 }
66}
67
68static inline void __auxio_ebus_set_led(int on)
69{
70 (on) ? __auxio_ebus_set(AUXIO_PCIO_LED, 0) :
71 __auxio_ebus_set(0, AUXIO_PCIO_LED) ;
72}
73
74static inline void __auxio_sbus_set_led(int on)
75{
76 (on) ? __auxio_sbus_set(AUXIO_AUX1_LED, 0) :
77 __auxio_sbus_set(0, AUXIO_AUX1_LED) ;
78}
79
80void auxio_set_led(int on)
81{
82 switch(auxio_devtype) {
83 case AUXIO_TYPE_SBUS:
84 __auxio_sbus_set_led(on);
85 break;
86 case AUXIO_TYPE_EBUS:
87 __auxio_ebus_set_led(on);
88 break;
89 default:
90 break;
91 }
92}
93
94static inline void __auxio_sbus_set_lte(int on)
95{
96 (on) ? __auxio_sbus_set(AUXIO_AUX1_LTE, 0) :
97 __auxio_sbus_set(0, AUXIO_AUX1_LTE) ;
98}
99
100void auxio_set_lte(int on)
101{
102 switch(auxio_devtype) {
103 case AUXIO_TYPE_SBUS:
104 __auxio_sbus_set_lte(on);
105 break;
106 case AUXIO_TYPE_EBUS:
107 /* FALL-THROUGH */
108 default:
109 break;
110 }
111}
112
113void __init auxio_probe(void)
114{
115 struct sbus_bus *sbus;
116 struct sbus_dev *sdev = NULL;
117
118 for_each_sbus(sbus) {
119 for_each_sbusdev(sdev, sbus) {
120 if(!strcmp(sdev->prom_name, "auxio"))
121 goto found_sdev;
122 }
123 }
124
125found_sdev:
126 if (sdev) {
127 auxio_devtype = AUXIO_TYPE_SBUS;
128 auxio_register = sbus_ioremap(&sdev->resource[0], 0,
129 sdev->reg_addrs[0].reg_size,
130 "auxiliaryIO");
131 }
132#ifdef CONFIG_PCI
133 else {
134 struct linux_ebus *ebus;
135 struct linux_ebus_device *edev = NULL;
136
137 for_each_ebus(ebus) {
138 for_each_ebusdev(edev, ebus) {
139 if (!strcmp(edev->prom_name, "auxio"))
140 goto ebus_done;
141 }
142 }
143 ebus_done:
144 if (edev) {
145 auxio_devtype = AUXIO_TYPE_EBUS;
146 auxio_register =
147 ioremap(edev->resource[0].start, sizeof(u32));
148 }
149 }
150 auxio_set_led(AUXIO_LED_ON);
151#endif
152}
diff --git a/arch/sparc64/kernel/binfmt_aout32.c b/arch/sparc64/kernel/binfmt_aout32.c
new file mode 100644
index 000000000000..b2854ef221d0
--- /dev/null
+++ b/arch/sparc64/kernel/binfmt_aout32.c
@@ -0,0 +1,424 @@
1/*
2 * linux/fs/binfmt_aout.c
3 *
4 * Copyright (C) 1991, 1992, 1996 Linus Torvalds
5 *
6 * Hacked a bit by DaveM to make it work with 32-bit SunOS
7 * binaries on the sparc64 port.
8 */
9
10#include <linux/module.h>
11
12#include <linux/sched.h>
13#include <linux/kernel.h>
14#include <linux/mm.h>
15#include <linux/mman.h>
16#include <linux/a.out.h>
17#include <linux/errno.h>
18#include <linux/signal.h>
19#include <linux/string.h>
20#include <linux/fs.h>
21#include <linux/file.h>
22#include <linux/stat.h>
23#include <linux/fcntl.h>
24#include <linux/ptrace.h>
25#include <linux/user.h>
26#include <linux/slab.h>
27#include <linux/binfmts.h>
28#include <linux/personality.h>
29#include <linux/init.h>
30
31#include <asm/system.h>
32#include <asm/uaccess.h>
33#include <asm/pgalloc.h>
34
35static int load_aout32_binary(struct linux_binprm *, struct pt_regs * regs);
36static int load_aout32_library(struct file*);
37static int aout32_core_dump(long signr, struct pt_regs * regs, struct file *file);
38
39extern void dump_thread(struct pt_regs *, struct user *);
40
41static struct linux_binfmt aout32_format = {
42 NULL, THIS_MODULE, load_aout32_binary, load_aout32_library, aout32_core_dump,
43 PAGE_SIZE
44};
45
46static void set_brk(unsigned long start, unsigned long end)
47{
48 start = PAGE_ALIGN(start);
49 end = PAGE_ALIGN(end);
50 if (end <= start)
51 return;
52 down_write(&current->mm->mmap_sem);
53 do_brk(start, end - start);
54 up_write(&current->mm->mmap_sem);
55}
56
57/*
58 * These are the only things you should do on a core-file: use only these
59 * macros to write out all the necessary info.
60 */
61
62static int dump_write(struct file *file, const void *addr, int nr)
63{
64 return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
65}
66
67#define DUMP_WRITE(addr, nr) \
68 if (!dump_write(file, (void *)(addr), (nr))) \
69 goto end_coredump;
70
71#define DUMP_SEEK(offset) \
72if (file->f_op->llseek) { \
73 if (file->f_op->llseek(file,(offset),0) != (offset)) \
74 goto end_coredump; \
75} else file->f_pos = (offset)
76
77/*
78 * Routine writes a core dump image in the current directory.
79 * Currently only a stub-function.
80 *
81 * Note that setuid/setgid files won't make a core-dump if the uid/gid
82 * changed due to the set[u|g]id. It's enforced by the "current->mm->dumpable"
83 * field, which also makes sure the core-dumps won't be recursive if the
84 * dumping of the process results in another error..
85 */
86
87static int aout32_core_dump(long signr, struct pt_regs *regs, struct file *file)
88{
89 mm_segment_t fs;
90 int has_dumped = 0;
91 unsigned long dump_start, dump_size;
92 struct user dump;
93# define START_DATA(u) (u.u_tsize)
94# define START_STACK(u) ((regs->u_regs[UREG_FP]) & ~(PAGE_SIZE - 1))
95
96 fs = get_fs();
97 set_fs(KERNEL_DS);
98 has_dumped = 1;
99 current->flags |= PF_DUMPCORE;
100 strncpy(dump.u_comm, current->comm, sizeof(dump.u_comm));
101 dump.signal = signr;
102 dump_thread(regs, &dump);
103
104/* If the size of the dump file exceeds the rlimit, then see what would happen
105 if we wrote the stack, but not the data area. */
106 if ((dump.u_dsize+dump.u_ssize) >
107 current->signal->rlim[RLIMIT_CORE].rlim_cur)
108 dump.u_dsize = 0;
109
110/* Make sure we have enough room to write the stack and data areas. */
111 if ((dump.u_ssize) >
112 current->signal->rlim[RLIMIT_CORE].rlim_cur)
113 dump.u_ssize = 0;
114
115/* make sure we actually have a data and stack area to dump */
116 set_fs(USER_DS);
117 if (!access_ok(VERIFY_READ, (void __user *) START_DATA(dump), dump.u_dsize))
118 dump.u_dsize = 0;
119 if (!access_ok(VERIFY_READ, (void __user *) START_STACK(dump), dump.u_ssize))
120 dump.u_ssize = 0;
121
122 set_fs(KERNEL_DS);
123/* struct user */
124 DUMP_WRITE(&dump,sizeof(dump));
125/* now we start writing out the user space info */
126 set_fs(USER_DS);
127/* Dump the data area */
128 if (dump.u_dsize != 0) {
129 dump_start = START_DATA(dump);
130 dump_size = dump.u_dsize;
131 DUMP_WRITE(dump_start,dump_size);
132 }
133/* Now prepare to dump the stack area */
134 if (dump.u_ssize != 0) {
135 dump_start = START_STACK(dump);
136 dump_size = dump.u_ssize;
137 DUMP_WRITE(dump_start,dump_size);
138 }
139/* Finally dump the task struct. Not be used by gdb, but could be useful */
140 set_fs(KERNEL_DS);
141 DUMP_WRITE(current,sizeof(*current));
142end_coredump:
143 set_fs(fs);
144 return has_dumped;
145}
146
147/*
148 * create_aout32_tables() parses the env- and arg-strings in new user
149 * memory and creates the pointer tables from them, and puts their
150 * addresses on the "stack", returning the new stack pointer value.
151 */
152
153static u32 __user *create_aout32_tables(char __user *p, struct linux_binprm *bprm)
154{
155 u32 __user *argv;
156 u32 __user *envp;
157 u32 __user *sp;
158 int argc = bprm->argc;
159 int envc = bprm->envc;
160
161 sp = (u32 __user *)((-(unsigned long)sizeof(char *))&(unsigned long)p);
162
163 /* This imposes the proper stack alignment for a new process. */
164 sp = (u32 __user *) (((unsigned long) sp) & ~7);
165 if ((envc+argc+3)&1)
166 --sp;
167
168 sp -= envc+1;
169 envp = sp;
170 sp -= argc+1;
171 argv = sp;
172 put_user(argc,--sp);
173 current->mm->arg_start = (unsigned long) p;
174 while (argc-->0) {
175 char c;
176 put_user(((u32)(unsigned long)(p)),argv++);
177 do {
178 get_user(c,p++);
179 } while (c);
180 }
181 put_user(NULL,argv);
182 current->mm->arg_end = current->mm->env_start = (unsigned long) p;
183 while (envc-->0) {
184 char c;
185 put_user(((u32)(unsigned long)(p)),envp++);
186 do {
187 get_user(c,p++);
188 } while (c);
189 }
190 put_user(NULL,envp);
191 current->mm->env_end = (unsigned long) p;
192 return sp;
193}
194
195/*
196 * These are the functions used to load a.out style executables and shared
197 * libraries. There is no binary dependent code anywhere else.
198 */
199
200static int load_aout32_binary(struct linux_binprm * bprm, struct pt_regs * regs)
201{
202 struct exec ex;
203 unsigned long error;
204 unsigned long fd_offset;
205 unsigned long rlim;
206 unsigned long orig_thr_flags;
207 int retval;
208
209 ex = *((struct exec *) bprm->buf); /* exec-header */
210 if ((N_MAGIC(ex) != ZMAGIC && N_MAGIC(ex) != OMAGIC &&
211 N_MAGIC(ex) != QMAGIC && N_MAGIC(ex) != NMAGIC) ||
212 N_TRSIZE(ex) || N_DRSIZE(ex) ||
213 bprm->file->f_dentry->d_inode->i_size < ex.a_text+ex.a_data+N_SYMSIZE(ex)+N_TXTOFF(ex)) {
214 return -ENOEXEC;
215 }
216
217 fd_offset = N_TXTOFF(ex);
218
219 /* Check initial limits. This avoids letting people circumvent
220 * size limits imposed on them by creating programs with large
221 * arrays in the data or bss.
222 */
223 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
224 if (rlim >= RLIM_INFINITY)
225 rlim = ~0;
226 if (ex.a_data + ex.a_bss > rlim)
227 return -ENOMEM;
228
229 /* Flush all traces of the currently running executable */
230 retval = flush_old_exec(bprm);
231 if (retval)
232 return retval;
233
234 /* OK, This is the point of no return */
235 set_personality(PER_SUNOS);
236
237 current->mm->end_code = ex.a_text +
238 (current->mm->start_code = N_TXTADDR(ex));
239 current->mm->end_data = ex.a_data +
240 (current->mm->start_data = N_DATADDR(ex));
241 current->mm->brk = ex.a_bss +
242 (current->mm->start_brk = N_BSSADDR(ex));
243
244 set_mm_counter(current->mm, rss, 0);
245 current->mm->mmap = NULL;
246 compute_creds(bprm);
247 current->flags &= ~PF_FORKNOEXEC;
248 if (N_MAGIC(ex) == NMAGIC) {
249 loff_t pos = fd_offset;
250 /* Fuck me plenty... */
251 down_write(&current->mm->mmap_sem);
252 error = do_brk(N_TXTADDR(ex), ex.a_text);
253 up_write(&current->mm->mmap_sem);
254 bprm->file->f_op->read(bprm->file, (char __user *)N_TXTADDR(ex),
255 ex.a_text, &pos);
256 down_write(&current->mm->mmap_sem);
257 error = do_brk(N_DATADDR(ex), ex.a_data);
258 up_write(&current->mm->mmap_sem);
259 bprm->file->f_op->read(bprm->file, (char __user *)N_DATADDR(ex),
260 ex.a_data, &pos);
261 goto beyond_if;
262 }
263
264 if (N_MAGIC(ex) == OMAGIC) {
265 loff_t pos = fd_offset;
266 down_write(&current->mm->mmap_sem);
267 do_brk(N_TXTADDR(ex) & PAGE_MASK,
268 ex.a_text+ex.a_data + PAGE_SIZE - 1);
269 up_write(&current->mm->mmap_sem);
270 bprm->file->f_op->read(bprm->file, (char __user *)N_TXTADDR(ex),
271 ex.a_text+ex.a_data, &pos);
272 } else {
273 static unsigned long error_time;
274 if ((ex.a_text & 0xfff || ex.a_data & 0xfff) &&
275 (N_MAGIC(ex) != NMAGIC) && (jiffies-error_time) > 5*HZ)
276 {
277 printk(KERN_NOTICE "executable not page aligned\n");
278 error_time = jiffies;
279 }
280
281 if (!bprm->file->f_op->mmap) {
282 loff_t pos = fd_offset;
283 down_write(&current->mm->mmap_sem);
284 do_brk(0, ex.a_text+ex.a_data);
285 up_write(&current->mm->mmap_sem);
286 bprm->file->f_op->read(bprm->file,
287 (char __user *)N_TXTADDR(ex),
288 ex.a_text+ex.a_data, &pos);
289 goto beyond_if;
290 }
291
292 down_write(&current->mm->mmap_sem);
293 error = do_mmap(bprm->file, N_TXTADDR(ex), ex.a_text,
294 PROT_READ | PROT_EXEC,
295 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
296 fd_offset);
297 up_write(&current->mm->mmap_sem);
298
299 if (error != N_TXTADDR(ex)) {
300 send_sig(SIGKILL, current, 0);
301 return error;
302 }
303
304 down_write(&current->mm->mmap_sem);
305 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
306 PROT_READ | PROT_WRITE | PROT_EXEC,
307 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
308 fd_offset + ex.a_text);
309 up_write(&current->mm->mmap_sem);
310 if (error != N_DATADDR(ex)) {
311 send_sig(SIGKILL, current, 0);
312 return error;
313 }
314 }
315beyond_if:
316 set_binfmt(&aout32_format);
317
318 set_brk(current->mm->start_brk, current->mm->brk);
319
320 /* Make sure STACK_TOP returns the right thing. */
321 orig_thr_flags = current_thread_info()->flags;
322 current_thread_info()->flags |= _TIF_32BIT;
323
324 retval = setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT);
325 if (retval < 0) {
326 current_thread_info()->flags = orig_thr_flags;
327
328 /* Someone check-me: is this error path enough? */
329 send_sig(SIGKILL, current, 0);
330 return retval;
331 }
332
333 current->mm->start_stack =
334 (unsigned long) create_aout32_tables((char __user *)bprm->p, bprm);
335 if (!(orig_thr_flags & _TIF_32BIT)) {
336 unsigned long pgd_cache = get_pgd_cache(current->mm->pgd);
337
338 __asm__ __volatile__("stxa\t%0, [%1] %2\n\t"
339 "membar #Sync"
340 : /* no outputs */
341 : "r" (pgd_cache),
342 "r" (TSB_REG), "i" (ASI_DMMU));
343 }
344 start_thread32(regs, ex.a_entry, current->mm->start_stack);
345 if (current->ptrace & PT_PTRACED)
346 send_sig(SIGTRAP, current, 0);
347 return 0;
348}
349
350/* N.B. Move to .h file and use code in fs/binfmt_aout.c? */
351static int load_aout32_library(struct file *file)
352{
353 struct inode * inode;
354 unsigned long bss, start_addr, len;
355 unsigned long error;
356 int retval;
357 struct exec ex;
358
359 inode = file->f_dentry->d_inode;
360
361 retval = -ENOEXEC;
362 error = kernel_read(file, 0, (char *) &ex, sizeof(ex));
363 if (error != sizeof(ex))
364 goto out;
365
366 /* We come in here for the regular a.out style of shared libraries */
367 if ((N_MAGIC(ex) != ZMAGIC && N_MAGIC(ex) != QMAGIC) || N_TRSIZE(ex) ||
368 N_DRSIZE(ex) || ((ex.a_entry & 0xfff) && N_MAGIC(ex) == ZMAGIC) ||
369 inode->i_size < ex.a_text+ex.a_data+N_SYMSIZE(ex)+N_TXTOFF(ex)) {
370 goto out;
371 }
372
373 if (N_MAGIC(ex) == ZMAGIC && N_TXTOFF(ex) &&
374 (N_TXTOFF(ex) < inode->i_sb->s_blocksize)) {
375 printk("N_TXTOFF < BLOCK_SIZE. Please convert library\n");
376 goto out;
377 }
378
379 if (N_FLAGS(ex))
380 goto out;
381
382 /* For QMAGIC, the starting address is 0x20 into the page. We mask
383 this off to get the starting address for the page */
384
385 start_addr = ex.a_entry & 0xfffff000;
386
387 /* Now use mmap to map the library into memory. */
388 down_write(&current->mm->mmap_sem);
389 error = do_mmap(file, start_addr, ex.a_text + ex.a_data,
390 PROT_READ | PROT_WRITE | PROT_EXEC,
391 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
392 N_TXTOFF(ex));
393 up_write(&current->mm->mmap_sem);
394 retval = error;
395 if (error != start_addr)
396 goto out;
397
398 len = PAGE_ALIGN(ex.a_text + ex.a_data);
399 bss = ex.a_text + ex.a_data + ex.a_bss;
400 if (bss > len) {
401 down_write(&current->mm->mmap_sem);
402 error = do_brk(start_addr + len, bss - len);
403 up_write(&current->mm->mmap_sem);
404 retval = error;
405 if (error != start_addr + len)
406 goto out;
407 }
408 retval = 0;
409out:
410 return retval;
411}
412
413static int __init init_aout32_binfmt(void)
414{
415 return register_binfmt(&aout32_format);
416}
417
418static void __exit exit_aout32_binfmt(void)
419{
420 unregister_binfmt(&aout32_format);
421}
422
423module_init(init_aout32_binfmt);
424module_exit(exit_aout32_binfmt);
diff --git a/arch/sparc64/kernel/binfmt_elf32.c b/arch/sparc64/kernel/binfmt_elf32.c
new file mode 100644
index 000000000000..a1a12d2aa353
--- /dev/null
+++ b/arch/sparc64/kernel/binfmt_elf32.c
@@ -0,0 +1,159 @@
1/*
2 * binfmt_elf32.c: Support 32-bit Sparc ELF binaries on Ultra.
3 *
4 * Copyright (C) 1995, 1996, 1997, 1998 David S. Miller (davem@redhat.com)
5 * Copyright (C) 1995, 1996, 1997, 1998 Jakub Jelinek (jj@ultra.linux.cz)
6 */
7
8#define ELF_ARCH EM_SPARC
9#define ELF_CLASS ELFCLASS32
10#define ELF_DATA ELFDATA2MSB;
11
12/* For the most part we present code dumps in the format
13 * Solaris does.
14 */
15typedef unsigned int elf_greg_t;
16#define ELF_NGREG 38
17typedef elf_greg_t elf_gregset_t[ELF_NGREG];
18
19/* Format is:
20 * G0 --> G7
21 * O0 --> O7
22 * L0 --> L7
23 * I0 --> I7
24 * PSR, PC, nPC, Y, WIM, TBR
25 */
26#include <asm/psrcompat.h>
27#define ELF_CORE_COPY_REGS(__elf_regs, __pt_regs) \
28do { unsigned int *dest = &(__elf_regs[0]); \
29 struct pt_regs *src = (__pt_regs); \
30 unsigned int __user *sp; \
31 int i; \
32 for(i = 0; i < 16; i++) \
33 dest[i] = (unsigned int) src->u_regs[i];\
34 /* Don't try this at home kids... */ \
35 sp = (unsigned int __user *) (src->u_regs[14] & \
36 0x00000000fffffffc); \
37 for(i = 0; i < 16; i++) \
38 __get_user(dest[i+16], &sp[i]); \
39 dest[32] = tstate_to_psr(src->tstate); \
40 dest[33] = (unsigned int) src->tpc; \
41 dest[34] = (unsigned int) src->tnpc; \
42 dest[35] = src->y; \
43 dest[36] = dest[37] = 0; /* XXX */ \
44} while(0);
45
46typedef struct {
47 union {
48 unsigned int pr_regs[32];
49 unsigned long pr_dregs[16];
50 } pr_fr;
51 unsigned int __unused;
52 unsigned int pr_fsr;
53 unsigned char pr_qcnt;
54 unsigned char pr_q_entrysize;
55 unsigned char pr_en;
56 unsigned int pr_q[64];
57} elf_fpregset_t;
58
59/* UltraSparc extensions. Still unused, but will be eventually. */
60typedef struct {
61 unsigned int pr_type;
62 unsigned int pr_align;
63 union {
64 struct {
65 union {
66 unsigned int pr_regs[32];
67 unsigned long pr_dregs[16];
68 long double pr_qregs[8];
69 } pr_xfr;
70 } pr_v8p;
71 unsigned int pr_xfsr;
72 unsigned int pr_fprs;
73 unsigned int pr_xg[8];
74 unsigned int pr_xo[8];
75 unsigned long pr_tstate;
76 unsigned int pr_filler[8];
77 } pr_un;
78} elf_xregset_t;
79
80#define elf_check_arch(x) (((x)->e_machine == EM_SPARC) || ((x)->e_machine == EM_SPARC32PLUS))
81
82#define ELF_ET_DYN_BASE 0x70000000
83
84
85#include <asm/processor.h>
86#include <linux/module.h>
87#include <linux/config.h>
88#include <linux/elfcore.h>
89#include <linux/compat.h>
90
91#define elf_prstatus elf_prstatus32
92struct elf_prstatus32
93{
94 struct elf_siginfo pr_info; /* Info associated with signal */
95 short pr_cursig; /* Current signal */
96 unsigned int pr_sigpend; /* Set of pending signals */
97 unsigned int pr_sighold; /* Set of held signals */
98 pid_t pr_pid;
99 pid_t pr_ppid;
100 pid_t pr_pgrp;
101 pid_t pr_sid;
102 struct compat_timeval pr_utime; /* User time */
103 struct compat_timeval pr_stime; /* System time */
104 struct compat_timeval pr_cutime; /* Cumulative user time */
105 struct compat_timeval pr_cstime; /* Cumulative system time */
106 elf_gregset_t pr_reg; /* GP registers */
107 int pr_fpvalid; /* True if math co-processor being used. */
108};
109
110#define elf_prpsinfo elf_prpsinfo32
111struct elf_prpsinfo32
112{
113 char pr_state; /* numeric process state */
114 char pr_sname; /* char for pr_state */
115 char pr_zomb; /* zombie */
116 char pr_nice; /* nice val */
117 unsigned int pr_flag; /* flags */
118 u16 pr_uid;
119 u16 pr_gid;
120 pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
121 /* Lots missing */
122 char pr_fname[16]; /* filename of executable */
123 char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
124};
125
126#include <linux/highuid.h>
127
128#undef NEW_TO_OLD_UID
129#undef NEW_TO_OLD_GID
130#define NEW_TO_OLD_UID(uid) ((uid) > 65535) ? (u16)overflowuid : (u16)(uid)
131#define NEW_TO_OLD_GID(gid) ((gid) > 65535) ? (u16)overflowgid : (u16)(gid)
132
133#include <linux/time.h>
134
135#undef cputime_to_timeval
136#define cputime_to_timeval cputime_to_compat_timeval
137static __inline__ void
138cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value)
139{
140 unsigned long jiffies = cputime_to_jiffies(cputime);
141 value->tv_usec = (jiffies % HZ) * (1000000L / HZ);
142 value->tv_sec = jiffies / HZ;
143}
144
145#define elf_addr_t u32
146#undef start_thread
147#define start_thread start_thread32
148#define init_elf_binfmt init_elf32_binfmt
149
150MODULE_DESCRIPTION("Binary format loader for compatibility with 32bit SparcLinux binaries on the Ultra");
151MODULE_AUTHOR("Eric Youngdale, David S. Miller, Jakub Jelinek");
152
153#undef MODULE_DESCRIPTION
154#undef MODULE_AUTHOR
155
156#undef TASK_SIZE
157#define TASK_SIZE 0xf0000000
158
159#include "../../../fs/binfmt_elf.c"
diff --git a/arch/sparc64/kernel/central.c b/arch/sparc64/kernel/central.c
new file mode 100644
index 000000000000..3d184a784968
--- /dev/null
+++ b/arch/sparc64/kernel/central.c
@@ -0,0 +1,457 @@
1/* $Id: central.c,v 1.15 2001/12/19 00:29:51 davem Exp $
2 * central.c: Central FHC driver for Sunfire/Starfire/Wildfire.
3 *
4 * Copyright (C) 1997, 1999 David S. Miller (davem@redhat.com)
5 */
6
7#include <linux/kernel.h>
8#include <linux/types.h>
9#include <linux/string.h>
10#include <linux/timer.h>
11#include <linux/sched.h>
12#include <linux/delay.h>
13#include <linux/init.h>
14#include <linux/bootmem.h>
15
16#include <asm/page.h>
17#include <asm/fhc.h>
18#include <asm/starfire.h>
19
struct linux_central *central_bus = NULL;	/* the probed /central bus, if any */
struct linux_fhc *fhc_list = NULL;		/* chain of all probed FHCs */

/* The central FHC is the one hanging directly off the central bus node. */
#define IS_CENTRAL_FHC(__fhc)	((__fhc) == central_bus->child)
24
/* Fatal: a required node or property was missing during the
 * central/FHC probe.  Report the failing source line and halt
 * back into the PROM.
 */
static void central_probe_failure(int line)
{
	prom_printf("CENTRAL: Critical device probe failure at central.c:%d\n", line);
	prom_halt();
}
31
32static void central_ranges_init(int cnode, struct linux_central *central)
33{
34 int success;
35
36 central->num_central_ranges = 0;
37 success = prom_getproperty(central->prom_node, "ranges",
38 (char *) central->central_ranges,
39 sizeof (central->central_ranges));
40 if (success != -1)
41 central->num_central_ranges = (success/sizeof(struct linux_prom_ranges));
42}
43
44static void fhc_ranges_init(int fnode, struct linux_fhc *fhc)
45{
46 int success;
47
48 fhc->num_fhc_ranges = 0;
49 success = prom_getproperty(fhc->prom_node, "ranges",
50 (char *) fhc->fhc_ranges,
51 sizeof (fhc->fhc_ranges));
52 if (success != -1)
53 fhc->num_fhc_ranges = (success/sizeof(struct linux_prom_ranges));
54}
55
/* Range application routines are exported to various drivers,
 * so do not __init this.
 */
/* Translate each child-space register (which_io/phys_addr pair) into
 * its parent's address space using the matching PROM "ranges" entry.
 * A register with no matching range is a fatal probe error.
 */
static void adjust_regs(struct linux_prom_registers *regp, int nregs,
			struct linux_prom_ranges *rangep, int nranges)
{
	int regc, rngc;

	for (regc = 0; regc < nregs; regc++) {
		for (rngc = 0; rngc < nranges; rngc++)
			if (regp[regc].which_io == rangep[rngc].ot_child_space)
				break;	/* Found it */
		if (rngc == nranges)	/* oops */
			central_probe_failure(__LINE__);
		regp[regc].which_io = rangep[rngc].ot_parent_space;
		regp[regc].phys_addr -= rangep[rngc].ot_child_base;
		regp[regc].phys_addr += rangep[rngc].ot_parent_base;
	}
}
75
76/* Apply probed fhc ranges to registers passed, if no ranges return. */
77void apply_fhc_ranges(struct linux_fhc *fhc,
78 struct linux_prom_registers *regs,
79 int nregs)
80{
81 if (fhc->num_fhc_ranges)
82 adjust_regs(regs, nregs, fhc->fhc_ranges,
83 fhc->num_fhc_ranges);
84}
85
86/* Apply probed central ranges to registers passed, if no ranges return. */
87void apply_central_ranges(struct linux_central *central,
88 struct linux_prom_registers *regs, int nregs)
89{
90 if (central->num_central_ranges)
91 adjust_regs(regs, nregs, central->central_ranges,
92 central->num_central_ranges);
93}
94
95void * __init central_alloc_bootmem(unsigned long size)
96{
97 void *ret;
98
99 ret = __alloc_bootmem(size, SMP_CACHE_BYTES, 0UL);
100 if (ret != NULL)
101 memset(ret, 0, size);
102
103 return ret;
104}
105
106static unsigned long prom_reg_to_paddr(struct linux_prom_registers *r)
107{
108 unsigned long ret = ((unsigned long) r->which_io) << 32;
109
110 return ret | (unsigned long) r->phys_addr;
111}
112
/* Walk the PROM tree for all top-level "fhc" (FireHose Controller)
 * nodes, allocate software state for each, and chain them onto
 * fhc_list.  The central FHC (if any) has already been probed by
 * central_probe(), so these are the non-central boards.  At least
 * one top-level FHC must exist or the probe fails fatally.
 */
static void probe_other_fhcs(void)
{
	struct linux_prom64_registers fpregs[6];
	char namebuf[128];
	int node;

	node = prom_getchild(prom_root_node);
	node = prom_searchsiblings(node, "fhc");
	if (node == 0)
		central_probe_failure(__LINE__);
	while (node) {
		struct linux_fhc *fhc;
		int board;
		u32 tmp;

		fhc = (struct linux_fhc *)
			central_alloc_bootmem(sizeof(struct linux_fhc));
		if (fhc == NULL)
			central_probe_failure(__LINE__);

		/* Link it into the FHC chain. */
		fhc->next = fhc_list;
		fhc_list = fhc;

		/* Toplevel FHCs have no parent. */
		fhc->parent = NULL;

		fhc->prom_node = node;
		prom_getstring(node, "name", namebuf, sizeof(namebuf));
		strcpy(fhc->prom_name, namebuf);
		fhc_ranges_init(node, fhc);

		/* Non-central FHC's have 64-bit OBP format registers. */
		if (prom_getproperty(node, "reg",
				     (char *)&fpregs[0], sizeof(fpregs)) == -1)
			central_probe_failure(__LINE__);

		/* Only central FHC needs special ranges applied. */
		fhc->fhc_regs.pregs = fpregs[0].phys_addr;
		fhc->fhc_regs.ireg = fpregs[1].phys_addr;
		fhc->fhc_regs.ffregs = fpregs[2].phys_addr;
		fhc->fhc_regs.sregs = fpregs[3].phys_addr;
		fhc->fhc_regs.uregs = fpregs[4].phys_addr;
		fhc->fhc_regs.tregs = fpregs[5].phys_addr;

		/* "board#" may be absent; -1 then means unknown. */
		board = prom_getintdefault(node, "board#", -1);
		fhc->board = board;

		/* The JTAG-master status comes from the JTAG control reg. */
		tmp = upa_readl(fhc->fhc_regs.pregs + FHC_PREGS_JCTRL);
		if ((tmp & FHC_JTAG_CTRL_MENAB) != 0)
			fhc->jtag_master = 1;
		else
			fhc->jtag_master = 0;

		tmp = upa_readl(fhc->fhc_regs.pregs + FHC_PREGS_ID);
		printk("FHC(board %d): Version[%x] PartID[%x] Manuf[%x] %s\n",
		       board,
		       (tmp & FHC_ID_VERS) >> 28,
		       (tmp & FHC_ID_PARTID) >> 12,
		       (tmp & FHC_ID_MANUF) >> 1,
		       (fhc->jtag_master ? "(JTAG Master)" : ""));

		/* This bit must be set in all non-central FHC's in
		 * the system.  When it is clear, this identifies
		 * the central board.
		 */
		tmp = upa_readl(fhc->fhc_regs.pregs + FHC_PREGS_CTRL);
		tmp |= FHC_CONTROL_IXIST;
		upa_writel(tmp, fhc->fhc_regs.pregs + FHC_PREGS_CTRL);

		/* Look for the next FHC. */
		node = prom_getsibling(node);
		if (node == 0)
			break;
		node = prom_searchsiblings(node, "fhc");
		if (node == 0)
			break;
	}
}
192
/* Find the "clock-board" node below the central FHC, translate its
 * registers through the FHC and central ranges, and derive the
 * number of board slots from the clock status register (and, when a
 * third "reg" entry exists, the clock version register).
 */
static void probe_clock_board(struct linux_central *central,
			      struct linux_fhc *fhc,
			      int cnode, int fnode)
{
	struct linux_prom_registers cregs[3];
	int clknode, nslots, tmp, nregs;

	clknode = prom_searchsiblings(prom_getchild(fnode), "clock-board");
	if (clknode == 0 || clknode == -1)
		central_probe_failure(__LINE__);

	nregs = prom_getproperty(clknode, "reg", (char *)&cregs[0], sizeof(cregs));
	if (nregs == -1)
		central_probe_failure(__LINE__);

	nregs /= sizeof(struct linux_prom_registers);
	apply_fhc_ranges(fhc, &cregs[0], nregs);
	apply_central_ranges(central, &cregs[0], nregs);
	central->cfreg = prom_reg_to_paddr(&cregs[0]);
	central->clkregs = prom_reg_to_paddr(&cregs[1]);

	/* The clock-version register only exists when the node has
	 * three "reg" entries.
	 */
	if (nregs == 2)
		central->clkver = 0UL;
	else
		central->clkver = prom_reg_to_paddr(&cregs[2]);

	/* The top two bits of CLOCK_STAT1 select the slot count. */
	tmp = upa_readb(central->clkregs + CLOCK_STAT1);
	tmp &= 0xc0;
	switch(tmp) {
	case 0x40:
		nslots = 16;
		break;
	case 0xc0:
		nslots = 8;
		break;
	case 0x80:
		/* 4 or 5 slots, distinguished via the clock version
		 * register when present; otherwise fall through to
		 * the 4-slot default.
		 */
		if (central->clkver != 0UL &&
		    upa_readb(central->clkver) != 0) {
			if ((upa_readb(central->clkver) & 0x80) != 0)
				nslots = 4;
			else
				nslots = 5;
			break;
		}
		/* fallthrough */
	default:
		nslots = 4;
		break;
	};
	central->slots = nslots;
	printk("CENTRAL: Detected %d slot Enterprise system. cfreg[%02x] cver[%02x]\n",
	       central->slots, upa_readb(central->cfreg),
	       (central->clkver ? upa_readb(central->clkver) : 0x00));
}
246
/* Quiesce one FHC interrupt source: clear the interrupt-clear
 * register and knock down bit 31 of the mapping register.  Each
 * write is followed by a read of the same register, presumably to
 * flush the write to the device before proceeding -- TODO confirm
 * against the FHC documentation.
 */
static void ZAP(unsigned long iclr, unsigned long imap)
{
	u32 imap_tmp;

	upa_writel(0, iclr);
	upa_readl(iclr);
	imap_tmp = upa_readl(imap);
	imap_tmp &= ~(0x80000000);
	upa_writel(imap_tmp, imap);
	upa_readl(imap);
}
258
/* Final hardware setup for every probed FHC on fhc_list: quiesce
 * all four interrupt sources and reprogram the control register.
 */
static void init_all_fhc_hw(void)
{
	struct linux_fhc *fhc;

	for (fhc = fhc_list; fhc != NULL; fhc = fhc->next) {
		u32 tmp;

		/* Clear all of the interrupt mapping registers
		 * just in case OBP left them in a foul state.
		 */
		ZAP(fhc->fhc_regs.ffregs + FHC_FFREGS_ICLR,
		    fhc->fhc_regs.ffregs + FHC_FFREGS_IMAP);
		ZAP(fhc->fhc_regs.sregs + FHC_SREGS_ICLR,
		    fhc->fhc_regs.sregs + FHC_SREGS_IMAP);
		ZAP(fhc->fhc_regs.uregs + FHC_UREGS_ICLR,
		    fhc->fhc_regs.uregs + FHC_UREGS_IMAP);
		ZAP(fhc->fhc_regs.tregs + FHC_TREGS_ICLR,
		    fhc->fhc_regs.tregs + FHC_TREGS_IMAP);

		/* Setup FHC control register. */
		tmp = upa_readl(fhc->fhc_regs.pregs + FHC_PREGS_CTRL);

		/* All non-central boards have this bit set. */
		if (! IS_CENTRAL_FHC(fhc))
			tmp |= FHC_CONTROL_IXIST;

		/* For all FHCs, clear the firmware synchronization
		 * line and both low power mode enables.
		 */
		tmp &= ~(FHC_CONTROL_AOFF | FHC_CONTROL_BOFF |
			 FHC_CONTROL_SLINE);

		upa_writel(tmp, fhc->fhc_regs.pregs + FHC_PREGS_CTRL);
		upa_readl(fhc->fhc_regs.pregs + FHC_PREGS_CTRL);	/* flush */
	}

}
296
/* Probe the /central PROM node (Sunfire-class machines).  If the
 * node is absent, optionally run the Starfire cpu setup hook and
 * return.  Otherwise build software state for the central bus and
 * its FHC, translate and record the FHC register addresses, probe
 * the clock board and all remaining FHCs, then initialize the FHC
 * hardware.  All failures are fatal via central_probe_failure().
 */
void central_probe(void)
{
	struct linux_prom_registers fpregs[6];
	struct linux_fhc *fhc;
	char namebuf[128];
	int cnode, fnode, err;

	cnode = prom_finddevice("/central");
	if (cnode == 0 || cnode == -1) {
		/* No central bus on this machine. */
		if (this_is_starfire)
			starfire_cpu_setup();
		return;
	}

	/* Ok we got one, grab some memory for software state. */
	central_bus = (struct linux_central *)
		central_alloc_bootmem(sizeof(struct linux_central));
	if (central_bus == NULL)
		central_probe_failure(__LINE__);

	fhc = (struct linux_fhc *)
		central_alloc_bootmem(sizeof(struct linux_fhc));
	if (fhc == NULL)
		central_probe_failure(__LINE__);

	/* First init central. */
	central_bus->child = fhc;
	central_bus->prom_node = cnode;

	prom_getstring(cnode, "name", namebuf, sizeof(namebuf));
	strcpy(central_bus->prom_name, namebuf);

	central_ranges_init(cnode, central_bus);

	/* And then central's FHC. */
	fhc->next = fhc_list;
	fhc_list = fhc;

	fhc->parent = central_bus;
	fnode = prom_searchsiblings(prom_getchild(cnode), "fhc");
	if (fnode == 0 || fnode == -1)
		central_probe_failure(__LINE__);

	fhc->prom_node = fnode;
	prom_getstring(fnode, "name", namebuf, sizeof(namebuf));
	strcpy(fhc->prom_name, namebuf);

	fhc_ranges_init(fnode, fhc);

	/* Now, map in FHC register set.  Unlike the non-central FHCs
	 * (64-bit regs, see probe_other_fhcs), central's FHC uses
	 * 32-bit registers and needs the central ranges applied.
	 */
	if (prom_getproperty(fnode, "reg", (char *)&fpregs[0], sizeof(fpregs)) == -1)
		central_probe_failure(__LINE__);

	apply_central_ranges(central_bus, &fpregs[0], 6);

	fhc->fhc_regs.pregs = prom_reg_to_paddr(&fpregs[0]);
	fhc->fhc_regs.ireg = prom_reg_to_paddr(&fpregs[1]);
	fhc->fhc_regs.ffregs = prom_reg_to_paddr(&fpregs[2]);
	fhc->fhc_regs.sregs = prom_reg_to_paddr(&fpregs[3]);
	fhc->fhc_regs.uregs = prom_reg_to_paddr(&fpregs[4]);
	fhc->fhc_regs.tregs = prom_reg_to_paddr(&fpregs[5]);

	/* Obtain board number from board status register, Central's
	 * FHC lacks "board#" property.
	 */
	err = upa_readl(fhc->fhc_regs.pregs + FHC_PREGS_BSR);
	fhc->board = (((err >> 16) & 0x01) |
		      ((err >> 12) & 0x0e));

	fhc->jtag_master = 0;

	/* Attach the clock board registers for CENTRAL. */
	probe_clock_board(central_bus, fhc, cnode, fnode);

	err = upa_readl(fhc->fhc_regs.pregs + FHC_PREGS_ID);
	printk("FHC(board %d): Version[%x] PartID[%x] Manuf[%x] (CENTRAL)\n",
	       fhc->board,
	       ((err & FHC_ID_VERS) >> 28),
	       ((err & FHC_ID_PARTID) >> 12),
	       ((err & FHC_ID_MANUF) >> 1));

	probe_other_fhcs();

	init_all_fhc_hw();
}
382
/* Set an FHC's right LED on or off via the control register.  The
 * final read presumably flushes the write -- TODO confirm.
 */
static __inline__ void fhc_ledblink(struct linux_fhc *fhc, int on)
{
	u32 tmp;

	tmp = upa_readl(fhc->fhc_regs.pregs + FHC_PREGS_CTRL);

	/* NOTE: reverse logic on this bit */
	if (on)
		tmp &= ~(FHC_CONTROL_RLED);
	else
		tmp |= FHC_CONTROL_RLED;
	/* Keep the bits init_all_fhc_hw() cleared, cleared. */
	tmp &= ~(FHC_CONTROL_AOFF | FHC_CONTROL_BOFF | FHC_CONTROL_SLINE);

	upa_writel(tmp, fhc->fhc_regs.pregs + FHC_PREGS_CTRL);
	upa_readl(fhc->fhc_regs.pregs + FHC_PREGS_CTRL);
}
399
400static __inline__ void central_ledblink(struct linux_central *central, int on)
401{
402 u8 tmp;
403
404 tmp = upa_readb(central->clkregs + CLOCK_CTRL);
405
406 /* NOTE: reverse logic on this bit */
407 if (on)
408 tmp &= ~(CLOCK_CTRL_RLED);
409 else
410 tmp |= CLOCK_CTRL_RLED;
411
412 upa_writeb(tmp, central->clkregs + CLOCK_CTRL);
413 upa_readb(central->clkregs + CLOCK_CTRL);
414}
415
416static struct timer_list sftimer;
417static int led_state;
418
419static void sunfire_timer(unsigned long __ignored)
420{
421 struct linux_fhc *fhc;
422
423 central_ledblink(central_bus, led_state);
424 for (fhc = fhc_list; fhc != NULL; fhc = fhc->next)
425 if (! IS_CENTRAL_FHC(fhc))
426 fhc_ledblink(fhc, led_state);
427 led_state = ! led_state;
428 sftimer.expires = jiffies + (HZ >> 1);
429 add_timer(&sftimer);
430}
431
/* After PCI/SBUS busses have been probed, this is called to perform
 * final initialization of all FireHose Controllers in the system.
 * Turns the clock-board LED off and arms the half-second blink
 * timer (sunfire_timer).
 */
void firetruck_init(void)
{
	struct linux_central *central = central_bus;
	u8 ctrl;

	/* No central bus, nothing to do. */
	if (central == NULL)
		return;

	/* OBP leaves it on, turn it off so clock board timer LED
	 * is in sync with FHC ones.
	 */
	ctrl = upa_readb(central->clkregs + CLOCK_CTRL);
	ctrl &= ~(CLOCK_CTRL_RLED);
	upa_writeb(ctrl, central->clkregs + CLOCK_CTRL);

	led_state = 0;
	init_timer(&sftimer);
	sftimer.data = 0;
	sftimer.function = &sunfire_timer;
	sftimer.expires = jiffies + (HZ >> 1);
	add_timer(&sftimer);
}
diff --git a/arch/sparc64/kernel/chmc.c b/arch/sparc64/kernel/chmc.c
new file mode 100644
index 000000000000..97cf912f0853
--- /dev/null
+++ b/arch/sparc64/kernel/chmc.c
@@ -0,0 +1,458 @@
1/* $Id: chmc.c,v 1.4 2002/01/08 16:00:14 davem Exp $
2 * memctrlr.c: Driver for UltraSPARC-III memory controller.
3 *
4 * Copyright (C) 2001 David S. Miller (davem@redhat.com)
5 */
6
7#include <linux/module.h>
8#include <linux/kernel.h>
9#include <linux/types.h>
10#include <linux/slab.h>
11#include <linux/list.h>
12#include <linux/string.h>
13#include <linux/sched.h>
14#include <linux/smp.h>
15#include <linux/errno.h>
16#include <linux/init.h>
17#include <asm/spitfire.h>
18#include <asm/chmctrl.h>
19#include <asm/oplib.h>
20#include <asm/io.h>
21
#define CHMCTRL_NDGRPS	2	/* DIMM groups per controller */
#define CHMCTRL_NDIMMS	4	/* DIMMs per group */

#define DIMMS_PER_MC	(CHMCTRL_NDGRPS * CHMCTRL_NDIMMS)

/* OBP memory-layout property format. */
struct obp_map {
	/* Packed DIMM selectors, indexed by bus bit position;
	 * decoded 4 entries per byte in chmc_getunumber().
	 */
	unsigned char	dimm_map[144];
	/* Pin number for each of the 576 bus bit positions. */
	unsigned char	pin_map[576];
};

#define DIMM_LABEL_SZ	8

struct obp_mem_layout {
	/* One max 8-byte string label per DIMM.  Usually
	 * this matches the label on the motherboard where
	 * that DIMM resides.
	 */
	char		dimm_labels[DIMMS_PER_MC][DIMM_LABEL_SZ];

	/* If symmetric use map[0], else it is
	 * asymmetric and map[1] should be used.
	 */
	char		symmetric;

	struct obp_map	map[2];
};
49
#define CHMCTRL_NBANKS	4

/* Software decode of one memory-address-decode register
 * (filled in by interpret_one_decode_reg()).
 */
struct bank_info {
	struct mctrl_info	*mp;	/* owning controller */
	int			bank_id; /* globally unique bank number */

	u64			raw_reg; /* raw decode register value */
	int			valid;	/* non-zero when bank is enabled */
	int			uk;	/* upper "don't care" mask */
	int			um;	/* upper match value */
	int			lk;	/* lower "don't care" mask */
	int			lm;	/* lower match value */
	int			interleave; /* ways of interleave (1..16) */
	unsigned long		base;	/* decoded base physical address */
	unsigned long		size;	/* bytes decoded by this bank */
};

/* Per-controller state, linked on mctrl_list. */
struct mctrl_info {
	struct list_head	list;
	int			portid;	/* compared to the cpu id in read_mcreg() */
	int			index;	/* probe-order index (chmc%d) */

	struct obp_mem_layout	layout_prop; /* copy of "memory-layout" property */
	int			layout_size; /* property length; 0 when absent */

	void __iomem		*regs;	/* ioremap'd controller registers */

	u64			timing_control1;
	u64			timing_control2;
	u64			timing_control3;
	u64			timing_control4;
	u64			memaddr_control;

	struct bank_info	logical_banks[CHMCTRL_NBANKS];
};

static LIST_HEAD(mctrl_list);
87
88/* Does BANK decode PHYS_ADDR? */
89static int bank_match(struct bank_info *bp, unsigned long phys_addr)
90{
91 unsigned long upper_bits = (phys_addr & PA_UPPER_BITS) >> PA_UPPER_BITS_SHIFT;
92 unsigned long lower_bits = (phys_addr & PA_LOWER_BITS) >> PA_LOWER_BITS_SHIFT;
93
94 /* Bank must be enabled to match. */
95 if (bp->valid == 0)
96 return 0;
97
98 /* Would BANK match upper bits? */
99 upper_bits ^= bp->um; /* What bits are different? */
100 upper_bits = ~upper_bits; /* Invert. */
101 upper_bits |= bp->uk; /* What bits don't matter for matching? */
102 upper_bits = ~upper_bits; /* Invert. */
103
104 if (upper_bits)
105 return 0;
106
107 /* Would BANK match lower bits? */
108 lower_bits ^= bp->lm; /* What bits are different? */
109 lower_bits = ~lower_bits; /* Invert. */
110 lower_bits |= bp->lk; /* What bits don't matter for matching? */
111 lower_bits = ~lower_bits; /* Invert. */
112
113 if (lower_bits)
114 return 0;
115
116 /* I always knew you'd be the one. */
117 return 1;
118}
119
120/* Given PHYS_ADDR, search memory controller banks for a match. */
121static struct bank_info *find_bank(unsigned long phys_addr)
122{
123 struct list_head *mctrl_head = &mctrl_list;
124 struct list_head *mctrl_entry = mctrl_head->next;
125
126 for (;;) {
127 struct mctrl_info *mp =
128 list_entry(mctrl_entry, struct mctrl_info, list);
129 int bank_no;
130
131 if (mctrl_entry == mctrl_head)
132 break;
133 mctrl_entry = mctrl_entry->next;
134
135 for (bank_no = 0; bank_no < CHMCTRL_NBANKS; bank_no++) {
136 struct bank_info *bp;
137
138 bp = &mp->logical_banks[bank_no];
139 if (bank_match(bp, phys_addr))
140 return bp;
141 }
142 }
143
144 return NULL;
145}
146
/* This is the main purpose of this driver. */
#define SYNDROME_MIN	-1
#define SYNDROME_MAX	144
/* Translate an ECC syndrome code plus faulting physical address into
 * the motherboard DIMM label(s).  SYNDROME_MIN (-1) marks a
 * multi-bit error, for which every DIMM label of the matching bank
 * is emitted.  Writes "???" when the address decodes to no bank or
 * the syndrome is out of range.  Always returns 0.
 */
int chmc_getunumber(int syndrome_code,
		    unsigned long phys_addr,
		    char *buf, int buflen)
{
	struct bank_info *bp;
	struct obp_mem_layout *prop;
	int bank_in_controller, first_dimm;

	bp = find_bank(phys_addr);
	if (bp == NULL ||
	    syndrome_code < SYNDROME_MIN ||
	    syndrome_code > SYNDROME_MAX) {
		buf[0] = '?';
		buf[1] = '?';
		buf[2] = '?';
		buf[3] = '\0';
		return 0;
	}

	prop = &bp->mp->layout_prop;
	bank_in_controller = bp->bank_id & (CHMCTRL_NBANKS - 1);
	/* First DIMM of the group this bank belongs to. */
	first_dimm = (bank_in_controller & (CHMCTRL_NDGRPS - 1));
	first_dimm *= CHMCTRL_NDIMMS;

	if (syndrome_code != SYNDROME_MIN) {
		struct obp_map *map;
		int qword, where_in_line, where, map_index, map_offset;
		unsigned int map_val;

		/* Yaay, single bit error so we can figure out
		 * the exact dimm.
		 */
		if (prop->symmetric)
			map = &prop->map[0];
		else
			map = &prop->map[1];

		/* Convert syndrome code into the way the bits are
		 * positioned on the bus.
		 */
		if (syndrome_code < 144 - 16)
			syndrome_code += 16;
		else if (syndrome_code < 144)
			syndrome_code -= (144 - 7);
		else if (syndrome_code < (144 + 3))
			syndrome_code -= (144 + 3 - 4);
		else
			syndrome_code -= 144 + 3;

		/* All this magic has to do with how a cache line
		 * comes over the wire on Safari.  A 64-bit line
		 * comes over in 4 quadword cycles, each of which
		 * transmit ECC/MTAG info as well as the actual
		 * data.  144 bits per quadword, 576 total.
		 */
#define LINE_SIZE	64
#define LINE_ADDR_MSK	(LINE_SIZE - 1)
#define QW_PER_LINE	4
#define QW_BYTES	(LINE_SIZE / QW_PER_LINE)
#define QW_BITS		144
#define LAST_BIT	(576 - 1)

		qword = (phys_addr & LINE_ADDR_MSK) / QW_BYTES;
		where_in_line = ((3 - qword) * QW_BITS) + syndrome_code;
		where = (LAST_BIT - where_in_line);
		map_index = where >> 2;
		map_offset = where & 0x3;
		map_val = map->dimm_map[map_index];
		map_val = ((map_val >> ((3 - map_offset) << 1)) & (2 - 1));

		sprintf(buf, "%s, pin %3d",
			prop->dimm_labels[first_dimm + map_val],
			map->pin_map[where_in_line]);
	} else {
		int dimm;

		/* Multi-bit error, we just dump out all the
		 * dimm labels associated with this bank.
		 */
		for (dimm = 0; dimm < CHMCTRL_NDIMMS; dimm++) {
			sprintf(buf, "%s ",
				prop->dimm_labels[first_dimm + dimm]);
			buf += strlen(buf);
		}
	}
	return 0;
}
237
/* Accessing the registers is slightly complicated.  If you want
 * to get at the memory controller which is on the same processor
 * the code is executing, you must use special ASI load/store else
 * you go through the global mapping.
 */
static u64 read_mcreg(struct mctrl_info *mp, unsigned long offset)
{
	unsigned long ret;
	int this_cpu = get_cpu();	/* pin the cpu while we pick a path */

	if (mp->portid == this_cpu) {
		/* Local controller: load via the MCU control ASI. */
		__asm__ __volatile__("ldxa [%1] %2, %0"
				     : "=r" (ret)
				     : "r" (offset), "i" (ASI_MCU_CTRL_REG));
	} else {
		/* Remote controller: load via the globally mapped
		 * register address with the physical-bypass ASI.
		 */
		__asm__ __volatile__("ldxa [%1] %2, %0"
				     : "=r" (ret)
				     : "r" (mp->regs + offset),
				       "i" (ASI_PHYS_BYPASS_EC_E));
	}
	put_cpu();

	return ret;
}
262
#if 0 /* currently unused */
/* Counterpart to read_mcreg(): store VAL into a controller register,
 * via the MCU control ASI when the controller is local, else via the
 * global mapping with the physical-bypass ASI.
 */
static void write_mcreg(struct mctrl_info *mp, unsigned long offset, u64 val)
{
	if (mp->portid == smp_processor_id()) {
		__asm__ __volatile__("stxa %0, [%1] %2"
				     : : "r" (val),
				         "r" (offset), "i" (ASI_MCU_CTRL_REG));
	} else {
		/* BUG FIX: this store path used "ldxa" (a load) where a
		 * store is required; use "stxa" like the local path.
		 */
		__asm__ __volatile__("stxa %0, [%1] %2"
				     : : "r" (val),
				         "r" (mp->regs + offset),
				         "i" (ASI_PHYS_BYPASS_EC_E));
	}
}
#endif
278
/* Decode one memory-address-decode register value into the
 * corresponding logical_banks[] slot: valid bit, match/mask fields,
 * base address, interleave factor and bank size.
 */
static void interpret_one_decode_reg(struct mctrl_info *mp, int which_bank, u64 val)
{
	struct bank_info *p = &mp->logical_banks[which_bank];

	p->mp = mp;
	p->bank_id = (CHMCTRL_NBANKS * mp->portid) + which_bank;
	p->raw_reg = val;
	p->valid = (val & MEM_DECODE_VALID) >> MEM_DECODE_VALID_SHIFT;
	p->uk = (val & MEM_DECODE_UK) >> MEM_DECODE_UK_SHIFT;
	p->um = (val & MEM_DECODE_UM) >> MEM_DECODE_UM_SHIFT;
	p->lk = (val & MEM_DECODE_LK) >> MEM_DECODE_LK_SHIFT;
	p->lm = (val & MEM_DECODE_LM) >> MEM_DECODE_LM_SHIFT;

	/* Base: upper match bits with "don't care" bits cleared,
	 * shifted back into address position.
	 */
	p->base = (p->um);
	p->base &= ~(p->uk);
	p->base <<= PA_UPPER_BITS_SHIFT;

	/* The lower mask value encodes the interleave factor. */
	switch(p->lk) {
	case 0xf:
	default:
		p->interleave = 1;
		break;

	case 0xe:
		p->interleave = 2;
		break;

	case 0xc:
		p->interleave = 4;
		break;

	case 0x8:
		p->interleave = 8;
		break;

	case 0x0:
		p->interleave = 16;
		break;
	};

	/* UK[10] is reserved, and UK[11] is not set for the SDRAM
	 * bank size definition.
	 */
	p->size = (((unsigned long)p->uk &
		    ((1UL << 10UL) - 1UL)) + 1UL) << PA_UPPER_BITS_SHIFT;
	p->size /= p->interleave;
}
326
327static void fetch_decode_regs(struct mctrl_info *mp)
328{
329 if (mp->layout_size == 0)
330 return;
331
332 interpret_one_decode_reg(mp, 0,
333 read_mcreg(mp, CHMCTRL_DECODE1));
334 interpret_one_decode_reg(mp, 1,
335 read_mcreg(mp, CHMCTRL_DECODE2));
336 interpret_one_decode_reg(mp, 2,
337 read_mcreg(mp, CHMCTRL_DECODE3));
338 interpret_one_decode_reg(mp, 3,
339 read_mcreg(mp, CHMCTRL_DECODE4));
340}
341
/* Probe one PROM memory-controller node: allocate and fill an
 * mctrl_info, map the controller registers, and (when the node has
 * a memory-layout property, i.e. is active) snapshot the timing and
 * decode registers.  Returns 0 on success, -1 on any failure.
 */
static int init_one_mctrl(int node, int index)
{
	struct mctrl_info *mp = kmalloc(sizeof(*mp), GFP_KERNEL);
	int portid = prom_getintdefault(node, "portid", -1);
	struct linux_prom64_registers p_reg_prop;
	int t;

	if (!mp)
		return -1;
	memset(mp, 0, sizeof(*mp));
	if (portid == -1)
		goto fail;

	mp->portid = portid;
	/* prom_getproplen() returns -1 when the property is missing;
	 * normalize that to 0 ("inactive controller").
	 */
	mp->layout_size = prom_getproplen(node, "memory-layout");
	if (mp->layout_size < 0)
		mp->layout_size = 0;
	if (mp->layout_size > sizeof(mp->layout_prop))
		goto fail;

	if (mp->layout_size > 0)
		prom_getproperty(node, "memory-layout",
				 (char *) &mp->layout_prop,
				 mp->layout_size);

	/* Sanity-check the "reg" property size (expected 0x48 bytes). */
	t = prom_getproperty(node, "reg",
			     (char *) &p_reg_prop,
			     sizeof(p_reg_prop));
	if (t < 0 || p_reg_prop.reg_size != 0x48)
		goto fail;

	mp->regs = ioremap(p_reg_prop.phys_addr, p_reg_prop.reg_size);
	if (mp->regs == NULL)
		goto fail;

	if (mp->layout_size != 0UL) {
		mp->timing_control1 = read_mcreg(mp, CHMCTRL_TCTRL1);
		mp->timing_control2 = read_mcreg(mp, CHMCTRL_TCTRL2);
		mp->timing_control3 = read_mcreg(mp, CHMCTRL_TCTRL3);
		mp->timing_control4 = read_mcreg(mp, CHMCTRL_TCTRL4);
		mp->memaddr_control = read_mcreg(mp, CHMCTRL_MACTRL);
	}

	fetch_decode_regs(mp);

	mp->index = index;

	list_add(&mp->list, &mctrl_list);

	/* Report the device. */
	printk(KERN_INFO "chmc%d: US3 memory controller at %p [%s]\n",
	       mp->index,
	       mp->regs, (mp->layout_size ? "ACTIVE" : "INACTIVE"));

	return 0;

fail:
	/* mp was zeroed above, so mp->regs is NULL unless ioremap
	 * succeeded; unmapping is safe on every failure path.
	 */
	if (mp) {
		if (mp->regs != NULL)
			iounmap(mp->regs);
		kfree(mp);
	}
	return -1;
}
406
407static int __init probe_for_string(char *name, int index)
408{
409 int node = prom_getchild(prom_root_node);
410
411 while ((node = prom_searchsiblings(node, name)) != 0) {
412 int ret = init_one_mctrl(node, index);
413
414 if (!ret)
415 index++;
416
417 node = prom_getsibling(node);
418 if (!node)
419 break;
420 }
421
422 return index;
423}
424
425static int __init chmc_init(void)
426{
427 int index;
428
429 /* This driver is only for cheetah platforms. */
430 if (tlb_type != cheetah && tlb_type != cheetah_plus)
431 return -ENODEV;
432
433 index = probe_for_string("memory-controller", 0);
434 index = probe_for_string("mc-us3", index);
435
436 return 0;
437}
438
439static void __exit chmc_cleanup(void)
440{
441 struct list_head *head = &mctrl_list;
442 struct list_head *tmp = head->next;
443
444 for (;;) {
445 struct mctrl_info *p =
446 list_entry(tmp, struct mctrl_info, list);
447 if (tmp == head)
448 break;
449 tmp = tmp->next;
450
451 list_del(&p->list);
452 iounmap(p->regs);
453 kfree(p);
454 }
455}
456
457module_init(chmc_init);
458module_exit(chmc_cleanup);
diff --git a/arch/sparc64/kernel/cpu.c b/arch/sparc64/kernel/cpu.c
new file mode 100644
index 000000000000..48756958116b
--- /dev/null
+++ b/arch/sparc64/kernel/cpu.c
@@ -0,0 +1,124 @@
1/* cpu.c: Dinky routines to look for the kind of Sparc cpu
2 * we are on.
3 *
4 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
5 */
6
7#include <linux/config.h>
8#include <linux/kernel.h>
9#include <linux/init.h>
10#include <linux/sched.h>
11#include <linux/smp.h>
12#include <asm/asi.h>
13#include <asm/system.h>
14#include <asm/fpumacro.h>
15#include <asm/cpudata.h>
16
17DEFINE_PER_CPU(cpuinfo_sparc, __cpu_data) = { 0 };
18
/* Table entry describing an integer unit, keyed by the manufacturer
 * and implementation fields of the %ver register.
 */
struct cpu_iu_info {
	short	manuf;
	short	impl;
	char	*cpu_name;	/* should be enough I hope... */
};

/* FPU table entry; additionally keyed by fsr.vers. */
struct cpu_fp_info {
	short	manuf;
	short	impl;
	char	fpu_vers;
	char	*fp_name;
};

struct cpu_fp_info linux_sparc_fpu[] = {
	{ 0x17, 0x10, 0, "UltraSparc I integrated FPU"},
	{ 0x22, 0x10, 0, "UltraSparc I integrated FPU"},
	{ 0x17, 0x11, 0, "UltraSparc II integrated FPU"},
	{ 0x17, 0x12, 0, "UltraSparc IIi integrated FPU"},
	{ 0x17, 0x13, 0, "UltraSparc IIe integrated FPU"},
	{ 0x3e, 0x14, 0, "UltraSparc III integrated FPU"},
	{ 0x3e, 0x15, 0, "UltraSparc III+ integrated FPU"},
	{ 0x3e, 0x16, 0, "UltraSparc IIIi integrated FPU"},
	{ 0x3e, 0x18, 0, "UltraSparc IV integrated FPU"},
};

#define NSPARCFPU  (sizeof(linux_sparc_fpu)/sizeof(struct cpu_fp_info))

struct cpu_iu_info linux_sparc_chips[] = {
	{ 0x17, 0x10, "TI UltraSparc I   (SpitFire)"},
	{ 0x22, 0x10, "TI UltraSparc I   (SpitFire)"},
	{ 0x17, 0x11, "TI UltraSparc II  (BlackBird)"},
	{ 0x17, 0x12, "TI UltraSparc IIi (Sabre)"},
	{ 0x17, 0x13, "TI UltraSparc IIe (Hummingbird)"},
	{ 0x3e, 0x14, "TI UltraSparc III (Cheetah)"},
	{ 0x3e, 0x15, "TI UltraSparc III+ (Cheetah+)"},
	{ 0x3e, 0x16, "TI UltraSparc IIIi (Jalapeno)"},
	{ 0x3e, 0x18, "TI UltraSparc IV (Jaguar)"},
};

#define NSPARCCHIPS  (sizeof(linux_sparc_chips)/sizeof(struct cpu_iu_info))
59
60char *sparc_cpu_type = "cpu-oops";
61char *sparc_fpu_type = "fpu-oops";
62
63unsigned int fsr_storage;
64
/* Read %ver and the FPU version field out of %fsr, then look up
 * human-readable cpu/fpu names in the tables above.  Sets the global
 * sparc_cpu_type and sparc_fpu_type name strings.
 */
void __init cpu_probe(void)
{
	unsigned long ver, fpu_vers, manuf, impl, fprs;
	int i;

	/* Temporarily enable the FPU so %fsr can be stored. */
	fprs = fprs_read();
	fprs_write(FPRS_FEF);
	__asm__ __volatile__ ("rdpr %%ver, %0; stx %%fsr, [%1]"
			      : "=&r" (ver)
			      : "r" (&fpu_vers));
	fprs_write(fprs);

	manuf = ((ver >> 48) & 0xffff);
	impl = ((ver >> 32) & 0xffff);

	/* Extract fsr.vers (3 bits starting at bit 17). */
	fpu_vers = ((fpu_vers >> 17) & 0x7);

retry:
	for (i = 0; i < NSPARCCHIPS; i++) {
		if (linux_sparc_chips[i].manuf == manuf) {
			if (linux_sparc_chips[i].impl == impl) {
				sparc_cpu_type =
					linux_sparc_chips[i].cpu_name;
				break;
			}
		}
	}

	if (i == NSPARCCHIPS) {
		/* Maybe it is a cheetah+ derivative, report it as cheetah+
		 * in that case until we learn the real names.
		 */
		if (manuf == 0x3e &&
		    impl > 0x15) {
			impl = 0x15;
			goto retry;
		} else {
			printk("DEBUG: manuf[%lx] impl[%lx]\n",
			       manuf, impl);
		}
		sparc_cpu_type = "Unknown CPU";
	}

	/* FPU lookup additionally matches on the fsr version. */
	for (i = 0; i < NSPARCFPU; i++) {
		if (linux_sparc_fpu[i].manuf == manuf &&
		    linux_sparc_fpu[i].impl == impl) {
			if (linux_sparc_fpu[i].fpu_vers == fpu_vers) {
				sparc_fpu_type =
					linux_sparc_fpu[i].fp_name;
				break;
			}
		}
	}

	if (i == NSPARCFPU) {
		printk("DEBUG: manuf[%lx] impl[%lx] fsr.vers[%lx]\n",
		       manuf, impl, fpu_vers);
		sparc_fpu_type = "Unknown FPU";
	}
}
diff --git a/arch/sparc64/kernel/devices.c b/arch/sparc64/kernel/devices.c
new file mode 100644
index 000000000000..d710274e516b
--- /dev/null
+++ b/arch/sparc64/kernel/devices.c
@@ -0,0 +1,144 @@
1/* devices.c: Initial scan of the prom device tree for important
2 * Sparc device nodes which we need to find.
3 *
4 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
5 */
6
7#include <linux/config.h>
8#include <linux/kernel.h>
9#include <linux/threads.h>
10#include <linux/init.h>
11#include <linux/ioport.h>
12#include <linux/string.h>
13#include <linux/spinlock.h>
14#include <linux/errno.h>
15
16#include <asm/page.h>
17#include <asm/oplib.h>
18#include <asm/system.h>
19#include <asm/smp.h>
20#include <asm/spitfire.h>
21#include <asm/timer.h>
22#include <asm/cpudata.h>
23
24/* Used to synchronize acceses to NatSemi SUPER I/O chip configure
25 * operations in asm/ns87303.h
26 */
27DEFINE_SPINLOCK(ns87303_lock);
28
29extern void cpu_probe(void);
30extern void central_probe(void);
31
32static char *cpu_mid_prop(void)
33{
34 if (tlb_type == spitfire)
35 return "upa-portid";
36 return "portid";
37}
38
39static int check_cpu_node(int nd, int *cur_inst,
40 int (*compare)(int, int, void *), void *compare_arg,
41 int *prom_node, int *mid)
42{
43 char node_str[128];
44
45 prom_getstring(nd, "device_type", node_str, sizeof(node_str));
46 if (strcmp(node_str, "cpu"))
47 return -ENODEV;
48
49 if (!compare(nd, *cur_inst, compare_arg)) {
50 if (prom_node)
51 *prom_node = nd;
52 if (mid)
53 *mid = prom_getintdefault(nd, cpu_mid_prop(), 0);
54 return 0;
55 }
56
57 (*cur_inst)++;
58
59 return -ENODEV;
60}
61
62static int __cpu_find_by(int (*compare)(int, int, void *), void *compare_arg,
63 int *prom_node, int *mid)
64{
65 int nd, cur_inst, err;
66
67 nd = prom_root_node;
68 cur_inst = 0;
69
70 err = check_cpu_node(nd, &cur_inst,
71 compare, compare_arg,
72 prom_node, mid);
73 if (err == 0)
74 return 0;
75
76 nd = prom_getchild(nd);
77 while ((nd = prom_getsibling(nd)) != 0) {
78 err = check_cpu_node(nd, &cur_inst,
79 compare, compare_arg,
80 prom_node, mid);
81 if (err == 0)
82 return 0;
83 }
84
85 return -ENODEV;
86}
87
/* Comparator for __cpu_find_by(): match purely on traversal order;
 * the node id itself is unused.
 */
static int cpu_instance_compare(int nd, int instance, void *_arg)
{
	int wanted = (int) (long) _arg;

	return (instance == wanted) ? 0 : -ENODEV;
}
96
/* Find the INSTANCE'th "cpu" node in the PROM tree, optionally
 * returning the node and its mid.  0 on success, -ENODEV otherwise.
 */
int cpu_find_by_instance(int instance, int *prom_node, int *mid)
{
	return __cpu_find_by(cpu_instance_compare, (void *)(long)instance,
			     prom_node, mid);
}
102
/* Comparator for __cpu_find_by(): match on the node's mid/portid
 * property rather than on traversal order.
 */
static int cpu_mid_compare(int nd, int instance, void *_arg)
{
	int desired_mid = (int) (long) _arg;
	int this_mid;

	this_mid = prom_getintdefault(nd, cpu_mid_prop(), 0);
	if (this_mid == desired_mid)
		return 0;
	return -ENODEV;
}
113
/* Find the cpu node whose mid (portid) property equals MID and
 * return it via *prom_node.  0 on success, -ENODEV otherwise.
 */
int cpu_find_by_mid(int mid, int *prom_node)
{
	return __cpu_find_by(cpu_mid_compare, (void *)(long)mid,
			     prom_node, NULL);
}
119
/* Early boot scan of the PROM device tree: widen the ioport
 * resource range, record the boot cpu's clock rate (UP builds only;
 * presumably SMP fills this in per-cpu elsewhere -- TODO confirm),
 * then probe the central bus and the cpu type.
 */
void __init device_scan(void)
{
	/* FIX ME FAST... -DaveM */
	ioport_resource.end = 0xffffffffffffffffUL;

	prom_printf("Booting Linux...\n");

#ifndef CONFIG_SMP
	{
		int err, cpu_node;
		err = cpu_find_by_instance(0, &cpu_node, NULL);
		if (err) {
			prom_printf("No cpu nodes, cannot continue\n");
			prom_halt();
		}
		cpu_data(0).clock_tick = prom_getintdefault(cpu_node,
							    "clock-frequency",
							    0);
	}
#endif

	central_probe();

	cpu_probe();
}
diff --git a/arch/sparc64/kernel/dtlb_backend.S b/arch/sparc64/kernel/dtlb_backend.S
new file mode 100644
index 000000000000..b73a3c858770
--- /dev/null
+++ b/arch/sparc64/kernel/dtlb_backend.S
@@ -0,0 +1,181 @@
1/* $Id: dtlb_backend.S,v 1.16 2001/10/09 04:02:11 davem Exp $
2 * dtlb_backend.S: Back end to DTLB miss replacement strategy.
3 * This is included directly into the trap table.
4 *
5 * Copyright (C) 1996,1998 David S. Miller (davem@redhat.com)
6 * Copyright (C) 1997,1998 Jakub Jelinek (jj@ultra.linux.cz)
7 */
8
9#include <asm/pgtable.h>
10#include <asm/mmu.h>
11
12#if PAGE_SHIFT == 13
13#define SZ_BITS _PAGE_SZ8K
14#elif PAGE_SHIFT == 16
15#define SZ_BITS _PAGE_SZ64K
16#elif PAGE_SHIFT == 19
17#define SZ_BITS _PAGE_SZ512K
18#elif PAGE_SHIFT == 22
19#define SZ_BITS _PAGE_SZ4M
20#endif
21
22#define VALID_SZ_BITS (_PAGE_VALID | SZ_BITS)
23
24#define VPTE_BITS (_PAGE_CP | _PAGE_CV | _PAGE_P )
25#define VPTE_SHIFT (PAGE_SHIFT - 3)
26
27/* Ways we can get here:
28 *
29 * 1) Nucleus loads and stores to/from PA-->VA direct mappings at tl>1.
30 * 2) Nucleus loads and stores to/from user/kernel window save areas.
31 * 3) VPTE misses from dtlb_base and itlb_base.
32 *
33 * We need to extract out the PMD and PGDIR indexes from the
34 * linear virtual page table access address. The PTE index
35 * is at the bottom, but we are not concerned with it. Bits
36 * 0 to 2 are clear since each PTE is 8 bytes in size. Each
37 * PMD and PGDIR entry are 4 bytes in size. Thus, this
38 * address looks something like:
39 *
40 * |---------------------------------------------------------------|
41 * | ... | PGDIR index | PMD index | PTE index | |
42 * |---------------------------------------------------------------|
43 * 63 F E D C B A 3 2 0 <- bit nr
44 *
45 * The variable bits above are defined as:
46 * A --> 3 + (PAGE_SHIFT - log2(8))
47 * --> 3 + (PAGE_SHIFT - 3) - 1
48 * (ie. this is "bit 3" + PAGE_SIZE - size of PTE entry in bits - 1)
49 * B --> A + 1
50 * C --> B + (PAGE_SHIFT - log2(4))
51 * --> B + (PAGE_SHIFT - 2) - 1
52 * (ie. this is "bit B" + PAGE_SIZE - size of PMD entry in bits - 1)
53 * D --> C + 1
54 * E --> D + (PAGE_SHIFT - log2(4))
55 * --> D + (PAGE_SHIFT - 2) - 1
56 * (ie. this is "bit D" + PAGE_SIZE - size of PGDIR entry in bits - 1)
57 * F --> E + 1
58 *
59 * (Note how "B" always evalutes to PAGE_SHIFT, all the other constants
60 * cancel out.)
61 *
62 * For 8K PAGE_SIZE (thus, PAGE_SHIFT of 13) the bit numbers are:
63 * A --> 12
64 * B --> 13
65 * C --> 23
66 * D --> 24
67 * E --> 34
68 * F --> 35
69 *
70 * For 64K PAGE_SIZE (thus, PAGE_SHIFT of 16) the bit numbers are:
71 * A --> 15
72 * B --> 16
73 * C --> 29
74 * D --> 30
75 * E --> 43
76 * F --> 44
77 *
78 * Because bits both above and below each PGDIR and PMD index need to
79 * be masked out, and the index can be as long as 14 bits (when using a
80 * 64K PAGE_SIZE, and thus a PAGE_SHIFT of 16), we need 3 instructions
81 * to extract each index out.
82 *
83 * Shifts do not pair very well on UltraSPARC-I, II, IIi, and IIe, so
84 * we try to avoid using them for the entire operation. We could setup
85 * a mask anywhere from bit 31 down to bit 10 using the sethi instruction.
86 *
87 * We need a mask covering bits B --> C and one covering D --> E.
88 * For 8K PAGE_SIZE these masks are 0x00ffe000 and 0x7ff000000.
89 * For 64K PAGE_SIZE these masks are 0x3fff0000 and 0xfffc0000000.
90 * The second in each set cannot be loaded with a single sethi
91 * instruction, because the upper bits are past bit 32. We would
92 * need to use a sethi + a shift.
93 *
94 * For the time being, we use 2 shifts and a simple "and" mask.
95 * We shift left to clear the bits above the index, we shift down
96 * to clear the bits below the index (sans the log2(4 or 8) bits)
97 * and a mask to clear the log2(4 or 8) bits. We need therefore
98 * define 4 shift counts, all of which are relative to PAGE_SHIFT.
99 *
100 * Although unsupportable for other reasons, this does mean that
101 * 512K and 4MB page sizes would be generaally supported by the
102 * kernel. (ELF binaries would break with > 64K PAGE_SIZE since
103 * the sections are only aligned that strongly).
104 *
105 * The operations performed for extraction are thus:
106 *
107 * ((X << FOO_SHIFT_LEFT) >> FOO_SHIFT_RIGHT) & ~0x3
108 *
109 */
110
111#define A (3 + (PAGE_SHIFT - 3) - 1)
112#define B (A + 1)
113#define C (B + (PAGE_SHIFT - 2) - 1)
114#define D (C + 1)
115#define E (D + (PAGE_SHIFT - 2) - 1)
116#define F (E + 1)
117
118#define PMD_SHIFT_LEFT (64 - D)
119#define PMD_SHIFT_RIGHT (64 - (D - B) - 2)
120#define PGDIR_SHIFT_LEFT (64 - F)
121#define PGDIR_SHIFT_RIGHT (64 - (F - D) - 2)
122#define LOW_MASK_BITS 0x3
123
/* NOTE(review): this fragment is included directly into the trap table.
 * Each commented group appears intended to occupy exactly one 32-byte
 * I-cache line (8 instructions) — do not reorder or grow a group.
 * On entry %g1 == TLB_SFSR (so [%g1 + %g1] addresses TLB_TAG_ACCESS),
 * %g3 holds the VPTE base and %g7 __pa(pgd), per dtlb_base.S — confirm
 * against the trap-table setup before editing.
 */
124/* TLB1 ** ICACHE line 1: tl1 DTLB and quick VPTE miss */
125	ldxa		[%g1 + %g1] ASI_DMMU, %g4	! Get TAG_ACCESS
126	add		%g3, %g3, %g5			! Compute VPTE base
127	cmp		%g4, %g5			! VPTE miss?
128	bgeu,pt		%xcc, 1f			! Continue here
129	 andcc		%g4, TAG_CONTEXT_BITS, %g5	! tl0 miss Nucleus test
130	ba,a,pt		%xcc, from_tl1_trap		! Fall to tl0 miss
1311:	sllx		%g6, VPTE_SHIFT, %g4		! Position TAG_ACCESS
132	or		%g4, %g5, %g4			! Prepare TAG_ACCESS
133
134/* TLB1 ** ICACHE line 2: Quick VPTE miss	  */
135	mov		TSB_REG, %g1			! Grab TSB reg
136	ldxa		[%g1] ASI_DMMU, %g5		! Doing PGD caching?
137	sllx		%g6, PMD_SHIFT_LEFT, %g1	! Position PMD offset
138	be,pn		%xcc, sparc64_vpte_nucleus	! Is it from Nucleus?
139	 srlx		%g1, PMD_SHIFT_RIGHT, %g1	! Mask PMD offset bits
140	brnz,pt		%g5, sparc64_vpte_continue	! Yep, go like smoke
141	 andn		%g1, LOW_MASK_BITS, %g1		! Final PMD mask
142	sllx		%g6, PGDIR_SHIFT_LEFT, %g5	! Position PGD offset
143
144/* TLB1 ** ICACHE line 3: Quick VPTE miss	  */
145	srlx		%g5, PGDIR_SHIFT_RIGHT, %g5	! Mask PGD offset bits
146	andn		%g5, LOW_MASK_BITS, %g5		! Final PGD mask
147	lduwa		[%g7 + %g5] ASI_PHYS_USE_EC, %g5! Load PGD
148	brz,pn		%g5, vpte_noent			! Valid?
149sparc64_kpte_continue:
150	sllx		%g5, 11, %g5			! Shift into place
151sparc64_vpte_continue:
152	lduwa		[%g5 + %g1] ASI_PHYS_USE_EC, %g5! Load PMD
153	sllx		%g5, 11, %g5			! Shift into place
154	brz,pn		%g5, vpte_noent			! Valid?
155
156/* TLB1 ** ICACHE line 4: Quick VPTE miss	  */
	/* VALID_SZ_BITS only occupies the top bits, so it is rebuilt
	 * with a mov/sllx pair instead of a 64-bit immediate. */
157	mov		(VALID_SZ_BITS >> 61), %g1	! upper vpte into %g1
158	sllx		%g1, 61, %g1			! finish calc
159	or		%g5, VPTE_BITS, %g5		! Prepare VPTE data
160	or		%g5, %g1, %g5			! ...
161	mov		TLB_SFSR, %g1			! Restore %g1 value
162	stxa		%g5, [%g0] ASI_DTLB_DATA_IN	! Load VPTE into TLB
163	stxa		%g4, [%g1 + %g1] ASI_DMMU	! Restore previous TAG_ACCESS
164	retry						! Load PTE once again
165
166#undef SZ_BITS
167#undef VALID_SZ_BITS
168#undef VPTE_SHIFT
169#undef VPTE_BITS
170#undef A
171#undef B
172#undef C
173#undef D
174#undef E
175#undef F
176#undef PMD_SHIFT_LEFT
177#undef PMD_SHIFT_RIGHT
178#undef PGDIR_SHIFT_LEFT
179#undef PGDIR_SHIFT_RIGHT
180#undef LOW_MASK_BITS
181
diff --git a/arch/sparc64/kernel/dtlb_base.S b/arch/sparc64/kernel/dtlb_base.S
new file mode 100644
index 000000000000..ded2fed23fcc
--- /dev/null
+++ b/arch/sparc64/kernel/dtlb_base.S
@@ -0,0 +1,113 @@
1/* $Id: dtlb_base.S,v 1.17 2001/10/11 22:33:52 davem Exp $
2 * dtlb_base.S: Front end to DTLB miss replacement strategy.
3 * This is included directly into the trap table.
4 *
5 * Copyright (C) 1996,1998 David S. Miller (davem@redhat.com)
6 * Copyright (C) 1997,1998 Jakub Jelinek (jj@ultra.linux.cz)
7 */
8
9#include <asm/pgtable.h>
10#include <asm/mmu.h>
11
12/* %g1 TLB_SFSR (%g1 + %g1 == TLB_TAG_ACCESS)
13 * %g2 (KERN_HIGHBITS | KERN_LOWBITS)
14 * %g3 VPTE base (0xfffffffe00000000) Spitfire/Blackbird (44-bit VA space)
15 * (0xffe0000000000000) Cheetah (64-bit VA space)
16 * %g7 __pa(current->mm->pgd)
17 *
18 * The VPTE base value is completely magic, but note that
19 * few places in the kernel other than these TLB miss
20 * handlers know anything about the VPTE mechanism or
21 * how it works (see VPTE_SIZE, TASK_SIZE and PTRS_PER_PGD).
22 * Consider the 44-bit VADDR Ultra-I/II case as an example:
23 *
24 * VA[0 : (1<<43)] produce VPTE index [%g3 : 0]
25 * VA[0 : -(1<<43)] produce VPTE index [%g3-(1<<(43-PAGE_SHIFT+3)) : %g3]
26 *
27 * For Cheetah's 64-bit VADDR space this is:
28 *
29 * VA[0 : (1<<63)] produce VPTE index [%g3 : 0]
30 * VA[0 : -(1<<63)] produce VPTE index [%g3-(1<<(63-PAGE_SHIFT+3)) : %g3]
31 *
32 * If you're paying attention you'll notice that this means half of
33 * the VPTE table is above %g3 and half is below, low VA addresses
34 * map progressively upwards from %g3, and high VA addresses map
35 * progressively upwards towards %g3. This trick was needed to make
36 * the same 8 instruction handler work both for Spitfire/Blackbird's
37 * peculiar VA space hole configuration and the full 64-bit VA space
38 * one of Cheetah at the same time.
39 */
40
41/* Ways we can get here:
42 *
43 * 1) Nucleus loads and stores to/from PA-->VA direct mappings.
44 * 2) Nucleus loads and stores to/from vmalloc() areas.
45 * 3) User loads and stores.
46 * 4) User space accesses by nucleus at tl0
47 */
48
49#if PAGE_SHIFT == 13
50/*
51 * To compute vpte offset, we need to do ((addr >> 13) << 3),
52 * which can be optimized to (addr >> 10) if bits 10/11/12 can
53 * be guaranteed to be 0 ... mmu_context.h does guarantee this
54 * by only using 10 bits in the hwcontext value.
55 */
56#define CREATE_VPTE_OFFSET1(r1, r2)
57#define CREATE_VPTE_OFFSET2(r1, r2) \
58 srax r1, 10, r2
59#define CREATE_VPTE_NOP nop
60#else
61#define CREATE_VPTE_OFFSET1(r1, r2) \
62 srax r1, PAGE_SHIFT, r2
63#define CREATE_VPTE_OFFSET2(r1, r2) \
64 sllx r2, 3, r2
65#define CREATE_VPTE_NOP
66#endif
67
/* NOTE(review): trap-table fragment; each commented group appears meant
 * to fit one I-cache line — keep instruction counts unchanged.  Register
 * contract (%g1/%g2/%g3/%g7) is documented at the top of this file.
 */
68/* DTLB ** ICACHE line 1: Quick user TLB misses		*/
69	ldxa		[%g1 + %g1] ASI_DMMU, %g4	! Get TAG_ACCESS
70	andcc		%g4, TAG_CONTEXT_BITS, %g0	! From Nucleus?
71from_tl1_trap:
72	rdpr		%tl, %g5			! For TL==3 test
73	CREATE_VPTE_OFFSET1(%g4, %g6)			! Create VPTE offset
74	be,pn		%xcc, 3f			! Yep, special processing
75	 CREATE_VPTE_OFFSET2(%g4, %g6)			! Create VPTE offset
	/* At TL==4 a nested VPTE miss could not be taken safely. */
76	cmp		%g5, 4				! Last trap level?
77	be,pn		%xcc, longpath			! Yep, cannot risk VPTE miss
78	 nop						! delay slot
79
80/* DTLB ** ICACHE line 2: User finish + quick kernel TLB misses	*/
81	ldxa		[%g3 + %g6] ASI_S, %g5		! Load VPTE
821:	brgez,pn	%g5, longpath			! Invalid, branch out
83	 nop						! Delay-slot
849:	stxa		%g5, [%g0] ASI_DTLB_DATA_IN	! Reload TLB
85	retry						! Trap return
863:	brlz,pt		%g4, 9b				! Kernel virtual map?
87	 xor		%g2, %g4, %g5			! Finish bit twiddles
88	ba,a,pt		%xcc, kvmap			! Yep, go check for obp/vmalloc
89
90/* DTLB ** ICACHE line 3: winfixups+real_faults		*/
91longpath:
	/* Slow path: switch global register set and hand off to the
	 * C fault handler (or the window fixup trampoline). */
92	rdpr		%pstate, %g5			! Move into alternate globals
93	wrpr		%g5, PSTATE_AG|PSTATE_MG, %pstate
94	rdpr		%tl, %g4			! See where we came from.
95	cmp		%g4, 1				! Is etrap/rtrap window fault?
96	mov		TLB_TAG_ACCESS, %g4		! Prepare for fault processing
97	ldxa		[%g4] ASI_DMMU, %g5		! Load faulting VA page
98	be,pt		%xcc, sparc64_realfault_common	! Jump to normal fault handling
99	 mov		FAULT_CODE_DTLB, %g4		! It was read from DTLB
100
101/* DTLB ** ICACHE line 4: Unused...	*/
102	ba,a,pt		%xcc, winfix_trampoline		! Call window fixup code
103	nop
104	nop
105	nop
106	nop
107	nop
108	nop
109	CREATE_VPTE_NOP
110
111#undef CREATE_VPTE_OFFSET1
112#undef CREATE_VPTE_OFFSET2
113#undef CREATE_VPTE_NOP
diff --git a/arch/sparc64/kernel/dtlb_prot.S b/arch/sparc64/kernel/dtlb_prot.S
new file mode 100644
index 000000000000..d848bb7374bb
--- /dev/null
+++ b/arch/sparc64/kernel/dtlb_prot.S
@@ -0,0 +1,54 @@
1/* $Id: dtlb_prot.S,v 1.22 2001/04/11 23:40:32 davem Exp $
2 * dtlb_prot.S: DTLB protection trap strategy.
3 * This is included directly into the trap table.
4 *
5 * Copyright (C) 1996,1998 David S. Miller (davem@redhat.com)
6 * Copyright (C) 1997,1998 Jakub Jelinek (jj@ultra.linux.cz)
7 */
8
9/* Ways we can get here:
10 *
11 * [TL == 0] 1) User stores to readonly pages.
12 * [TL == 0] 2) Nucleus stores to user readonly pages.
13 * [TL > 0] 3) Nucleus stores to user readonly stack frame.
14 */
15
/* NOTE(review): trap-table fragment padded with nops to fill its four
 * I-cache lines — the padding is deliberate, do not delete it.  All
 * protection faults funnel to sparc64_realfault_common with a
 * DTLB|WRITE fault code, or to winfix_trampoline when taken at TL>1.
 */
16/* PROT ** ICACHE line 1: User DTLB protection trap	*/
17	stxa		%g0, [%g1] ASI_DMMU		! Clear SFSR FaultValid bit
18	membar		#Sync				! Synchronize ASI stores
19	rdpr		%pstate, %g5			! Move into alternate globals
20	wrpr		%g5, PSTATE_AG|PSTATE_MG, %pstate
21	rdpr		%tl, %g1			! Need to do a winfixup?
22	cmp		%g1, 1				! Trap level >1?
23	mov		TLB_TAG_ACCESS, %g4		! Prepare reload of vaddr
24	nop
25
26/* PROT ** ICACHE line 2: More real fault processing */
27	bgu,pn		%xcc, winfix_trampoline		! Yes, perform winfixup
28	 ldxa		[%g4] ASI_DMMU, %g5		! Put tagaccess in %g5
29	ba,pt		%xcc, sparc64_realfault_common	! Nope, normal fault
30	 mov		FAULT_CODE_DTLB | FAULT_CODE_WRITE, %g4
31	nop
32	nop
33	nop
34	nop
35
36/* PROT ** ICACHE line 3: Unused...	*/
37	nop
38	nop
39	nop
40	nop
41	nop
42	nop
43	nop
44	nop
45
46/* PROT ** ICACHE line 4: Unused...	*/
47	nop
48	nop
49	nop
50	nop
51	nop
52	nop
53	nop
54	nop
diff --git a/arch/sparc64/kernel/ebus.c b/arch/sparc64/kernel/ebus.c
new file mode 100644
index 000000000000..6ffbeb701940
--- /dev/null
+++ b/arch/sparc64/kernel/ebus.c
@@ -0,0 +1,644 @@
1/* $Id: ebus.c,v 1.64 2001/11/08 04:41:33 davem Exp $
2 * ebus.c: PCI to EBus bridge device.
3 *
4 * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
5 * Copyright (C) 1999 David S. Miller (davem@redhat.com)
6 */
7
8#include <linux/config.h>
9#include <linux/module.h>
10#include <linux/kernel.h>
11#include <linux/types.h>
12#include <linux/init.h>
13#include <linux/slab.h>
14#include <linux/string.h>
15#include <linux/interrupt.h>
16#include <linux/delay.h>
17
18#include <asm/system.h>
19#include <asm/page.h>
20#include <asm/pbm.h>
21#include <asm/ebus.h>
22#include <asm/oplib.h>
23#include <asm/bpp.h>
24#include <asm/irq.h>
25
26/* EBUS dma library. */
27
28#define EBDMA_CSR 0x00UL /* Control/Status */
29#define EBDMA_ADDR 0x04UL /* DMA Address */
30#define EBDMA_COUNT 0x08UL /* DMA Count */
31
32#define EBDMA_CSR_INT_PEND 0x00000001
33#define EBDMA_CSR_ERR_PEND 0x00000002
34#define EBDMA_CSR_DRAIN 0x00000004
35#define EBDMA_CSR_INT_EN 0x00000010
36#define EBDMA_CSR_RESET 0x00000080
37#define EBDMA_CSR_WRITE 0x00000100
38#define EBDMA_CSR_EN_DMA 0x00000200
39#define EBDMA_CSR_CYC_PEND 0x00000400
40#define EBDMA_CSR_DIAG_RD_DONE 0x00000800
41#define EBDMA_CSR_DIAG_WR_DONE 0x00001000
42#define EBDMA_CSR_EN_CNT 0x00002000
43#define EBDMA_CSR_TC 0x00004000
44#define EBDMA_CSR_DIS_CSR_DRN 0x00010000
45#define EBDMA_CSR_BURST_SZ_MASK 0x000c0000
46#define EBDMA_CSR_BURST_SZ_1 0x00080000
47#define EBDMA_CSR_BURST_SZ_4 0x00000000
48#define EBDMA_CSR_BURST_SZ_8 0x00040000
49#define EBDMA_CSR_BURST_SZ_16 0x000c0000
50#define EBDMA_CSR_DIAG_EN 0x00100000
51#define EBDMA_CSR_DIS_ERR_PEND 0x00400000
52#define EBDMA_CSR_TCI_DIS 0x00800000
53#define EBDMA_CSR_EN_NEXT 0x01000000
54#define EBDMA_CSR_DMA_ON 0x02000000
55#define EBDMA_CSR_A_LOADED 0x04000000
56#define EBDMA_CSR_NA_LOADED 0x08000000
57#define EBDMA_CSR_DEV_ID_MASK 0xf0000000
58
59#define EBUS_DMA_RESET_TIMEOUT 10000
60
61static void __ebus_dma_reset(struct ebus_dma_info *p, int no_drain)
62{
63 int i;
64 u32 val = 0;
65
66 writel(EBDMA_CSR_RESET, p->regs + EBDMA_CSR);
67 udelay(1);
68
69 if (no_drain)
70 return;
71
72 for (i = EBUS_DMA_RESET_TIMEOUT; i > 0; i--) {
73 val = readl(p->regs + EBDMA_CSR);
74
75 if (!(val & (EBDMA_CSR_DRAIN | EBDMA_CSR_CYC_PEND)))
76 break;
77 udelay(10);
78 }
79}
80
/* Interrupt handler for an EBus DMA channel: read and acknowledge the
 * CSR, then dispatch error or completion events to the client callback.
 * Returns IRQ_NONE when no pending bit was ours (shared irq line).
 */
81static irqreturn_t ebus_dma_irq(int irq, void *dev_id, struct pt_regs *regs)
82{
83	struct ebus_dma_info *p = dev_id;
84	unsigned long flags;
85	u32 csr = 0;
86
	/* Writing the CSR value back clears the pending bits just read. */
87	spin_lock_irqsave(&p->lock, flags);
88	csr = readl(p->regs + EBDMA_CSR);
89	writel(csr, p->regs + EBDMA_CSR);
90	spin_unlock_irqrestore(&p->lock, flags);
91
92	if (csr & EBDMA_CSR_ERR_PEND) {
93		printk(KERN_CRIT "ebus_dma(%s): DMA error!\n", p->name);
94		p->callback(p, EBUS_DMA_EVENT_ERROR, p->client_cookie);
95		return IRQ_HANDLED;
96	} else if (csr & EBDMA_CSR_INT_PEND) {
		/* TC set: the transfer count expired (DMA completion);
		 * otherwise the device itself raised the interrupt. */
97		p->callback(p,
98			    (csr & EBDMA_CSR_TC) ?
99			    EBUS_DMA_EVENT_DMA : EBUS_DMA_EVENT_DEVICE,
100			    p->client_cookie);
101		return IRQ_HANDLED;
102	}
103
104	return IRQ_NONE;
105
106}
107
/* Register an EBus DMA channel described by *p (caller fills in regs,
 * flags, name and, when using the library irq handler, callback).
 * Resets the engine and programs an initial CSR (16-byte bursts, byte
 * counter enabled).  Returns 0, or -EINVAL for a malformed descriptor.
 */
108int ebus_dma_register(struct ebus_dma_info *p)
109{
110	u32 csr;
111
112	if (!p->regs)
113		return -EINVAL;
114	if (p->flags & ~(EBUS_DMA_FLAG_USE_EBDMA_HANDLER |
115			 EBUS_DMA_FLAG_TCI_DISABLE))
116		return -EINVAL;
	/* The built-in irq handler needs a client callback to call. */
117	if ((p->flags & EBUS_DMA_FLAG_USE_EBDMA_HANDLER) && !p->callback)
118		return -EINVAL;
119	if (!strlen(p->name))
120		return -EINVAL;
121
	/* No transfers can be in flight yet, so skip the drain wait. */
122	__ebus_dma_reset(p, 1);
123
124	csr = EBDMA_CSR_BURST_SZ_16 | EBDMA_CSR_EN_CNT;
125
126	if (p->flags & EBUS_DMA_FLAG_TCI_DISABLE)
127		csr |= EBDMA_CSR_TCI_DIS;
128
129	writel(csr, p->regs + EBDMA_CSR);
130
131	return 0;
132}
133EXPORT_SYMBOL(ebus_dma_register);
134
/* Enable (@on != 0) or disable the channel interrupt.  When the channel
 * uses the library's irq handler, the irq line itself is requested /
 * freed here too.  Returns 0, or -EBUSY if request_irq() fails.
 */
135int ebus_dma_irq_enable(struct ebus_dma_info *p, int on)
136{
137	unsigned long flags;
138	u32 csr;
139
140	if (on) {
		/* Claim the (shared) irq line before unmasking. */
141		if (p->flags & EBUS_DMA_FLAG_USE_EBDMA_HANDLER) {
142			if (request_irq(p->irq, ebus_dma_irq, SA_SHIRQ, p->name, p))
143				return -EBUSY;
144		}
145
146		spin_lock_irqsave(&p->lock, flags);
147		csr = readl(p->regs + EBDMA_CSR);
148		csr |= EBDMA_CSR_INT_EN;
149		writel(csr, p->regs + EBDMA_CSR);
150		spin_unlock_irqrestore(&p->lock, flags);
151	} else {
		/* Mask in hardware first, then release the irq line. */
152		spin_lock_irqsave(&p->lock, flags);
153		csr = readl(p->regs + EBDMA_CSR);
154		csr &= ~EBDMA_CSR_INT_EN;
155		writel(csr, p->regs + EBDMA_CSR);
156		spin_unlock_irqrestore(&p->lock, flags);
157
158		if (p->flags & EBUS_DMA_FLAG_USE_EBDMA_HANDLER) {
159			free_irq(p->irq, p);
160		}
161	}
162
163	return 0;
164}
165EXPORT_SYMBOL(ebus_dma_irq_enable);
166
167void ebus_dma_unregister(struct ebus_dma_info *p)
168{
169 unsigned long flags;
170 u32 csr;
171 int irq_on = 0;
172
173 spin_lock_irqsave(&p->lock, flags);
174 csr = readl(p->regs + EBDMA_CSR);
175 if (csr & EBDMA_CSR_INT_EN) {
176 csr &= ~EBDMA_CSR_INT_EN;
177 writel(csr, p->regs + EBDMA_CSR);
178 irq_on = 1;
179 }
180 spin_unlock_irqrestore(&p->lock, flags);
181
182 if (irq_on)
183 free_irq(p->irq, p);
184}
185EXPORT_SYMBOL(ebus_dma_unregister);
186
/* Queue one DMA transfer of @len bytes at bus address @bus_addr.
 * Returns 0 on success, -EINVAL for a bad length or a disabled engine,
 * -EBUSY when the next-address slot is already loaded.
 */
187int ebus_dma_request(struct ebus_dma_info *p, dma_addr_t bus_addr, size_t len)
188{
189	unsigned long flags;
190	u32 csr;
191	int err;
192
	/* The hardware byte counter is 24 bits wide. */
193	if (len >= (1 << 24))
194		return -EINVAL;
195
196	spin_lock_irqsave(&p->lock, flags);
197	csr = readl(p->regs + EBDMA_CSR);
198	err = -EINVAL;
199	if (!(csr & EBDMA_CSR_EN_DMA))
200		goto out;
201	err = -EBUSY;
202	if (csr & EBDMA_CSR_NA_LOADED)
203		goto out;
204
	/* Count is written before the address — presumably the address
	 * write arms the transfer; confirm against chip docs. */
205	writel(len, p->regs + EBDMA_COUNT);
206	writel(bus_addr, p->regs + EBDMA_ADDR);
207	err = 0;
208
209out:
210	spin_unlock_irqrestore(&p->lock, flags);
211
212	return err;
213}
214EXPORT_SYMBOL(ebus_dma_request);
215
216void ebus_dma_prepare(struct ebus_dma_info *p, int write)
217{
218 unsigned long flags;
219 u32 csr;
220
221 spin_lock_irqsave(&p->lock, flags);
222 __ebus_dma_reset(p, 0);
223
224 csr = (EBDMA_CSR_INT_EN |
225 EBDMA_CSR_EN_CNT |
226 EBDMA_CSR_BURST_SZ_16 |
227 EBDMA_CSR_EN_NEXT);
228
229 if (write)
230 csr |= EBDMA_CSR_WRITE;
231 if (p->flags & EBUS_DMA_FLAG_TCI_DISABLE)
232 csr |= EBDMA_CSR_TCI_DIS;
233
234 writel(csr, p->regs + EBDMA_CSR);
235
236 spin_unlock_irqrestore(&p->lock, flags);
237}
238EXPORT_SYMBOL(ebus_dma_prepare);
239
/* Bytes remaining in the current transfer (hardware count register). */
240unsigned int ebus_dma_residue(struct ebus_dma_info *p)
241{
242	return readl(p->regs + EBDMA_COUNT);
243}
244EXPORT_SYMBOL(ebus_dma_residue);
245
/* Current DMA bus address (hardware address register). */
246unsigned int ebus_dma_addr(struct ebus_dma_info *p)
247{
248	return readl(p->regs + EBDMA_ADDR);
249}
250EXPORT_SYMBOL(ebus_dma_addr);
251
252void ebus_dma_enable(struct ebus_dma_info *p, int on)
253{
254 unsigned long flags;
255 u32 orig_csr, csr;
256
257 spin_lock_irqsave(&p->lock, flags);
258 orig_csr = csr = readl(p->regs + EBDMA_CSR);
259 if (on)
260 csr |= EBDMA_CSR_EN_DMA;
261 else
262 csr &= ~EBDMA_CSR_EN_DMA;
263 if ((orig_csr & EBDMA_CSR_EN_DMA) !=
264 (csr & EBDMA_CSR_EN_DMA))
265 writel(csr, p->regs + EBDMA_CSR);
266 spin_unlock_irqrestore(&p->lock, flags);
267}
268EXPORT_SYMBOL(ebus_dma_enable);
269
270struct linux_ebus *ebus_chain = NULL;
271
272#ifdef CONFIG_SUN_AUXIO
273extern void auxio_probe(void);
274#endif
275
276static inline void *ebus_alloc(size_t size)
277{
278 void *mem;
279
280 mem = kmalloc(size, GFP_ATOMIC);
281 if (!mem)
282 panic("ebus_alloc: out of memory");
283 memset((char *)mem, 0, size);
284 return mem;
285}
286
287static void __init ebus_ranges_init(struct linux_ebus *ebus)
288{
289 int success;
290
291 ebus->num_ebus_ranges = 0;
292 success = prom_getproperty(ebus->prom_node, "ranges",
293 (char *)ebus->ebus_ranges,
294 sizeof(ebus->ebus_ranges));
295 if (success != -1)
296 ebus->num_ebus_ranges = (success/sizeof(struct linux_prom_ebus_ranges));
297}
298
/* Read the ebus "interrupt-map" and "interrupt-map-mask" properties.
 * A missing map is fine (num_ebus_intmap stays 0); a map without a
 * mask is fatal because matching would be meaningless.
 */
299static void __init ebus_intmap_init(struct linux_ebus *ebus)
300{
301	int success;
302
303	ebus->num_ebus_intmap = 0;
304	success = prom_getproperty(ebus->prom_node, "interrupt-map",
305				   (char *)ebus->ebus_intmap,
306				   sizeof(ebus->ebus_intmap));
307	if (success == -1)
308		return;
309
310	ebus->num_ebus_intmap = (success/sizeof(struct linux_prom_ebus_intmap));
311
312	success = prom_getproperty(ebus->prom_node, "interrupt-map-mask",
313				   (char *)&ebus->ebus_intmask,
314				   sizeof(ebus->ebus_intmask));
315	if (success == -1) {
316		prom_printf("%s: can't get interrupt-map-mask\n", __FUNCTION__);
317		prom_halt();
318	}
319}
320
321int __init ebus_intmap_match(struct linux_ebus *ebus,
322 struct linux_prom_registers *reg,
323 int *interrupt)
324{
325 unsigned int hi, lo, irq;
326 int i;
327
328 if (!ebus->num_ebus_intmap)
329 return 0;
330
331 hi = reg->which_io & ebus->ebus_intmask.phys_hi;
332 lo = reg->phys_addr & ebus->ebus_intmask.phys_lo;
333 irq = *interrupt & ebus->ebus_intmask.interrupt;
334 for (i = 0; i < ebus->num_ebus_intmap; i++) {
335 if ((ebus->ebus_intmap[i].phys_hi == hi) &&
336 (ebus->ebus_intmap[i].phys_lo == lo) &&
337 (ebus->ebus_intmap[i].interrupt == irq)) {
338 *interrupt = ebus->ebus_intmap[i].cinterrupt;
339 return 0;
340 }
341 }
342 return -1;
343}
344
/* Probe one child node of an ebus device: record its name, resources
 * and interrupts into @dev.  @non_standard_regs selects raw recording
 * of the "reg" property for devices (i2c, lombus) whose regs are not
 * parent-relative.
 */
345void __init fill_ebus_child(int node, struct linux_prom_registers *preg,
346			    struct linux_ebus_child *dev, int non_standard_regs)
347{
348	int regs[PROMREG_MAX];
349	int irqs[PROMREG_MAX];
350	int i, len;
351
352	dev->prom_node = node;
353	prom_getstring(node, "name", dev->prom_name, sizeof(dev->prom_name));
354	printk(" (%s)", dev->prom_name);
355
356	len = prom_getproperty(node, "reg", (void *)regs, sizeof(regs));
357	dev->num_addrs = len / sizeof(regs[0]);
358
359	if (non_standard_regs) {
360		/* This is to handle reg properties which are not
361		 * in the parent relative format.  One example are
362		 * children of the i2c device on CompactPCI systems.
363		 *
364		 * So, for such devices we just record the property
365		 * raw in the child resources.
366		 */
367		for (i = 0; i < dev->num_addrs; i++)
368			dev->resource[i].start = regs[i];
369	} else {
370		for (i = 0; i < dev->num_addrs; i++) {
371			int rnum = regs[i];
372			if (rnum >= dev->parent->num_addrs) {
373				prom_printf("UGH: property for %s was %d, need < %d\n",
374					    dev->prom_name, len, dev->parent->num_addrs);
375				panic(__FUNCTION__);
376			}
			/* NOTE(review): rnum is validated above, yet the
			 * copies below index the parent's resources with i,
			 * not rnum; and the error message prints len where
			 * the offending register number seems intended.
			 * Verify against callers before changing. */
377			dev->resource[i].start = dev->parent->resource[i].start;
378			dev->resource[i].end = dev->parent->resource[i].end;
379			dev->resource[i].flags = IORESOURCE_MEM;
380			dev->resource[i].name = dev->prom_name;
381		}
382	}
383
384	for (i = 0; i < PROMINTR_MAX; i++)
385		dev->irqs[i] = PCI_IRQ_NONE;
386
387	len = prom_getproperty(node, "interrupts", (char *)&irqs, sizeof(irqs));
388	if ((len == -1) || (len == 0)) {
389		dev->num_irqs = 0;
390		/*
391		 * Oh, well, some PROMs don't export interrupts
392		 * property to children of EBus devices...
393		 *
394		 * Be smart about PS/2 keyboard and mouse.
395		 */
396		if (!strcmp(dev->parent->prom_name, "8042")) {
			/* Keyboard gets the parent's first irq, any
			 * other (mouse) the second. */
397			if (!strcmp(dev->prom_name, "kb_ps2")) {
398				dev->num_irqs = 1;
399				dev->irqs[0] = dev->parent->irqs[0];
400			} else {
401				dev->num_irqs = 1;
402				dev->irqs[0] = dev->parent->irqs[1];
403			}
404		}
405	} else {
406		dev->num_irqs = len / sizeof(irqs[0]);
407		for (i = 0; i < dev->num_irqs; i++) {
408			struct pci_pbm_info *pbm = dev->bus->parent;
409			struct pci_controller_info *p = pbm->parent;
410
411			if (ebus_intmap_match(dev->bus, preg, &irqs[i]) != -1) {
412				dev->irqs[i] = p->irq_build(pbm,
413							    dev->bus->self,
414							    irqs[i]);
415			} else {
416				/* If we get a bogus interrupt property, just
417				 * record the raw value instead of punting.
418				 */
419				dev->irqs[i] = irqs[i];
420			}
421		}
422	}
423}
424
425static int __init child_regs_nonstandard(struct linux_ebus_device *dev)
426{
427 if (!strcmp(dev->prom_name, "i2c") ||
428 !strcmp(dev->prom_name, "SUNW,lombus"))
429 return 1;
430 return 0;
431}
432
/* Probe one ebus device node: record name, map its "reg" entries into
 * the parent PCI bridge's resources, resolve interrupts through the
 * ebus interrupt-map, and recursively probe child nodes.
 */
433void __init fill_ebus_device(int node, struct linux_ebus_device *dev)
434{
435	struct linux_prom_registers regs[PROMREG_MAX];
436	struct linux_ebus_child *child;
437	int irqs[PROMINTR_MAX];
438	int i, n, len;
439
440	dev->prom_node = node;
441	prom_getstring(node, "name", dev->prom_name, sizeof(dev->prom_name));
442	printk(" [%s", dev->prom_name);
443
444	len = prom_getproperty(node, "reg", (void *)regs, sizeof(regs));
445	if (len == -1) {
446		dev->num_addrs = 0;
447		goto probe_interrupts;
448	}
449
	/* "reg" must be a whole number of register descriptors. */
450	if (len % sizeof(struct linux_prom_registers)) {
451		prom_printf("UGH: proplen for %s was %d, need multiple of %d\n",
452			    dev->prom_name, len,
453			    (int)sizeof(struct linux_prom_registers));
454		prom_halt();
455	}
456	dev->num_addrs = len / sizeof(struct linux_prom_registers);
457
458	for (i = 0; i < dev->num_addrs; i++) {
459		/* XXX Learn how to interpret ebus ranges... -DaveM */
460		if (regs[i].which_io >= 0x10)
461			n = (regs[i].which_io - 0x10) >> 2;
462		else
463			n = regs[i].which_io;
464
		/* Offset into the bridge's BAR resource n. */
465		dev->resource[i].start  = dev->bus->self->resource[n].start;
466		dev->resource[i].start += (unsigned long)regs[i].phys_addr;
467		dev->resource[i].end    =
468			(dev->resource[i].start + (unsigned long)regs[i].reg_size - 1UL);
469		dev->resource[i].flags  = IORESOURCE_MEM;
470		dev->resource[i].name   = dev->prom_name;
471		request_resource(&dev->bus->self->resource[n],
472				 &dev->resource[i]);
473	}
474
475probe_interrupts:
476	for (i = 0; i < PROMINTR_MAX; i++)
477		dev->irqs[i] = PCI_IRQ_NONE;
478
479	len = prom_getproperty(node, "interrupts", (char *)&irqs, sizeof(irqs));
480	if ((len == -1) || (len == 0)) {
481		dev->num_irqs = 0;
482	} else {
483		dev->num_irqs = len / sizeof(irqs[0]);
484		for (i = 0; i < dev->num_irqs; i++) {
485			struct pci_pbm_info *pbm = dev->bus->parent;
486			struct pci_controller_info *p = pbm->parent;
487
488			if (ebus_intmap_match(dev->bus, &regs[0], &irqs[i]) != -1) {
489				dev->irqs[i] = p->irq_build(pbm,
490							    dev->bus->self,
491							    irqs[i]);
492			} else {
493				/* If we get a bogus interrupt property, just
494				 * record the raw value instead of punting.
495				 */
496				dev->irqs[i] = irqs[i];
497			}
498		}
499	}
500
	/* Build the singly-linked list of child devices under this node. */
501	if ((node = prom_getchild(node))) {
502		printk(" ->");
503		dev->children = ebus_alloc(sizeof(struct linux_ebus_child));
504
505		child = dev->children;
506		child->next = NULL;
507		child->parent = dev;
508		child->bus = dev->bus;
509		fill_ebus_child(node, &regs[0],
510				child, child_regs_nonstandard(dev));
511
512		while ((node = prom_getsibling(node)) != 0) {
513			child->next = ebus_alloc(sizeof(struct linux_ebus_child));
514
515			child = child->next;
516			child->next = NULL;
517			child->parent = dev;
518			child->bus = dev->bus;
519			fill_ebus_child(node, &regs[0],
520					child, child_regs_nonstandard(dev));
521		}
522	}
523	printk("]");
524}
525
526static struct pci_dev *find_next_ebus(struct pci_dev *start, int *is_rio_p)
527{
528 struct pci_dev *pdev = start;
529
530 do {
531 pdev = pci_find_device(PCI_VENDOR_ID_SUN, PCI_ANY_ID, pdev);
532 if (pdev &&
533 (pdev->device == PCI_DEVICE_ID_SUN_EBUS ||
534 pdev->device == PCI_DEVICE_ID_SUN_RIO_EBUS))
535 break;
536 } while (pdev != NULL);
537
538 if (pdev && (pdev->device == PCI_DEVICE_ID_SUN_RIO_EBUS))
539 *is_rio_p = 1;
540 else
541 *is_rio_p = 0;
542
543 return pdev;
544}
545
/* Top-level EBus probe: walk all EBus PCI bridges, building the global
 * ebus_chain list with one linux_ebus per bridge and a device list per
 * ebus.  Empty bridges (e.g. on SUNW,pci-qfe cards) are skipped.
 */
546void __init ebus_init(void)
547{
548	struct pci_pbm_info *pbm;
549	struct linux_ebus_device *dev;
550	struct linux_ebus *ebus;
551	struct pci_dev *pdev;
552	struct pcidev_cookie *cookie;
553	int nd, ebusnd, is_rio;
554	int num_ebus = 0;
555
556	pdev = find_next_ebus(NULL, &is_rio);
557	if (!pdev) {
558		printk("ebus: No EBus's found.\n");
559		return;
560	}
561
	/* The PCI layer stashed the PROM node in the device cookie. */
562	cookie = pdev->sysdata;
563	ebusnd = cookie->prom_node;
564
565	ebus_chain = ebus = ebus_alloc(sizeof(struct linux_ebus));
566	ebus->next = NULL;
567	ebus->is_rio = is_rio;
568
569	while (ebusnd) {
570		/* SUNW,pci-qfe uses four empty ebuses on it.
571		   I think we should not consider them here,
572		   as they have half of the properties this
573		   code expects and once we do PCI hot-plug,
574		   we'd have to tweak with the ebus_chain
575		   in the runtime after initialization. -jj */
576		if (!prom_getchild (ebusnd)) {
577			pdev = find_next_ebus(pdev, &is_rio);
578			if (!pdev) {
				/* Nothing real was ever found: drop the
				 * preallocated head and bail. */
579				if (ebus == ebus_chain) {
580					ebus_chain = NULL;
581					printk("ebus: No EBus's found.\n");
582					return;
583				}
584				break;
585			}
586			ebus->is_rio = is_rio;
587			cookie = pdev->sysdata;
588			ebusnd = cookie->prom_node;
589			continue;
590		}
591		printk("ebus%d:", num_ebus);
592
593		prom_getstring(ebusnd, "name", ebus->prom_name, sizeof(ebus->prom_name));
594		ebus->index = num_ebus;
595		ebus->prom_node = ebusnd;
596		ebus->self = pdev;
597		ebus->parent = pbm = cookie->pbm;
598
599		ebus_ranges_init(ebus);
600		ebus_intmap_init(ebus);
601
602		nd = prom_getchild(ebusnd);
603		if (!nd)
604			goto next_ebus;
605
		/* First child starts the device list; siblings append. */
606		ebus->devices = ebus_alloc(sizeof(struct linux_ebus_device));
607
608		dev = ebus->devices;
609		dev->next = NULL;
610		dev->children = NULL;
611		dev->bus = ebus;
612		fill_ebus_device(nd, dev);
613
614		while ((nd = prom_getsibling(nd)) != 0) {
615			dev->next = ebus_alloc(sizeof(struct linux_ebus_device));
616
617			dev = dev->next;
618			dev->next = NULL;
619			dev->children = NULL;
620			dev->bus = ebus;
621			fill_ebus_device(nd, dev);
622		}
623
624	next_ebus:
625		printk("\n");
626
627		pdev = find_next_ebus(pdev, &is_rio);
628		if (!pdev)
629			break;
630
631		cookie = pdev->sysdata;
632		ebusnd = cookie->prom_node;
633
634		ebus->next = ebus_alloc(sizeof(struct linux_ebus));
635		ebus = ebus->next;
636		ebus->next = NULL;
637		ebus->is_rio = is_rio;
638		++num_ebus;
639	}
640
641#ifdef CONFIG_SUN_AUXIO
642	auxio_probe();
643#endif
644}
diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
new file mode 100644
index 000000000000..a47f2d0b1a29
--- /dev/null
+++ b/arch/sparc64/kernel/entry.S
@@ -0,0 +1,1919 @@
1/* $Id: entry.S,v 1.144 2002/02/09 19:49:30 davem Exp $
2 * arch/sparc64/kernel/entry.S: Sparc64 trap low-level entry points.
3 *
4 * Copyright (C) 1995,1997 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
6 * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
7 * Copyright (C) 1996,98,99 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
8 */
9
10#include <linux/config.h>
11#include <linux/errno.h>
12
13#include <asm/head.h>
14#include <asm/asi.h>
15#include <asm/smp.h>
16#include <asm/ptrace.h>
17#include <asm/page.h>
18#include <asm/signal.h>
19#include <asm/pgtable.h>
20#include <asm/processor.h>
21#include <asm/visasm.h>
22#include <asm/estate.h>
23#include <asm/auxio.h>
24
25/* #define SYSCALL_TRACING 1 */
26
27#define curptr g6
28
29#define NR_SYSCALLS 284 /* Each OS is different... */
30
31 .text
32 .align 32
33
	.globl		sparc64_vpte_patchme1
	.globl		sparc64_vpte_patchme2
/*
 * On a second level vpte miss, check whether the original fault is to the OBP
 * range (note that this is only possible for instruction miss, data misses to
 * obp range do not use vpte). If so, go back directly to the faulting address.
 * This is because we want to read the tpc, otherwise we have no way of knowing
 * the 8k aligned faulting address if we are using >8k kernel pagesize. This
 * also ensures no vpte range addresses are dropped into tlb while obp is
 * executing (see inherit_locked_prom_mappings() rant).
 */
sparc64_vpte_nucleus:
	/* Load 0xf0000000, which is LOW_OBP_ADDRESS. */
	mov		0xf, %g5
	sllx		%g5, 28, %g5

	/* Is addr >= LOW_OBP_ADDRESS?  If not, fall through to the
	 * patched kernel-PGD load below (delay slot seeds %g5 = 1 for
	 * the HI_OBP_ADDRESS computation on the taken path).
	 */
	cmp		%g4, %g5
	blu,pn		%xcc, sparc64_vpte_patchme1
	mov		0x1, %g5

	/* Load 0x100000000, which is HI_OBP_ADDRESS. */
	sllx		%g5, 32, %g5

	/* Is addr < HI_OBP_ADDRESS?  If so this is an OBP-range
	 * instruction fault; handle it via the patched OBP path.
	 */
	cmp		%g4, %g5
	blu,pn		%xcc, obp_iaddr_patch
	nop

	/* These two instructions are patched by paging_init(). */
sparc64_vpte_patchme1:
	sethi		%hi(0), %g5
sparc64_vpte_patchme2:
	or		%g5, %lo(0), %g5

	/* With kernel PGD in %g5, branch back into dtlb_backend. */
	ba,pt		%xcc, sparc64_kpte_continue
	andn		%g1, 0x3, %g1	/* Finish PMD offset adjustment. */
72
vpte_noent:
	/* Restore previous TAG_ACCESS, %g5 is zero, and we will
	 * skip over the trap instruction so that the top level
	 * TLB miss handler will think this %g5 value is just an
	 * invalid PTE, thus branching to full fault processing.
	 */
	mov		TLB_SFSR, %g1
	/* NOTE(review): %g1 + %g1 appears to form the TAG_ACCESS MMU
	 * register offset (i.e. TAG_ACCESS == 2 * TLB_SFSR) — confirm
	 * against the asm MMU register definitions.
	 */
	stxa		%g4, [%g1 + %g1] ASI_DMMU
	done
82
	/* Handle an instruction-TLB miss to the OBP address range by
	 * walking the 8k-pagesize mapping info saved at boot, then
	 * loading the PTE directly into the ITLB.
	 */
	.globl		obp_iaddr_patch
obp_iaddr_patch:
	/* These two instructions patched by inherit_prom_mappings();
	 * they materialize the physical address of the saved OBP
	 * page-table directory into %g5.
	 */
	sethi		%hi(0), %g5
	or		%g5, %lo(0), %g5

	/* Behave as if we are at TL0. */
	wrpr		%g0, 1, %tl
	rdpr		%tpc, %g4	/* Find original faulting iaddr */
	srlx		%g4, 13, %g4	/* Throw out context bits */
	sllx		%g4, 13, %g4	/* g4 has vpn + ctx0 now */

	/* Restore previous TAG_ACCESS ([%g1 + %g1] == TAG_ACCESS
	 * offset — see note at vpte_noent).
	 */
	mov		TLB_SFSR, %g1
	stxa		%g4, [%g1 + %g1] ASI_IMMU

	/* Get PMD offset. */
	srlx		%g4, 23, %g6
	and		%g6, 0x7ff, %g6
	sllx		%g6, 2, %g6

	/* Load PMD, is it valid? */
	lduwa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
	brz,pn		%g5, longpath
	sllx		%g5, 11, %g5

	/* Get PTE offset. */
	srlx		%g4, 13, %g6
	and		%g6, 0x3ff, %g6
	sllx		%g6, 3, %g6

	/* Load PTE; negative (valid bit clear in MSB sense) means
	 * take the slow path.
	 */
	ldxa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
	brgez,pn	%g5, longpath
	nop

	/* TLB load and return from trap. */
	stxa		%g5, [%g0] ASI_ITLB_DATA_IN
	retry
122
	/* Data-TLB analogue of obp_iaddr_patch: resolve an OBP-range
	 * data miss through the saved 8k mapping info and load the
	 * PTE into the DTLB.
	 */
	.globl		obp_daddr_patch
obp_daddr_patch:
	/* These two instructions patched by inherit_prom_mappings(). */
	sethi		%hi(0), %g5
	or		%g5, %lo(0), %g5

	/* Get PMD offset. */
	srlx		%g4, 23, %g6
	and		%g6, 0x7ff, %g6
	sllx		%g6, 2, %g6

	/* Load PMD, is it valid? */
	lduwa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
	brz,pn		%g5, longpath
	sllx		%g5, 11, %g5

	/* Get PTE offset. */
	srlx		%g4, 13, %g6
	and		%g6, 0x3ff, %g6
	sllx		%g6, 3, %g6

	/* Load PTE. */
	ldxa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
	brgez,pn	%g5, longpath
	nop

	/* TLB load and return from trap. */
	stxa		%g5, [%g0] ASI_DTLB_DATA_IN
	retry
152
153/*
154 * On a first level data miss, check whether this is to the OBP range (note
155 * that such accesses can be made by prom, as well as by kernel using
156 * prom_getproperty on "address"), and if so, do not use vpte access ...
157 * rather, use information saved during inherit_prom_mappings() using 8k
158 * pagesize.
159 */
160kvmap:
161 /* Load 0xf0000000, which is LOW_OBP_ADDRESS. */
162 mov 0xf, %g5
163 sllx %g5, 28, %g5
164
165 /* Is addr >= LOW_OBP_ADDRESS? */
166 cmp %g4, %g5
167 blu,pn %xcc, vmalloc_addr
168 mov 0x1, %g5
169
170 /* Load 0x100000000, which is HI_OBP_ADDRESS. */
171 sllx %g5, 32, %g5
172
173 /* Is addr < HI_OBP_ADDRESS? */
174 cmp %g4, %g5
175 blu,pn %xcc, obp_daddr_patch
176 nop
177
178vmalloc_addr:
179 /* If we get here, a vmalloc addr accessed, load kernel VPTE. */
180 ldxa [%g3 + %g6] ASI_N, %g5
181 brgez,pn %g5, longpath
182 nop
183
184 /* PTE is valid, load into TLB and return from trap. */
185 stxa %g5, [%g0] ASI_DTLB_DATA_IN ! Reload TLB
186 retry
187
	/* This is trivial with the new code...
	 *
	 * FPU-disabled trap handler: re-enables the FPU and lazily
	 * restores FP state.  If the thread has no saved FP state
	 * (TI_FPSAVED bits clear) the registers are cleared via the
	 * fzero/faddd/fmuld chains; otherwise the saved blocks are
	 * loaded from the thread_info FP save area (TI_FPREGS) with
	 * block-load ASIs.  The cplus_fptrap_insn_* sites are patched
	 * at boot for Cheetah+ secondary-context handling.
	 */
	.globl		do_fpdis
do_fpdis:
	sethi		%hi(TSTATE_PEF), %g4		! IEU0
	rdpr		%tstate, %g5
	andcc		%g5, %g4, %g0
	be,pt		%xcc, 1f
	nop
	rd		%fprs, %g5
	andcc		%g5, FPRS_FEF, %g0
	be,pt		%xcc, 1f
	nop

	/* Legal state when DCR_IFPOE is set in Cheetah %dcr. */
	sethi		%hi(109f), %g7
	ba,pt		%xcc, etrap
109:	or		%g7, %lo(109b), %g7
	add		%g0, %g0, %g0
	ba,a,pt		%xcc, rtrap_clr_l6

1:	ldub		[%g6 + TI_FPSAVED], %g5		! Load	Group
	wr		%g0, FPRS_FEF, %fprs		! LSU	Group+4bubbles
	andcc		%g5, FPRS_FEF, %g0		! IEU1	Group
	be,a,pt		%icc, 1f			! CTI
	clr		%g7				! IEU0
	ldx		[%g6 + TI_GSR], %g7		! Load	Group
1:	andcc		%g5, FPRS_DL, %g0		! IEU1
	bne,pn		%icc, 2f			! CTI
	fzero		%f0				! FPA
	andcc		%g5, FPRS_DU, %g0		! IEU1  Group
	bne,pn		%icc, 1f			! CTI
	fzero		%f2				! FPA
	/* Neither half saved: zero the whole register file by
	 * propagating the zeroed %f0/%f2 through add/mul chains.
	 */
	faddd		%f0, %f2, %f4
	fmuld		%f0, %f2, %f6
	faddd		%f0, %f2, %f8
	fmuld		%f0, %f2, %f10
	faddd		%f0, %f2, %f12
	fmuld		%f0, %f2, %f14
	faddd		%f0, %f2, %f16
	fmuld		%f0, %f2, %f18
	faddd		%f0, %f2, %f20
	fmuld		%f0, %f2, %f22
	faddd		%f0, %f2, %f24
	fmuld		%f0, %f2, %f26
	faddd		%f0, %f2, %f28
	fmuld		%f0, %f2, %f30
	faddd		%f0, %f2, %f32
	fmuld		%f0, %f2, %f34
	faddd		%f0, %f2, %f36
	fmuld		%f0, %f2, %f38
	faddd		%f0, %f2, %f40
	fmuld		%f0, %f2, %f42
	faddd		%f0, %f2, %f44
	fmuld		%f0, %f2, %f46
	faddd		%f0, %f2, %f48
	fmuld		%f0, %f2, %f50
	faddd		%f0, %f2, %f52
	fmuld		%f0, %f2, %f54
	faddd		%f0, %f2, %f56
	fmuld		%f0, %f2, %f58
	b,pt		%xcc, fpdis_exit2
	faddd		%f0, %f2, %f60
	/* Lower half clean, upper half saved: restore %f32..%f62. */
1:	mov		SECONDARY_CONTEXT, %g3
	add		%g6, TI_FPREGS + 0x80, %g1
	faddd		%f0, %f2, %f4
	fmuld		%f0, %f2, %f6
	ldxa		[%g3] ASI_DMMU, %g5
cplus_fptrap_insn_1:
	sethi		%hi(0), %g2
	stxa		%g2, [%g3] ASI_DMMU
	membar		#Sync
	add		%g6, TI_FPREGS + 0xc0, %g2
	faddd		%f0, %f2, %f8
	fmuld		%f0, %f2, %f10
	ldda		[%g1] ASI_BLK_S, %f32	! grrr, where is ASI_BLK_NUCLEUS 8-(
	ldda		[%g2] ASI_BLK_S, %f48
	faddd		%f0, %f2, %f12
	fmuld		%f0, %f2, %f14
	faddd		%f0, %f2, %f16
	fmuld		%f0, %f2, %f18
	faddd		%f0, %f2, %f20
	fmuld		%f0, %f2, %f22
	faddd		%f0, %f2, %f24
	fmuld		%f0, %f2, %f26
	faddd		%f0, %f2, %f28
	fmuld		%f0, %f2, %f30
	b,pt		%xcc, fpdis_exit
	membar		#Sync
	/* Lower half saved: restore %f0..%f30, maybe also upper. */
2:	andcc		%g5, FPRS_DU, %g0
	bne,pt		%icc, 3f
	fzero		%f32
	mov		SECONDARY_CONTEXT, %g3
	fzero		%f34
	ldxa		[%g3] ASI_DMMU, %g5
	add		%g6, TI_FPREGS, %g1
cplus_fptrap_insn_2:
	sethi		%hi(0), %g2
	stxa		%g2, [%g3] ASI_DMMU
	membar		#Sync
	add		%g6, TI_FPREGS + 0x40, %g2
	faddd		%f32, %f34, %f36
	fmuld		%f32, %f34, %f38
	ldda		[%g1] ASI_BLK_S, %f0	! grrr, where is ASI_BLK_NUCLEUS 8-(
	ldda		[%g2] ASI_BLK_S, %f16
	faddd		%f32, %f34, %f40
	fmuld		%f32, %f34, %f42
	faddd		%f32, %f34, %f44
	fmuld		%f32, %f34, %f46
	faddd		%f32, %f34, %f48
	fmuld		%f32, %f34, %f50
	faddd		%f32, %f34, %f52
	fmuld		%f32, %f34, %f54
	faddd		%f32, %f34, %f56
	fmuld		%f32, %f34, %f58
	faddd		%f32, %f34, %f60
	fmuld		%f32, %f34, %f62
	ba,pt		%xcc, fpdis_exit
	membar		#Sync
	/* Both halves saved: restore all 64 FP registers. */
3:	mov		SECONDARY_CONTEXT, %g3
	add		%g6, TI_FPREGS, %g1
	ldxa		[%g3] ASI_DMMU, %g5
cplus_fptrap_insn_3:
	sethi		%hi(0), %g2
	stxa		%g2, [%g3] ASI_DMMU
	membar		#Sync
	mov		0x40, %g2
	ldda		[%g1] ASI_BLK_S, %f0	! grrr, where is ASI_BLK_NUCLEUS 8-(
	ldda		[%g1 + %g2] ASI_BLK_S, %f16
	add		%g1, 0x80, %g1
	ldda		[%g1] ASI_BLK_S, %f32
	ldda		[%g1 + %g2] ASI_BLK_S, %f48
	membar		#Sync
fpdis_exit:
	/* Restore the saved secondary context register. */
	stxa		%g5, [%g3] ASI_DMMU
	membar		#Sync
fpdis_exit2:
	wr		%g7, 0, %gsr
	ldx		[%g6 + TI_XFSR], %fsr
	rdpr		%tstate, %g3
	or		%g3, %g4, %g3		! anal...
	wrpr		%g3, %tstate
	wr		%g0, FPRS_FEF, %fprs	! clean DU/DL bits
	retry
331
	.align		32
fp_other_bounce:
	/* Slow path for fp-exception-other: hand off to the C
	 * handler do_fpother() with pt_regs, then return via rtrap.
	 */
	call		do_fpother
	add		%sp, PTREGS_OFF, %o0
	ba,pt		%xcc, rtrap
	clr		%l6
338
	/* fp-exception-other entry: check whether the trap is an
	 * "unfinished FP-op" on a fitos instruction and, if so,
	 * emulate it inline via fitod + fdtos through %f62; anything
	 * else bounces to the generic do_fpother() C handler.
	 */
	.globl		do_fpother_check_fitos
	.align		32
do_fpother_check_fitos:
	sethi		%hi(fp_other_bounce - 4), %g7
	or		%g7, %lo(fp_other_bounce - 4), %g7

	/* NOTE: Need to preserve %g7 until we fully commit
	 * to the fitos fixup.
	 */
	stx		%fsr, [%g6 + TI_XFSR]
	rdpr		%tstate, %g3
	andcc		%g3, TSTATE_PRIV, %g0
	bne,pn		%xcc, do_fptrap_after_fsr
	nop
	ldx		[%g6 + TI_XFSR], %g3
	/* FSR.ftt must be 2 (unfinished FP-op) and the inexact
	 * accrued-exception bit must be clear, else bounce.
	 */
	srlx		%g3, 14, %g1
	and		%g1, 7, %g1
	cmp		%g1, 2			! Unfinished FP-OP
	bne,pn		%xcc, do_fptrap_after_fsr
	sethi		%hi(1 << 23), %g1	! Inexact
	andcc		%g3, %g1, %g0
	bne,pn		%xcc, do_fptrap_after_fsr
	rdpr		%tpc, %g1
	lduwa		[%g1] ASI_AIUP, %g3	! This cannot ever fail
	/* Match the fetched instruction against the fitos opcode
	 * pattern (mask out the rs2/rd register fields).
	 */
#define	FITOS_MASK	0xc1f83fe0
#define	FITOS_COMPARE	0x81a01880
	sethi		%hi(FITOS_MASK), %g1
	or		%g1, %lo(FITOS_MASK), %g1
	and		%g3, %g1, %g1
	sethi		%hi(FITOS_COMPARE), %g2
	or		%g2, %lo(FITOS_COMPARE), %g2
	cmp		%g1, %g2
	bne,pn		%xcc, do_fptrap_after_fsr
	nop
	/* Committed to emulation: save %f62, then dispatch into
	 * fitos_table_1 indexed by the rs2 field (bits 4:0).
	 */
	std		%f62, [%g6 + TI_FPREGS + (62 * 4)]
	sethi		%hi(fitos_table_1), %g1
	and		%g3, 0x1f, %g2
	or		%g1, %lo(fitos_table_1), %g1
	sllx		%g2, 2, %g2
	jmpl		%g1 + %g2, %g0
	ba,pt		%xcc, fitos_emul_continue

fitos_table_1:
	fitod		%f0, %f62
	fitod		%f1, %f62
	fitod		%f2, %f62
	fitod		%f3, %f62
	fitod		%f4, %f62
	fitod		%f5, %f62
	fitod		%f6, %f62
	fitod		%f7, %f62
	fitod		%f8, %f62
	fitod		%f9, %f62
	fitod		%f10, %f62
	fitod		%f11, %f62
	fitod		%f12, %f62
	fitod		%f13, %f62
	fitod		%f14, %f62
	fitod		%f15, %f62
	fitod		%f16, %f62
	fitod		%f17, %f62
	fitod		%f18, %f62
	fitod		%f19, %f62
	fitod		%f20, %f62
	fitod		%f21, %f62
	fitod		%f22, %f62
	fitod		%f23, %f62
	fitod		%f24, %f62
	fitod		%f25, %f62
	fitod		%f26, %f62
	fitod		%f27, %f62
	fitod		%f28, %f62
	fitod		%f29, %f62
	fitod		%f30, %f62
	fitod		%f31, %f62

fitos_emul_continue:
	/* Second dispatch into fitos_table_2 indexed by the rd
	 * field (bits 29:25) to store the single-precision result.
	 */
	sethi		%hi(fitos_table_2), %g1
	srl		%g3, 25, %g2
	or		%g1, %lo(fitos_table_2), %g1
	and		%g2, 0x1f, %g2
	sllx		%g2, 2, %g2
	jmpl		%g1 + %g2, %g0
	ba,pt		%xcc, fitos_emul_fini

fitos_table_2:
	fdtos		%f62, %f0
	fdtos		%f62, %f1
	fdtos		%f62, %f2
	fdtos		%f62, %f3
	fdtos		%f62, %f4
	fdtos		%f62, %f5
	fdtos		%f62, %f6
	fdtos		%f62, %f7
	fdtos		%f62, %f8
	fdtos		%f62, %f9
	fdtos		%f62, %f10
	fdtos		%f62, %f11
	fdtos		%f62, %f12
	fdtos		%f62, %f13
	fdtos		%f62, %f14
	fdtos		%f62, %f15
	fdtos		%f62, %f16
	fdtos		%f62, %f17
	fdtos		%f62, %f18
	fdtos		%f62, %f19
	fdtos		%f62, %f20
	fdtos		%f62, %f21
	fdtos		%f62, %f22
	fdtos		%f62, %f23
	fdtos		%f62, %f24
	fdtos		%f62, %f25
	fdtos		%f62, %f26
	fdtos		%f62, %f27
	fdtos		%f62, %f28
	fdtos		%f62, %f29
	fdtos		%f62, %f30
	fdtos		%f62, %f31

fitos_emul_fini:
	/* Restore the scratch register and retire the trap. */
	ldd		[%g6 + TI_FPREGS + (62 * 4)], %f62
	done
461
	/* Generic FP trap entry: save FSR/GSR and the live FP register
	 * blocks (per FPRS_DL/FPRS_DU) into the thread_info save area,
	 * then disable the FPU and enter etrap.  cplus_fptrap_insn_4
	 * is patched at boot for Cheetah+ secondary-context handling.
	 */
	.globl		do_fptrap
	.align		32
do_fptrap:
	stx		%fsr, [%g6 + TI_XFSR]
do_fptrap_after_fsr:
	ldub		[%g6 + TI_FPSAVED], %g3
	rd		%fprs, %g1
	or		%g3, %g1, %g3
	stb		%g3, [%g6 + TI_FPSAVED]
	rd		%gsr, %g3
	stx		%g3, [%g6 + TI_GSR]
	mov		SECONDARY_CONTEXT, %g3
	ldxa		[%g3] ASI_DMMU, %g5
cplus_fptrap_insn_4:
	sethi		%hi(0), %g2
	stxa		%g2, [%g3] ASI_DMMU
	membar		#Sync
	add		%g6, TI_FPREGS, %g2
	andcc		%g1, FPRS_DL, %g0
	be,pn		%icc, 4f
	mov		0x40, %g3
	stda		%f0, [%g2] ASI_BLK_S
	stda		%f16, [%g2 + %g3] ASI_BLK_S
	andcc		%g1, FPRS_DU, %g0
	be,pn		%icc, 5f
4:	add		%g2, 128, %g2
	stda		%f32, [%g2] ASI_BLK_S
	stda		%f48, [%g2 + %g3] ASI_BLK_S
5:	mov		SECONDARY_CONTEXT, %g1
	membar		#Sync
	stxa		%g5, [%g1] ASI_DMMU
	membar		#Sync
	ba,pt		%xcc, etrap
	wr		%g0, 0, %fprs
496
	/* Template instruction: the sethi below is the word copied
	 * over each cplus_fptrap_insn_* patch site at boot.
	 */
cplus_fptrap_1:
	sethi		%hi(CTX_CHEETAH_PLUS_CTX0), %g2

	.globl		cheetah_plus_patch_fpdis
cheetah_plus_patch_fpdis:
	/* We configure the dTLB512_0 for 4MB pages and the
	 * dTLB512_1 for 8K pages when in context zero.
	 *
	 * Copy the template instruction at cplus_fptrap_1 over the
	 * four patch sites, flushing the I-cache after each store.
	 */
	sethi		%hi(cplus_fptrap_1), %o0
	lduw		[%o0 + %lo(cplus_fptrap_1)], %o1

	set		cplus_fptrap_insn_1, %o2
	stw		%o1, [%o2]
	flush		%o2
	set		cplus_fptrap_insn_2, %o2
	stw		%o1, [%o2]
	flush		%o2
	set		cplus_fptrap_insn_3, %o2
	stw		%o1, [%o2]
	flush		%o2
	set		cplus_fptrap_insn_4, %o2
	stw		%o1, [%o2]
	flush		%o2

	retl
	nop
523
	/* The registers for cross calls will be:
	 *
	 * DATA 0: [low 32-bits]  Address of function to call, jmp to this
	 *         [high 32-bits] MMU Context Argument 0, place in %g5
	 * DATA 1: Address Argument 1, place in %g6
	 * DATA 2: Address Argument 2, place in %g7
	 *
	 * With this method we can do most of the cross-call tlb/cache
	 * flushing very quickly.
	 *
	 * Current CPU's IRQ worklist table is locked into %g1,
	 * don't touch.
	 */
	.text
	.align		32
	.globl		do_ivec
do_ivec:
	/* Read interrupt packet data word 0; values >= KERNBASE are
	 * cross-call function addresses, smaller values are ivector
	 * table indices for normal device interrupts.
	 */
	mov		0x40, %g3
	ldxa		[%g3 + %g0] ASI_INTR_R, %g3
	sethi		%hi(KERNBASE), %g4
	cmp		%g3, %g4
	bgeu,pn		%xcc, do_ivec_xcall
	srlx		%g3, 32, %g5
	stxa		%g0, [%g0] ASI_INTR_RECEIVE
	membar		#Sync

	/* Look up the ino's bucket, queue it on this cpu's per-PIL
	 * work chain, and raise the corresponding softint.
	 */
	sethi		%hi(ivector_table), %g2
	sllx		%g3, 5, %g3
	or		%g2, %lo(ivector_table), %g2
	add		%g2, %g3, %g3
	ldx		[%g3 + 0x08], %g2	/* irq_info */
	ldub		[%g3 + 0x04], %g4	/* pil */
	brz,pn		%g2, do_ivec_spurious
	mov		1, %g2

	sllx		%g2, %g4, %g2
	sllx		%g4, 2, %g4
	lduw		[%g6 + %g4], %g5	/* g5 = irq_work(cpu, pil) */
	stw		%g5, [%g3 + 0x00]	/* bucket->irq_chain = g5 */
	stw		%g3, [%g6 + %g4]	/* irq_work(cpu, pil) = bucket */
	wr		%g2, 0x0, %set_softint
	retry
do_ivec_xcall:
	/* Cross call: fetch data words 1 and 2, ack the packet, and
	 * jump to the function address in the low 32 bits of word 0.
	 */
	mov		0x50, %g1

	ldxa		[%g1 + %g0] ASI_INTR_R, %g1
	srl		%g3, 0, %g3
	mov		0x60, %g7
	ldxa		[%g7 + %g0] ASI_INTR_R, %g7
	stxa		%g0, [%g0] ASI_INTR_RECEIVE
	membar		#Sync
	ba,pt		%xcc, 1f
	nop

	.align		32
1:	jmpl		%g3, %g0
	nop

do_ivec_spurious:
	stw		%g3, [%g6 + 0x00]	/* irq_work(cpu, 0) = bucket */
	rdpr		%pstate, %g5

	wrpr		%g5, PSTATE_IG | PSTATE_AG, %pstate
	sethi		%hi(109f), %g7
	ba,pt		%xcc, etrap
109:	or		%g7, %lo(109b), %g7
	call		catch_disabled_ivec
	add		%sp, PTREGS_OFF, %o0
	ba,pt		%xcc, rtrap
	clr		%l6
594
	/* Spill all three alternate global register sets (AG, IG, MG)
	 * into a 0xc0-byte save area, cycling %pstate through each set
	 * with interrupts disabled, then restore the caller's %pstate.
	 */
	.globl		save_alternate_globals
save_alternate_globals: /* %o0 = save_area */
	rdpr		%pstate, %o5
	andn		%o5, PSTATE_IE, %o1
	wrpr		%o1, PSTATE_AG, %pstate
	stx		%g0, [%o0 + 0x00]
	stx		%g1, [%o0 + 0x08]
	stx		%g2, [%o0 + 0x10]
	stx		%g3, [%o0 + 0x18]
	stx		%g4, [%o0 + 0x20]
	stx		%g5, [%o0 + 0x28]
	stx		%g6, [%o0 + 0x30]
	stx		%g7, [%o0 + 0x38]
	wrpr		%o1, PSTATE_IG, %pstate
	stx		%g0, [%o0 + 0x40]
	stx		%g1, [%o0 + 0x48]
	stx		%g2, [%o0 + 0x50]
	stx		%g3, [%o0 + 0x58]
	stx		%g4, [%o0 + 0x60]
	stx		%g5, [%o0 + 0x68]
	stx		%g6, [%o0 + 0x70]
	stx		%g7, [%o0 + 0x78]
	wrpr		%o1, PSTATE_MG, %pstate
	stx		%g0, [%o0 + 0x80]
	stx		%g1, [%o0 + 0x88]
	stx		%g2, [%o0 + 0x90]
	stx		%g3, [%o0 + 0x98]
	stx		%g4, [%o0 + 0xa0]
	stx		%g5, [%o0 + 0xa8]
	stx		%g6, [%o0 + 0xb0]
	stx		%g7, [%o0 + 0xb8]
	wrpr		%o5, 0x0, %pstate
	retl
	nop
629
	/* Inverse of save_alternate_globals: reload the AG, IG and MG
	 * register sets from the save area laid out by that routine.
	 */
	.globl		restore_alternate_globals
restore_alternate_globals: /* %o0 = save_area */
	rdpr		%pstate, %o5
	andn		%o5, PSTATE_IE, %o1
	wrpr		%o1, PSTATE_AG, %pstate
	ldx		[%o0 + 0x00], %g0
	ldx		[%o0 + 0x08], %g1
	ldx		[%o0 + 0x10], %g2
	ldx		[%o0 + 0x18], %g3
	ldx		[%o0 + 0x20], %g4
	ldx		[%o0 + 0x28], %g5
	ldx		[%o0 + 0x30], %g6
	ldx		[%o0 + 0x38], %g7
	wrpr		%o1, PSTATE_IG, %pstate
	ldx		[%o0 + 0x40], %g0
	ldx		[%o0 + 0x48], %g1
	ldx		[%o0 + 0x50], %g2
	ldx		[%o0 + 0x58], %g3
	ldx		[%o0 + 0x60], %g4
	ldx		[%o0 + 0x68], %g5
	ldx		[%o0 + 0x70], %g6
	ldx		[%o0 + 0x78], %g7
	wrpr		%o1, PSTATE_MG, %pstate
	ldx		[%o0 + 0x80], %g0
	ldx		[%o0 + 0x88], %g1
	ldx		[%o0 + 0x90], %g2
	ldx		[%o0 + 0x98], %g3
	ldx		[%o0 + 0xa0], %g4
	ldx		[%o0 + 0xa8], %g5
	ldx		[%o0 + 0xb0], %g6
	ldx		[%o0 + 0xb8], %g7
	wrpr		%o5, 0x0, %pstate
	retl
	nop
664
	/* getcc: extract the icc condition-code nibble from the saved
	 * %tstate in pt_regs (%o0) and store it into the saved %g1.
	 * setcc: inverse — take the nibble from saved %g1 and splice
	 * it into the TSTATE_ICC field of the saved %tstate.
	 */
	.globl		getcc, setcc
getcc:
	ldx		[%o0 + PT_V9_TSTATE], %o1
	srlx		%o1, 32, %o1
	and		%o1, 0xf, %o1
	retl
	stx		%o1, [%o0 + PT_V9_G1]
setcc:
	ldx		[%o0 + PT_V9_TSTATE], %o1
	ldx		[%o0 + PT_V9_G1], %o2
	or		%g0, %ulo(TSTATE_ICC), %o3
	sllx		%o3, 32, %o3
	andn		%o1, %o3, %o1
	sllx		%o2, 32, %o2
	and		%o2, %o3, %o2
	or		%o1, %o2, %o1
	retl
	stx		%o1, [%o0 + PT_V9_TSTATE]
683
	/* User trap dispatch: %g1 holds the registered user handler
	 * address (zero means none — fall into etrap).  Redirects
	 * %tnpc to the user handler in a fresh register window.
	 */
	.globl		utrap, utrap_ill
utrap:	brz,pn		%g1, etrap
	nop
	save		%sp, -128, %sp
	rdpr		%tstate, %l6
	rdpr		%cwp, %l7
	/* Fold the new CWP into the saved %tstate so the window
	 * switch survives the trap return.
	 */
	andn		%l6, TSTATE_CWP, %l6
	wrpr		%l6, %l7, %tstate
	rdpr		%tpc, %l6
	rdpr		%tnpc, %l7
	wrpr		%g1, 0, %tnpc
	done
utrap_ill:
	/* Bad utrap setup: report via bad_trap() and return. */
	call		bad_trap
	add		%sp, PTREGS_OFF, %o0
	ba,pt		%xcc, rtrap
	clr		%l6
701
#ifdef CONFIG_BLK_DEV_FD
	/* Floppy pseudo-DMA hard interrupt: shovel bytes between the
	 * FDC FIFO (accessed via physical-bypass ASI at fdc_status)
	 * and the pdma_vaddr buffer until the transfer completes,
	 * overruns, or the FIFO empties; completion is then handed to
	 * the soft interrupt path (sparc_floppy_irq).
	 */
	.globl		floppy_hardint
floppy_hardint:
	wr		%g0, (1 << 11), %clear_softint
	sethi		%hi(doing_pdma), %g1
	ld		[%g1 + %lo(doing_pdma)], %g2
	brz,pn		%g2, floppy_dosoftint
	sethi		%hi(fdc_status), %g3
	ldx		[%g3 + %lo(fdc_status)], %g3
	sethi		%hi(pdma_vaddr), %g5
	ldx		[%g5 + %lo(pdma_vaddr)], %g4
	sethi		%hi(pdma_size), %g5
	ldx		[%g5 + %lo(pdma_size)], %g5

next_byte:
	/* Poll the FDC main status register: bit7 = RQM (FIFO ready),
	 * bit5 = non-DMA/busy, bit6 = direction (read vs write).
	 */
	lduba		[%g3] ASI_PHYS_BYPASS_EC_E, %g7
	andcc		%g7, 0x80, %g0
	be,pn		%icc, floppy_fifo_emptied
	andcc		%g7, 0x20, %g0
	be,pn		%icc, floppy_overrun
	andcc		%g7, 0x40, %g0
	be,pn		%icc, floppy_write
	sub		%g5, 1, %g5

	/* Read path: fetch a data byte from the FIFO data port
	 * (status + 1) and store it into the buffer.
	 */
	inc		%g3
	lduba		[%g3] ASI_PHYS_BYPASS_EC_E, %g7
	dec		%g3
	orcc		%g0, %g5, %g0
	stb		%g7, [%g4]
	bne,pn		%xcc, next_byte
	add		%g4, 1, %g4

	b,pt		%xcc, floppy_tdone
	nop

floppy_write:
	/* Write path: push the next buffer byte into the FIFO. */
	ldub		[%g4], %g7
	orcc		%g0, %g5, %g0
	inc		%g3
	stba		%g7, [%g3] ASI_PHYS_BYPASS_EC_E
	dec		%g3
	bne,pn		%xcc, next_byte
	add		%g4, 1, %g4

floppy_tdone:
	/* Transfer done: save progress, pulse AUXIO terminal count
	 * (the nop run gives the controller time to latch it), and
	 * fall into the soft interrupt.
	 */
	sethi		%hi(pdma_vaddr), %g1
	stx		%g4, [%g1 + %lo(pdma_vaddr)]
	sethi		%hi(pdma_size), %g1
	stx		%g5, [%g1 + %lo(pdma_size)]
	sethi		%hi(auxio_register), %g1
	ldx		[%g1 + %lo(auxio_register)], %g7
	lduba		[%g7] ASI_PHYS_BYPASS_EC_E, %g5
	or		%g5, AUXIO_AUX1_FTCNT, %g5
/*	andn		%g5, AUXIO_AUX1_MASK, %g5 */
	stba		%g5, [%g7] ASI_PHYS_BYPASS_EC_E
	andn		%g5, AUXIO_AUX1_FTCNT, %g5
/*	andn		%g5, AUXIO_AUX1_MASK, %g5 */

	nop; nop;  nop; nop;  nop; nop;
	nop; nop;  nop; nop;  nop; nop;

	stba		%g5, [%g7] ASI_PHYS_BYPASS_EC_E
	sethi		%hi(doing_pdma), %g1
	b,pt		%xcc, floppy_dosoftint
	st		%g0, [%g1 + %lo(doing_pdma)]

floppy_fifo_emptied:
	/* FIFO drained mid-transfer: save progress and ack the
	 * interrupt at the bucket's ICLR register so the next FDC
	 * interrupt can be delivered; no softint needed yet.
	 */
	sethi		%hi(pdma_vaddr), %g1
	stx		%g4, [%g1 + %lo(pdma_vaddr)]
	sethi		%hi(pdma_size), %g1
	stx		%g5, [%g1 + %lo(pdma_size)]
	sethi		%hi(irq_action), %g1
	or		%g1, %lo(irq_action), %g1
	ldx		[%g1 + (11 << 3)], %g3		! irqaction[floppy_irq]
	ldx		[%g3 + 0x08], %g4		! action->flags>>48==ino
	sethi		%hi(ivector_table), %g3
	srlx		%g4, 48, %g4
	or		%g3, %lo(ivector_table), %g3
	sllx		%g4, 5, %g4
	ldx		[%g3 + %g4], %g4		! &ivector_table[ino]
	ldx		[%g4 + 0x10], %g4		! bucket->iclr
	stwa		%g0, [%g4] ASI_PHYS_BYPASS_EC_E	! ICLR_IDLE
	membar		#Sync				! probably not needed...
	retry

floppy_overrun:
	/* Overrun: record progress, stop pseudo-DMA, let the soft
	 * interrupt handler sort it out.
	 */
	sethi		%hi(pdma_vaddr), %g1
	stx		%g4, [%g1 + %lo(pdma_vaddr)]
	sethi		%hi(pdma_size), %g1
	stx		%g5, [%g1 + %lo(pdma_size)]
	sethi		%hi(doing_pdma), %g1
	st		%g0, [%g1 + %lo(doing_pdma)]

floppy_dosoftint:
	/* Raise PIL to 15, enter etrap_irq, and call the C handler
	 * sparc_floppy_irq(irq=11, dev_id=0, regs).
	 */
	rdpr		%pil, %g2
	wrpr		%g0, 15, %pil
	sethi		%hi(109f), %g7
	b,pt		%xcc, etrap_irq
109:	or		%g7, %lo(109b), %g7

	mov		11, %o0
	mov		0, %o1
	call		sparc_floppy_irq
	add		%sp, PTREGS_OFF, %o2

	b,pt		%xcc, rtrap_irq
	nop

#endif /* CONFIG_BLK_DEV_FD */
811
	/* XXX Here is stuff we still need to write... -DaveM XXX
	 * Stub: NetBSD-compat syscall entry is unimplemented and
	 * simply returns to the caller.
	 */
	.globl		netbsd_syscall
netbsd_syscall:
	retl
	nop
817
	/* These next few routines must be sure to clear the
	 * SFSR FaultValid bit so that the fast tlb data protection
	 * handler does not flush the wrong context and lock up the
	 * box.
	 */
	.globl		__do_data_access_exception
	.globl		__do_data_access_exception_tl1
__do_data_access_exception_tl1:
	/* TL>1 variant: capture SFSR/SFAR, clear FaultValid, and hand
	 * off to the window-fixup path with the trapping PC in %g3.
	 */
	rdpr		%pstate, %g4
	wrpr		%g4, PSTATE_MG|PSTATE_AG, %pstate
	mov		TLB_SFSR, %g3
	mov		DMMU_SFAR, %g5
	ldxa		[%g3] ASI_DMMU, %g4	! Get SFSR
	ldxa		[%g5] ASI_DMMU, %g5	! Get SFAR
	stxa		%g0, [%g3] ASI_DMMU	! Clear SFSR.FaultValid bit
	membar		#Sync
	ba,pt		%xcc, winfix_dax
	rdpr		%tpc, %g3
__do_data_access_exception:
	/* TL=1 variant: capture SFSR/SFAR, clear FaultValid, then call
	 * data_access_exception(regs, sfsr=%l4, sfar=%l5) via etrap.
	 */
	rdpr		%pstate, %g4
	wrpr		%g4, PSTATE_MG|PSTATE_AG, %pstate
	mov		TLB_SFSR, %g3
	mov		DMMU_SFAR, %g5
	ldxa		[%g3] ASI_DMMU, %g4	! Get SFSR
	ldxa		[%g5] ASI_DMMU, %g5	! Get SFAR
	stxa		%g0, [%g3] ASI_DMMU	! Clear SFSR.FaultValid bit
	membar		#Sync
	sethi		%hi(109f), %g7
	ba,pt		%xcc, etrap
109:	or		%g7, %lo(109b), %g7
	mov		%l4, %o1
	mov		%l5, %o2
	call		data_access_exception
	add		%sp, PTREGS_OFF, %o0
	ba,pt		%xcc, rtrap
	clr		%l6
854
	/* Instruction access exception entries.
	 *
	 * NOTE(review): both variants read SFSR/SFAR through ASI_DMMU
	 * (and use the DMMU_SFAR offset) even though this is an IMMU
	 * fault, while the FaultValid clear goes to ASI_IMMU — confirm
	 * whether the D-MMU reads are intentional here.
	 */
	.globl		__do_instruction_access_exception
	.globl		__do_instruction_access_exception_tl1
__do_instruction_access_exception_tl1:
	rdpr		%pstate, %g4
	wrpr		%g4, PSTATE_MG|PSTATE_AG, %pstate
	mov		TLB_SFSR, %g3
	mov		DMMU_SFAR, %g5
	ldxa		[%g3] ASI_DMMU, %g4	! Get SFSR
	ldxa		[%g5] ASI_DMMU, %g5	! Get SFAR
	stxa		%g0, [%g3] ASI_IMMU	! Clear FaultValid bit
	membar		#Sync
	sethi		%hi(109f), %g7
	ba,pt		%xcc, etraptl1
109:	or		%g7, %lo(109b), %g7
	mov		%l4, %o1
	mov		%l5, %o2
	call		instruction_access_exception_tl1
	add		%sp, PTREGS_OFF, %o0
	ba,pt		%xcc, rtrap
	clr		%l6

__do_instruction_access_exception:
	rdpr		%pstate, %g4
	wrpr		%g4, PSTATE_MG|PSTATE_AG, %pstate
	mov		TLB_SFSR, %g3
	mov		DMMU_SFAR, %g5
	ldxa		[%g3] ASI_DMMU, %g4	! Get SFSR
	ldxa		[%g5] ASI_DMMU, %g5	! Get SFAR
	stxa		%g0, [%g3] ASI_IMMU	! Clear FaultValid bit
	membar		#Sync
	sethi		%hi(109f), %g7
	ba,pt		%xcc, etrap
109:	or		%g7, %lo(109b), %g7
	mov		%l4, %o1
	mov		%l5, %o2
	call		instruction_access_exception
	add		%sp, PTREGS_OFF, %o0
	ba,pt		%xcc, rtrap
	clr		%l6
894
	/* This is the trap handler entry point for ECC correctable
	 * errors.  They are corrected, but we listen for the trap
	 * so that the event can be logged.
	 *
	 * Disrupting errors are either:
	 * 1) single-bit ECC errors during UDB reads to system
	 *    memory
	 * 2) data parity errors during write-back events
	 *
	 * As far as I can make out from the manual, the CEE trap
	 * is only for correctable errors during memory read
	 * accesses by the front-end of the processor.
	 *
	 * The code below is only for trap level 1 CEE events,
	 * as it is the only situation where we can safely record
	 * and log.  For trap level >1 we just clear the CE bit
	 * in the AFSR and return.
	 */

	/* Our trap handling infrastructure allows us to preserve
	 * two 64-bit values during etrap for arguments to
	 * subsequent C code.  Therefore we encode the information
	 * as follows:
	 *
	 * value 1) Full 64-bits of AFAR
	 * value 2) Low 33-bits of AFSR, then bits 33-->42
	 *          are UDBL error status and bits 43-->52
	 *          are UDBH error status
	 */
	.align	64
	.globl	cee_trap
cee_trap:
	ldxa	[%g0] ASI_AFSR, %g1		! Read AFSR
	ldxa	[%g0] ASI_AFAR, %g2		! Read AFAR
	sllx	%g1, 31, %g1			! Clear reserved bits
	srlx	%g1, 31, %g1			! in AFSR

	/* NOTE: UltraSparc-I/II have high and low UDB error
	 *       registers, corresponding to the two UDB units
	 *       present on those chips.  UltraSparc-IIi only
	 *       has a single UDB, called "SDB" in the manual.
	 *       For IIi the upper UDB register always reads
	 *       as zero so for our purposes things will just
	 *       work with the checks below.
	 */
	ldxa	[%g0] ASI_UDBL_ERROR_R, %g3	! Read UDB-Low error status
	andcc	%g3, (1 << 8), %g4		! Check CE bit
	sllx	%g3, (64 - 10), %g3		! Clear reserved bits
	srlx	%g3, (64 - 10), %g3		! in UDB-Low error status

	sllx	%g3, (33 + 0), %g3		! Shift up to encoding area
	or	%g1, %g3, %g1			! Or it in
	be,pn	%xcc, 1f			! Branch if CE bit was clear
	 nop
	stxa	%g4, [%g0] ASI_UDB_ERROR_W	! Clear CE sticky bit in UDBL
	membar	#Sync				! Synchronize ASI stores
1:	mov	0x18, %g5			! Addr of UDB-High error status
	ldxa	[%g5] ASI_UDBH_ERROR_R, %g3	! Read it

	andcc	%g3, (1 << 8), %g4		! Check CE bit
	sllx	%g3, (64 - 10), %g3		! Clear reserved bits
	srlx	%g3, (64 - 10), %g3		! in UDB-High error status
	sllx	%g3, (33 + 10), %g3		! Shift up to encoding area
	or	%g1, %g3, %g1			! Or it in
	be,pn	%xcc, 1f			! Branch if CE bit was clear
	 nop
	nop

	stxa	%g4, [%g5] ASI_UDB_ERROR_W	! Clear CE sticky bit in UDBH
	membar	#Sync				! Synchronize ASI stores
1:	mov	1, %g5				! AFSR CE bit is
	sllx	%g5, 20, %g5			! bit 20
	stxa	%g5, [%g0] ASI_AFSR		! Clear CE sticky bit in AFSR
	membar	#Sync				! Synchronize ASI stores
	sllx	%g2, (64 - 41), %g2		! Clear reserved bits
	srlx	%g2, (64 - 41), %g2		! in latched AFAR

	andn	%g2, 0x0f, %g2			! Finish resv bit clearing
	mov	%g1, %g4			! Move AFSR+UDB* into save reg
	mov	%g2, %g5			! Move AFAR into save reg
	rdpr	%pil, %g2
	wrpr	%g0, 15, %pil
	ba,pt	%xcc, etrap_irq
	 rd	%pc, %g7
	mov	%l4, %o0

	mov	%l5, %o1
	call	cee_log
	 add	%sp, PTREGS_OFF, %o2
	ba,a,pt	%xcc, rtrap_irq
985
	/* Capture I/D/E-cache state into per-cpu error scoreboard.
	 *
	 * %g1:	(TL>=0) ? 1 : 0
	 * %g2:	scratch
	 * %g3:	scratch
	 * %g4:	AFSR
	 * %g5:	AFAR
	 * %g6:	current thread ptr
	 * %g7:	scratch
	 *
	 * The macro derives a per-cpu log slot from the Safari (or,
	 * on Jalapeno, JBUS) config register, records AFSR/AFAR if
	 * the slot is free (== -1), then dumps matching D-cache,
	 * I-cache and E-cache tag/data state into the slot.  Kept
	 * as a macro (not a subroutine) — no comment-only lines may
	 * be inserted inside the backslash-continued body.
	 */
#define CHEETAH_LOG_ERROR						\
	/* Put "TL1" software bit into AFSR. */				\
	and		%g1, 0x1, %g1;					\
	sllx		%g1, 63, %g2;					\
	or		%g4, %g2, %g4;					\
	/* Get log entry pointer for this cpu at this trap level. */	\
	BRANCH_IF_JALAPENO(g2,g3,50f)					\
	ldxa		[%g0] ASI_SAFARI_CONFIG, %g2;			\
	srlx		%g2, 17, %g2;					\
	ba,pt		%xcc, 60f;					\
	 and		%g2, 0x3ff, %g2;				\
50:	ldxa		[%g0] ASI_JBUS_CONFIG, %g2;			\
	srlx		%g2, 17, %g2;					\
	and		%g2, 0x1f, %g2;					\
60:	sllx		%g2, 9, %g2;					\
	sethi		%hi(cheetah_error_log), %g3;			\
	ldx		[%g3 + %lo(cheetah_error_log)], %g3;		\
	brz,pn		%g3, 80f;					\
	 nop;								\
	add		%g3, %g2, %g3;					\
	sllx		%g1, 8, %g1;					\
	add		%g3, %g1, %g1;					\
	/* %g1 holds pointer to the top of the logging scoreboard */	\
	ldx		[%g1 + 0x0], %g7;				\
	cmp		%g7, -1;					\
	bne,pn		%xcc, 80f;					\
	 nop;								\
	stx		%g4, [%g1 + 0x0];				\
	stx		%g5, [%g1 + 0x8];				\
	add		%g1, 0x10, %g1;					\
	/* %g1 now points to D-cache logging area */			\
	set		0x3ff8, %g2;	/* DC_addr mask		*/	\
	and		%g5, %g2, %g2;	/* DC_addr bits of AFAR	*/	\
	srlx		%g5, 12, %g3;					\
	or		%g3, 1, %g3;	/* PHYS tag + valid	*/	\
10:	ldxa		[%g2] ASI_DCACHE_TAG, %g7;			\
	cmp		%g3, %g7;	/* TAG match?		*/	\
	bne,pt		%xcc, 13f;					\
	 nop;								\
	/* Yep, what we want, capture state. */				\
	stx		%g2, [%g1 + 0x20];				\
	stx		%g7, [%g1 + 0x28];				\
	/* A membar Sync is required before and after utag access. */	\
	membar		#Sync;						\
	ldxa		[%g2] ASI_DCACHE_UTAG, %g7;			\
	membar		#Sync;						\
	stx		%g7, [%g1 + 0x30];				\
	ldxa		[%g2] ASI_DCACHE_SNOOP_TAG, %g7;		\
	stx		%g7, [%g1 + 0x38];				\
	clr		%g3;						\
12:	ldxa		[%g2 + %g3] ASI_DCACHE_DATA, %g7;		\
	stx		%g7, [%g1];					\
	add		%g3, (1 << 5), %g3;				\
	cmp		%g3, (4 << 5);					\
	bl,pt		%xcc, 12b;					\
	 add		%g1, 0x8, %g1;					\
	ba,pt		%xcc, 20f;					\
	 add		%g1, 0x20, %g1;					\
13:	sethi		%hi(1 << 14), %g7;				\
	add		%g2, %g7, %g2;					\
	srlx		%g2, 14, %g7;					\
	cmp		%g7, 4;						\
	bl,pt		%xcc, 10b;					\
	 nop;								\
	add		%g1, 0x40, %g1;					\
20:	/* %g1 now points to I-cache logging area */			\
	set		0x1fe0, %g2;	/* IC_addr mask		*/	\
	and		%g5, %g2, %g2;	/* IC_addr bits of AFAR	*/	\
	sllx		%g2, 1, %g2;	/* IC_addr[13:6]==VA[12:5] */	\
	srlx		%g5, (13 - 8), %g3; /* Make PTAG */		\
	andn		%g3, 0xff, %g3;	/* Mask off undefined bits */	\
21:	ldxa		[%g2] ASI_IC_TAG, %g7;				\
	andn		%g7, 0xff, %g7;					\
	cmp		%g3, %g7;					\
	bne,pt		%xcc, 23f;					\
	 nop;								\
	/* Yep, what we want, capture state. */				\
	stx		%g2, [%g1 + 0x40];				\
	stx		%g7, [%g1 + 0x48];				\
	add		%g2, (1 << 3), %g2;				\
	ldxa		[%g2] ASI_IC_TAG, %g7;				\
	add		%g2, (1 << 3), %g2;				\
	stx		%g7, [%g1 + 0x50];				\
	ldxa		[%g2] ASI_IC_TAG, %g7;				\
	add		%g2, (1 << 3), %g2;				\
	stx		%g7, [%g1 + 0x60];				\
	ldxa		[%g2] ASI_IC_TAG, %g7;				\
	stx		%g7, [%g1 + 0x68];				\
	sub		%g2, (3 << 3), %g2;				\
	ldxa		[%g2] ASI_IC_STAG, %g7;				\
	stx		%g7, [%g1 + 0x58];				\
	clr		%g3;						\
	srlx		%g2, 2, %g2;					\
22:	ldxa		[%g2 + %g3] ASI_IC_INSTR, %g7;			\
	stx		%g7, [%g1];					\
	add		%g3, (1 << 3), %g3;				\
	cmp		%g3, (8 << 3);					\
	bl,pt		%xcc, 22b;					\
	 add		%g1, 0x8, %g1;					\
	ba,pt		%xcc, 30f;					\
	 add		%g1, 0x30, %g1;					\
23:	sethi		%hi(1 << 14), %g7;				\
	add		%g2, %g7, %g2;					\
	srlx		%g2, 14, %g7;					\
	cmp		%g7, 4;						\
	bl,pt		%xcc, 21b;					\
	 nop;								\
	add		%g1, 0x70, %g1;					\
30:	/* %g1 now points to E-cache logging area */			\
	andn		%g5, (32 - 1), %g2; /* E-cache subblock */	\
	stx		%g2, [%g1 + 0x20];				\
	ldxa		[%g2] ASI_EC_TAG_DATA, %g7;			\
	stx		%g7, [%g1 + 0x28];				\
	ldxa		[%g2] ASI_EC_R, %g0;				\
	clr		%g3;						\
31:	ldxa		[%g3] ASI_EC_DATA, %g7;				\
	stx		%g7, [%g1 + %g3];				\
	add		%g3, 0x8, %g3;					\
	cmp		%g3, 0x20;					\
	bl,pt		%xcc, 31b;					\
	 nop;								\
80:	/* DONE */
1118
	/* These get patched into the trap table at boot time
	 * once we know we have a cheetah processor.
	 *
	 * Each vector disables D/I caches (I only for CEE) via the
	 * DCU control register before jumping to the real handler;
	 * %g1 carries the TL indicator (0 = vector, 1 = _tl1 vector).
	 */
	.globl		cheetah_fecc_trap_vector, cheetah_fecc_trap_vector_tl1
cheetah_fecc_trap_vector:
	membar		#Sync
	ldxa		[%g0] ASI_DCU_CONTROL_REG, %g1
	andn		%g1, DCU_DC | DCU_IC, %g1
	stxa		%g1, [%g0] ASI_DCU_CONTROL_REG
	membar		#Sync
	sethi		%hi(cheetah_fast_ecc), %g2
	jmpl		%g2 + %lo(cheetah_fast_ecc), %g0
	 mov		0, %g1
cheetah_fecc_trap_vector_tl1:
	membar		#Sync
	ldxa		[%g0] ASI_DCU_CONTROL_REG, %g1
	andn		%g1, DCU_DC | DCU_IC, %g1
	stxa		%g1, [%g0] ASI_DCU_CONTROL_REG
	membar		#Sync
	sethi		%hi(cheetah_fast_ecc), %g2
	jmpl		%g2 + %lo(cheetah_fast_ecc), %g0
	 mov		1, %g1
	.globl	cheetah_cee_trap_vector, cheetah_cee_trap_vector_tl1
cheetah_cee_trap_vector:
	membar		#Sync
	ldxa		[%g0] ASI_DCU_CONTROL_REG, %g1
	andn		%g1, DCU_IC, %g1
	stxa		%g1, [%g0] ASI_DCU_CONTROL_REG
	membar		#Sync
	sethi		%hi(cheetah_cee), %g2
	jmpl		%g2 + %lo(cheetah_cee), %g0
	 mov		0, %g1
cheetah_cee_trap_vector_tl1:
	membar		#Sync
	ldxa		[%g0] ASI_DCU_CONTROL_REG, %g1
	andn		%g1, DCU_IC, %g1
	stxa		%g1, [%g0] ASI_DCU_CONTROL_REG
	membar		#Sync
	sethi		%hi(cheetah_cee), %g2
	jmpl		%g2 + %lo(cheetah_cee), %g0
	 mov		1, %g1
	.globl	cheetah_deferred_trap_vector, cheetah_deferred_trap_vector_tl1
cheetah_deferred_trap_vector:
	membar		#Sync
	ldxa		[%g0] ASI_DCU_CONTROL_REG, %g1;
	andn		%g1, DCU_DC | DCU_IC, %g1;
	stxa		%g1, [%g0] ASI_DCU_CONTROL_REG;
	membar		#Sync;
	sethi		%hi(cheetah_deferred_trap), %g2
	jmpl		%g2 + %lo(cheetah_deferred_trap), %g0
	 mov		0, %g1
cheetah_deferred_trap_vector_tl1:
	membar		#Sync;
	ldxa		[%g0] ASI_DCU_CONTROL_REG, %g1;
	andn		%g1, DCU_DC | DCU_IC, %g1;
	stxa		%g1, [%g0] ASI_DCU_CONTROL_REG;
	membar		#Sync;
	sethi		%hi(cheetah_deferred_trap), %g2
	jmpl		%g2 + %lo(cheetah_deferred_trap), %g0
	 mov		1, %g1
1179
1180 /* Cheetah+ specific traps. These are for the new I/D cache parity
1181 * error traps. The first argument to cheetah_plus_parity_handler
1182 * is encoded as follows:
1183 *
1184 * Bit0: 0=dcache,1=icache
1185 * Bit1: 0=recoverable,1=unrecoverable
1186 */
1187 .globl cheetah_plus_dcpe_trap_vector, cheetah_plus_dcpe_trap_vector_tl1
 /* TL0 D-cache parity error: jump out of the trap table to the
  * full handler below (the trailing nops pad the trap-table slot).
  */
1188cheetah_plus_dcpe_trap_vector:
1189 membar #Sync
1190 sethi %hi(do_cheetah_plus_data_parity), %g7
1191 jmpl %g7 + %lo(do_cheetah_plus_data_parity), %g0
1192 nop
1193 nop
1194 nop
1195 nop
1196 nop
1197
 /* etrap, then call the C handler with arg0 = 0x0
  * (dcache, recoverable per the encoding above), arg1 = pt_regs.
  */
1198do_cheetah_plus_data_parity:
1199 ba,pt %xcc, etrap
1200 rd %pc, %g7
1201 mov 0x0, %o0
1202 call cheetah_plus_parity_error
1203 add %sp, PTREGS_OFF, %o1
1204 ba,pt %xcc, rtrap
1205 clr %l6
1206
 /* TL1 variant: switch to the interrupt globals (PSTATE_IG) before
  * jumping to do_dcpe_tl1, which scans for nested IG usage.
  */
1207cheetah_plus_dcpe_trap_vector_tl1:
1208 membar #Sync
1209 wrpr PSTATE_IG | PSTATE_PEF | PSTATE_PRIV, %pstate
1210 sethi %hi(do_dcpe_tl1), %g3
1211 jmpl %g3 + %lo(do_dcpe_tl1), %g0
1212 nop
1213 nop
1214 nop
1215 nop
1216
1217 .globl cheetah_plus_icpe_trap_vector, cheetah_plus_icpe_trap_vector_tl1
 /* TL0 I-cache parity error: same shape as the D-cache vector. */
1218cheetah_plus_icpe_trap_vector:
1219 membar #Sync
1220 sethi %hi(do_cheetah_plus_insn_parity), %g7
1221 jmpl %g7 + %lo(do_cheetah_plus_insn_parity), %g0
1222 nop
1223 nop
1224 nop
1225 nop
1226 nop
1227
 /* etrap, then C handler with arg0 = 0x1 (icache, recoverable). */
1228do_cheetah_plus_insn_parity:
1229 ba,pt %xcc, etrap
1230 rd %pc, %g7
1231 mov 0x1, %o0
1232 call cheetah_plus_parity_error
1233 add %sp, PTREGS_OFF, %o1
1234 ba,pt %xcc, rtrap
1235 clr %l6
1236
1237cheetah_plus_icpe_trap_vector_tl1:
1238 membar #Sync
1239 wrpr PSTATE_IG | PSTATE_PEF | PSTATE_PRIV, %pstate
1240 sethi %hi(do_icpe_tl1), %g3
1241 jmpl %g3 + %lo(do_icpe_tl1), %g0
1242 nop
1243 nop
1244 nop
1245 nop
1246
1247 /* If we take one of these traps when tl >= 1, then we
1248 * jump to interrupt globals. If some trap level above us
1249 * was also using interrupt globals, we cannot recover.
1250 * We may use all interrupt global registers except %g6.
1251 */
1252 .globl do_dcpe_tl1, do_icpe_tl1
 /* Walk every trap level 1..%tl and test the TSTATE_IG bit of its
  * saved TSTATE; if any level was already on interrupt globals the
  * error is unrecoverable and we go to do_dcpe_tl1_fatal.
  */
1253do_dcpe_tl1:
1254 rdpr %tl, %g1 ! Save original trap level
1255 mov 1, %g2 ! Setup TSTATE checking loop
1256 sethi %hi(TSTATE_IG), %g3 ! TSTATE mask bit
12571: wrpr %g2, %tl ! Set trap level to check
1258 rdpr %tstate, %g4 ! Read TSTATE for this level
1259 andcc %g4, %g3, %g0 ! Interrupt globals in use?
1260 bne,a,pn %xcc, do_dcpe_tl1_fatal ! Yep, irrecoverable
1261 wrpr %g1, %tl ! Restore original trap level
1262 add %g2, 1, %g2 ! Next trap level
1263 cmp %g2, %g1 ! Hit them all yet?
1264 ble,pt %icc, 1b ! Not yet
1265 nop
1266 wrpr %g1, %tl ! Restore original trap level
1267do_dcpe_tl1_nonfatal: /* Ok we may use interrupt globals safely. */
1268 /* Reset D-cache parity */
 /* Walk the D-cache from the top line down, rewriting each line's
  * utag and zeroing all four 64-bit data words so the parity bits
  * are regenerated.
  */
1269 sethi %hi(1 << 16), %g1 ! D-cache size
1270 mov (1 << 5), %g2 ! D-cache line size
1271 sub %g1, %g2, %g1 ! Move down 1 cacheline
12721: srl %g1, 14, %g3 ! Compute UTAG
1273 membar #Sync
1274 stxa %g3, [%g1] ASI_DCACHE_UTAG
1275 membar #Sync
1276 sub %g2, 8, %g3 ! 64-bit data word within line
12772: membar #Sync
1278 stxa %g0, [%g1 + %g3] ASI_DCACHE_DATA
1279 membar #Sync
1280 subcc %g3, 8, %g3 ! Next 64-bit data word
1281 bge,pt %icc, 2b
1282 nop
1283 subcc %g1, %g2, %g1 ! Next cacheline
1284 bge,pt %icc, 1b
1285 nop
1286 ba,pt %xcc, dcpe_icpe_tl1_common
1287 nop
1288
 /* Unrecoverable: etraptl1 and report with arg0 = 0x2
  * (dcache | unrecoverable), then rtrap.
  */
1289do_dcpe_tl1_fatal:
1290 sethi %hi(1f), %g7
1291 ba,pt %xcc, etraptl1
12921: or %g7, %lo(1b), %g7
1293 mov 0x2, %o0
1294 call cheetah_plus_parity_error
1295 add %sp, PTREGS_OFF, %o1
1296 ba,pt %xcc, rtrap
1297 clr %l6
1298
 /* TL1 I-cache parity error.  Same nested-interrupt-globals scan as
  * do_dcpe_tl1 above; on conflict we take the fatal path.
  */
1299do_icpe_tl1:
1300 rdpr %tl, %g1 ! Save original trap level
1301 mov 1, %g2 ! Setup TSTATE checking loop
1302 sethi %hi(TSTATE_IG), %g3 ! TSTATE mask bit
13031: wrpr %g2, %tl ! Set trap level to check
1304 rdpr %tstate, %g4 ! Read TSTATE for this level
1305 andcc %g4, %g3, %g0 ! Interrupt globals in use?
1306 bne,a,pn %xcc, do_icpe_tl1_fatal ! Yep, irrecoverable
1307 wrpr %g1, %tl ! Restore original trap level
1308 add %g2, 1, %g2 ! Next trap level
1309 cmp %g2, %g1 ! Hit them all yet?
1310 ble,pt %icc, 1b ! Not yet
1311 nop
1312 wrpr %g1, %tl ! Restore original trap level
1313do_icpe_tl1_nonfatal: /* Ok we may use interrupt globals safely. */
1314 /* Flush I-cache */
 /* Invalidate every I-cache line by zeroing its tag via ASI_IC_TAG;
  * the (2 << 3) bits select the tag field within the line address.
  */
1315 sethi %hi(1 << 15), %g1 ! I-cache size
1316 mov (1 << 5), %g2 ! I-cache line size
1317 sub %g1, %g2, %g1
13181: or %g1, (2 << 3), %g3
1319 stxa %g0, [%g3] ASI_IC_TAG
1320 membar #Sync
1321 subcc %g1, %g2, %g1
1322 bge,pt %icc, 1b
1323 nop
1324 ba,pt %xcc, dcpe_icpe_tl1_common
1325 nop
1326
 /* Unrecoverable: report with arg0 = 0x3 (icache | unrecoverable). */
1327do_icpe_tl1_fatal:
1328 sethi %hi(1f), %g7
1329 ba,pt %xcc, etraptl1
13301: or %g7, %lo(1b), %g7
1331 mov 0x3, %o0
1332 call cheetah_plus_parity_error
1333 add %sp, PTREGS_OFF, %o1
1334 ba,pt %xcc, rtrap
1335 clr %l6
1336
1337dcpe_icpe_tl1_common:
1338 /* Flush D-cache, re-enable D/I caches in DCU and finally
1339 * retry the trapping instruction.
1340 */
 /* Shared tail for the non-fatal TL1 parity paths: invalidate all
  * D-cache tags, set DCU_DC|DCU_IC back on (the trap vectors cleared
  * them), then retry re-executes the faulting instruction.
  */
1341 sethi %hi(1 << 16), %g1 ! D-cache size
1342 mov (1 << 5), %g2 ! D-cache line size
1343 sub %g1, %g2, %g1
13441: stxa %g0, [%g1] ASI_DCACHE_TAG
1345 membar #Sync
1346 subcc %g1, %g2, %g1
1347 bge,pt %icc, 1b
1348 nop
1349 ldxa [%g0] ASI_DCU_CONTROL_REG, %g1
1350 or %g1, (DCU_DC | DCU_IC), %g1
1351 stxa %g1, [%g0] ASI_DCU_CONTROL_REG
1352 membar #Sync
1353 retry
1354
1355 /* Cheetah FECC trap handling, we get here from tl{0,1}_fecc
1356 * in the trap table. That code has done a memory barrier
1357 * and has disabled both the I-cache and D-cache in the DCU
1358 * control register. The I-cache is disabled so that we may
1359 * capture the corrupted cache line, and the D-cache is disabled
1360 * because corrupt data may have been placed there and we don't
1361 * want to reference it.
1362 *
1363 * %g1 is one if this trap occurred at %tl >= 1.
1364 *
1365 * Next, we turn off error reporting so that we don't recurse.
1366 */
1367 .globl cheetah_fast_ecc
1368cheetah_fast_ecc:
 /* Mask both correctable and uncorrectable error reporting while
  * we log, so a fresh error cannot re-trap us here.
  */
1369 ldxa [%g0] ASI_ESTATE_ERROR_EN, %g2
1370 andn %g2, ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN, %g2
1371 stxa %g2, [%g0] ASI_ESTATE_ERROR_EN
1372 membar #Sync
1373
1374 /* Fetch and clear AFSR/AFAR */
 /* AFSR is write-one-to-clear: storing the value we just read
  * acknowledges the error bits.  %g4/%g5 carry AFSR/AFAR into
  * CHEETAH_LOG_ERROR and then (as %l4/%l5 after etrap) to C code.
  */
1375 ldxa [%g0] ASI_AFSR, %g4
1376 ldxa [%g0] ASI_AFAR, %g5
1377 stxa %g4, [%g0] ASI_AFSR
1378 membar #Sync
1379
1380 CHEETAH_LOG_ERROR
1381
 /* Raise PIL to 15, enter the kernel via etrap_irq, and hand
  * pt_regs + AFSR + AFAR to the C handler.
  */
1382 rdpr %pil, %g2
1383 wrpr %g0, 15, %pil
1384 ba,pt %xcc, etrap_irq
1385 rd %pc, %g7
1386 mov %l4, %o1
1387 mov %l5, %o2
1388 call cheetah_fecc_handler
1389 add %sp, PTREGS_OFF, %o0
1390 ba,a,pt %xcc, rtrap_irq
1391
1392 /* Our caller has disabled I-cache and performed membar Sync. */
1393 .globl cheetah_cee
1394cheetah_cee:
 /* Correctable ECC: only mask further *correctable* error reports
  * (CEEN); uncorrectable reporting stays armed.
  */
1395 ldxa [%g0] ASI_ESTATE_ERROR_EN, %g2
1396 andn %g2, ESTATE_ERROR_CEEN, %g2
1397 stxa %g2, [%g0] ASI_ESTATE_ERROR_EN
1398 membar #Sync
1399
1400 /* Fetch and clear AFSR/AFAR */
1401 ldxa [%g0] ASI_AFSR, %g4
1402 ldxa [%g0] ASI_AFAR, %g5
1403 stxa %g4, [%g0] ASI_AFSR
1404 membar #Sync
1405
1406 CHEETAH_LOG_ERROR
1407
 /* Same entry convention as cheetah_fast_ecc: PIL 15, etrap_irq,
  * then cheetah_cee_handler(pt_regs, afsr, afar).
  */
1408 rdpr %pil, %g2
1409 wrpr %g0, 15, %pil
1410 ba,pt %xcc, etrap_irq
1411 rd %pc, %g7
1412 mov %l4, %o1
1413 mov %l5, %o2
1414 call cheetah_cee_handler
1415 add %sp, PTREGS_OFF, %o0
1416 ba,a,pt %xcc, rtrap_irq
1417
1418 /* Our caller has disabled I-cache+D-cache and performed membar Sync. */
1419 .globl cheetah_deferred_trap
1420cheetah_deferred_trap:
 /* Deferred error: mask both correctable and uncorrectable error
  * reporting, snapshot and clear AFSR/AFAR, log, then dispatch to
  * cheetah_deferred_handler(pt_regs, afsr, afar) at PIL 15.
  */
1421 ldxa [%g0] ASI_ESTATE_ERROR_EN, %g2
1422 andn %g2, ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN, %g2
1423 stxa %g2, [%g0] ASI_ESTATE_ERROR_EN
1424 membar #Sync
1425
1426 /* Fetch and clear AFSR/AFAR */
1427 ldxa [%g0] ASI_AFSR, %g4
1428 ldxa [%g0] ASI_AFAR, %g5
1429 stxa %g4, [%g0] ASI_AFSR
1430 membar #Sync
1431
1432 CHEETAH_LOG_ERROR
1433
1434 rdpr %pil, %g2
1435 wrpr %g0, 15, %pil
1436 ba,pt %xcc, etrap_irq
1437 rd %pc, %g7
1438 mov %l4, %o1
1439 mov %l5, %o2
1440 call cheetah_deferred_handler
1441 add %sp, PTREGS_OFF, %o0
1442 ba,a,pt %xcc, rtrap_irq
1443
1444 .globl __do_privact
 /* Privileged-action trap: clear the D-MMU fault status so the
  * FaultValid bit is dropped, then etrap and call do_privact(pt_regs).
  */
1445__do_privact:
1446 mov TLB_SFSR, %g3
1447 stxa %g0, [%g3] ASI_DMMU ! Clear FaultValid bit
1448 membar #Sync
1449 sethi %hi(109f), %g7
1450 ba,pt %xcc, etrap
1451109: or %g7, %lo(109b), %g7
1452 call do_privact
1453 add %sp, PTREGS_OFF, %o0
1454 ba,pt %xcc, rtrap
1455 clr %l6
1456
1457 .globl do_mna
 /* Memory-address-not-aligned trap.  The %tl > 1 comparison result
  * (set here, consumed by the bgu below) routes window-fill/spill
  * faults to winfix_mna instead of the normal C path.
  */
1458do_mna:
1459 rdpr %tl, %g3
1460 cmp %g3, 1
1461
1462 /* Setup %g4/%g5 now as they are used in the
1463 * winfixup code.
1464 */
 /* %g4 = faulting virtual address (SFAR), %g5 = fault status (SFSR);
  * writing 0 to SFSR clears the FaultValid bit.
  */
1465 mov TLB_SFSR, %g3
1466 mov DMMU_SFAR, %g4
1467 ldxa [%g4] ASI_DMMU, %g4
1468 ldxa [%g3] ASI_DMMU, %g5
1469 stxa %g0, [%g3] ASI_DMMU ! Clear FaultValid bit
1470 membar #Sync
1471 bgu,pn %icc, winfix_mna
1472 rdpr %tpc, %g3
1473
 /* TL1 case handled above; here: etrap then
  * mem_address_unaligned(pt_regs, sfar, sfsr).
  */
14741: sethi %hi(109f), %g7
1475 ba,pt %xcc, etrap
1476109: or %g7, %lo(109b), %g7
1477 mov %l4, %o1
1478 mov %l5, %o2
1479 call mem_address_unaligned
1480 add %sp, PTREGS_OFF, %o0
1481 ba,pt %xcc, rtrap
1482 clr %l6
1483
1484 .globl do_lddfmna
 /* Unaligned LDDF (load double FP): capture SFSR into %g5, clear the
  * FaultValid bit, capture SFAR into %g4, then etrap and call
  * handle_lddfmna(pt_regs, sfar, sfsr).
  */
1485do_lddfmna:
1486 sethi %hi(109f), %g7
1487 mov TLB_SFSR, %g4
1488 ldxa [%g4] ASI_DMMU, %g5
1489 stxa %g0, [%g4] ASI_DMMU ! Clear FaultValid bit
1490 membar #Sync
1491 mov DMMU_SFAR, %g4
1492 ldxa [%g4] ASI_DMMU, %g4
1493 ba,pt %xcc, etrap
1494109: or %g7, %lo(109b), %g7
1495 mov %l4, %o1
1496 mov %l5, %o2
1497 call handle_lddfmna
1498 add %sp, PTREGS_OFF, %o0
1499 ba,pt %xcc, rtrap
1500 clr %l6
1501
1502 .globl do_stdfmna
 /* Unaligned STDF (store double FP): identical shape to do_lddfmna
  * but dispatches to handle_stdfmna.
  */
1503do_stdfmna:
1504 sethi %hi(109f), %g7
1505 mov TLB_SFSR, %g4
1506 ldxa [%g4] ASI_DMMU, %g5
1507 stxa %g0, [%g4] ASI_DMMU ! Clear FaultValid bit
1508 membar #Sync
1509 mov DMMU_SFAR, %g4
1510 ldxa [%g4] ASI_DMMU, %g4
1511 ba,pt %xcc, etrap
1512109: or %g7, %lo(109b), %g7
1513 mov %l4, %o1
1514 mov %l5, %o2
1515 call handle_stdfmna
1516 add %sp, PTREGS_OFF, %o0
1517 ba,pt %xcc, rtrap
1518 clr %l6
1519
1520 .globl breakpoint_trap
 /* Breakpoint trap: already etrap'ed by the caller (pt_regs on the
  * stack); just invoke sparc_breakpoint(pt_regs) and return via rtrap.
  */
1521breakpoint_trap:
1522 call sparc_breakpoint
1523 add %sp, PTREGS_OFF, %o0
1524 ba,pt %xcc, rtrap
1525 nop
1526
1527#if defined(CONFIG_SUNOS_EMUL) || defined(CONFIG_SOLARIS_EMUL) || \
1528 defined(CONFIG_SOLARIS_EMUL_MODULE)
1529 /* SunOS uses syscall zero as the 'indirect syscall' it looks
1530 * like indir_syscall(scall_num, arg0, arg1, arg2...); etc.
1531 * This is complete brain damage.
1532 */
1533 .globl sunos_indir
 /* Validate the requested syscall number (out-of-range -> sunos_nosys),
  * look up the 32-bit handler pointer in sunos_sys_table, shift the
  * argument registers down by one, and tail-call the handler with the
  * original return address restored into %o7.
  */
1534sunos_indir:
1535 srl %o0, 0, %o0
1536 mov %o7, %l4
1537 cmp %o0, NR_SYSCALLS
1538 blu,a,pt %icc, 1f
1539 sll %o0, 0x2, %o0
1540 sethi %hi(sunos_nosys), %l6
1541 b,pt %xcc, 2f
1542 or %l6, %lo(sunos_nosys), %l6
15431: sethi %hi(sunos_sys_table), %l7
1544 or %l7, %lo(sunos_sys_table), %l7
1545 lduw [%l7 + %o0], %l6
15462: mov %o1, %o0
1547 mov %o2, %o1
1548 mov %o3, %o2
1549 mov %o4, %o3
1550 mov %o5, %o4
1551 call %l6
1552 mov %l4, %o7
1553
1554 .globl sunos_getpid
 /* SunOS getpid() returns pid in %o0 and ppid in %o1: stash the
  * ppid result in the saved %i1 slot, pid in %i0, then join the
  * normal syscall return path.
  */
1555sunos_getpid:
1556 call sys_getppid
1557 nop
1558 call sys_getpid
1559 stx %o0, [%sp + PTREGS_OFF + PT_V9_I1]
1560 b,pt %xcc, ret_sys_call
1561 stx %o0, [%sp + PTREGS_OFF + PT_V9_I0]
1562
1563 /* SunOS getuid() returns uid in %o0 and euid in %o1 */
1564 .globl sunos_getuid
1565sunos_getuid:
1566 call sys32_geteuid16
1567 nop
1568 call sys32_getuid16
1569 stx %o0, [%sp + PTREGS_OFF + PT_V9_I1]
1570 b,pt %xcc, ret_sys_call
1571 stx %o0, [%sp + PTREGS_OFF + PT_V9_I0]
1572
1573 /* SunOS getgid() returns gid in %o0 and egid in %o1 */
1574 .globl sunos_getgid
1575sunos_getgid:
1576 call sys32_getegid16
1577 nop
1578 call sys32_getgid16
1579 stx %o0, [%sp + PTREGS_OFF + PT_V9_I1]
1580 b,pt %xcc, ret_sys_call
1581 stx %o0, [%sp + PTREGS_OFF + PT_V9_I0]
1582#endif
1583
1584 /* SunOS's execv() call only specifies the argv argument, the
1585 * environment settings are the same as the calling processes.
1586 */
 /* Note: each .globl must name the label it precedes.  Previously
  * ".globl sunos_execv" stood here (exporting a symbol that does not
  * exist when CONFIG_COMPAT is off) while ".globl sys_execve" was
  * inside the #ifdef, leaving sys_execve non-global — and thus
  * unreachable from the syscall table object — on non-COMPAT builds.
  */
1587 .globl sys_execve
 /* Native 64-bit execve(): tail-jump to sparc_execve(pt_regs). */
1588sys_execve:
1589 sethi %hi(sparc_execve), %g1
1590 ba,pt %xcc, execve_merge
1591 or %g1, %lo(sparc_execve), %g1
1592#ifdef CONFIG_COMPAT
1593 .globl sunos_execv
 /* SunOS execv() has no envp: zero the saved %i2 argument and fall
  * through into sys32_execve.
  */
1594sunos_execv:
1595 stx %g0, [%sp + PTREGS_OFF + PT_V9_I2]
1596 .globl sys32_execve
1597sys32_execve:
1598 sethi %hi(sparc32_execve), %g1
1599 or %g1, %lo(sparc32_execve), %g1
1600#endif
 /* Common tail: flush register windows to the stack, then jump to the
  * chosen C implementation with pt_regs in %o0 (set in the delay slot).
  */
1601execve_merge:
1602 flushw
1603 jmpl %g1, %g0
1604 add %sp, PTREGS_OFF, %o0
1605
1606 .globl sys_pipe, sys_sigpause, sys_nis_syscall
1607 .globl sys_sigsuspend, sys_rt_sigsuspend
1608 .globl sys_rt_sigreturn
1609 .globl sys_ptrace
1610 .globl sys_sigaltstack
 /* Small syscall stubs.  Most just tail-branch to a C implementation
  * with pt_regs (or the frame pointer) loaded in the delay slot.
  */
1611 .align 32
1612sys_pipe: ba,pt %xcc, sparc_pipe
1613 add %sp, PTREGS_OFF, %o0
1614sys_nis_syscall:ba,pt %xcc, c_sys_nis_syscall
1615 add %sp, PTREGS_OFF, %o0
1616sys_memory_ordering:
1617 ba,pt %xcc, sparc_memory_ordering
1618 add %sp, PTREGS_OFF, %o1
1619sys_sigaltstack:ba,pt %xcc, do_sigaltstack
1620 add %i6, STACK_BIAS, %o2
1621#ifdef CONFIG_COMPAT
1622 .globl sys32_sigstack
1623sys32_sigstack: ba,pt %xcc, do_sys32_sigstack
1624 mov %i6, %o2
1625 .globl sys32_sigaltstack
1626sys32_sigaltstack:
1627 ba,pt %xcc, do_sys32_sigaltstack
1628 mov %i6, %o2
1629#endif
 /* The signal-suspend/-return stubs below patch their own return
  * address: "add %o7, 1f-.-4, %o7" makes the called C routine return
  * to local label 1: (the syscall-trace tail at the end of this
  * block) instead of to the instruction after the call.
  */
1630 .align 32
1631sys_sigsuspend: add %sp, PTREGS_OFF, %o0
1632 call do_sigsuspend
1633 add %o7, 1f-.-4, %o7
1634 nop
1635sys_rt_sigsuspend: /* NOTE: %o0,%o1 have a correct value already */
1636 add %sp, PTREGS_OFF, %o2
1637 call do_rt_sigsuspend
1638 add %o7, 1f-.-4, %o7
1639 nop
1640#ifdef CONFIG_COMPAT
1641 .globl sys32_rt_sigsuspend
1642sys32_rt_sigsuspend: /* NOTE: %o0,%o1 have a correct value already */
1643 srl %o0, 0, %o0
1644 add %sp, PTREGS_OFF, %o2
1645 call do_rt_sigsuspend32
1646 add %o7, 1f-.-4, %o7
1647#endif
1648 /* NOTE: %o0 has a correct value already */
1649sys_sigpause: add %sp, PTREGS_OFF, %o1
1650 call do_sigpause
1651 add %o7, 1f-.-4, %o7
1652 nop
1653#ifdef CONFIG_COMPAT
1654 .globl sys32_sigreturn
1655sys32_sigreturn:
1656 add %sp, PTREGS_OFF, %o0
1657 call do_sigreturn32
1658 add %o7, 1f-.-4, %o7
1659 nop
1660#endif
1661sys_rt_sigreturn:
1662 add %sp, PTREGS_OFF, %o0
1663 call do_rt_sigreturn
1664 add %o7, 1f-.-4, %o7
1665 nop
1666#ifdef CONFIG_COMPAT
1667 .globl sys32_rt_sigreturn
1668sys32_rt_sigreturn:
1669 add %sp, PTREGS_OFF, %o0
1670 call do_rt_sigreturn32
1671 add %o7, 1f-.-4, %o7
1672 nop
1673#endif
1674sys_ptrace: add %sp, PTREGS_OFF, %o0
1675 call do_ptrace
1676 add %o7, 1f-.-4, %o7
1677 nop
 /* Common return tail for the stubs above: if syscall tracing is
  * active, notify the tracer before falling into rtrap.
  */
1678 .align 32
16791: ldx [%curptr + TI_FLAGS], %l5
1680 andcc %l5, _TIF_SYSCALL_TRACE, %g0
1681 be,pt %icc, rtrap
1682 clr %l6
1683 call syscall_trace
1684 nop
1685
1686 ba,pt %xcc, rtrap
1687 clr %l6
1688
1689 /* This is how fork() was meant to be done, 8 instruction entry.
1690 *
1691 * I questioned the following code briefly, let me clear things
1692 * up so you must not reason on it like I did.
1693 *
1694 * Know the fork_kpsr etc. we use in the sparc32 port? We don't
1695 * need it here because the only piece of window state we copy to
1696 * the child is the CWP register. Even if the parent sleeps,
1697 * we are safe because we stuck it into pt_regs of the parent
1698 * so it will not change.
1699 *
1700 * XXX This raises the question, whether we can do the same on
1701 * XXX sparc32 to get rid of fork_kpsr _and_ fork_kwim. The
1702 * XXX answer is yes. We stick fork_kpsr in UREG_G0 and
1703 * XXX fork_kwim in UREG_G1 (global registers are considered
1704 * XXX volatile across a system call in the sparc ABI I think
1705 * XXX if it isn't we can use regs->y instead, anyone who depends
1706 * XXX upon the Y register being preserved across a fork deserves
1707 * XXX to lose).
1708 *
1709 * In fact we should take advantage of that fact for other things
1710 * during system calls...
1711 */
1712 .globl sys_fork, sys_vfork, sys_clone, sparc_exit
1713 .globl ret_from_syscall
 /* vfork/fork fall into clone with the appropriate flags:
  * 0x4000|0x0100 are CLONE_VFORK|CLONE_VM (numeric values — TODO
  * confirm against the clone flag definitions), fork is plain SIGCHLD.
  * A zero child-stack argument (%o1) is replaced by the parent's %fp.
  */
1714 .align 32
1715sys_vfork: /* Under Linux, vfork and fork are just special cases of clone. */
1716 sethi %hi(0x4000 | 0x0100 | SIGCHLD), %o0
1717 or %o0, %lo(0x4000 | 0x0100 | SIGCHLD), %o0
1718 ba,pt %xcc, sys_clone
1719sys_fork: clr %o1
1720 mov SIGCHLD, %o0
1721sys_clone: flushw
1722 movrz %o1, %fp, %o1
1723 mov 0, %o3
1724 ba,pt %xcc, sparc_do_fork
1725 add %sp, PTREGS_OFF, %o2
1726ret_from_syscall:
1727 /* Clear SPARC_FLAG_NEWCHILD, switch_to leaves thread.flags in
1728 * %o7 for us. Check performance counter stuff too.
1729 */
 /* (The flag cleared is _TIF_NEWCHILD in thread_info flags.) */
1730 andn %o7, _TIF_NEWCHILD, %l0
1731 stx %l0, [%g6 + TI_FLAGS]
1732 call schedule_tail
1733 mov %g7, %o0
1734 andcc %l0, _TIF_PERFCTR, %g0
1735 be,pt %icc, 1f
1736 nop
1737 ldx [%g6 + TI_PCR], %o7
1738 wr %g0, %o7, %pcr
1739
1740 /* Blackbird errata workaround. See commentary in
1741 * smp.c:smp_percpu_timer_interrupt() for more
1742 * information.
1743 */
1744 ba,pt %xcc, 99f
1745 nop
1746 .align 64
174799: wr %g0, %g0, %pic
1748 rd %pic, %g0
1749
17501: b,pt %xcc, ret_sys_call
1751 ldx [%sp + PTREGS_OFF + PT_V9_I0], %o0
 /* exit(): with interrupts disabled, fold %otherwin back into
  * %cansave and clear the saved-window count before calling sys_exit.
  */
1752sparc_exit: wrpr %g0, (PSTATE_RMO | PSTATE_PEF | PSTATE_PRIV), %pstate
1753 rdpr %otherwin, %g1
1754 rdpr %cansave, %g3
1755 add %g3, %g1, %g3
1756 wrpr %g3, 0x0, %cansave
1757 wrpr %g0, 0x0, %otherwin
1758 wrpr %g0, (PSTATE_RMO | PSTATE_PEF | PSTATE_PRIV | PSTATE_IE), %pstate
1759 ba,pt %xcc, sys_exit
1760 stb %g0, [%g6 + TI_WSAVED]
1761
 /* Out-of-range syscall number: route through sys_ni_syscall using
  * the native dispatch path (label 4 in linux_sparc_syscall).
  */
1762linux_sparc_ni_syscall:
1763 sethi %hi(sys_ni_syscall), %l7
1764 b,pt %xcc, 4f
1765 or %l7, %lo(sys_ni_syscall), %l7
1766
 /* Traced 32-bit syscall entry: notify the tracer, then reload the
  * argument registers from the (possibly tracer-modified) %i regs,
  * zero-extending each to 32 bits, and resume at the dispatch call.
  */
1767linux_syscall_trace32:
1768 call syscall_trace
1769 nop
1770 srl %i0, 0, %o0
1771 mov %i4, %o4
1772 srl %i1, 0, %o1
1773 srl %i2, 0, %o2
1774 b,pt %xcc, 2f
1775 srl %i3, 0, %o3
1776
 /* Traced 64-bit syscall entry: same, but arguments pass through
  * unmodified (no zero-extension).
  */
1777linux_syscall_trace:
1778 call syscall_trace
1779 nop
1780 mov %i0, %o0
1781 mov %i1, %o1
1782 mov %i2, %o2
1783 mov %i3, %o3
1784 b,pt %xcc, 2f
1785 mov %i4, %o4
1786
1787
1788 /* Linux 32-bit and SunOS system calls enter here... */
1789 .align 32
1790 .globl linux_sparc_syscall32
 /* Compat (32-bit) syscall dispatch: %g1 holds the syscall number,
  * %l7 the syscall table base.  All six user arguments are copied
  * from %i0-%i5 with srl to zero-extend to 32 bits.  If tracing is
  * active we detour through linux_syscall_trace32; otherwise call
  * the table entry and join the shared return at label 3.
  * (The instruction-scheduling annotations are the original author's.)
  */
1791linux_sparc_syscall32:
1792 /* Direct access to user regs, much faster. */
1793 cmp %g1, NR_SYSCALLS ! IEU1 Group
1794 bgeu,pn %xcc, linux_sparc_ni_syscall ! CTI
1795 srl %i0, 0, %o0 ! IEU0
1796 sll %g1, 2, %l4 ! IEU0 Group
1797#ifdef SYSCALL_TRACING
1798 call syscall_trace_entry
1799 add %sp, PTREGS_OFF, %o0
1800 srl %i0, 0, %o0
1801#endif
1802 srl %i4, 0, %o4 ! IEU1
1803 lduw [%l7 + %l4], %l7 ! Load
1804 srl %i1, 0, %o1 ! IEU0 Group
1805 ldx [%curptr + TI_FLAGS], %l0 ! Load
1806
1807 srl %i5, 0, %o5 ! IEU1
1808 srl %i2, 0, %o2 ! IEU0 Group
1809 andcc %l0, _TIF_SYSCALL_TRACE, %g0 ! IEU0 Group
1810 bne,pn %icc, linux_syscall_trace32 ! CTI
1811 mov %i0, %l5 ! IEU1
1812 call %l7 ! CTI Group brk forced
1813 srl %i3, 0, %o3 ! IEU0
1814 ba,a,pt %xcc, 3f
1815
1816 /* Linux native and SunOS system calls enter here... */
1817 .align 32
1818 .globl linux_sparc_syscall, ret_sys_call
 /* Native 64-bit syscall dispatch: same structure as the 32-bit
  * entry above but arguments pass through without zero-extension.
  * Label 4 is the re-entry point used by linux_sparc_ni_syscall,
  * label 2 by the trace paths.
  */
1819linux_sparc_syscall:
1820 /* Direct access to user regs, much faster. */
1821 cmp %g1, NR_SYSCALLS ! IEU1 Group
1822 bgeu,pn %xcc, linux_sparc_ni_syscall ! CTI
1823 mov %i0, %o0 ! IEU0
1824 sll %g1, 2, %l4 ! IEU0 Group
1825#ifdef SYSCALL_TRACING
1826 call syscall_trace_entry
1827 add %sp, PTREGS_OFF, %o0
1828 mov %i0, %o0
1829#endif
1830 mov %i1, %o1 ! IEU1
1831 lduw [%l7 + %l4], %l7 ! Load
18324: mov %i2, %o2 ! IEU0 Group
1833 ldx [%curptr + TI_FLAGS], %l0 ! Load
1834
1835 mov %i3, %o3 ! IEU1
1836 mov %i4, %o4 ! IEU0 Group
1837 andcc %l0, _TIF_SYSCALL_TRACE, %g0 ! IEU1 Group+1 bubble
1838 bne,pn %icc, linux_syscall_trace ! CTI Group
1839 mov %i0, %l5 ! IEU0
18402: call %l7 ! CTI Group brk forced
1841 mov %i5, %o5 ! IEU0
1842 nop
1843
 /* Shared syscall return: store the result into the saved %i0 slot,
  * then set or clear the user-visible carry bit in TSTATE to signal
  * failure/success and advance TPC/TNPC past the trap instruction.
  */
18443: stx %o0, [%sp + PTREGS_OFF + PT_V9_I0]
1845ret_sys_call:
1846#ifdef SYSCALL_TRACING
1847 mov %o0, %o1
1848 call syscall_trace_exit
1849 add %sp, PTREGS_OFF, %o0
1850 mov %o1, %o0
1851#endif
1852 ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %g3
1853 ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
1854 sra %o0, 0, %o0
1855 mov %ulo(TSTATE_XCARRY | TSTATE_ICARRY), %g2
1856 sllx %g2, 32, %g2
1857
1858 /* Check if force_successful_syscall_return()
1859 * was invoked.
1860 */
 /* If so, clear the flag and take the success path (80f) even when
  * the return value lies in the error range.
  */
1861 ldx [%curptr + TI_FLAGS], %l0
1862 andcc %l0, _TIF_SYSCALL_SUCCESS, %g0
1863 be,pt %icc, 1f
1864 andn %l0, _TIF_SYSCALL_SUCCESS, %l0
1865 ba,pt %xcc, 80f
1866 stx %l0, [%curptr + TI_FLAGS]
1867
 /* Values in [-ERESTART_RESTARTBLOCK, -1] (unsigned >= comparison)
  * are errors; everything else is success.
  */
18681:
1869 cmp %o0, -ERESTART_RESTARTBLOCK
1870 bgeu,pn %xcc, 1f
1871 andcc %l0, _TIF_SYSCALL_TRACE, %l6
187280:
1873 /* System call success, clear Carry condition code. */
1874 andn %g3, %g2, %g3
1875 stx %g3, [%sp + PTREGS_OFF + PT_V9_TSTATE]
1876 bne,pn %icc, linux_syscall_trace2
1877 add %l1, 0x4, %l2 ! npc = npc+4
1878 stx %l1, [%sp + PTREGS_OFF + PT_V9_TPC]
1879 ba,pt %xcc, rtrap_clr_l6
1880 stx %l2, [%sp + PTREGS_OFF + PT_V9_TNPC]
1881
18821:
1883 /* System call failure, set Carry condition code.
1884 * Also, get abs(errno) to return to the process.
1885 */
1886 andcc %l0, _TIF_SYSCALL_TRACE, %l6
1887 sub %g0, %o0, %o0
1888 or %g3, %g2, %g3
1889 stx %o0, [%sp + PTREGS_OFF + PT_V9_I0]
1890 mov 1, %l6
1891 stx %g3, [%sp + PTREGS_OFF + PT_V9_TSTATE]
1892 bne,pn %icc, linux_syscall_trace2
1893 add %l1, 0x4, %l2 ! npc = npc+4
1894 stx %l1, [%sp + PTREGS_OFF + PT_V9_TPC]
1895
1896 b,pt %xcc, rtrap
1897 stx %l2, [%sp + PTREGS_OFF + PT_V9_TNPC]
 /* Exit-side tracing: notify the tracer, then finish the normal
  * TPC/TNPC update and rtrap.
  */
1898linux_syscall_trace2:
1899 call syscall_trace
1900 nop
1901 stx %l1, [%sp + PTREGS_OFF + PT_V9_TPC]
1902 ba,pt %xcc, rtrap
1903 stx %l2, [%sp + PTREGS_OFF + PT_V9_TNPC]
1904
1905 .align 32
1906 .globl __flushw_user
 /* Flush the user's register windows to the stack: execute one save
  * per remaining %otherwin window (counting them in %g2), then the
  * matching number of restores to get back to the original window.
  */
1907__flushw_user:
1908 rdpr %otherwin, %g1
1909 brz,pn %g1, 2f
1910 clr %g2
19111: save %sp, -128, %sp
1912 rdpr %otherwin, %g1
1913 brnz,pt %g1, 1b
1914 add %g2, 1, %g2
19151: sub %g2, 1, %g2
1916 brnz,pt %g2, 1b
1917 restore %g0, %g0, %g0
19182: retl
1919 nop
diff --git a/arch/sparc64/kernel/etrap.S b/arch/sparc64/kernel/etrap.S
new file mode 100644
index 000000000000..50d2af1d98ae
--- /dev/null
+++ b/arch/sparc64/kernel/etrap.S
@@ -0,0 +1,301 @@
1/* $Id: etrap.S,v 1.46 2002/02/09 19:49:30 davem Exp $
2 * etrap.S: Preparing for entry into the kernel on Sparc V9.
3 *
4 * Copyright (C) 1996, 1997 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1997, 1998, 1999 Jakub Jelinek (jj@ultra.linux.cz)
6 */
7
8#include <linux/config.h>
9
10#include <asm/asi.h>
11#include <asm/pstate.h>
12#include <asm/ptrace.h>
13#include <asm/page.h>
14#include <asm/spitfire.h>
15#include <asm/head.h>
16#include <asm/processor.h>
17#include <asm/mmu.h>
18
19#define TASK_REGOFF (THREAD_SIZE-TRACEREG_SZ-STACKFRAME_SZ)
20#define ETRAP_PSTATE1 (PSTATE_RMO | PSTATE_PRIV)
21#define ETRAP_PSTATE2 \
22 (PSTATE_RMO | PSTATE_PEF | PSTATE_PRIV | PSTATE_IE)
23
24/*
25 * On entry, %g7 is return address - 0x4.
26 * %g4 and %g5 will be preserved %l4 and %l5 respectively.
27 */
28
29 .text
30 .align 64
 /* etrap: transition from trap context to full kernel entry state.
  * On entry %g7 holds (return address - 4); %g4/%g5 are preserved
  * into %l4/%l5.  etrap_irq is entered with the PIL to restore
  * already in %g2; plain etrap samples the current %pil first.
  */
31 .globl etrap, etrap_irq, etraptl1
32etrap: rdpr %pil, %g2
33etrap_irq:
 /* Build TSTATE|PIL in %g1; the TSTATE_PRIV test decides between a
  * kernel-stack frame (branch to 1f, frame carved below current %sp)
  * and a fresh frame at TASK_REGOFF in the thread area.
  */
34 rdpr %tstate, %g1
35 sllx %g2, 20, %g3
36 andcc %g1, TSTATE_PRIV, %g0
37 or %g1, %g3, %g1
38 bne,pn %xcc, 1f
39 sub %sp, STACKFRAME_SZ+TRACEREG_SZ-STACK_BIAS, %g2
40 wrpr %g0, 7, %cleanwin
41
42 sethi %hi(TASK_REGOFF), %g2
43 sethi %hi(TSTATE_PEF), %g3
44 or %g2, %lo(TASK_REGOFF), %g2
45 and %g1, %g3, %g3
46 brnz,pn %g3, 1f
47 add %g6, %g2, %g2
48 wr %g0, 0, %fprs
 /* Save trap-state registers into the pt_regs area at %g2. */
491: rdpr %tpc, %g3
50
51 stx %g1, [%g2 + STACKFRAME_SZ + PT_V9_TSTATE]
52 rdpr %tnpc, %g1
53 stx %g3, [%g2 + STACKFRAME_SZ + PT_V9_TPC]
54 rd %y, %g3
55 stx %g1, [%g2 + STACKFRAME_SZ + PT_V9_TNPC]
56 st %g3, [%g2 + STACKFRAME_SZ + PT_V9_Y]
57 save %g2, -STACK_BIAS, %sp ! Ordering here is critical
58 mov %g6, %l6
59
 /* Condition codes still reflect the TSTATE_PRIV test: branch to 3f
  * for the trap-from-kernel FP-depth bookkeeping, otherwise set up
  * the user->kernel window/MMU state.
  */
60 bne,pn %xcc, 3f
61 mov PRIMARY_CONTEXT, %l4
62 rdpr %canrestore, %g3
63 rdpr %wstate, %g2
64 wrpr %g0, 0, %canrestore
65 sll %g2, 3, %g2
66 mov 1, %l5
67 stb %l5, [%l6 + TI_FPDEPTH]
68
69 wrpr %g3, 0, %otherwin
70 wrpr %g2, 0, %wstate
 /* The two sethis below are boot-time patch sites: on cheetah-plus
  * they are overwritten (by cheetah_plus_patch_etrap) with the
  * nucleus context encodings before being stored to PRIMARY_CONTEXT.
  */
71cplus_etrap_insn_1:
72 sethi %hi(0), %g3
73 sllx %g3, 32, %g3
74cplus_etrap_insn_2:
75 sethi %hi(0), %g2
76 or %g3, %g2, %g3
77 stxa %g3, [%l4] ASI_DMMU
78 flush %l6
79 wr %g0, ASI_AIUS, %asi
 /* Common path: drop to TL0 and spill the globals and in-registers
  * into pt_regs, then return through %l2 (= saved %g7).
  */
802: wrpr %g0, 0x0, %tl
81 mov %g4, %l4
82 mov %g5, %l5
83
84 mov %g7, %l2
85 wrpr %g0, ETRAP_PSTATE1, %pstate
86 stx %g1, [%sp + PTREGS_OFF + PT_V9_G1]
87 stx %g2, [%sp + PTREGS_OFF + PT_V9_G2]
88 stx %g3, [%sp + PTREGS_OFF + PT_V9_G3]
89 stx %g4, [%sp + PTREGS_OFF + PT_V9_G4]
90 stx %g5, [%sp + PTREGS_OFF + PT_V9_G5]
91 stx %g6, [%sp + PTREGS_OFF + PT_V9_G6]
92
93 stx %g7, [%sp + PTREGS_OFF + PT_V9_G7]
94 stx %i0, [%sp + PTREGS_OFF + PT_V9_I0]
95 stx %i1, [%sp + PTREGS_OFF + PT_V9_I1]
96 stx %i2, [%sp + PTREGS_OFF + PT_V9_I2]
97 stx %i3, [%sp + PTREGS_OFF + PT_V9_I3]
98 stx %i4, [%sp + PTREGS_OFF + PT_V9_I4]
99 stx %i5, [%sp + PTREGS_OFF + PT_V9_I5]
100
101 stx %i6, [%sp + PTREGS_OFF + PT_V9_I6]
102 stx %i7, [%sp + PTREGS_OFF + PT_V9_I7]
103 wrpr %g0, ETRAP_PSTATE2, %pstate
104 mov %l6, %g6
105#ifdef CONFIG_SMP
106 mov TSB_REG, %g3
107 ldxa [%g3] ASI_IMMU, %g5
108#endif
109 jmpl %l2 + 0x4, %g0
110 ldx [%g6 + TI_TASK], %g4
111
 /* Trap came from kernel mode: bump the FP-save depth counter and
  * clear the corresponding fpsaved slot, then rejoin at 2b.
  */
1123: ldub [%l6 + TI_FPDEPTH], %l5
113 add %l6, TI_FPSAVED + 1, %l4
114 srl %l5, 1, %l3
115 add %l5, 2, %l5
116 stb %l5, [%l6 + TI_FPDEPTH]
117 ba,pt %xcc, 2b
118 stb %g0, [%l4 + %l3]
119 nop
120
121etraptl1: /* Save tstate/tpc/tnpc of TL 1-->4 and the tl register itself.
122 * We place this right after pt_regs on the trap stack.
123 * The layout is:
124 * 0x00 TL1's TSTATE
125 * 0x08 TL1's TPC
126 * 0x10 TL1's TNPC
127 * 0x18 TL1's TT
128 * ...
129 * 0x78 TL4's TT
130 * 0x80 TL
131 */
 /* (Layout offsets above corrected to match the stores below:
  * four 0x20-byte records for TL1-TL4, then TL at +0x80.)
  */
132 sub %sp, ((4 * 8) * 4) + 8, %g2
133 rdpr %tl, %g1
134
135 wrpr %g0, 1, %tl
136 rdpr %tstate, %g3
137 stx %g3, [%g2 + STACK_BIAS + 0x00]
138 rdpr %tpc, %g3
139 stx %g3, [%g2 + STACK_BIAS + 0x08]
140 rdpr %tnpc, %g3
141 stx %g3, [%g2 + STACK_BIAS + 0x10]
142 rdpr %tt, %g3
143 stx %g3, [%g2 + STACK_BIAS + 0x18]
144
145 wrpr %g0, 2, %tl
146 rdpr %tstate, %g3
147 stx %g3, [%g2 + STACK_BIAS + 0x20]
148 rdpr %tpc, %g3
149 stx %g3, [%g2 + STACK_BIAS + 0x28]
150 rdpr %tnpc, %g3
151 stx %g3, [%g2 + STACK_BIAS + 0x30]
152 rdpr %tt, %g3
153 stx %g3, [%g2 + STACK_BIAS + 0x38]
154
155 wrpr %g0, 3, %tl
156 rdpr %tstate, %g3
157 stx %g3, [%g2 + STACK_BIAS + 0x40]
158 rdpr %tpc, %g3
159 stx %g3, [%g2 + STACK_BIAS + 0x48]
160 rdpr %tnpc, %g3
161 stx %g3, [%g2 + STACK_BIAS + 0x50]
162 rdpr %tt, %g3
163 stx %g3, [%g2 + STACK_BIAS + 0x58]
164
165 wrpr %g0, 4, %tl
166 rdpr %tstate, %g3
167 stx %g3, [%g2 + STACK_BIAS + 0x60]
168 rdpr %tpc, %g3
169 stx %g3, [%g2 + STACK_BIAS + 0x68]
170 rdpr %tnpc, %g3
171 stx %g3, [%g2 + STACK_BIAS + 0x70]
172 rdpr %tt, %g3
173 stx %g3, [%g2 + STACK_BIAS + 0x78]
174
175 wrpr %g1, %tl
176 stx %g1, [%g2 + STACK_BIAS + 0x80]
177
 /* Re-run the etrap privilege test with %g2 pointing below the
  * TL-save area, then join the normal etrap flow at 1b.
  */
178 rdpr %tstate, %g1
179 sub %g2, STACKFRAME_SZ + TRACEREG_SZ - STACK_BIAS, %g2
180 ba,pt %xcc, 1b
181 andcc %g1, TSTATE_PRIV, %g0
182
183 .align 64
 /* scetrap: lightweight etrap variant for the system-call path.
  * Unlike etrap it does not save the global registers' live values
  * for restoration via jmpl; it finishes with "done", resuming at
  * the %tnpc it writes (saved %g7 + 4).  %y is not saved here.
  */
184 .globl scetrap
185scetrap: rdpr %pil, %g2
186 rdpr %tstate, %g1
187 sllx %g2, 20, %g3
188 andcc %g1, TSTATE_PRIV, %g0
189 or %g1, %g3, %g1
190 bne,pn %xcc, 1f
191 sub %sp, (STACKFRAME_SZ+TRACEREG_SZ-STACK_BIAS), %g2
192 wrpr %g0, 7, %cleanwin
193
 /* Shift TSTATE so the PEF bit lands in the sign position; brlz then
  * tests "FPU enabled" without a mask register.
  */
194 sllx %g1, 51, %g3
195 sethi %hi(TASK_REGOFF), %g2
196 or %g2, %lo(TASK_REGOFF), %g2
197 brlz,pn %g3, 1f
198 add %g6, %g2, %g2
199 wr %g0, 0, %fprs
2001: rdpr %tpc, %g3
201 stx %g1, [%g2 + STACKFRAME_SZ + PT_V9_TSTATE]
202
203 rdpr %tnpc, %g1
204 stx %g3, [%g2 + STACKFRAME_SZ + PT_V9_TPC]
205 stx %g1, [%g2 + STACKFRAME_SZ + PT_V9_TNPC]
206 save %g2, -STACK_BIAS, %sp ! Ordering here is critical
207 mov %g6, %l6
208 bne,pn %xcc, 2f
209 mov ASI_P, %l7
210 rdpr %canrestore, %g3
211
212 rdpr %wstate, %g2
213 wrpr %g0, 0, %canrestore
214 sll %g2, 3, %g2
215 mov PRIMARY_CONTEXT, %l4
216 wrpr %g3, 0, %otherwin
217 wrpr %g2, 0, %wstate
 /* Boot-time patch sites, mirrored from etrap: filled in with the
  * cheetah-plus nucleus context encodings by cheetah_plus_patch_etrap.
  */
218cplus_etrap_insn_3:
219 sethi %hi(0), %g3
220 sllx %g3, 32, %g3
221cplus_etrap_insn_4:
222 sethi %hi(0), %g2
223 or %g3, %g2, %g3
224 stxa %g3, [%l4] ASI_DMMU
225 flush %l6
226
227 mov ASI_AIUS, %l7
 /* Build the new TSTATE in %l7 (ASI in bits 31:24, current CWP,
  * RMO|PEF, plus PRIV|IE via the wrpr immediate) and save the
  * in-registers, then "done" resumes at %tnpc = saved %g7 + 4.
  */
2282: mov %g4, %l4
229 mov %g5, %l5
230 add %g7, 0x4, %l2
231 wrpr %g0, ETRAP_PSTATE1, %pstate
232 stx %g1, [%sp + PTREGS_OFF + PT_V9_G1]
233 stx %g2, [%sp + PTREGS_OFF + PT_V9_G2]
234 sllx %l7, 24, %l7
235
236 stx %g3, [%sp + PTREGS_OFF + PT_V9_G3]
237 rdpr %cwp, %l0
238 stx %g4, [%sp + PTREGS_OFF + PT_V9_G4]
239 stx %g5, [%sp + PTREGS_OFF + PT_V9_G5]
240 stx %g6, [%sp + PTREGS_OFF + PT_V9_G6]
241 stx %g7, [%sp + PTREGS_OFF + PT_V9_G7]
242 or %l7, %l0, %l7
243 sethi %hi(TSTATE_RMO | TSTATE_PEF), %l0
244
245 or %l7, %l0, %l7
246 wrpr %l2, %tnpc
247 wrpr %l7, (TSTATE_PRIV | TSTATE_IE), %tstate
248 stx %i0, [%sp + PTREGS_OFF + PT_V9_I0]
249 stx %i1, [%sp + PTREGS_OFF + PT_V9_I1]
250 stx %i2, [%sp + PTREGS_OFF + PT_V9_I2]
251 stx %i3, [%sp + PTREGS_OFF + PT_V9_I3]
252 stx %i4, [%sp + PTREGS_OFF + PT_V9_I4]
253
254 stx %i5, [%sp + PTREGS_OFF + PT_V9_I5]
255 stx %i6, [%sp + PTREGS_OFF + PT_V9_I6]
256 mov %l6, %g6
257 stx %i7, [%sp + PTREGS_OFF + PT_V9_I7]
258#ifdef CONFIG_SMP
259 mov TSB_REG, %g3
260 ldxa [%g3] ASI_IMMU, %g5
261#endif
262 ldx [%g6 + TI_TASK], %g4
263 done
264
265#undef TASK_REGOFF
266#undef ETRAP_PSTATE1
267
 /* Template instructions: the sethi encodings for the cheetah-plus
  * nucleus context values, read as 32-bit words below and copied
  * over the cplus_etrap_insn_* patch sites.
  */
268cplus_einsn_1:
269 sethi %uhi(CTX_CHEETAH_PLUS_NUC), %g3
270cplus_einsn_2:
271 sethi %hi(CTX_CHEETAH_PLUS_CTX0), %g2
272
273 .globl cheetah_plus_patch_etrap
274cheetah_plus_patch_etrap:
275 /* We configure the dTLB512_0 for 4MB pages and the
276 * dTLB512_1 for 8K pages when in context zero.
277 */
 /* Copy template word 1 into patch sites 1 and 3 (etrap and scetrap),
  * template word 2 into sites 2 and 4; flush after each store so the
  * I-cache sees the rewritten instructions.
  */
278 sethi %hi(cplus_einsn_1), %o0
279 sethi %hi(cplus_etrap_insn_1), %o2
280 lduw [%o0 + %lo(cplus_einsn_1)], %o1
281 or %o2, %lo(cplus_etrap_insn_1), %o2
282 stw %o1, [%o2]
283 flush %o2
284 sethi %hi(cplus_etrap_insn_3), %o2
285 or %o2, %lo(cplus_etrap_insn_3), %o2
286 stw %o1, [%o2]
287 flush %o2
288
289 sethi %hi(cplus_einsn_2), %o0
290 sethi %hi(cplus_etrap_insn_2), %o2
291 lduw [%o0 + %lo(cplus_einsn_2)], %o1
292 or %o2, %lo(cplus_etrap_insn_2), %o2
293 stw %o1, [%o2]
294 flush %o2
295 sethi %hi(cplus_etrap_insn_4), %o2
296 or %o2, %lo(cplus_etrap_insn_4), %o2
297 stw %o1, [%o2]
298 flush %o2
299
300 retl
301 nop
diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S
new file mode 100644
index 000000000000..8104a56ca2d8
--- /dev/null
+++ b/arch/sparc64/kernel/head.S
@@ -0,0 +1,782 @@
1/* $Id: head.S,v 1.87 2002/02/09 19:49:31 davem Exp $
2 * head.S: Initial boot code for the Sparc64 port of Linux.
3 *
4 * Copyright (C) 1996,1997 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1996 David Sitsky (David.Sitsky@anu.edu.au)
6 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
7 * Copyright (C) 1997 Miguel de Icaza (miguel@nuclecu.unam.mx)
8 */
9
10#include <linux/config.h>
11#include <linux/version.h>
12#include <linux/errno.h>
13#include <asm/thread_info.h>
14#include <asm/asi.h>
15#include <asm/pstate.h>
16#include <asm/ptrace.h>
17#include <asm/spitfire.h>
18#include <asm/page.h>
19#include <asm/pgtable.h>
20#include <asm/errno.h>
21#include <asm/signal.h>
22#include <asm/processor.h>
23#include <asm/lsu.h>
24#include <asm/dcr.h>
25#include <asm/dcu.h>
26#include <asm/head.h>
27#include <asm/ttable.h>
28#include <asm/mmu.h>
29
30/* This section from from _start to sparc64_boot_end should fit into
31 * 0x0000.0000.0040.4000 to 0x0000.0000.0040.8000 and will be sharing space
32 * with bootup_user_stack, which is from 0x0000.0000.0040.4000 to
33 * 0x0000.0000.0040.6000 and empty_bad_page, which is from
34 * 0x0000.0000.0040.6000 to 0x0000.0000.0040.8000.
35 */
36
37 .text
38 .globl start, _start, stext, _stext
39_start:
40start:
41_stext:
42stext:
43bootup_user_stack:
44! 0x0000000000404000
45 b sparc64_boot
46 flushw /* Flush register file. */
47
48/* This stuff has to be in sync with SILO and other potential boot loaders
49 * Fields should be kept upward compatible and whenever any change is made,
50 * HdrS version should be incremented.
51 */
52 .global root_flags, ram_flags, root_dev
53 .global sparc_ramdisk_image, sparc_ramdisk_size
54 .global sparc_ramdisk_image64
55
56 .ascii "HdrS"
57 .word LINUX_VERSION_CODE
58
59 /* History:
60 *
61 * 0x0300 : Supports being located at other than 0x4000
62 * 0x0202 : Supports kernel params string
63 * 0x0201 : Supports reboot_command
64 */
65 .half 0x0301 /* HdrS version */
66
67root_flags:
68 .half 1
69root_dev:
70 .half 0
71ram_flags:
72 .half 0
73sparc_ramdisk_image:
74 .word 0
75sparc_ramdisk_size:
76 .word 0
77 .xword reboot_command
78 .xword bootstr_info
79sparc_ramdisk_image64:
80 .xword 0
81 .word _end
82
83 /* We must be careful, 32-bit OpenBOOT will get confused if it
84 * tries to save away a register window to a 64-bit kernel
85 * stack address. Flush all windows, disable interrupts,
86 * remap if necessary, jump onto kernel trap table, then kernel
87 * stack, or else we die.
88 *
89 * PROM entry point is on %o4
90 */
91sparc64_boot:
92 BRANCH_IF_CHEETAH_BASE(g1,g7,cheetah_boot)
93 BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,cheetah_plus_boot)
94 ba,pt %xcc, spitfire_boot
95 nop
96
97cheetah_plus_boot:
98 /* Preserve OBP chosen DCU and DCR register settings. */
99 ba,pt %xcc, cheetah_generic_boot
100 nop
101
102cheetah_boot:
103 mov DCR_BPE | DCR_RPE | DCR_SI | DCR_IFPOE | DCR_MS, %g1
104 wr %g1, %asr18
105
106 sethi %uhi(DCU_ME|DCU_RE|DCU_HPE|DCU_SPE|DCU_SL|DCU_WE), %g7
107 or %g7, %ulo(DCU_ME|DCU_RE|DCU_HPE|DCU_SPE|DCU_SL|DCU_WE), %g7
108 sllx %g7, 32, %g7
109 or %g7, DCU_DM | DCU_IM | DCU_DC | DCU_IC, %g7
110 stxa %g7, [%g0] ASI_DCU_CONTROL_REG
111 membar #Sync
112
113cheetah_generic_boot:
114 mov TSB_EXTENSION_P, %g3
115 stxa %g0, [%g3] ASI_DMMU
116 stxa %g0, [%g3] ASI_IMMU
117 membar #Sync
118
119 mov TSB_EXTENSION_S, %g3
120 stxa %g0, [%g3] ASI_DMMU
121 membar #Sync
122
123 mov TSB_EXTENSION_N, %g3
124 stxa %g0, [%g3] ASI_DMMU
125 stxa %g0, [%g3] ASI_IMMU
126 membar #Sync
127
128 wrpr %g0, (PSTATE_PRIV|PSTATE_PEF|PSTATE_IE), %pstate
129 wr %g0, 0, %fprs
130
131 /* Just like for Spitfire, we probe itlb-2 for a mapping which
132 * matches our current %pc. We take the physical address in
133 * that mapping and use it to make our own.
134 */
135
136 /* %g5 holds the tlb data */
137 sethi %uhi(_PAGE_VALID | _PAGE_SZ4MB), %g5
138 sllx %g5, 32, %g5
139 or %g5, (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W | _PAGE_G), %g5
140
141 /* Put PADDR tlb data mask into %g3. */
142 sethi %uhi(_PAGE_PADDR), %g3
143 or %g3, %ulo(_PAGE_PADDR), %g3
144 sllx %g3, 32, %g3
145 sethi %hi(_PAGE_PADDR), %g7
146 or %g7, %lo(_PAGE_PADDR), %g7
147 or %g3, %g7, %g3
148
149 set 2 << 16, %l0 /* TLB entry walker. */
150 set 0x1fff, %l2 /* Page mask. */
151 rd %pc, %l3
152 andn %l3, %l2, %g2 /* vaddr comparator */
153
1541: ldxa [%l0] ASI_ITLB_TAG_READ, %g1
155 membar #Sync
156 andn %g1, %l2, %g1
157 cmp %g1, %g2
158 be,pn %xcc, cheetah_got_tlbentry
159 nop
160 and %l0, (127 << 3), %g1
161 cmp %g1, (127 << 3)
162 blu,pt %xcc, 1b
163 add %l0, (1 << 3), %l0
164
165 /* Search the small TLB. OBP never maps us like that but
166 * newer SILO can.
167 */
168 clr %l0
169
1701: ldxa [%l0] ASI_ITLB_TAG_READ, %g1
171 membar #Sync
172 andn %g1, %l2, %g1
173 cmp %g1, %g2
174 be,pn %xcc, cheetah_got_tlbentry
175 nop
176 cmp %l0, (15 << 3)
177 blu,pt %xcc, 1b
178 add %l0, (1 << 3), %l0
179
180 /* BUG() if we get here... */
181 ta 0x5
182
183cheetah_got_tlbentry:
184 ldxa [%l0] ASI_ITLB_DATA_ACCESS, %g0
185 ldxa [%l0] ASI_ITLB_DATA_ACCESS, %g1
186 membar #Sync
187 and %g1, %g3, %g1
188 set 0x5fff, %l0
189 andn %g1, %l0, %g1
190 or %g5, %g1, %g5
191
192 /* Clear out any KERNBASE area entries. */
193 set 2 << 16, %l0
194 sethi %hi(KERNBASE), %g3
195 sethi %hi(KERNBASE<<1), %g7
196 mov TLB_TAG_ACCESS, %l7
197
198 /* First, check ITLB */
1991: ldxa [%l0] ASI_ITLB_TAG_READ, %g1
200 membar #Sync
201 andn %g1, %l2, %g1
202 cmp %g1, %g3
203 blu,pn %xcc, 2f
204 cmp %g1, %g7
205 bgeu,pn %xcc, 2f
206 nop
207 stxa %g0, [%l7] ASI_IMMU
208 membar #Sync
209 stxa %g0, [%l0] ASI_ITLB_DATA_ACCESS
210 membar #Sync
211
2122: and %l0, (127 << 3), %g1
213 cmp %g1, (127 << 3)
214 blu,pt %xcc, 1b
215 add %l0, (1 << 3), %l0
216
217 /* Next, check DTLB */
218 set 2 << 16, %l0
2191: ldxa [%l0] ASI_DTLB_TAG_READ, %g1
220 membar #Sync
221 andn %g1, %l2, %g1
222 cmp %g1, %g3
223 blu,pn %xcc, 2f
224 cmp %g1, %g7
225 bgeu,pn %xcc, 2f
226 nop
227 stxa %g0, [%l7] ASI_DMMU
228 membar #Sync
229 stxa %g0, [%l0] ASI_DTLB_DATA_ACCESS
230 membar #Sync
231
2322: and %l0, (511 << 3), %g1
233 cmp %g1, (511 << 3)
234 blu,pt %xcc, 1b
235 add %l0, (1 << 3), %l0
236
237 /* On Cheetah+, have to check second DTLB. */
238 BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,l0,2f)
239 ba,pt %xcc, 9f
240 nop
241
2422: set 3 << 16, %l0
2431: ldxa [%l0] ASI_DTLB_TAG_READ, %g1
244 membar #Sync
245 andn %g1, %l2, %g1
246 cmp %g1, %g3
247 blu,pn %xcc, 2f
248 cmp %g1, %g7
249 bgeu,pn %xcc, 2f
250 nop
251 stxa %g0, [%l7] ASI_DMMU
252 membar #Sync
253 stxa %g0, [%l0] ASI_DTLB_DATA_ACCESS
254 membar #Sync
255
2562: and %l0, (511 << 3), %g1
257 cmp %g1, (511 << 3)
258 blu,pt %xcc, 1b
259 add %l0, (1 << 3), %l0
260
2619:
262
263 /* Now lock the TTE we created into ITLB-0 and DTLB-0,
264 * entry 15 (and maybe 14 too).
265 */
266 sethi %hi(KERNBASE), %g3
267 set (0 << 16) | (15 << 3), %g7
268 stxa %g3, [%l7] ASI_DMMU
269 membar #Sync
270 stxa %g5, [%g7] ASI_DTLB_DATA_ACCESS
271 membar #Sync
272 stxa %g3, [%l7] ASI_IMMU
273 membar #Sync
274 stxa %g5, [%g7] ASI_ITLB_DATA_ACCESS
275 membar #Sync
276 flush %g3
277 membar #Sync
278 sethi %hi(_end), %g3 /* Check for bigkernel case */
279 or %g3, %lo(_end), %g3
280 srl %g3, 23, %g3 /* Check if _end > 8M */
281 brz,pt %g3, 1f
282 sethi %hi(KERNBASE), %g3 /* Restore for fixup code below */
283 sethi %hi(0x400000), %g3
284 or %g3, %lo(0x400000), %g3
285 add %g5, %g3, %g5 /* New tte data */
286 andn %g5, (_PAGE_G), %g5
287 sethi %hi(KERNBASE+0x400000), %g3
288 or %g3, %lo(KERNBASE+0x400000), %g3
289 set (0 << 16) | (14 << 3), %g7
290 stxa %g3, [%l7] ASI_DMMU
291 membar #Sync
292 stxa %g5, [%g7] ASI_DTLB_DATA_ACCESS
293 membar #Sync
294 stxa %g3, [%l7] ASI_IMMU
295 membar #Sync
296 stxa %g5, [%g7] ASI_ITLB_DATA_ACCESS
297 membar #Sync
298 flush %g3
299 membar #Sync
300 sethi %hi(KERNBASE), %g3 /* Restore for fixup code below */
301 ba,pt %xcc, 1f
302 nop
303
3041: set sun4u_init, %g2
305 jmpl %g2 + %g0, %g0
306 nop
307
308spitfire_boot:
309 /* Typically PROM has already enabled both MMU's and both on-chip
310 * caches, but we do it here anyway just to be paranoid.
311 */
312 mov (LSU_CONTROL_IC|LSU_CONTROL_DC|LSU_CONTROL_IM|LSU_CONTROL_DM), %g1
313 stxa %g1, [%g0] ASI_LSU_CONTROL
314 membar #Sync
315
316 /*
317 * Make sure we are in privileged mode, have address masking,
318 * using the ordinary globals and have enabled floating
319 * point.
320 *
321 * Again, typically PROM has left %pil at 13 or similar, and
322 * (PSTATE_PRIV | PSTATE_PEF | PSTATE_IE) in %pstate.
323 */
324 wrpr %g0, (PSTATE_PRIV|PSTATE_PEF|PSTATE_IE), %pstate
325 wr %g0, 0, %fprs
326
327spitfire_create_mappings:
328 /* %g5 holds the tlb data */
329 sethi %uhi(_PAGE_VALID | _PAGE_SZ4MB), %g5
330 sllx %g5, 32, %g5
331 or %g5, (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W | _PAGE_G), %g5
332
333 /* Base of physical memory cannot reliably be assumed to be
334 * at 0x0! Figure out where it happens to be. -DaveM
335 */
336
337 /* Put PADDR tlb data mask into %g3. */
338 sethi %uhi(_PAGE_PADDR_SF), %g3
339 or %g3, %ulo(_PAGE_PADDR_SF), %g3
340 sllx %g3, 32, %g3
341 sethi %hi(_PAGE_PADDR_SF), %g7
342 or %g7, %lo(_PAGE_PADDR_SF), %g7
343 or %g3, %g7, %g3
344
345 /* Walk through entire ITLB, looking for entry which maps
346 * our %pc currently, stick PADDR from there into %g5 tlb data.
347 */
348 clr %l0 /* TLB entry walker. */
349 set 0x1fff, %l2 /* Page mask. */
350 rd %pc, %l3
351 andn %l3, %l2, %g2 /* vaddr comparator */
3521:
353 /* Yes, the nops seem to be necessary for now, don't ask me why. -DaveM */
354 ldxa [%l0] ASI_ITLB_TAG_READ, %g1
355 nop
356 nop
357 nop
358 andn %g1, %l2, %g1 /* Get vaddr */
359 cmp %g1, %g2
360 be,a,pn %xcc, spitfire_got_tlbentry
361 ldxa [%l0] ASI_ITLB_DATA_ACCESS, %g1
362 cmp %l0, (63 << 3)
363 blu,pt %xcc, 1b
364 add %l0, (1 << 3), %l0
365
366 /* BUG() if we get here... */
367 ta 0x5
368
369spitfire_got_tlbentry:
370 /* Nops here again, perhaps Cheetah/Blackbird are better behaved... */
371 nop
372 nop
373 nop
374 and %g1, %g3, %g1 /* Mask to just get paddr bits. */
375 set 0x5fff, %l3 /* Mask offset to get phys base. */
376 andn %g1, %l3, %g1
377
378 /* NOTE: We hold on to %g1 paddr base as we need it below to lock
379 * NOTE: the PROM cif code into the TLB.
380 */
381
382 or %g5, %g1, %g5 /* Or it into TAG being built. */
383
384 clr %l0 /* TLB entry walker. */
385 sethi %hi(KERNBASE), %g3 /* 4M lower limit */
386 sethi %hi(KERNBASE<<1), %g7 /* 8M upper limit */
387 mov TLB_TAG_ACCESS, %l7
3881:
389 /* Yes, the nops seem to be necessary for now, don't ask me why. -DaveM */
390 ldxa [%l0] ASI_ITLB_TAG_READ, %g1
391 nop
392 nop
393 nop
394 andn %g1, %l2, %g1 /* Get vaddr */
395 cmp %g1, %g3
396 blu,pn %xcc, 2f
397 cmp %g1, %g7
398 bgeu,pn %xcc, 2f
399 nop
400 stxa %g0, [%l7] ASI_IMMU
401 stxa %g0, [%l0] ASI_ITLB_DATA_ACCESS
402 membar #Sync
4032:
404 cmp %l0, (63 << 3)
405 blu,pt %xcc, 1b
406 add %l0, (1 << 3), %l0
407
408 nop; nop; nop
409
410 clr %l0 /* TLB entry walker. */
4111:
412 /* Yes, the nops seem to be necessary for now, don't ask me why. -DaveM */
413 ldxa [%l0] ASI_DTLB_TAG_READ, %g1
414 nop
415 nop
416 nop
417 andn %g1, %l2, %g1 /* Get vaddr */
418 cmp %g1, %g3
419 blu,pn %xcc, 2f
420 cmp %g1, %g7
421 bgeu,pn %xcc, 2f
422 nop
423 stxa %g0, [%l7] ASI_DMMU
424 stxa %g0, [%l0] ASI_DTLB_DATA_ACCESS
425 membar #Sync
4262:
427 cmp %l0, (63 << 3)
428 blu,pt %xcc, 1b
429 add %l0, (1 << 3), %l0
430
431 nop; nop; nop
432
433
434 /* PROM never puts any TLB entries into the MMU with the lock bit
435 * set. So we gladly use tlb entry 63 for KERNBASE. And maybe 62 too.
436 */
437
438 sethi %hi(KERNBASE), %g3
439 mov (63 << 3), %g7
440 stxa %g3, [%l7] ASI_DMMU /* KERNBASE into TLB TAG */
441 stxa %g5, [%g7] ASI_DTLB_DATA_ACCESS /* TTE into TLB DATA */
442 membar #Sync
443 stxa %g3, [%l7] ASI_IMMU /* KERNBASE into TLB TAG */
444 stxa %g5, [%g7] ASI_ITLB_DATA_ACCESS /* TTE into TLB DATA */
445 membar #Sync
446 flush %g3
447 membar #Sync
448 sethi %hi(_end), %g3 /* Check for bigkernel case */
449 or %g3, %lo(_end), %g3
450 srl %g3, 23, %g3 /* Check if _end > 8M */
451 brz,pt %g3, 2f
452 sethi %hi(KERNBASE), %g3 /* Restore for fixup code below */
453 sethi %hi(0x400000), %g3
454 or %g3, %lo(0x400000), %g3
455 add %g5, %g3, %g5 /* New tte data */
456 andn %g5, (_PAGE_G), %g5
457 sethi %hi(KERNBASE+0x400000), %g3
458 or %g3, %lo(KERNBASE+0x400000), %g3
459 mov (62 << 3), %g7
460 stxa %g3, [%l7] ASI_DMMU
461 stxa %g5, [%g7] ASI_DTLB_DATA_ACCESS
462 membar #Sync
463 stxa %g3, [%l7] ASI_IMMU
464 stxa %g5, [%g7] ASI_ITLB_DATA_ACCESS
465 membar #Sync
466 flush %g3
467 membar #Sync
468 sethi %hi(KERNBASE), %g3 /* Restore for fixup code below */
4692: ba,pt %xcc, 1f
470 nop
4711:
472 set sun4u_init, %g2
473 jmpl %g2 + %g0, %g0
474 nop
475
476sun4u_init:
477 /* Set ctx 0 */
478 mov PRIMARY_CONTEXT, %g7
479 stxa %g0, [%g7] ASI_DMMU
480 membar #Sync
481
482 mov SECONDARY_CONTEXT, %g7
483 stxa %g0, [%g7] ASI_DMMU
484 membar #Sync
485
486 /* We are now safely (we hope) in Nucleus context (0), rewrite
487 * the KERNBASE TTE's so they no longer have the global bit set.
488 * Don't forget to setup TAG_ACCESS first 8-)
489 */
490 mov TLB_TAG_ACCESS, %g2
491 stxa %g3, [%g2] ASI_IMMU
492 stxa %g3, [%g2] ASI_DMMU
493 membar #Sync
494
495 BRANCH_IF_ANY_CHEETAH(g1,g7,cheetah_tlb_fixup)
496
497 ba,pt %xcc, spitfire_tlb_fixup
498 nop
499
500cheetah_tlb_fixup:
501 set (0 << 16) | (15 << 3), %g7
502 ldxa [%g7] ASI_ITLB_DATA_ACCESS, %g0
503 ldxa [%g7] ASI_ITLB_DATA_ACCESS, %g1
504 andn %g1, (_PAGE_G), %g1
505 stxa %g1, [%g7] ASI_ITLB_DATA_ACCESS
506 membar #Sync
507
508 ldxa [%g7] ASI_DTLB_DATA_ACCESS, %g0
509 ldxa [%g7] ASI_DTLB_DATA_ACCESS, %g1
510 andn %g1, (_PAGE_G), %g1
511 stxa %g1, [%g7] ASI_DTLB_DATA_ACCESS
512 membar #Sync
513
514 /* Kill instruction prefetch queues. */
515 flush %g3
516 membar #Sync
517
518 mov 2, %g2 /* Set TLB type to cheetah+. */
519 BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,1f)
520
521 mov 1, %g2 /* Set TLB type to cheetah. */
522
5231: sethi %hi(tlb_type), %g1
524 stw %g2, [%g1 + %lo(tlb_type)]
525
526 BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,1f)
527 ba,pt %xcc, 2f
528 nop
529
5301: /* Patch context register writes to support nucleus page
531 * size correctly.
532 */
533 call cheetah_plus_patch_etrap
534 nop
535 call cheetah_plus_patch_rtrap
536 nop
537 call cheetah_plus_patch_fpdis
538 nop
539 call cheetah_plus_patch_winfixup
540 nop
541
542
5432: /* Patch copy/page operations to cheetah optimized versions. */
544 call cheetah_patch_copyops
545 nop
546 call cheetah_patch_cachetlbops
547 nop
548
549 ba,pt %xcc, tlb_fixup_done
550 nop
551
552spitfire_tlb_fixup:
553 mov (63 << 3), %g7
554 ldxa [%g7] ASI_ITLB_DATA_ACCESS, %g1
555 andn %g1, (_PAGE_G), %g1
556 stxa %g1, [%g7] ASI_ITLB_DATA_ACCESS
557 membar #Sync
558
559 ldxa [%g7] ASI_DTLB_DATA_ACCESS, %g1
560 andn %g1, (_PAGE_G), %g1
561 stxa %g1, [%g7] ASI_DTLB_DATA_ACCESS
562 membar #Sync
563
564 /* Kill instruction prefetch queues. */
565 flush %g3
566 membar #Sync
567
568 /* Set TLB type to spitfire. */
569 mov 0, %g2
570 sethi %hi(tlb_type), %g1
571 stw %g2, [%g1 + %lo(tlb_type)]
572
573tlb_fixup_done:
574 sethi %hi(init_thread_union), %g6
575 or %g6, %lo(init_thread_union), %g6
576 ldx [%g6 + TI_TASK], %g4
577 mov %sp, %l6
578 mov %o4, %l7
579
580#if 0 /* We don't do it like this anymore, but for historical hack value
581 * I leave this snippet here to show how crazy we can be sometimes. 8-)
582 */
583
584 /* Setup "Linux Current Register", thanks Sun 8-) */
585 wr %g0, 0x1, %pcr
586
587 /* Blackbird errata workaround. See commentary in
588 * smp.c:smp_percpu_timer_interrupt() for more
589 * information.
590 */
591 ba,pt %xcc, 99f
592 nop
593 .align 64
59499: wr %g6, %g0, %pic
595 rd %pic, %g0
596#endif
597
598 wr %g0, ASI_P, %asi
599 mov 1, %g1
600 sllx %g1, THREAD_SHIFT, %g1
601 sub %g1, (STACKFRAME_SZ + STACK_BIAS), %g1
602 add %g6, %g1, %sp
603 mov 0, %fp
604
605 /* Set per-cpu pointer initially to zero, this makes
606 * the boot-cpu use the in-kernel-image per-cpu areas
607 * before setup_per_cpu_area() is invoked.
608 */
609 clr %g5
610
611 wrpr %g0, 0, %wstate
612 wrpr %g0, 0x0, %tl
613
614 /* Clear the bss */
615 sethi %hi(__bss_start), %o0
616 or %o0, %lo(__bss_start), %o0
617 sethi %hi(_end), %o1
618 or %o1, %lo(_end), %o1
619 call __bzero
620 sub %o1, %o0, %o1
621
622 mov %l6, %o1 ! OpenPROM stack
623 call prom_init
624 mov %l7, %o0 ! OpenPROM cif handler
625
626 /* Off we go.... */
627 call start_kernel
628 nop
629 /* Not reached... */
630
631/* IMPORTANT NOTE: Whenever making changes here, check
632 * trampoline.S as well. -jj */
633 .globl setup_tba
634setup_tba: /* i0 = is_starfire */
635 save %sp, -160, %sp
636
637 rdpr %tba, %g7
638 sethi %hi(prom_tba), %o1
639 or %o1, %lo(prom_tba), %o1
640 stx %g7, [%o1]
641
642 /* Setup "Linux" globals 8-) */
643 rdpr %pstate, %o1
644 mov %g6, %o2
645 wrpr %o1, (PSTATE_AG|PSTATE_IE), %pstate
646 sethi %hi(sparc64_ttable_tl0), %g1
647 wrpr %g1, %tba
648 mov %o2, %g6
649
650 /* Set up MMU globals */
651 wrpr %o1, (PSTATE_MG|PSTATE_IE), %pstate
652
653 /* Set fixed globals used by dTLB miss handler. */
654#define KERN_HIGHBITS ((_PAGE_VALID|_PAGE_SZ4MB)^0xfffff80000000000)
655#define KERN_LOWBITS (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W)
656
657 mov TSB_REG, %g1
658 stxa %g0, [%g1] ASI_DMMU
659 membar #Sync
660 stxa %g0, [%g1] ASI_IMMU
661 membar #Sync
662 mov TLB_SFSR, %g1
663 sethi %uhi(KERN_HIGHBITS), %g2
664 or %g2, %ulo(KERN_HIGHBITS), %g2
665 sllx %g2, 32, %g2
666 or %g2, KERN_LOWBITS, %g2
667
668 BRANCH_IF_ANY_CHEETAH(g3,g7,cheetah_vpte_base)
669 ba,pt %xcc, spitfire_vpte_base
670 nop
671
672cheetah_vpte_base:
673 sethi %uhi(VPTE_BASE_CHEETAH), %g3
674 or %g3, %ulo(VPTE_BASE_CHEETAH), %g3
675 ba,pt %xcc, 2f
676 sllx %g3, 32, %g3
677
678spitfire_vpte_base:
679 sethi %uhi(VPTE_BASE_SPITFIRE), %g3
680 or %g3, %ulo(VPTE_BASE_SPITFIRE), %g3
681 sllx %g3, 32, %g3
682
6832:
684 clr %g7
685#undef KERN_HIGHBITS
686#undef KERN_LOWBITS
687
688 /* Kill PROM timer */
689 sethi %hi(0x80000000), %o2
690 sllx %o2, 32, %o2
691 wr %o2, 0, %tick_cmpr
692
693 BRANCH_IF_ANY_CHEETAH(o2,o3,1f)
694
695 ba,pt %xcc, 2f
696 nop
697
698 /* Disable STICK_INT interrupts. */
6991:
700 sethi %hi(0x80000000), %o2
701 sllx %o2, 32, %o2
702 wr %o2, %asr25
703
704 /* Ok, we're done setting up all the state our trap mechanims needs,
705 * now get back into normal globals and let the PROM know what is up.
706 */
7072:
708 wrpr %g0, %g0, %wstate
709 wrpr %o1, PSTATE_IE, %pstate
710
711 call init_irqwork_curcpu
712 nop
713
714 call prom_set_trap_table
715 sethi %hi(sparc64_ttable_tl0), %o0
716
717 BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g2,g3,1f)
718 ba,pt %xcc, 2f
719 nop
720
7211: /* Start using proper page size encodings in ctx register. */
722 sethi %uhi(CTX_CHEETAH_PLUS_NUC), %g3
723 mov PRIMARY_CONTEXT, %g1
724 sllx %g3, 32, %g3
725 sethi %hi(CTX_CHEETAH_PLUS_CTX0), %g2
726 or %g3, %g2, %g3
727 stxa %g3, [%g1] ASI_DMMU
728 membar #Sync
729
7302:
731 rdpr %pstate, %o1
732 or %o1, PSTATE_IE, %o1
733 wrpr %o1, 0, %pstate
734
735 ret
736 restore
737
738/*
739 * The following skips make sure the trap table in ttable.S is aligned
740 * on a 32K boundary as required by the v9 specs for TBA register.
741 */
742sparc64_boot_end:
743 .skip 0x2000 + _start - sparc64_boot_end
744bootup_user_stack_end:
745 .skip 0x2000
746
747#ifdef CONFIG_SBUS
748/* This is just a hack to fool make depend config.h discovering
749 strategy: As the .S files below need config.h, but
750 make depend does not find it for them, we include config.h
751 in head.S */
752#endif
753
754! 0x0000000000408000
755
756#include "ttable.S"
757#include "systbls.S"
758
759 .align 1024
760 .globl swapper_pg_dir
761swapper_pg_dir:
762 .word 0
763
764#include "etrap.S"
765#include "rtrap.S"
766#include "winfixup.S"
767#include "entry.S"
768
769 /* This is just anal retentiveness on my part... */
770 .align 16384
771
772 .data
773 .align 8
774 .globl prom_tba, tlb_type
775prom_tba: .xword 0
776tlb_type: .word 0 /* Must NOT end up in BSS */
777 .section ".fixup",#alloc,#execinstr
778 .globl __ret_efault
779__ret_efault:
780 ret
781 restore %g0, -EFAULT, %o0
782
diff --git a/arch/sparc64/kernel/idprom.c b/arch/sparc64/kernel/idprom.c
new file mode 100644
index 000000000000..3b6789e09a72
--- /dev/null
+++ b/arch/sparc64/kernel/idprom.c
@@ -0,0 +1,49 @@
1/* $Id: idprom.c,v 1.3 1999/08/31 06:54:53 davem Exp $
2 * idprom.c: Routines to load the idprom into kernel addresses and
3 * interpret the data contained within.
4 *
5 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
6 */
7
8#include <linux/kernel.h>
9#include <linux/types.h>
10#include <linux/init.h>
11
12#include <asm/oplib.h>
13#include <asm/idprom.h>
14
15struct idprom *idprom;
16static struct idprom idprom_buffer;
17
18/* Calculate the IDPROM checksum (xor of the data bytes). */
19static unsigned char __init calc_idprom_cksum(struct idprom *idprom)
20{
21 unsigned char cksum, i, *ptr = (unsigned char *)idprom;
22
23 for (i = cksum = 0; i <= 0x0E; i++)
24 cksum ^= *ptr++;
25
26 return cksum;
27}
28
29/* Create a local IDPROM copy and verify integrity. */
30void __init idprom_init(void)
31{
32 prom_get_idprom((char *) &idprom_buffer, sizeof(idprom_buffer));
33
34 idprom = &idprom_buffer;
35
36 if (idprom->id_format != 0x01) {
37 prom_printf("IDPROM: Warning, unknown format type!\n");
38 }
39
40 if (idprom->id_cksum != calc_idprom_cksum(idprom)) {
41 prom_printf("IDPROM: Warning, checksum failure (nvram=%x, calc=%x)!\n",
42 idprom->id_cksum, calc_idprom_cksum(idprom));
43 }
44
45 printk("Ethernet address: %02x:%02x:%02x:%02x:%02x:%02x\n",
46 idprom->id_ethaddr[0], idprom->id_ethaddr[1],
47 idprom->id_ethaddr[2], idprom->id_ethaddr[3],
48 idprom->id_ethaddr[4], idprom->id_ethaddr[5]);
49}
diff --git a/arch/sparc64/kernel/init_task.c b/arch/sparc64/kernel/init_task.c
new file mode 100644
index 000000000000..329b38fa5c89
--- /dev/null
+++ b/arch/sparc64/kernel/init_task.c
@@ -0,0 +1,35 @@
1#include <linux/mm.h>
2#include <linux/module.h>
3#include <linux/sched.h>
4#include <linux/init_task.h>
5#include <linux/mqueue.h>
6
7#include <asm/pgtable.h>
8#include <asm/uaccess.h>
9#include <asm/processor.h>
10
11static struct fs_struct init_fs = INIT_FS;
12static struct files_struct init_files = INIT_FILES;
13static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
14static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
15struct mm_struct init_mm = INIT_MM(init_mm);
16
17EXPORT_SYMBOL(init_mm);
18
19/* .text section in head.S is aligned at 2 page boundary and this gets linked
20 * right after that so that the init_thread_union is aligned properly as well.
21 * We really don't need this special alignment like the Intel does, but
22 * I do it anyways for completeness.
23 */
24__asm__ (".text");
25union thread_union init_thread_union = { INIT_THREAD_INFO(init_task) };
26
27/*
28 * Initial task structure.
29 *
30 * All other task structs will be allocated on slabs in fork.c
31 */
32EXPORT_SYMBOL(init_task);
33
34__asm__(".data");
35struct task_struct init_task = INIT_TASK(init_task);
diff --git a/arch/sparc64/kernel/ioctl32.c b/arch/sparc64/kernel/ioctl32.c
new file mode 100644
index 000000000000..43fc3173d480
--- /dev/null
+++ b/arch/sparc64/kernel/ioctl32.c
@@ -0,0 +1,597 @@
1/* $Id: ioctl32.c,v 1.136 2002/01/14 09:49:52 davem Exp $
2 * ioctl32.c: Conversion between 32bit and 64bit native ioctls.
3 *
4 * Copyright (C) 1997-2000 Jakub Jelinek (jakub@redhat.com)
5 * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
6 * Copyright (C) 2003 Pavel Machek (pavel@suse.cz)
7 *
8 * These routines maintain argument size conversion between 32bit and 64bit
9 * ioctls.
10 */
11
12#define INCLUDES
13#include "compat_ioctl.c"
14#include <linux/ncp_fs.h>
15#include <linux/syscalls.h>
16#include <asm/fbio.h>
17#include <asm/kbio.h>
18#include <asm/vuid_event.h>
19#include <asm/envctrl.h>
20#include <asm/display7seg.h>
21#include <asm/openpromio.h>
22#include <asm/audioio.h>
23#include <asm/watchdog.h>
24
25/* Use this to get at 32-bit user passed pointers.
26 * See sys_sparc32.c for description about it.
27 */
28#define A(__x) compat_ptr(__x)
29
30static __inline__ void *alloc_user_space(long len)
31{
32 struct pt_regs *regs = current_thread_info()->kregs;
33 unsigned long usp = regs->u_regs[UREG_I6];
34
35 if (!(test_thread_flag(TIF_32BIT)))
36 usp += STACK_BIAS;
37
38 return (void *) (usp - len);
39}
40
41#define CODE
42#include "compat_ioctl.c"
43
44struct fbcmap32 {
45 int index; /* first element (0 origin) */
46 int count;
47 u32 red;
48 u32 green;
49 u32 blue;
50};
51
52#define FBIOPUTCMAP32 _IOW('F', 3, struct fbcmap32)
53#define FBIOGETCMAP32 _IOW('F', 4, struct fbcmap32)
54
55static int fbiogetputcmap(unsigned int fd, unsigned int cmd, unsigned long arg)
56{
57 struct fbcmap32 __user *argp = (void __user *)arg;
58 struct fbcmap __user *p = compat_alloc_user_space(sizeof(*p));
59 u32 addr;
60 int ret;
61
62 ret = copy_in_user(p, argp, 2 * sizeof(int));
63 ret |= get_user(addr, &argp->red);
64 ret |= put_user(compat_ptr(addr), &p->red);
65 ret |= get_user(addr, &argp->green);
66 ret |= put_user(compat_ptr(addr), &p->green);
67 ret |= get_user(addr, &argp->blue);
68 ret |= put_user(compat_ptr(addr), &p->blue);
69 if (ret)
70 return -EFAULT;
71 return sys_ioctl(fd, (cmd == FBIOPUTCMAP32) ? FBIOPUTCMAP_SPARC : FBIOGETCMAP_SPARC, (unsigned long)p);
72}
73
74struct fbcursor32 {
75 short set; /* what to set, choose from the list above */
76 short enable; /* cursor on/off */
77 struct fbcurpos pos; /* cursor position */
78 struct fbcurpos hot; /* cursor hot spot */
79 struct fbcmap32 cmap; /* color map info */
80 struct fbcurpos size; /* cursor bit map size */
81 u32 image; /* cursor image bits */
82 u32 mask; /* cursor mask bits */
83};
84
85#define FBIOSCURSOR32 _IOW('F', 24, struct fbcursor32)
86#define FBIOGCURSOR32 _IOW('F', 25, struct fbcursor32)
87
88static int fbiogscursor(unsigned int fd, unsigned int cmd, unsigned long arg)
89{
90 struct fbcursor __user *p = compat_alloc_user_space(sizeof(*p));
91 struct fbcursor32 __user *argp = (void __user *)arg;
92 compat_uptr_t addr;
93 int ret;
94
95 ret = copy_in_user(p, argp,
96 2 * sizeof (short) + 2 * sizeof(struct fbcurpos));
97 ret |= copy_in_user(&p->size, &argp->size, sizeof(struct fbcurpos));
98 ret |= copy_in_user(&p->cmap, &argp->cmap, 2 * sizeof(int));
99 ret |= get_user(addr, &argp->cmap.red);
100 ret |= put_user(compat_ptr(addr), &p->cmap.red);
101 ret |= get_user(addr, &argp->cmap.green);
102 ret |= put_user(compat_ptr(addr), &p->cmap.green);
103 ret |= get_user(addr, &argp->cmap.blue);
104 ret |= put_user(compat_ptr(addr), &p->cmap.blue);
105 ret |= get_user(addr, &argp->mask);
106 ret |= put_user(compat_ptr(addr), &p->mask);
107 ret |= get_user(addr, &argp->image);
108 ret |= put_user(compat_ptr(addr), &p->image);
109 if (ret)
110 return -EFAULT;
111 return sys_ioctl (fd, FBIOSCURSOR, (unsigned long)p);
112}
113
114#if defined(CONFIG_DRM) || defined(CONFIG_DRM_MODULE)
115/* This really belongs in include/linux/drm.h -DaveM */
116#include "../../../drivers/char/drm/drm.h"
117
118typedef struct drm32_version {
119 int version_major; /* Major version */
120 int version_minor; /* Minor version */
121 int version_patchlevel;/* Patch level */
122 int name_len; /* Length of name buffer */
123 u32 name; /* Name of driver */
124 int date_len; /* Length of date buffer */
125 u32 date; /* User-space buffer to hold date */
126 int desc_len; /* Length of desc buffer */
127 u32 desc; /* User-space buffer to hold desc */
128} drm32_version_t;
129#define DRM32_IOCTL_VERSION DRM_IOWR(0x00, drm32_version_t)
130
131static int drm32_version(unsigned int fd, unsigned int cmd, unsigned long arg)
132{
133 drm32_version_t __user *uversion = (drm32_version_t __user *)arg;
134 drm_version_t __user *p = compat_alloc_user_space(sizeof(*p));
135 compat_uptr_t addr;
136 int n;
137 int ret;
138
139 if (clear_user(p, 3 * sizeof(int)) ||
140 get_user(n, &uversion->name_len) ||
141 put_user(n, &p->name_len) ||
142 get_user(addr, &uversion->name) ||
143 put_user(compat_ptr(addr), &p->name) ||
144 get_user(n, &uversion->date_len) ||
145 put_user(n, &p->date_len) ||
146 get_user(addr, &uversion->date) ||
147 put_user(compat_ptr(addr), &p->date) ||
148 get_user(n, &uversion->desc_len) ||
149 put_user(n, &p->desc_len) ||
150 get_user(addr, &uversion->desc) ||
151 put_user(compat_ptr(addr), &p->desc))
152 return -EFAULT;
153
154 ret = sys_ioctl(fd, DRM_IOCTL_VERSION, (unsigned long)p);
155 if (ret)
156 return ret;
157
158 if (copy_in_user(uversion, p, 3 * sizeof(int)) ||
159 get_user(n, &p->name_len) ||
160 put_user(n, &uversion->name_len) ||
161 get_user(n, &p->date_len) ||
162 put_user(n, &uversion->date_len) ||
163 get_user(n, &p->desc_len) ||
164 put_user(n, &uversion->desc_len))
165 return -EFAULT;
166
167 return 0;
168}
169
170typedef struct drm32_unique {
171 int unique_len; /* Length of unique */
172 u32 unique; /* Unique name for driver instantiation */
173} drm32_unique_t;
174#define DRM32_IOCTL_GET_UNIQUE DRM_IOWR(0x01, drm32_unique_t)
175#define DRM32_IOCTL_SET_UNIQUE DRM_IOW( 0x10, drm32_unique_t)
176
177static int drm32_getsetunique(unsigned int fd, unsigned int cmd, unsigned long arg)
178{
179 drm32_unique_t __user *uarg = (drm32_unique_t __user *)arg;
180 drm_unique_t __user *p = compat_alloc_user_space(sizeof(*p));
181 compat_uptr_t addr;
182 int n;
183 int ret;
184
185 if (get_user(n, &uarg->unique_len) ||
186 put_user(n, &p->unique_len) ||
187 get_user(addr, &uarg->unique) ||
188 put_user(compat_ptr(addr), &p->unique))
189 return -EFAULT;
190
191 if (cmd == DRM32_IOCTL_GET_UNIQUE)
192 ret = sys_ioctl (fd, DRM_IOCTL_GET_UNIQUE, (unsigned long)p);
193 else
194 ret = sys_ioctl (fd, DRM_IOCTL_SET_UNIQUE, (unsigned long)p);
195
196 if (ret)
197 return ret;
198
199 if (get_user(n, &p->unique_len) || put_user(n, &uarg->unique_len))
200 return -EFAULT;
201
202 return 0;
203}
204
205typedef struct drm32_map {
206 u32 offset; /* Requested physical address (0 for SAREA)*/
207 u32 size; /* Requested physical size (bytes) */
208 drm_map_type_t type; /* Type of memory to map */
209 drm_map_flags_t flags; /* Flags */
210 u32 handle; /* User-space: "Handle" to pass to mmap */
211 /* Kernel-space: kernel-virtual address */
212 int mtrr; /* MTRR slot used */
213 /* Private data */
214} drm32_map_t;
215#define DRM32_IOCTL_ADD_MAP DRM_IOWR(0x15, drm32_map_t)
216
217static int drm32_addmap(unsigned int fd, unsigned int cmd, unsigned long arg)
218{
219 drm32_map_t __user *uarg = (drm32_map_t __user *) arg;
220 drm_map_t karg;
221 mm_segment_t old_fs;
222 u32 tmp;
223 int ret;
224
225 ret = get_user(karg.offset, &uarg->offset);
226 ret |= get_user(karg.size, &uarg->size);
227 ret |= get_user(karg.type, &uarg->type);
228 ret |= get_user(karg.flags, &uarg->flags);
229 ret |= get_user(tmp, &uarg->handle);
230 ret |= get_user(karg.mtrr, &uarg->mtrr);
231 if (ret)
232 return -EFAULT;
233
234 karg.handle = (void *) (unsigned long) tmp;
235
236 old_fs = get_fs();
237 set_fs(KERNEL_DS);
238 ret = sys_ioctl(fd, DRM_IOCTL_ADD_MAP, (unsigned long) &karg);
239 set_fs(old_fs);
240
241 if (!ret) {
242 ret = put_user(karg.offset, &uarg->offset);
243 ret |= put_user(karg.size, &uarg->size);
244 ret |= put_user(karg.type, &uarg->type);
245 ret |= put_user(karg.flags, &uarg->flags);
246 tmp = (u32) (long)karg.handle;
247 ret |= put_user(tmp, &uarg->handle);
248 ret |= put_user(karg.mtrr, &uarg->mtrr);
249 if (ret)
250 ret = -EFAULT;
251 }
252
253 return ret;
254}
255
256typedef struct drm32_buf_info {
257 int count; /* Entries in list */
258 u32 list; /* (drm_buf_desc_t *) */
259} drm32_buf_info_t;
260#define DRM32_IOCTL_INFO_BUFS DRM_IOWR(0x18, drm32_buf_info_t)
261
262static int drm32_info_bufs(unsigned int fd, unsigned int cmd, unsigned long arg)
263{
264 drm32_buf_info_t __user *uarg = (drm32_buf_info_t __user *)arg;
265 drm_buf_info_t __user *p = compat_alloc_user_space(sizeof(*p));
266 compat_uptr_t addr;
267 int n;
268 int ret;
269
270 if (get_user(n, &uarg->count) || put_user(n, &p->count) ||
271 get_user(addr, &uarg->list) || put_user(compat_ptr(addr), &p->list))
272 return -EFAULT;
273
274 ret = sys_ioctl(fd, DRM_IOCTL_INFO_BUFS, (unsigned long)p);
275 if (ret)
276 return ret;
277
278 if (get_user(n, &p->count) || put_user(n, &uarg->count))
279 return -EFAULT;
280
281 return 0;
282}
283
284typedef struct drm32_buf_free {
285 int count;
286 u32 list; /* (int *) */
287} drm32_buf_free_t;
288#define DRM32_IOCTL_FREE_BUFS DRM_IOW( 0x1a, drm32_buf_free_t)
289
290static int drm32_free_bufs(unsigned int fd, unsigned int cmd, unsigned long arg)
291{
292 drm32_buf_free_t __user *uarg = (drm32_buf_free_t __user *)arg;
293 drm_buf_free_t __user *p = compat_alloc_user_space(sizeof(*p));
294 compat_uptr_t addr;
295 int n;
296
297 if (get_user(n, &uarg->count) || put_user(n, &p->count) ||
298 get_user(addr, &uarg->list) || put_user(compat_ptr(addr), &p->list))
299 return -EFAULT;
300
301 return sys_ioctl(fd, DRM_IOCTL_FREE_BUFS, (unsigned long)p);
302}
303
304typedef struct drm32_buf_pub {
305 int idx; /* Index into master buflist */
306 int total; /* Buffer size */
307 int used; /* Amount of buffer in use (for DMA) */
308 u32 address; /* Address of buffer (void *) */
309} drm32_buf_pub_t;
310
311typedef struct drm32_buf_map {
312 int count; /* Length of buflist */
313 u32 virtual; /* Mmaped area in user-virtual (void *) */
314 u32 list; /* Buffer information (drm_buf_pub_t *) */
315} drm32_buf_map_t;
316#define DRM32_IOCTL_MAP_BUFS DRM_IOWR(0x19, drm32_buf_map_t)
317
318static int drm32_map_bufs(unsigned int fd, unsigned int cmd, unsigned long arg)
319{
320 drm32_buf_map_t __user *uarg = (drm32_buf_map_t __user *)arg;
321 drm32_buf_pub_t __user *ulist;
322 drm_buf_map_t __user *arg64;
323 drm_buf_pub_t __user *list;
324 int orig_count, ret, i;
325 int n;
326 compat_uptr_t addr;
327
328 if (get_user(orig_count, &uarg->count))
329 return -EFAULT;
330
331 arg64 = compat_alloc_user_space(sizeof(drm_buf_map_t) +
332 (size_t)orig_count * sizeof(drm_buf_pub_t));
333 list = (void __user *)(arg64 + 1);
334
335 if (put_user(orig_count, &arg64->count) ||
336 put_user(list, &arg64->list) ||
337 get_user(addr, &uarg->virtual) ||
338 put_user(compat_ptr(addr), &arg64->virtual) ||
339 get_user(addr, &uarg->list))
340 return -EFAULT;
341
342 ulist = compat_ptr(addr);
343
344 for (i = 0; i < orig_count; i++) {
345 if (get_user(n, &ulist[i].idx) ||
346 put_user(n, &list[i].idx) ||
347 get_user(n, &ulist[i].total) ||
348 put_user(n, &list[i].total) ||
349 get_user(n, &ulist[i].used) ||
350 put_user(n, &list[i].used) ||
351 get_user(addr, &ulist[i].address) ||
352 put_user(compat_ptr(addr), &list[i].address))
353 return -EFAULT;
354 }
355
356 ret = sys_ioctl(fd, DRM_IOCTL_MAP_BUFS, (unsigned long) arg64);
357 if (ret)
358 return ret;
359
360 for (i = 0; i < orig_count; i++) {
361 void __user *p;
362 if (get_user(n, &list[i].idx) ||
363 put_user(n, &ulist[i].idx) ||
364 get_user(n, &list[i].total) ||
365 put_user(n, &ulist[i].total) ||
366 get_user(n, &list[i].used) ||
367 put_user(n, &ulist[i].used) ||
368 get_user(p, &list[i].address) ||
369 put_user((unsigned long)p, &ulist[i].address))
370 return -EFAULT;
371 }
372
373 if (get_user(n, &arg64->count) || put_user(n, &uarg->count))
374 return -EFAULT;
375
376 return 0;
377}
378
typedef struct drm32_dma {
	/* Indices here refer to the offset into
	   buflist in drm_buf_get_t. */
	int context;		/* Context handle */
	int send_count;		/* Number of buffers to send */
	u32 send_indices;	/* List of handles to buffers (int *) */
	u32 send_sizes;		/* Lengths of data to send (int *) */
	drm_dma_flags_t flags;	/* Flags */
	int request_count;	/* Number of buffers requested */
	int request_size;	/* Desired size for buffers */
	u32 request_indices;	/* Buffer information (int *) */
	u32 request_sizes;	/* (int *) */
	int granted_count;	/* Number of buffers granted */
} drm32_dma_t;
#define DRM32_IOCTL_DMA	DRM_IOWR(0x29, drm32_dma_t)

/* RED PEN	The DRM layer blindly dereferences the send/request
 * 		index/size arrays even though they are userland
 * 		pointers.  -DaveM
 */
/* Translate DRM_IOCTL_DMA: copy the scalar fields of the compat struct
 * into a native drm_dma_t in the compat area, widening the four 32-bit
 * array pointers, then copy the scalars back after the native call
 * (the pointer fields are not copied back).
 */
static int drm32_dma(unsigned int fd, unsigned int cmd, unsigned long arg)
{
	drm32_dma_t __user *uarg = (drm32_dma_t __user *) arg;
	drm_dma_t __user *p = compat_alloc_user_space(sizeof(*p));
	compat_uptr_t addr;
	int ret;

	/* context and send_count lead both layouts; copy them as a pair. */
	if (copy_in_user(p, uarg, 2 * sizeof(int)) ||
	    get_user(addr, &uarg->send_indices) ||
	    put_user(compat_ptr(addr), &p->send_indices) ||
	    get_user(addr, &uarg->send_sizes) ||
	    put_user(compat_ptr(addr), &p->send_sizes) ||
	    copy_in_user(&p->flags, &uarg->flags, sizeof(drm_dma_flags_t)) ||
	    copy_in_user(&p->request_count, &uarg->request_count, sizeof(int))||
	    copy_in_user(&p->request_size, &uarg->request_size, sizeof(int)) ||
	    get_user(addr, &uarg->request_indices) ||
	    put_user(compat_ptr(addr), &p->request_indices) ||
	    get_user(addr, &uarg->request_sizes) ||
	    put_user(compat_ptr(addr), &p->request_sizes) ||
	    copy_in_user(&p->granted_count, &uarg->granted_count, sizeof(int)))
		return -EFAULT;

	ret = sys_ioctl(fd, DRM_IOCTL_DMA, (unsigned long)p);
	if (ret)
		return ret;

	/* Mirror the scalar results back into the compat structure. */
	if (copy_in_user(uarg, p, 2 * sizeof(int)) ||
	    copy_in_user(&uarg->flags, &p->flags, sizeof(drm_dma_flags_t)) ||
	    copy_in_user(&uarg->request_count, &p->request_count, sizeof(int))||
	    copy_in_user(&uarg->request_size, &p->request_size, sizeof(int)) ||
	    copy_in_user(&uarg->granted_count, &p->granted_count, sizeof(int)))
		return -EFAULT;

	return 0;
}
434
435typedef struct drm32_ctx_res {
436 int count;
437 u32 contexts; /* (drm_ctx_t *) */
438} drm32_ctx_res_t;
439#define DRM32_IOCTL_RES_CTX DRM_IOWR(0x26, drm32_ctx_res_t)
440
441static int drm32_res_ctx(unsigned int fd, unsigned int cmd, unsigned long arg)
442{
443 drm32_ctx_res_t __user *uarg = (drm32_ctx_res_t __user *) arg;
444 drm_ctx_res_t __user *p = compat_alloc_user_space(sizeof(*p));
445 compat_uptr_t addr;
446 int ret;
447
448 if (copy_in_user(p, uarg, sizeof(int)) ||
449 get_user(addr, &uarg->contexts) ||
450 put_user(compat_ptr(addr), &p->contexts))
451 return -EFAULT;
452
453 ret = sys_ioctl(fd, DRM_IOCTL_RES_CTX, (unsigned long)p);
454 if (ret)
455 return ret;
456
457 if (copy_in_user(uarg, p, sizeof(int)))
458 return -EFAULT;
459
460 return 0;
461}
462
463#endif
464
typedef int (* ioctl32_handler_t)(unsigned int, unsigned int, unsigned long, struct file *);

/* Table builders: COMPATIBLE_IOCTL() forwards the command straight to
 * sys_ioctl() (argument layout is identical in 32- and 64-bit ABIs);
 * HANDLE_IOCTL() routes it through a translation handler.
 */
#define COMPATIBLE_IOCTL(cmd)		HANDLE_IOCTL((cmd),sys_ioctl)
#define HANDLE_IOCTL(cmd,handler)	{ (cmd), (ioctl32_handler_t)(handler), NULL },
#define IOCTL_TABLE_START \
	struct ioctl_trans ioctl_start[] = {
#define IOCTL_TABLE_END \
	};

IOCTL_TABLE_START
#include <linux/compat_ioctl.h>
#define DECLARES
#include "compat_ioctl.c"
/* Terminal and Sun frame-buffer ioctls that need no translation. */
COMPATIBLE_IOCTL(TIOCSTART)
COMPATIBLE_IOCTL(TIOCSTOP)
COMPATIBLE_IOCTL(TIOCSLTC)
COMPATIBLE_IOCTL(FBIOGTYPE)
COMPATIBLE_IOCTL(FBIOSATTR)
COMPATIBLE_IOCTL(FBIOGATTR)
COMPATIBLE_IOCTL(FBIOSVIDEO)
COMPATIBLE_IOCTL(FBIOGVIDEO)
COMPATIBLE_IOCTL(FBIOGCURSOR32)  /* This is not implemented yet. Later it should be converted... */
COMPATIBLE_IOCTL(FBIOSCURPOS)
COMPATIBLE_IOCTL(FBIOGCURPOS)
COMPATIBLE_IOCTL(FBIOGCURMAX)
/* Little k */
COMPATIBLE_IOCTL(KIOCTYPE)
COMPATIBLE_IOCTL(KIOCLAYOUT)
COMPATIBLE_IOCTL(KIOCGTRANS)
COMPATIBLE_IOCTL(KIOCTRANS)
COMPATIBLE_IOCTL(KIOCCMD)
COMPATIBLE_IOCTL(KIOCSDIRECT)
COMPATIBLE_IOCTL(KIOCSLED)
COMPATIBLE_IOCTL(KIOCGLED)
COMPATIBLE_IOCTL(KIOCSRATE)
COMPATIBLE_IOCTL(KIOCGRATE)
COMPATIBLE_IOCTL(VUIDSFORMAT)
COMPATIBLE_IOCTL(VUIDGFORMAT)
/* Little v, the video4linux ioctls */
COMPATIBLE_IOCTL(_IOR('p', 20, int[7])) /* RTCGET */
COMPATIBLE_IOCTL(_IOW('p', 21, int[7])) /* RTCSET */
/* Environmental-control monitoring (fixed-layout payloads). */
COMPATIBLE_IOCTL(ENVCTRL_RD_WARNING_TEMPERATURE)
COMPATIBLE_IOCTL(ENVCTRL_RD_SHUTDOWN_TEMPERATURE)
COMPATIBLE_IOCTL(ENVCTRL_RD_CPU_TEMPERATURE)
COMPATIBLE_IOCTL(ENVCTRL_RD_FAN_STATUS)
COMPATIBLE_IOCTL(ENVCTRL_RD_VOLTAGE_STATUS)
COMPATIBLE_IOCTL(ENVCTRL_RD_SCSI_TEMPERATURE)
COMPATIBLE_IOCTL(ENVCTRL_RD_ETHERNET_TEMPERATURE)
COMPATIBLE_IOCTL(ENVCTRL_RD_MTHRBD_TEMPERATURE)
COMPATIBLE_IOCTL(ENVCTRL_RD_CPU_VOLTAGE)
COMPATIBLE_IOCTL(ENVCTRL_RD_GLOBALADDRESS)
/* COMPATIBLE_IOCTL(D7SIOCRD) same value as ENVCTRL_RD_VOLTAGE_STATUS */
COMPATIBLE_IOCTL(D7SIOCWR)
COMPATIBLE_IOCTL(D7SIOCTM)
/* OPENPROMIO, SunOS/Solaris only, the NetBSD one's have
 * embedded pointers in the arg which we'd need to clean up...
 */
COMPATIBLE_IOCTL(OPROMGETOPT)
COMPATIBLE_IOCTL(OPROMSETOPT)
COMPATIBLE_IOCTL(OPROMNXTOPT)
COMPATIBLE_IOCTL(OPROMSETOPT2)
COMPATIBLE_IOCTL(OPROMNEXT)
COMPATIBLE_IOCTL(OPROMCHILD)
COMPATIBLE_IOCTL(OPROMGETPROP)
COMPATIBLE_IOCTL(OPROMNXTPROP)
COMPATIBLE_IOCTL(OPROMU2P)
COMPATIBLE_IOCTL(OPROMGETCONS)
COMPATIBLE_IOCTL(OPROMGETFBNAME)
COMPATIBLE_IOCTL(OPROMGETBOOTARGS)
COMPATIBLE_IOCTL(OPROMSETCUR)
COMPATIBLE_IOCTL(OPROMPCI2NODE)
COMPATIBLE_IOCTL(OPROMPATH2NODE)
/* Big L */
COMPATIBLE_IOCTL(LOOP_SET_STATUS64)
COMPATIBLE_IOCTL(LOOP_GET_STATUS64)
/* Big A */
COMPATIBLE_IOCTL(AUDIO_GETINFO)
COMPATIBLE_IOCTL(AUDIO_SETINFO)
COMPATIBLE_IOCTL(AUDIO_DRAIN)
COMPATIBLE_IOCTL(AUDIO_GETDEV)
COMPATIBLE_IOCTL(AUDIO_GETDEV_SUNOS)
COMPATIBLE_IOCTL(AUDIO_FLUSH)
COMPATIBLE_IOCTL(AUTOFS_IOC_EXPIRE_MULTI)
#if defined(CONFIG_DRM) || defined(CONFIG_DRM_MODULE)
/* DRM commands whose payloads carry no pointers. */
COMPATIBLE_IOCTL(DRM_IOCTL_GET_MAGIC)
COMPATIBLE_IOCTL(DRM_IOCTL_IRQ_BUSID)
COMPATIBLE_IOCTL(DRM_IOCTL_AUTH_MAGIC)
COMPATIBLE_IOCTL(DRM_IOCTL_BLOCK)
COMPATIBLE_IOCTL(DRM_IOCTL_UNBLOCK)
COMPATIBLE_IOCTL(DRM_IOCTL_CONTROL)
COMPATIBLE_IOCTL(DRM_IOCTL_ADD_BUFS)
COMPATIBLE_IOCTL(DRM_IOCTL_MARK_BUFS)
COMPATIBLE_IOCTL(DRM_IOCTL_ADD_CTX)
COMPATIBLE_IOCTL(DRM_IOCTL_RM_CTX)
COMPATIBLE_IOCTL(DRM_IOCTL_MOD_CTX)
COMPATIBLE_IOCTL(DRM_IOCTL_GET_CTX)
COMPATIBLE_IOCTL(DRM_IOCTL_SWITCH_CTX)
COMPATIBLE_IOCTL(DRM_IOCTL_NEW_CTX)
COMPATIBLE_IOCTL(DRM_IOCTL_ADD_DRAW)
COMPATIBLE_IOCTL(DRM_IOCTL_RM_DRAW)
COMPATIBLE_IOCTL(DRM_IOCTL_LOCK)
COMPATIBLE_IOCTL(DRM_IOCTL_UNLOCK)
COMPATIBLE_IOCTL(DRM_IOCTL_FINISH)
#endif /* DRM */
COMPATIBLE_IOCTL(WIOCSTART)
COMPATIBLE_IOCTL(WIOCSTOP)
COMPATIBLE_IOCTL(WIOCGSTAT)
/* And these ioctls need translation */
/* Note SIOCRTMSG is no longer, so this is safe and * the user would have seen just an -EINVAL anyways. */
HANDLE_IOCTL(FBIOPUTCMAP32, fbiogetputcmap)
HANDLE_IOCTL(FBIOGETCMAP32, fbiogetputcmap)
HANDLE_IOCTL(FBIOSCURSOR32, fbiogscursor)
#if defined(CONFIG_DRM) || defined(CONFIG_DRM_MODULE)
/* DRM commands translated by the drm32_* handlers above. */
HANDLE_IOCTL(DRM32_IOCTL_VERSION, drm32_version)
HANDLE_IOCTL(DRM32_IOCTL_GET_UNIQUE, drm32_getsetunique)
HANDLE_IOCTL(DRM32_IOCTL_SET_UNIQUE, drm32_getsetunique)
HANDLE_IOCTL(DRM32_IOCTL_ADD_MAP, drm32_addmap)
HANDLE_IOCTL(DRM32_IOCTL_INFO_BUFS, drm32_info_bufs)
HANDLE_IOCTL(DRM32_IOCTL_FREE_BUFS, drm32_free_bufs)
HANDLE_IOCTL(DRM32_IOCTL_MAP_BUFS, drm32_map_bufs)
HANDLE_IOCTL(DRM32_IOCTL_DMA, drm32_dma)
HANDLE_IOCTL(DRM32_IOCTL_RES_CTX, drm32_res_ctx)
#endif /* DRM */
#if 0
HANDLE_IOCTL(RTC32_IRQP_READ, do_rtc_ioctl)
HANDLE_IOCTL(RTC32_IRQP_SET, do_rtc_ioctl)
HANDLE_IOCTL(RTC32_EPOCH_READ, do_rtc_ioctl)
HANDLE_IOCTL(RTC32_EPOCH_SET, do_rtc_ioctl)
#endif
/* take care of sizeof(sizeof()) breakage */
IOCTL_TABLE_END

/* Number of entries in ioctl_start[], consumed by the generic
 * 32-bit ioctl dispatch code.
 */
int ioctl_table_size = ARRAY_SIZE(ioctl_start);
diff --git a/arch/sparc64/kernel/iommu_common.c b/arch/sparc64/kernel/iommu_common.c
new file mode 100644
index 000000000000..12c93a3eee26
--- /dev/null
+++ b/arch/sparc64/kernel/iommu_common.c
@@ -0,0 +1,231 @@
1/* $Id: iommu_common.c,v 1.9 2001/12/17 07:05:09 davem Exp $
2 * iommu_common.c: UltraSparc SBUS/PCI common iommu code.
3 *
4 * Copyright (C) 1999 David S. Miller (davem@redhat.com)
5 */
6
7#include "iommu_common.h"
8
9/* You are _strongly_ advised to enable the following debugging code
10 * any time you make changes to the sg code below, run it for a while
11 * with filesystems mounted read-only before buying the farm... -DaveM
12 */
13
14#ifdef VERIFY_SG
15static int verify_lengths(struct scatterlist *sg, int nents, int npages)
16{
17 int sg_len, dma_len;
18 int i, pgcount;
19
20 sg_len = 0;
21 for (i = 0; i < nents; i++)
22 sg_len += sg[i].length;
23
24 dma_len = 0;
25 for (i = 0; i < nents && sg[i].dma_length; i++)
26 dma_len += sg[i].dma_length;
27
28 if (sg_len != dma_len) {
29 printk("verify_lengths: Error, different, sg[%d] dma[%d]\n",
30 sg_len, dma_len);
31 return -1;
32 }
33
34 pgcount = 0;
35 for (i = 0; i < nents && sg[i].dma_length; i++) {
36 unsigned long start, end;
37
38 start = sg[i].dma_address;
39 start = start & IO_PAGE_MASK;
40
41 end = sg[i].dma_address + sg[i].dma_length;
42 end = (end + (IO_PAGE_SIZE - 1)) & IO_PAGE_MASK;
43
44 pgcount += ((end - start) >> IO_PAGE_SHIFT);
45 }
46
47 if (pgcount != npages) {
48 printk("verify_lengths: Error, page count wrong, "
49 "npages[%d] pgcount[%d]\n",
50 npages, pgcount);
51 return -1;
52 }
53
54 /* This test passes... */
55 return 0;
56}
57
58static int verify_one_map(struct scatterlist *dma_sg, struct scatterlist **__sg, int nents, iopte_t **__iopte)
59{
60 struct scatterlist *sg = *__sg;
61 iopte_t *iopte = *__iopte;
62 u32 dlen = dma_sg->dma_length;
63 u32 daddr;
64 unsigned int sglen;
65 unsigned long sgaddr;
66
67 daddr = dma_sg->dma_address;
68 sglen = sg->length;
69 sgaddr = (unsigned long) (page_address(sg->page) + sg->offset);
70 while (dlen > 0) {
71 unsigned long paddr;
72
73 /* SG and DMA_SG must begin at the same sub-page boundary. */
74 if ((sgaddr & ~IO_PAGE_MASK) != (daddr & ~IO_PAGE_MASK)) {
75 printk("verify_one_map: Wrong start offset "
76 "sg[%08lx] dma[%08x]\n",
77 sgaddr, daddr);
78 nents = -1;
79 goto out;
80 }
81
82 /* Verify the IOPTE points to the right page. */
83 paddr = iopte_val(*iopte) & IOPTE_PAGE;
84 if ((paddr + PAGE_OFFSET) != (sgaddr & IO_PAGE_MASK)) {
85 printk("verify_one_map: IOPTE[%08lx] maps the "
86 "wrong page, should be [%08lx]\n",
87 iopte_val(*iopte), (sgaddr & IO_PAGE_MASK) - PAGE_OFFSET);
88 nents = -1;
89 goto out;
90 }
91
92 /* If this SG crosses a page, adjust to that next page
93 * boundary and loop.
94 */
95 if ((sgaddr & IO_PAGE_MASK) ^ ((sgaddr + sglen - 1) & IO_PAGE_MASK)) {
96 unsigned long next_page, diff;
97
98 next_page = (sgaddr + IO_PAGE_SIZE) & IO_PAGE_MASK;
99 diff = next_page - sgaddr;
100 sgaddr += diff;
101 daddr += diff;
102 sglen -= diff;
103 dlen -= diff;
104 if (dlen > 0)
105 iopte++;
106 continue;
107 }
108
109 /* SG wholly consumed within this page. */
110 daddr += sglen;
111 dlen -= sglen;
112
113 if (dlen > 0 && ((daddr & ~IO_PAGE_MASK) == 0))
114 iopte++;
115
116 sg++;
117 if (--nents <= 0)
118 break;
119 sgaddr = (unsigned long) (page_address(sg->page) + sg->offset);
120 sglen = sg->length;
121 }
122 if (dlen < 0) {
123 /* Transfer overrun, big problems. */
124 printk("verify_one_map: Transfer overrun by %d bytes.\n",
125 -dlen);
126 nents = -1;
127 } else {
128 /* Advance to next dma_sg implies that the next iopte will
129 * begin it.
130 */
131 iopte++;
132 }
133
134out:
135 *__sg = sg;
136 *__iopte = iopte;
137 return nents;
138}
139
140static int verify_maps(struct scatterlist *sg, int nents, iopte_t *iopte)
141{
142 struct scatterlist *dma_sg = sg;
143 struct scatterlist *orig_dma_sg = dma_sg;
144 int orig_nents = nents;
145
146 for (;;) {
147 nents = verify_one_map(dma_sg, &sg, nents, &iopte);
148 if (nents <= 0)
149 break;
150 dma_sg++;
151 if (dma_sg->dma_length == 0)
152 break;
153 }
154
155 if (nents > 0) {
156 printk("verify_maps: dma maps consumed by some sgs remain (%d)\n",
157 nents);
158 return -1;
159 }
160
161 if (nents < 0) {
162 printk("verify_maps: Error, messed up mappings, "
163 "at sg %d dma_sg %d\n",
164 (int) (orig_nents + nents), (int) (dma_sg - orig_dma_sg));
165 return -1;
166 }
167
168 /* This test passes... */
169 return 0;
170}
171
172void verify_sglist(struct scatterlist *sg, int nents, iopte_t *iopte, int npages)
173{
174 if (verify_lengths(sg, nents, npages) < 0 ||
175 verify_maps(sg, nents, iopte) < 0) {
176 int i;
177
178 printk("verify_sglist: Crap, messed up mappings, dumping, iodma at ");
179 printk("%016lx.\n", sg->dma_address & IO_PAGE_MASK);
180
181 for (i = 0; i < nents; i++) {
182 printk("sg(%d): page_addr(%p) off(%x) length(%x) "
183 "dma_address[%016lx] dma_length[%016lx]\n",
184 i,
185 page_address(sg[i].page), sg[i].offset,
186 sg[i].length,
187 sg[i].dma_address, sg[i].dma_length);
188 }
189 }
190
191 /* Seems to be ok */
192}
193#endif
194
/* Coalesce the scatterlist SG (NENTS entries, nents >= 1) in place:
 * runs of entries whose kernel-virtual addresses are contiguous
 * (VCONTIG) are merged into one dma_address/dma_length pair, written
 * back over the head of the same list.  dma_address receives only the
 * sub-IO-page offset of each segment (the IOMMU base is added by the
 * caller).  Returns the total number of IO pages the mapping needs.
 */
unsigned long prepare_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *dma_sg = sg;	/* where the next merged segment is written */
	unsigned long prev;			/* kernel-virtual end of the previous entry */
	u32 dent_addr, dent_len;

	/* Seed the first segment from sg[0]; note the embedded
	 * assignment captures sg->length into dent_len.
	 */
	prev  = (unsigned long) (page_address(sg->page) + sg->offset);
	prev += (unsigned long) (dent_len = sg->length);
	dent_addr = (u32) ((unsigned long)(page_address(sg->page) + sg->offset)
			   & (IO_PAGE_SIZE - 1UL));
	while (--nents) {
		unsigned long addr;

		sg++;
		addr = (unsigned long) (page_address(sg->page) + sg->offset);
		if (! VCONTIG(prev, addr)) {
			/* Discontiguity: emit the accumulated segment and
			 * start a new one on the next IO page, preserving
			 * this entry's sub-page offset.
			 */
			dma_sg->dma_address = dent_addr;
			dma_sg->dma_length = dent_len;
			dma_sg++;

			dent_addr = ((dent_addr +
				      dent_len +
				      (IO_PAGE_SIZE - 1UL)) >> IO_PAGE_SHIFT);
			dent_addr <<= IO_PAGE_SHIFT;
			dent_addr += addr & (IO_PAGE_SIZE - 1UL);
			dent_len = 0;
		}
		dent_len += sg->length;
		prev = addr + sg->length;
	}
	/* Flush the final segment. */
	dma_sg->dma_address = dent_addr;
	dma_sg->dma_length = dent_len;

	/* Round the total span up to whole IO pages. */
	return ((unsigned long) dent_addr +
		(unsigned long) dent_len +
		(IO_PAGE_SIZE - 1UL)) >> IO_PAGE_SHIFT;
}
diff --git a/arch/sparc64/kernel/iommu_common.h b/arch/sparc64/kernel/iommu_common.h
new file mode 100644
index 000000000000..ad791014419c
--- /dev/null
+++ b/arch/sparc64/kernel/iommu_common.h
@@ -0,0 +1,48 @@
/* $Id: iommu_common.h,v 1.5 2001/12/11 09:41:01 davem Exp $
 * iommu_common.h: UltraSparc SBUS/PCI common iommu declarations.
 *
 * Copyright (C) 1999 David S. Miller (davem@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/mm.h>

#include <asm/iommu.h>
#include <asm/scatterlist.h>

/*
 * These give mapping size of each iommu pte/tlb.
 */
#define IO_PAGE_SHIFT			13
#define IO_PAGE_SIZE			(1UL << IO_PAGE_SHIFT)
#define IO_PAGE_MASK			(~(IO_PAGE_SIZE-1))
#define IO_PAGE_ALIGN(addr)		(((addr)+IO_PAGE_SIZE-1)&IO_PAGE_MASK)

/* IO TSB geometry: 128K entries of 8 bytes each. */
#define IO_TSB_ENTRIES			(128*1024)
#define IO_TSB_SIZE			(IO_TSB_ENTRIES * 8)

/*
 * This is the hardwired shift in the iotlb tag/data parts.
 */
#define IOMMU_PAGE_SHIFT		13

/* You are _strongly_ advised to enable the following debugging code
 * any time you make changes to the sg code below, run it for a while
 * with filesystems mounted read-only before buying the farm... -DaveM
 */
#undef VERIFY_SG

#ifdef VERIFY_SG
extern void verify_sglist(struct scatterlist *sg, int nents, iopte_t *iopte, int npages);
#endif

/* Two addresses are "virtually contiguous" if and only if:
 * 1) They are equal, or...
 * 2) They are both on a page boundary
 * (i.e. both have all PAGE_SHIFT low bits clear, checked by shifting
 * the OR of the two addresses so only those low bits survive).
 */
#define VCONTIG(__X, __Y)	(((__X) == (__Y)) || \
				 (((__X) | (__Y)) << (64UL - PAGE_SHIFT)) == 0UL)

extern unsigned long prepare_sg(struct scatterlist *sg, int nents);
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
new file mode 100644
index 000000000000..a38cb5036df0
--- /dev/null
+++ b/arch/sparc64/kernel/irq.c
@@ -0,0 +1,1269 @@
1/* $Id: irq.c,v 1.114 2002/01/11 08:45:38 davem Exp $
2 * irq.c: UltraSparc IRQ handling/init/registry.
3 *
4 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
6 * Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz)
7 */
8
9#include <linux/config.h>
10#include <linux/module.h>
11#include <linux/sched.h>
12#include <linux/ptrace.h>
13#include <linux/errno.h>
14#include <linux/kernel_stat.h>
15#include <linux/signal.h>
16#include <linux/mm.h>
17#include <linux/interrupt.h>
18#include <linux/slab.h>
19#include <linux/random.h>
20#include <linux/init.h>
21#include <linux/delay.h>
22#include <linux/proc_fs.h>
23#include <linux/seq_file.h>
24
25#include <asm/ptrace.h>
26#include <asm/processor.h>
27#include <asm/atomic.h>
28#include <asm/system.h>
29#include <asm/irq.h>
30#include <asm/sbus.h>
31#include <asm/iommu.h>
32#include <asm/upa.h>
33#include <asm/oplib.h>
34#include <asm/timer.h>
35#include <asm/smp.h>
36#include <asm/starfire.h>
37#include <asm/uaccess.h>
38#include <asm/cache.h>
39#include <asm/cpudata.h>
40
#ifdef CONFIG_SMP
static void distribute_irqs(void);
#endif

/* UPA nodes send interrupt packet to UltraSparc with first data reg
 * value low 5 (7 on Starfire) bits holding the IRQ identifier being
 * delivered.  We must translate this into a non-vector IRQ so we can
 * set the softint on this cpu.
 *
 * To make processing these packets efficient and race free we use
 * an array of irq buckets below.  The interrupt vector handler in
 * entry.S feeds incoming packets into per-cpu pil-indexed lists.
 * The IVEC handler does not need to act atomically, the PIL dispatch
 * code uses CAS to get an atomic snapshot of the list and clear it
 * at the same time.
 */

/* One bucket per possible interrupt vector, cacheline aligned. */
struct ino_bucket ivector_table[NUM_IVECS] __attribute__ ((aligned (SMP_CACHE_BYTES)));

/* This has to be in the main kernel image, it cannot be
 * turned into per-cpu data.  The reason is that the main
 * kernel image is locked into the TLB and this structure
 * is accessed from the vectored interrupt trap handler.  If
 * access to this structure takes a TLB miss it could cause
 * the 5-level sparc v9 trap stack to overflow.
 */
struct irq_work_struct {
	unsigned int	irq_worklists[16];	/* one pending list head per PIL */
};
struct irq_work_struct __irq_work[NR_CPUS];
/* Address of CPU __cpu's pending-work list head for PIL __pil. */
#define irq_work(__cpu, __pil)	&(__irq_work[(__cpu)].irq_worklists[(__pil)])

#ifdef CONFIG_PCI
/* This is a table of physical addresses used to deal with IBF_DMA_SYNC.
 * It is used for PCI only to synchronize DMA transfers with IRQ delivery
 * for devices behind busses other than APB on Sabre systems.
 *
 * Currently these physical addresses are just config space accesses
 * to the command register for that device.
 */
unsigned long pci_dma_wsync;
unsigned long dma_sync_reg_table[256];
unsigned char dma_sync_reg_table_entry = 0;
#endif

/* This is based upon code in the 32-bit Sparc kernel written mostly by
 * David Redman (djhr@tadpole.co.uk).
 */
#define MAX_STATIC_ALLOC	4
static struct irqaction static_irqaction[MAX_STATIC_ALLOC];
static int static_irq_count;

/* This is exported so that fast IRQ handlers can get at it... -DaveM */
struct irqaction *irq_action[NR_IRQS+1] = {
	NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL,
	NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL
};

/* This only synchronizes entities which modify IRQ handler
 * state and some selected user-level spots that want to
 * read things in the table.  IRQ handler processing orders
 * its' accesses such that no locking is needed.
 */
static DEFINE_SPINLOCK(irq_action_lock);

static void register_irq_proc (unsigned int irq);
/*
 * The upper 16 bits of irqaction->flags hold the ino
 * (0xdead marks the PIL0 dummy bucket, which has no ino).
 * irqaction->mask holds the smp affinity information.
 *
 * BUGFIX: put_ino_in_irqaction() expanded to two statements, so using
 * it as the body of an unbraced if/else would detach the second
 * statement; wrap it in the standard do { } while (0) idiom and
 * parenthesize the action argument.
 */
#define put_ino_in_irqaction(action, irq) \
do {	(action)->flags &= 0xffffffffffffUL; \
	if (__bucket(irq) == &pil0_dummy_bucket) \
		(action)->flags |= 0xdeadUL << 48; \
	else \
		(action)->flags |= __irq_ino(irq) << 48; \
} while (0)
#define get_ino_in_irqaction(action)	(action->flags >> 48)

#define put_smpaff_in_irqaction(action, smpaff)	(action)->mask = (smpaff)
#define get_smpaff_in_irqaction(action) 	((action)->mask)
122
123int show_interrupts(struct seq_file *p, void *v)
124{
125 unsigned long flags;
126 int i = *(loff_t *) v;
127 struct irqaction *action;
128#ifdef CONFIG_SMP
129 int j;
130#endif
131
132 spin_lock_irqsave(&irq_action_lock, flags);
133 if (i <= NR_IRQS) {
134 if (!(action = *(i + irq_action)))
135 goto out_unlock;
136 seq_printf(p, "%3d: ", i);
137#ifndef CONFIG_SMP
138 seq_printf(p, "%10u ", kstat_irqs(i));
139#else
140 for (j = 0; j < NR_CPUS; j++) {
141 if (!cpu_online(j))
142 continue;
143 seq_printf(p, "%10u ",
144 kstat_cpu(j).irqs[i]);
145 }
146#endif
147 seq_printf(p, " %s:%lx", action->name,
148 get_ino_in_irqaction(action));
149 for (action = action->next; action; action = action->next) {
150 seq_printf(p, ", %s:%lx", action->name,
151 get_ino_in_irqaction(action));
152 }
153 seq_putc(p, '\n');
154 }
155out_unlock:
156 spin_unlock_irqrestore(&irq_action_lock, flags);
157
158 return 0;
159}
160
/* Now these are always passed a true fully specified sun4u INO. */
/* Enable delivery of IRQ by programming its IMAP register: compute the
 * target CPU id in the encoding this CPU family expects, then write it
 * with the VALID bit set.  No-op for buckets without an IMAP (PIL0).
 */
void enable_irq(unsigned int irq)
{
	struct ino_bucket *bucket = __bucket(irq);
	unsigned long imap;
	unsigned long tid;

	imap = bucket->imap;
	if (imap == 0UL)
		return;

	/* smp_processor_id()/per-cpu register reads below must not migrate. */
	preempt_disable();

	if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		unsigned long ver;

		__asm__ ("rdpr %%ver, %0" : "=r" (ver));
		if ((ver >> 32) == 0x003e0016) {
			/* We set it to our JBUS ID. */
			__asm__ __volatile__("ldxa [%%g0] %1, %0"
					     : "=r" (tid)
					     : "i" (ASI_JBUS_CONFIG));
			tid = ((tid & (0x1fUL<<17)) << 9);
			tid &= IMAP_TID_JBUS;
		} else {
			/* We set it to our Safari AID. */
			__asm__ __volatile__("ldxa [%%g0] %1, %0"
					     : "=r" (tid)
					     : "i" (ASI_SAFARI_CONFIG));
			tid = ((tid & (0x3ffUL<<17)) << 9);
			tid &= IMAP_AID_SAFARI;
		}
	} else if (this_is_starfire == 0) {
		/* We set it to our UPA MID. */
		__asm__ __volatile__("ldxa [%%g0] %1, %0"
				     : "=r" (tid)
				     : "i" (ASI_UPA_CONFIG));
		tid = ((tid & UPA_CONFIG_MID) << 9);
		tid &= IMAP_TID_UPA;
	} else {
		/* Starfire needs a firmware-assisted translation. */
		tid = (starfire_translate(imap, smp_processor_id()) << 26);
		tid &= IMAP_TID_UPA;
	}

	/* NOTE NOTE NOTE, IGN and INO are read-only, IGN is a product
	 * of this SYSIO's preconfigured IGN in the SYSIO Control
	 * Register, the hardware just mirrors that value here.
	 * However for Graphics and UPA Slave devices the full
	 * IMAP_INR field can be set by the programmer here.
	 *
	 * Things like FFB can now be handled via the new IRQ mechanism.
	 */
	upa_writel(tid | IMAP_VALID, imap);

	preempt_enable();
}
217
218/* This now gets passed true ino's as well. */
219void disable_irq(unsigned int irq)
220{
221 struct ino_bucket *bucket = __bucket(irq);
222 unsigned long imap;
223
224 imap = bucket->imap;
225 if (imap != 0UL) {
226 u32 tmp;
227
228 /* NOTE: We do not want to futz with the IRQ clear registers
229 * and move the state to IDLE, the SCSI code does call
230 * disable_irq() to assure atomicity in the queue cmd
231 * SCSI adapter driver code. Thus we'd lose interrupts.
232 */
233 tmp = upa_readl(imap);
234 tmp &= ~IMAP_VALID;
235 upa_writel(tmp, imap);
236 }
237}
238
239/* The timer is the one "weird" interrupt which is generated by
240 * the CPU %tick register and not by some normal vectored interrupt
241 * source. To handle this special case, we use this dummy INO bucket.
242 */
243static struct ino_bucket pil0_dummy_bucket = {
244 0, /* irq_chain */
245 0, /* pil */
246 0, /* pending */
247 0, /* flags */
248 0, /* __unused */
249 NULL, /* irq_info */
250 0UL, /* iclr */
251 0UL, /* imap */
252};
253
254unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long imap)
255{
256 struct ino_bucket *bucket;
257 int ino;
258
259 if (pil == 0) {
260 if (iclr != 0UL || imap != 0UL) {
261 prom_printf("Invalid dummy bucket for PIL0 (%lx:%lx)\n",
262 iclr, imap);
263 prom_halt();
264 }
265 return __irq(&pil0_dummy_bucket);
266 }
267
268 /* RULE: Both must be specified in all other cases. */
269 if (iclr == 0UL || imap == 0UL) {
270 prom_printf("Invalid build_irq %d %d %016lx %016lx\n",
271 pil, inofixup, iclr, imap);
272 prom_halt();
273 }
274
275 ino = (upa_readl(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
276 if (ino > NUM_IVECS) {
277 prom_printf("Invalid INO %04x (%d:%d:%016lx:%016lx)\n",
278 ino, pil, inofixup, iclr, imap);
279 prom_halt();
280 }
281
282 /* Ok, looks good, set it up. Don't touch the irq_chain or
283 * the pending flag.
284 */
285 bucket = &ivector_table[ino];
286 if ((bucket->flags & IBF_ACTIVE) ||
287 (bucket->irq_info != NULL)) {
288 /* This is a gross fatal error if it happens here. */
289 prom_printf("IRQ: Trying to reinit INO bucket, fatal error.\n");
290 prom_printf("IRQ: Request INO %04x (%d:%d:%016lx:%016lx)\n",
291 ino, pil, inofixup, iclr, imap);
292 prom_printf("IRQ: Existing (%d:%016lx:%016lx)\n",
293 bucket->pil, bucket->iclr, bucket->imap);
294 prom_printf("IRQ: Cannot continue, halting...\n");
295 prom_halt();
296 }
297 bucket->imap = imap;
298 bucket->iclr = iclr;
299 bucket->pil = pil;
300 bucket->flags = 0;
301
302 bucket->irq_info = NULL;
303
304 return __irq(bucket);
305}
306
/* Push BUCKET onto this cpu's per-PIL pending work list, the same list
 * the IVEC trap handler feeds, so a vector noticed as pending during
 * registration is not lost.  Runs with PSTATE.IE cleared so the trap
 * handler cannot interleave with the list update.
 */
static void atomic_bucket_insert(struct ino_bucket *bucket)
{
	unsigned long pstate;
	unsigned int *ent;

	/* Save PSTATE and disable interrupts (clear PSTATE.IE). */
	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
	__asm__ __volatile__("wrpr %0, %1, %%pstate"
			     : : "r" (pstate), "i" (PSTATE_IE));
	/* Link the bucket at the head of this cpu's list for its PIL. */
	ent = irq_work(smp_processor_id(), bucket->pil);
	bucket->irq_chain = *ent;
	*ent = __irq(bucket);
	/* Restore the saved PSTATE (re-enabling interrupts if they were on). */
	__asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
}
320
321int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *),
322 unsigned long irqflags, const char *name, void *dev_id)
323{
324 struct irqaction *action, *tmp = NULL;
325 struct ino_bucket *bucket = __bucket(irq);
326 unsigned long flags;
327 int pending = 0;
328
329 if ((bucket != &pil0_dummy_bucket) &&
330 (bucket < &ivector_table[0] ||
331 bucket >= &ivector_table[NUM_IVECS])) {
332 unsigned int *caller;
333
334 __asm__ __volatile__("mov %%i7, %0" : "=r" (caller));
335 printk(KERN_CRIT "request_irq: Old style IRQ registry attempt "
336 "from %p, irq %08x.\n", caller, irq);
337 return -EINVAL;
338 }
339 if (!handler)
340 return -EINVAL;
341
342 if ((bucket != &pil0_dummy_bucket) && (irqflags & SA_SAMPLE_RANDOM)) {
343 /*
344 * This function might sleep, we want to call it first,
345 * outside of the atomic block. In SA_STATIC_ALLOC case,
346 * random driver's kmalloc will fail, but it is safe.
347 * If already initialized, random driver will not reinit.
348 * Yes, this might clear the entropy pool if the wrong
349 * driver is attempted to be loaded, without actually
350 * installing a new handler, but is this really a problem,
351 * only the sysadmin is able to do this.
352 */
353 rand_initialize_irq(irq);
354 }
355
356 spin_lock_irqsave(&irq_action_lock, flags);
357
358 action = *(bucket->pil + irq_action);
359 if (action) {
360 if ((action->flags & SA_SHIRQ) && (irqflags & SA_SHIRQ))
361 for (tmp = action; tmp->next; tmp = tmp->next)
362 ;
363 else {
364 spin_unlock_irqrestore(&irq_action_lock, flags);
365 return -EBUSY;
366 }
367 action = NULL; /* Or else! */
368 }
369
370 /* If this is flagged as statically allocated then we use our
371 * private struct which is never freed.
372 */
373 if (irqflags & SA_STATIC_ALLOC) {
374 if (static_irq_count < MAX_STATIC_ALLOC)
375 action = &static_irqaction[static_irq_count++];
376 else
377 printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed "
378 "using kmalloc\n", irq, name);
379 }
380 if (action == NULL)
381 action = (struct irqaction *)kmalloc(sizeof(struct irqaction),
382 GFP_ATOMIC);
383
384 if (!action) {
385 spin_unlock_irqrestore(&irq_action_lock, flags);
386 return -ENOMEM;
387 }
388
389 if (bucket == &pil0_dummy_bucket) {
390 bucket->irq_info = action;
391 bucket->flags |= IBF_ACTIVE;
392 } else {
393 if ((bucket->flags & IBF_ACTIVE) != 0) {
394 void *orig = bucket->irq_info;
395 void **vector = NULL;
396
397 if ((bucket->flags & IBF_PCI) == 0) {
398 printk("IRQ: Trying to share non-PCI bucket.\n");
399 goto free_and_ebusy;
400 }
401 if ((bucket->flags & IBF_MULTI) == 0) {
402 vector = kmalloc(sizeof(void *) * 4, GFP_ATOMIC);
403 if (vector == NULL)
404 goto free_and_enomem;
405
406 /* We might have slept. */
407 if ((bucket->flags & IBF_MULTI) != 0) {
408 int ent;
409
410 kfree(vector);
411 vector = (void **)bucket->irq_info;
412 for(ent = 0; ent < 4; ent++) {
413 if (vector[ent] == NULL) {
414 vector[ent] = action;
415 break;
416 }
417 }
418 if (ent == 4)
419 goto free_and_ebusy;
420 } else {
421 vector[0] = orig;
422 vector[1] = action;
423 vector[2] = NULL;
424 vector[3] = NULL;
425 bucket->irq_info = vector;
426 bucket->flags |= IBF_MULTI;
427 }
428 } else {
429 int ent;
430
431 vector = (void **)orig;
432 for (ent = 0; ent < 4; ent++) {
433 if (vector[ent] == NULL) {
434 vector[ent] = action;
435 break;
436 }
437 }
438 if (ent == 4)
439 goto free_and_ebusy;
440 }
441 } else {
442 bucket->irq_info = action;
443 bucket->flags |= IBF_ACTIVE;
444 }
445 pending = bucket->pending;
446 if (pending)
447 bucket->pending = 0;
448 }
449
450 action->handler = handler;
451 action->flags = irqflags;
452 action->name = name;
453 action->next = NULL;
454 action->dev_id = dev_id;
455 put_ino_in_irqaction(action, irq);
456 put_smpaff_in_irqaction(action, CPU_MASK_NONE);
457
458 if (tmp)
459 tmp->next = action;
460 else
461 *(bucket->pil + irq_action) = action;
462
463 enable_irq(irq);
464
465 /* We ate the IVEC already, this makes sure it does not get lost. */
466 if (pending) {
467 atomic_bucket_insert(bucket);
468 set_softint(1 << bucket->pil);
469 }
470 spin_unlock_irqrestore(&irq_action_lock, flags);
471 if ((bucket != &pil0_dummy_bucket) && (!(irqflags & SA_STATIC_ALLOC)))
472 register_irq_proc(__irq_ino(irq));
473
474#ifdef CONFIG_SMP
475 distribute_irqs();
476#endif
477 return 0;
478
479free_and_ebusy:
480 kfree(action);
481 spin_unlock_irqrestore(&irq_action_lock, flags);
482 return -EBUSY;
483
484free_and_enomem:
485 kfree(action);
486 spin_unlock_irqrestore(&irq_action_lock, flags);
487 return -ENOMEM;
488}
489
490EXPORT_SYMBOL(request_irq);
491
492void free_irq(unsigned int irq, void *dev_id)
493{
494 struct irqaction *action;
495 struct irqaction *tmp = NULL;
496 unsigned long flags;
497 struct ino_bucket *bucket = __bucket(irq), *bp;
498
499 if ((bucket != &pil0_dummy_bucket) &&
500 (bucket < &ivector_table[0] ||
501 bucket >= &ivector_table[NUM_IVECS])) {
502 unsigned int *caller;
503
504 __asm__ __volatile__("mov %%i7, %0" : "=r" (caller));
505 printk(KERN_CRIT "free_irq: Old style IRQ removal attempt "
506 "from %p, irq %08x.\n", caller, irq);
507 return;
508 }
509
510 spin_lock_irqsave(&irq_action_lock, flags);
511
512 action = *(bucket->pil + irq_action);
513 if (!action->handler) {
514 printk("Freeing free IRQ %d\n", bucket->pil);
515 return;
516 }
517 if (dev_id) {
518 for ( ; action; action = action->next) {
519 if (action->dev_id == dev_id)
520 break;
521 tmp = action;
522 }
523 if (!action) {
524 printk("Trying to free free shared IRQ %d\n", bucket->pil);
525 spin_unlock_irqrestore(&irq_action_lock, flags);
526 return;
527 }
528 } else if (action->flags & SA_SHIRQ) {
529 printk("Trying to free shared IRQ %d with NULL device ID\n", bucket->pil);
530 spin_unlock_irqrestore(&irq_action_lock, flags);
531 return;
532 }
533
534 if (action->flags & SA_STATIC_ALLOC) {
535 printk("Attempt to free statically allocated IRQ %d (%s)\n",
536 bucket->pil, action->name);
537 spin_unlock_irqrestore(&irq_action_lock, flags);
538 return;
539 }
540
541 if (action && tmp)
542 tmp->next = action->next;
543 else
544 *(bucket->pil + irq_action) = action->next;
545
546 spin_unlock_irqrestore(&irq_action_lock, flags);
547
548 synchronize_irq(irq);
549
550 spin_lock_irqsave(&irq_action_lock, flags);
551
552 if (bucket != &pil0_dummy_bucket) {
553 unsigned long imap = bucket->imap;
554 void **vector, *orig;
555 int ent;
556
557 orig = bucket->irq_info;
558 vector = (void **)orig;
559
560 if ((bucket->flags & IBF_MULTI) != 0) {
561 int other = 0;
562 void *orphan = NULL;
563 for (ent = 0; ent < 4; ent++) {
564 if (vector[ent] == action)
565 vector[ent] = NULL;
566 else if (vector[ent] != NULL) {
567 orphan = vector[ent];
568 other++;
569 }
570 }
571
572 /* Only free when no other shared irq
573 * uses this bucket.
574 */
575 if (other) {
576 if (other == 1) {
577 /* Convert back to non-shared bucket. */
578 bucket->irq_info = orphan;
579 bucket->flags &= ~(IBF_MULTI);
580 kfree(vector);
581 }
582 goto out;
583 }
584 } else {
585 bucket->irq_info = NULL;
586 }
587
588 /* This unique interrupt source is now inactive. */
589 bucket->flags &= ~IBF_ACTIVE;
590
591 /* See if any other buckets share this bucket's IMAP
592 * and are still active.
593 */
594 for (ent = 0; ent < NUM_IVECS; ent++) {
595 bp = &ivector_table[ent];
596 if (bp != bucket &&
597 bp->imap == imap &&
598 (bp->flags & IBF_ACTIVE) != 0)
599 break;
600 }
601
602 /* Only disable when no other sub-irq levels of
603 * the same IMAP are active.
604 */
605 if (ent == NUM_IVECS)
606 disable_irq(irq);
607 }
608
609out:
610 kfree(action);
611 spin_unlock_irqrestore(&irq_action_lock, flags);
612}
613
614EXPORT_SYMBOL(free_irq);
615
#ifdef CONFIG_SMP
/* Spin until no CPU is currently running the handler(s) attached to
 * this IRQ cookie.
 *
 * Ideally we would poll the hardware until the interrupt's state leaves
 * ICLR_TRANSMIT/ICLR_PENDING, but the ICLR registers are write-only;
 * reading the real state would mean poking controller-specific diag
 * registers whose layout varies per PCI/SBUS controller.  So, just like
 * x86, we rely on a software flag: handler_irq() sets IBF_INPROGRESS
 * around each handler invocation and we busy-wait on it here.
 */
void synchronize_irq(unsigned int irq)
{
	struct ino_bucket *bucket = __bucket(irq);

	while (bucket->flags & IBF_INPROGRESS)
		cpu_relax();
}
#endif /* CONFIG_SMP */
648
649void catch_disabled_ivec(struct pt_regs *regs)
650{
651 int cpu = smp_processor_id();
652 struct ino_bucket *bucket = __bucket(*irq_work(cpu, 0));
653
654 /* We can actually see this on Ultra/PCI PCI cards, which are bridges
655 * to other devices. Here a single IMAP enabled potentially multiple
656 * unique interrupt sources (which each do have a unique ICLR register.
657 *
658 * So what we do is just register that the IVEC arrived, when registered
659 * for real the request_irq() code will check the bit and signal
660 * a local CPU interrupt for it.
661 */
662#if 0
663 printk("IVEC: Spurious interrupt vector (%x) received at (%016lx)\n",
664 bucket - &ivector_table[0], regs->tpc);
665#endif
666 *irq_work(cpu, 0) = 0;
667 bucket->pending = 1;
668}
669
670/* Tune this... */
671#define FORWARD_VOLUME 12
672
#ifdef CONFIG_SMP

/* Heuristically push this interrupt source towards a neighbouring CPU.
 *
 * Policy:
 *  1) Retargeting IRQs on Starfire is very expensive, so never move
 *     them there.
 *  2) High priority interrupts (PIL >= 10) stay where they are.
 *  3) An idle CPU does useful work servicing interrupts, so only hand
 *     off to a buddy that is itself sufficiently idle.
 *  4) An explicit sysadmin-directed affinity mask is always honoured.
 */
static inline void redirect_intr(int cpu, struct ino_bucket *bp)
{
	struct irqaction *ap = bp->irq_info;
	cpumask_t allowed;
	unsigned int target, scanned;

	allowed = get_smpaff_in_irqaction(ap);
	cpus_and(allowed, allowed, cpu_online_map);
	if (cpus_empty(allowed))
		allowed = cpu_online_map;

	if (this_is_starfire != 0 ||
	    bp->pil >= 10 || current->pid == 0)
		return;

	/* 'cpu' is the MID (ie. UPAID); scan upwards with wraparound for
	 * the next MID present in the allowed mask.
	 */
	target = cpu + 1;
	if (target >= NR_CPUS)
		target = 0;

	scanned = 0;
	while (!cpu_isset(target, allowed)) {
		if (++target >= NR_CPUS)
			target = 0;
		if (++scanned > NR_CPUS) {
			/* Mask matches nothing scannable: reset it and bail. */
			put_smpaff_in_irqaction(ap, CPU_MASK_NONE);
			return;
		}
	}

	if (target == cpu)
		return;

	/* Voo-doo programming: only bother a buddy that is idle enough. */
	if (cpu_data(target).idle_volume < FORWARD_VOLUME)
		return;

	/* Forming the TID by shifting the MID this way just so happens
	 * to be correct on Cheetah at the moment.
	 */
	target <<= 26;

	/* Push it to our buddy. */
	upa_writel(target | IMAP_VALID, bp->imap);
}

#endif
738
/* Main device-interrupt dispatch, entered from the PIL-level trap
 * handlers with 'irq' = the PIL that fired.  Drains this cpu's chain
 * of ino_buckets queued at that PIL, running the registered handler(s)
 * for each active bucket and re-arming its ICLR afterwards.
 */
void handler_irq(int irq, struct pt_regs *regs)
{
	struct ino_bucket *bp, *nbp;
	int cpu = smp_processor_id();

#ifndef CONFIG_SMP
	/*
	 * Check for TICK_INT on level 14 softint.
	 */
	{
		unsigned long clr_mask = 1 << irq;
		unsigned long tick_mask = tick_ops->softint_mask;

		if ((irq == 14) && (get_softint() & tick_mask)) {
			irq = 0;
			clr_mask = tick_mask;
		}
		clear_softint(clr_mask);
	}
#else
	/* Forward at most one bucket per invocation to a buddy cpu. */
	int should_forward = 1;

	clear_softint(1 << irq);
#endif

	irq_enter();
	kstat_this_cpu.irqs[irq]++;

	/* Sliiiick... */
#ifndef CONFIG_SMP
	/* On UP, irq 0 (the TICK_INT remap above) uses the dummy bucket. */
	bp = ((irq != 0) ?
	      __bucket(xchg32(irq_work(cpu, irq), 0)) :
	      &pil0_dummy_bucket);
#else
	bp = __bucket(xchg32(irq_work(cpu, irq), 0));
#endif
	for ( ; bp != NULL; bp = nbp) {
		unsigned char flags = bp->flags;
		unsigned char random = 0;

		/* Detach the bucket from the chain before running handlers. */
		nbp = __bucket(bp->irq_chain);
		bp->irq_chain = 0;

		/* Observed by synchronize_irq() while the handler runs. */
		bp->flags |= IBF_INPROGRESS;

		if ((flags & IBF_ACTIVE) != 0) {
#ifdef CONFIG_PCI
			/* Flush pending DMA before the handler inspects data. */
			if ((flags & IBF_DMA_SYNC) != 0) {
				upa_readl(dma_sync_reg_table[bp->synctab_ent]);
				upa_readq(pci_dma_wsync);
			}
#endif
			if ((flags & IBF_MULTI) == 0) {
				/* Single handler attached to this bucket. */
				struct irqaction *ap = bp->irq_info;
				int ret;

				ret = ap->handler(__irq(bp), ap->dev_id, regs);
				if (ret == IRQ_HANDLED)
					random |= ap->flags;
			} else {
				/* Shared bucket: up to 4 handler slots. */
				void **vector = (void **)bp->irq_info;
				int ent;
				for (ent = 0; ent < 4; ent++) {
					struct irqaction *ap = vector[ent];
					if (ap != NULL) {
						int ret;

						ret = ap->handler(__irq(bp),
								  ap->dev_id,
								  regs);
						if (ret == IRQ_HANDLED)
							random |= ap->flags;
					}
				}
			}
			/* Only the dummy bucket lacks IMAP/ICLR. */
			if (bp->pil != 0) {
#ifdef CONFIG_SMP
				if (should_forward) {
					redirect_intr(cpu, bp);
					should_forward = 0;
				}
#endif
				upa_writel(ICLR_IDLE, bp->iclr);

				/* Test and add entropy */
				if (random & SA_SAMPLE_RANDOM)
					add_interrupt_randomness(irq);
			}
		} else
			/* Not enabled yet; remember it (see catch_disabled_ivec). */
			bp->pending = 1;

		bp->flags &= ~IBF_INPROGRESS;
	}
	irq_exit();
}
835
#ifdef CONFIG_BLK_DEV_FD
extern void floppy_interrupt(int irq, void *dev_cookie, struct pt_regs *regs);

/* Fast-interrupt trampoline for the floppy driver.  Mirrors what
 * handler_irq() does for a normal single-handler bucket (irq stat
 * accounting, IBF_INPROGRESS marking for synchronize_irq(), ICLR
 * re-arm), but calls floppy_interrupt() directly.
 */
void sparc_floppy_irq(int irq, void *dev_cookie, struct pt_regs *regs)
{
	struct irqaction *action = *(irq + irq_action);
	struct ino_bucket *bucket;
	int cpu = smp_processor_id();

	irq_enter();
	kstat_this_cpu.irqs[irq]++;

	/* Clear this cpu's pending word and locate the bucket via the
	 * INO stashed in the irqaction.
	 */
	*(irq_work(cpu, irq)) = 0;
	bucket = get_ino_in_irqaction(action) + ivector_table;

	bucket->flags |= IBF_INPROGRESS;

	floppy_interrupt(irq, dev_cookie, regs);
	upa_writel(ICLR_IDLE, bucket->iclr);

	bucket->flags &= ~IBF_INPROGRESS;

	irq_exit();
}
#endif
861
862/* The following assumes that the branch lies before the place we
863 * are branching to. This is the case for a trap vector...
864 * You have been warned.
865 */
866#define SPARC_BRANCH(dest_addr, inst_addr) \
867 (0x10800000 | ((((dest_addr)-(inst_addr))>>2)&0x3fffff))
868
869#define SPARC_NOP (0x01000000)
870
/* Patch this PIL's TL0 trap-table slot to vector straight to 'handler'.
 * The interrupt trap slots start at sparc64_ttable_tl0 + 0x820, 32
 * bytes per level beginning with level 1; we overwrite the slot's first
 * two instructions with a branch to the handler plus a nop, then flush
 * so the new instructions are fetched.
 * NOTE: SPARC_BRANCH assumes the handler lies before the trap table --
 * see the comment above its definition.
 */
static void install_fast_irq(unsigned int cpu_irq,
			     irqreturn_t (*handler)(int, void *, struct pt_regs *))
{
	extern unsigned long sparc64_ttable_tl0;
	unsigned long ttent = (unsigned long) &sparc64_ttable_tl0;
	unsigned int *insns;

	ttent += 0x820;
	ttent += (cpu_irq - 1) << 5;
	insns = (unsigned int *) ttent;
	insns[0] = SPARC_BRANCH(((unsigned long) handler),
				((unsigned long)&insns[0]));
	insns[1] = SPARC_NOP;
	__asm__ __volatile__("membar #StoreStore; flush %0" : : "r" (ttent));
}
886
/* Register a non-shared "fast" interrupt handler: instead of being
 * dispatched through handler_irq(), the trap table entry for this
 * bucket's PIL is patched (install_fast_irq()) to jump straight to
 * 'handler'.  PIL 0 and 14 are refused; SA_SAMPLE_RANDOM and smp
 * affinity are not supported in this path.
 * Returns 0 on success or a negative errno.
 */
int request_fast_irq(unsigned int irq,
		     irqreturn_t (*handler)(int, void *, struct pt_regs *),
		     unsigned long irqflags, const char *name, void *dev_id)
{
	struct irqaction *action;
	struct ino_bucket *bucket = __bucket(irq);
	unsigned long flags;

	/* No pil0 dummy buckets allowed here. */
	if (bucket < &ivector_table[0] ||
	    bucket >= &ivector_table[NUM_IVECS]) {
		unsigned int *caller;

		__asm__ __volatile__("mov %%i7, %0" : "=r" (caller));
		printk(KERN_CRIT "request_fast_irq: Old style IRQ registry attempt "
		       "from %p, irq %08x.\n", caller, irq);
		return -EINVAL;
	}

	if (!handler)
		return -EINVAL;

	if ((bucket->pil == 0) || (bucket->pil == 14)) {
		printk("request_fast_irq: Trying to register shared IRQ 0 or 14.\n");
		return -EBUSY;
	}

	spin_lock_irqsave(&irq_action_lock, flags);

	/* Fast irqs cannot coexist with any other handler on this PIL. */
	action = *(bucket->pil + irq_action);
	if (action) {
		if (action->flags & SA_SHIRQ)
			panic("Trying to register fast irq when already shared.\n");
		if (irqflags & SA_SHIRQ)
			panic("Trying to register fast irq as shared.\n");
		printk("request_fast_irq: Trying to register yet already owned.\n");
		spin_unlock_irqrestore(&irq_action_lock, flags);
		return -EBUSY;
	}

	/*
	 * We do not check for SA_SAMPLE_RANDOM in this path. Neither do we
	 * support smp intr affinity in this path.
	 */
	if (irqflags & SA_STATIC_ALLOC) {
		/* Use the never-freed static pool if there is room. */
		if (static_irq_count < MAX_STATIC_ALLOC)
			action = &static_irqaction[static_irq_count++];
		else
			printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed "
			       "using kmalloc\n", bucket->pil, name);
	}
	if (action == NULL)
		action = (struct irqaction *)kmalloc(sizeof(struct irqaction),
						     GFP_ATOMIC);
	if (!action) {
		spin_unlock_irqrestore(&irq_action_lock, flags);
		return -ENOMEM;
	}
	/* Patch the trap table before publishing the action. */
	install_fast_irq(bucket->pil, handler);

	bucket->irq_info = action;
	bucket->flags |= IBF_ACTIVE;

	action->handler = handler;
	action->flags = irqflags;
	action->dev_id = NULL;
	action->name = name;
	action->next = NULL;
	put_ino_in_irqaction(action, irq);
	put_smpaff_in_irqaction(action, CPU_MASK_NONE);

	*(bucket->pil + irq_action) = action;
	enable_irq(irq);

	spin_unlock_irqrestore(&irq_action_lock, flags);

#ifdef CONFIG_SMP
	distribute_irqs();
#endif
	return 0;
}
968
969/* We really don't need these at all on the Sparc. We only have
970 * stubs here because they are exported to modules.
971 */
/* IRQ autoprobing is meaningless on sparc64; this stub exists solely
 * because drivers built as modules reference the symbol.
 */
unsigned long probe_irq_on(void)
{
	return 0;
}
976
977EXPORT_SYMBOL(probe_irq_on);
978
/* Companion stub to probe_irq_on(); always reports "no IRQ detected". */
int probe_irq_off(unsigned long mask)
{
	return 0;
}
983
984EXPORT_SYMBOL(probe_irq_off);
985
986#ifdef CONFIG_SMP
987static int retarget_one_irq(struct irqaction *p, int goal_cpu)
988{
989 struct ino_bucket *bucket = get_ino_in_irqaction(p) + ivector_table;
990 unsigned long imap = bucket->imap;
991 unsigned int tid;
992
993 while (!cpu_online(goal_cpu)) {
994 if (++goal_cpu >= NR_CPUS)
995 goal_cpu = 0;
996 }
997
998 if (tlb_type == cheetah || tlb_type == cheetah_plus) {
999 tid = goal_cpu << 26;
1000 tid &= IMAP_AID_SAFARI;
1001 } else if (this_is_starfire == 0) {
1002 tid = goal_cpu << 26;
1003 tid &= IMAP_TID_UPA;
1004 } else {
1005 tid = (starfire_translate(imap, goal_cpu) << 26);
1006 tid &= IMAP_TID_UPA;
1007 }
1008 upa_writel(tid | IMAP_VALID, imap);
1009
1010 while (!cpu_online(goal_cpu)) {
1011 if (++goal_cpu >= NR_CPUS)
1012 goal_cpu = 0;
1013 }
1014
1015 return goal_cpu;
1016}
1017
/* Called from request_irq. */
/* Walk every registered irqaction and round-robin them across the
 * online cpus via retarget_one_irq(), holding irq_action_lock for the
 * whole pass.
 */
static void distribute_irqs(void)
{
	unsigned long flags;
	int cpu, level;

	spin_lock_irqsave(&irq_action_lock, flags);
	cpu = 0;

	/*
	 * Skip the timer at [0], and very rare error/power intrs at [15].
	 * Also level [12], it causes problems on Ex000 systems.
	 * NOTE(review): the loop below only skips [0] and [12]; [15] is
	 * visited whenever anything is registered there -- confirm whether
	 * the comment or the loop bound is the intended behavior.
	 */
	for (level = 1; level < NR_IRQS; level++) {
		struct irqaction *p = irq_action[level];
		if (level == 12) continue;
		while(p) {
			cpu = retarget_one_irq(p, cpu);
			p = p->next;
		}
	}
	spin_unlock_irqrestore(&irq_action_lock, flags);
}
1041#endif
1042
1043
1044struct sun5_timer *prom_timers;
1045static u64 prom_limit0, prom_limit1;
1046
1047static void map_prom_timers(void)
1048{
1049 unsigned int addr[3];
1050 int tnode, err;
1051
1052 /* PROM timer node hangs out in the top level of device siblings... */
1053 tnode = prom_finddevice("/counter-timer");
1054
1055 /* Assume if node is not present, PROM uses different tick mechanism
1056 * which we should not care about.
1057 */
1058 if (tnode == 0 || tnode == -1) {
1059 prom_timers = (struct sun5_timer *) 0;
1060 return;
1061 }
1062
1063 /* If PROM is really using this, it must be mapped by him. */
1064 err = prom_getproperty(tnode, "address", (char *)addr, sizeof(addr));
1065 if (err == -1) {
1066 prom_printf("PROM does not have timer mapped, trying to continue.\n");
1067 prom_timers = (struct sun5_timer *) 0;
1068 return;
1069 }
1070 prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]);
1071}
1072
/* Silence the PROM's counter-timers, saving their limit registers so
 * enable_prom_timer() can restore them later.  Finally the inline asm
 * reads the ASI_INTR_RECEIVE/ASI_INTR_R state and clears the receive
 * register, consuming any interrupt packet the timer already sent.
 */
static void kill_prom_timer(void)
{
	if (!prom_timers)
		return;

	/* Save them away for later. */
	prom_limit0 = prom_timers->limit0;
	prom_limit1 = prom_timers->limit1;

	/* Just as in sun4c/sun4m PROM uses timer which ticks at IRQ 14.
	 * We turn both off here just to be paranoid.
	 */
	prom_timers->limit0 = 0;
	prom_timers->limit1 = 0;

	/* Wheee, eat the interrupt packet too... */
	__asm__ __volatile__(
"	mov	0x40, %%g2\n"
"	ldxa	[%%g0] %0, %%g1\n"
"	ldxa	[%%g2] %1, %%g1\n"
"	stxa	%%g0, [%%g0] %0\n"
"	membar	#Sync\n"
	: /* no outputs */
	: "i" (ASI_INTR_RECEIVE), "i" (ASI_INTR_R)
	: "g1", "g2");
}
1099
/* Re-arm the PROM counter-timers with the limit values saved by
 * kill_prom_timer(), zeroing the counts so they restart from scratch.
 * Presumably used when handing control back to the PROM -- confirm
 * against the callers.
 */
void enable_prom_timer(void)
{
	if (!prom_timers)
		return;

	/* Set it to whatever was there before. */
	prom_timers->limit1 = prom_limit1;
	prom_timers->count1 = 0;
	prom_timers->limit0 = prom_limit0;
	prom_timers->count0 = 0;
}
1111
/* Per-cpu bring-up: zero this cpu's __irq_work area and load its
 * address into %g6 of the interrupt-global register set (PSTATE_IG),
 * where the low-level interrupt code expects to find it.  Must be
 * called with PSTATE_IE clear; we verify that and prom_halt() if not.
 */
void init_irqwork_curcpu(void)
{
	/* Pinned to %o2/%o3 so the final asm can name them via "r". */
	register struct irq_work_struct *workp asm("o2");
	register unsigned long tmp asm("o3");
	int cpu = hard_smp_processor_id();

	memset(__irq_work + cpu, 0, sizeof(*workp));

	/* Make sure we are called with PSTATE_IE disabled. */
	__asm__ __volatile__("rdpr %%pstate, %0\n\t"
			     : "=r" (tmp));
	if (tmp & PSTATE_IE) {
		prom_printf("BUG: init_irqwork_curcpu() called with "
			    "PSTATE_IE enabled, bailing.\n");
		__asm__ __volatile__("mov %%i7, %0\n\t"
				     : "=r" (tmp));
		prom_printf("BUG: Called from %lx\n", tmp);
		prom_halt();
	}

	/* Set interrupt globals: briefly switch to the interrupt-global
	 * register set, store workp in %g6, then switch back.
	 */
	workp = &__irq_work[cpu];
	__asm__ __volatile__(
	"rdpr	%%pstate, %0\n\t"
	"wrpr	%0, %1, %%pstate\n\t"
	"mov	%2, %%g6\n\t"
	"wrpr	%0, 0x0, %%pstate\n\t"
	: "=&r" (tmp)
	: "i" (PSTATE_IG), "r" (workp));
}
1142
/* Only invoked on boot processor. */
/* Boot-time IRQ setup: take the timers away from the PROM, clear the
 * vector table and any stale softints, then enable interrupt traps.
 */
void __init init_IRQ(void)
{
	map_prom_timers();
	kill_prom_timer();
	memset(&ivector_table[0], 0, sizeof(ivector_table));

	/* We need to clear any IRQ's pending in the soft interrupt
	 * registers, a spurious one could be left around from the
	 * PROM timer which we just disabled.
	 */
	clear_softint(get_softint());

	/* Now that ivector table is initialized, it is safe
	 * to receive IRQ vector traps.  We will normally take
	 * one or two right now, in case some device PROM used
	 * to boot us wants to speak to us.  We just ignore them.
	 */
	/* Set PSTATE_IE: interrupts on. */
	__asm__ __volatile__("rdpr %%pstate, %%g1\n\t"
			     "or %%g1, %0, %%g1\n\t"
			     "wrpr %%g1, 0x0, %%pstate"
			     : /* No outputs */
			     : "i" (PSTATE_IE)
			     : "g1");
}
1168
1169static struct proc_dir_entry * root_irq_dir;
1170static struct proc_dir_entry * irq_dir [NUM_IVECS];
1171
1172#ifdef CONFIG_SMP
1173
1174static int irq_affinity_read_proc (char *page, char **start, off_t off,
1175 int count, int *eof, void *data)
1176{
1177 struct ino_bucket *bp = ivector_table + (long)data;
1178 struct irqaction *ap = bp->irq_info;
1179 cpumask_t mask;
1180 int len;
1181
1182 mask = get_smpaff_in_irqaction(ap);
1183 if (cpus_empty(mask))
1184 mask = cpu_online_map;
1185
1186 len = cpumask_scnprintf(page, count, mask);
1187 if (count - len < 2)
1188 return -EINVAL;
1189 len += sprintf(page + len, "\n");
1190 return len;
1191}
1192
1193static inline void set_intr_affinity(int irq, cpumask_t hw_aff)
1194{
1195 struct ino_bucket *bp = ivector_table + irq;
1196
1197 /* Users specify affinity in terms of hw cpu ids.
1198 * As soon as we do this, handler_irq() might see and take action.
1199 */
1200 put_smpaff_in_irqaction((struct irqaction *)bp->irq_info, hw_aff);
1201
1202 /* Migration is simply done by the next cpu to service this
1203 * interrupt.
1204 */
1205}
1206
1207static int irq_affinity_write_proc (struct file *file, const char __user *buffer,
1208 unsigned long count, void *data)
1209{
1210 int irq = (long) data, full_count = count, err;
1211 cpumask_t new_value;
1212
1213 err = cpumask_parse(buffer, count, new_value);
1214
1215 /*
1216 * Do not allow disabling IRQs completely - it's a too easy
1217 * way to make the system unusable accidentally :-) At least
1218 * one online CPU still has to be targeted.
1219 */
1220 cpus_and(new_value, new_value, cpu_online_map);
1221 if (cpus_empty(new_value))
1222 return -EINVAL;
1223
1224 set_intr_affinity(irq, new_value);
1225
1226 return full_count;
1227}
1228
1229#endif
1230
1231#define MAX_NAMELEN 10
1232
/* Create /proc/irq/<ino> for a registered interrupt.  On SMP (except
 * starfire, which lacks retargeting support here) also create the
 * writable smp_affinity control file.
 */
static void register_irq_proc (unsigned int irq)
{
	char name [MAX_NAMELEN];

	/* Bail if /proc/irq does not exist yet or we already registered. */
	if (!root_irq_dir || irq_dir[irq])
		return;

	memset(name, 0, MAX_NAMELEN);
	sprintf(name, "%x", irq);

	/* create /proc/irq/1234 */
	irq_dir[irq] = proc_mkdir(name, root_irq_dir);

#ifdef CONFIG_SMP
	/* XXX SMP affinity not supported on starfire yet. */
	if (this_is_starfire == 0) {
		struct proc_dir_entry *entry;

		/* create /proc/irq/1234/smp_affinity */
		entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);

		if (entry) {
			entry->nlink = 1;
			entry->data = (void *)(long)irq;
			entry->read_proc = irq_affinity_read_proc;
			entry->write_proc = irq_affinity_write_proc;
		}
	}
#endif
}
1263
1264void init_irq_proc (void)
1265{
1266 /* create /proc/irq */
1267 root_irq_dir = proc_mkdir("irq", NULL);
1268}
1269
diff --git a/arch/sparc64/kernel/isa.c b/arch/sparc64/kernel/isa.c
new file mode 100644
index 000000000000..30862abee611
--- /dev/null
+++ b/arch/sparc64/kernel/isa.c
@@ -0,0 +1,329 @@
1#include <linux/kernel.h>
2#include <linux/init.h>
3#include <linux/pci.h>
4#include <linux/slab.h>
5#include <asm/oplib.h>
6#include <asm/isa.h>
7
8struct sparc_isa_bridge *isa_chain;
9
10static void __init fatal_err(const char *reason)
11{
12 prom_printf("ISA: fatal error, %s.\n", reason);
13}
14
15static void __init report_dev(struct sparc_isa_device *isa_dev, int child)
16{
17 if (child)
18 printk(" (%s)", isa_dev->prom_name);
19 else
20 printk(" [%s", isa_dev->prom_name);
21}
22
23static void __init isa_dev_get_resource(struct sparc_isa_device *isa_dev,
24 struct linux_prom_registers *pregs,
25 int pregs_size)
26{
27 unsigned long base, len;
28 int prop_len;
29
30 prop_len = prom_getproperty(isa_dev->prom_node, "reg",
31 (char *) pregs, pregs_size);
32
33 if (prop_len <= 0)
34 return;
35
36 /* Only the first one is interesting. */
37 len = pregs[0].reg_size;
38 base = (((unsigned long)pregs[0].which_io << 32) |
39 (unsigned long)pregs[0].phys_addr);
40 base += isa_dev->bus->parent->io_space.start;
41
42 isa_dev->resource.start = base;
43 isa_dev->resource.end = (base + len - 1UL);
44 isa_dev->resource.flags = IORESOURCE_IO;
45 isa_dev->resource.name = isa_dev->prom_name;
46
47 request_resource(&isa_dev->bus->parent->io_space,
48 &isa_dev->resource);
49}
50
51/* I can't believe they didn't put a real INO in the isa device
52 * interrupts property. The whole point of the OBP properties
53 * is to shield the kernel from IRQ routing details.
54 *
55 * The P1275 standard for ISA devices seems to also have been
56 * totally ignored.
57 *
58 * On later systems, an interrupt-map and interrupt-map-mask scheme
59 * akin to EBUS is used.
60 */
static struct {
	int obp_irq;	/* value found in the OBP "interrupts" property */
	int pci_ino;	/* PCI INO to route the interrupt through */
} grover_irq_table[] = {
	{ 1, 0x00 },	/* dma, unknown ino at this point */
	{ 2, 0x27 },	/* floppy */
	{ 3, 0x22 },	/* parallel */
	{ 4, 0x2b },	/* serial */
	{ 5, 0x25 },	/* acpi power management */

	{ 0, 0x00 }	/* end of table */
};
73
74static int __init isa_dev_get_irq_using_imap(struct sparc_isa_device *isa_dev,
75 struct sparc_isa_bridge *isa_br,
76 int *interrupt,
77 struct linux_prom_registers *pregs)
78{
79 unsigned int hi, lo, irq;
80 int i;
81
82 hi = pregs->which_io & isa_br->isa_intmask.phys_hi;
83 lo = pregs->phys_addr & isa_br->isa_intmask.phys_lo;
84 irq = *interrupt & isa_br->isa_intmask.interrupt;
85 for (i = 0; i < isa_br->num_isa_intmap; i++) {
86 if ((isa_br->isa_intmap[i].phys_hi == hi) &&
87 (isa_br->isa_intmap[i].phys_lo == lo) &&
88 (isa_br->isa_intmap[i].interrupt == irq)) {
89 *interrupt = isa_br->isa_intmap[i].cinterrupt;
90 return 0;
91 }
92 }
93 return -1;
94}
95
96static void __init isa_dev_get_irq(struct sparc_isa_device *isa_dev,
97 struct linux_prom_registers *pregs)
98{
99 int irq_prop;
100
101 irq_prop = prom_getintdefault(isa_dev->prom_node,
102 "interrupts", -1);
103 if (irq_prop <= 0) {
104 goto no_irq;
105 } else {
106 struct pci_controller_info *pcic;
107 struct pci_pbm_info *pbm;
108 int i;
109
110 if (isa_dev->bus->num_isa_intmap) {
111 if (!isa_dev_get_irq_using_imap(isa_dev,
112 isa_dev->bus,
113 &irq_prop,
114 pregs))
115 goto route_irq;
116 }
117
118 for (i = 0; grover_irq_table[i].obp_irq != 0; i++) {
119 if (grover_irq_table[i].obp_irq == irq_prop) {
120 int ino = grover_irq_table[i].pci_ino;
121
122 if (ino == 0)
123 goto no_irq;
124
125 irq_prop = ino;
126 goto route_irq;
127 }
128 }
129 goto no_irq;
130
131route_irq:
132 pbm = isa_dev->bus->parent;
133 pcic = pbm->parent;
134 isa_dev->irq = pcic->irq_build(pbm, NULL, irq_prop);
135 return;
136 }
137
138no_irq:
139 isa_dev->irq = PCI_IRQ_NONE;
140}
141
/* Probe the OBP children of an ISA device and link each one as a
 * sparc_isa_device on the parent's ->child list (most recently probed
 * first).  Allocation or missing-name failures are fatal: prom_halt().
 */
static void __init isa_fill_children(struct sparc_isa_device *parent_isa_dev)
{
	int node = prom_getchild(parent_isa_dev->prom_node);

	if (node == 0)
		return;

	printk(" ->");
	while (node != 0) {
		struct linux_prom_registers regs[PROMREG_MAX];
		struct sparc_isa_device *isa_dev;
		int prop_len;

		isa_dev = kmalloc(sizeof(*isa_dev), GFP_KERNEL);
		if (!isa_dev) {
			fatal_err("cannot allocate child isa_dev");
			prom_halt();
		}

		memset(isa_dev, 0, sizeof(*isa_dev));

		/* Link it in to parent. */
		isa_dev->next = parent_isa_dev->child;
		parent_isa_dev->child = isa_dev;

		isa_dev->bus = parent_isa_dev->bus;
		isa_dev->prom_node = node;
		prop_len = prom_getproperty(node, "name",
					    (char *) isa_dev->prom_name,
					    sizeof(isa_dev->prom_name));
		if (prop_len <= 0) {
			fatal_err("cannot get child isa_dev OBP node name");
			prom_halt();
		}

		prop_len = prom_getproperty(node, "compatible",
					    (char *) isa_dev->compatible,
					    sizeof(isa_dev->compatible));

		/* Not having this is OK. */
		if (prop_len <= 0)
			isa_dev->compatible[0] = '\0';

		/* Claim I/O space and resolve the IRQ for this child. */
		isa_dev_get_resource(isa_dev, regs, sizeof(regs));
		isa_dev_get_irq(isa_dev, regs);

		report_dev(isa_dev, 1);

		node = prom_getsibling(node);
	}
}
193
/* Probe every OBP child of the ISA bridge node, creating a
 * sparc_isa_device for each, appending to the bridge's ->devices list
 * (tail insertion preserves probe order), and recursing into each
 * device's own children via isa_fill_children().
 */
static void __init isa_fill_devices(struct sparc_isa_bridge *isa_br)
{
	int node = prom_getchild(isa_br->prom_node);

	while (node != 0) {
		struct linux_prom_registers regs[PROMREG_MAX];
		struct sparc_isa_device *isa_dev;
		int prop_len;

		isa_dev = kmalloc(sizeof(*isa_dev), GFP_KERNEL);
		if (!isa_dev) {
			fatal_err("cannot allocate isa_dev");
			prom_halt();
		}

		memset(isa_dev, 0, sizeof(*isa_dev));

		/* Link it in. */
		isa_dev->next = NULL;
		if (isa_br->devices == NULL) {
			isa_br->devices = isa_dev;
		} else {
			/* Walk to the tail so devices stay in probe order. */
			struct sparc_isa_device *tmp = isa_br->devices;

			while (tmp->next)
				tmp = tmp->next;

			tmp->next = isa_dev;
		}

		isa_dev->bus = isa_br;
		isa_dev->prom_node = node;
		prop_len = prom_getproperty(node, "name",
					    (char *) isa_dev->prom_name,
					    sizeof(isa_dev->prom_name));
		if (prop_len <= 0) {
			fatal_err("cannot get isa_dev OBP node name");
			prom_halt();
		}

		prop_len = prom_getproperty(node, "compatible",
					    (char *) isa_dev->compatible,
					    sizeof(isa_dev->compatible));

		/* Not having this is OK. */
		if (prop_len <= 0)
			isa_dev->compatible[0] = '\0';

		/* Claim I/O space and resolve the IRQ for this device. */
		isa_dev_get_resource(isa_dev, regs, sizeof(regs));
		isa_dev_get_irq(isa_dev, regs);

		report_dev(isa_dev, 0);

		isa_fill_children(isa_dev);

		printk("]");

		node = prom_getsibling(node);
	}
}
254
/* Find every ALi M1533 ISA bridge on the PCI bus, build a
 * sparc_isa_bridge for it (linked into the global isa_chain), pull its
 * ranges/interrupt-map properties from OBP, and probe all devices
 * behind it.
 */
void __init isa_init(void)
{
	struct pci_dev *pdev;
	unsigned short vendor, device;
	int index = 0;

	vendor = PCI_VENDOR_ID_AL;
	device = PCI_DEVICE_ID_AL_M1533;

	pdev = NULL;
	while ((pdev = pci_get_device(vendor, device, pdev)) != NULL) {
		struct pcidev_cookie *pdev_cookie;
		struct pci_pbm_info *pbm;
		struct sparc_isa_bridge *isa_br;
		int prop_len;

		pdev_cookie = pdev->sysdata;
		if (!pdev_cookie) {
			printk("ISA: Warning, ISA bridge ignored due to "
			       "lack of OBP data.\n");
			continue;
		}
		pbm = pdev_cookie->pbm;

		isa_br = kmalloc(sizeof(*isa_br), GFP_KERNEL);
		if (!isa_br) {
			fatal_err("cannot allocate sparc_isa_bridge");
			prom_halt();
		}

		memset(isa_br, 0, sizeof(*isa_br));

		/* Link it in. */
		isa_br->next = isa_chain;
		isa_chain = isa_br;

		isa_br->parent = pbm;
		isa_br->self = pdev;
		isa_br->index = index++;
		isa_br->prom_node = pdev_cookie->prom_node;
		/* NOTE(review): if prom_name exactly fills the buffer,
		 * strncpy leaves it unterminated (the memset above only
		 * helps for shorter names) -- confirm max OBP name length.
		 */
		strncpy(isa_br->prom_name, pdev_cookie->prom_name,
			sizeof(isa_br->prom_name));

		prop_len = prom_getproperty(isa_br->prom_node,
					    "ranges",
					    (char *) isa_br->isa_ranges,
					    sizeof(isa_br->isa_ranges));
		if (prop_len <= 0)
			isa_br->num_isa_ranges = 0;
		else
			isa_br->num_isa_ranges =
				(prop_len / sizeof(struct linux_prom_isa_ranges));

		prop_len = prom_getproperty(isa_br->prom_node,
					    "interrupt-map",
					    (char *) isa_br->isa_intmap,
					    sizeof(isa_br->isa_intmap));
		if (prop_len <= 0)
			isa_br->num_isa_intmap = 0;
		else
			isa_br->num_isa_intmap =
				(prop_len / sizeof(struct linux_prom_isa_intmap));

		prop_len = prom_getproperty(isa_br->prom_node,
					    "interrupt-map-mask",
					    (char *) &(isa_br->isa_intmask),
					    sizeof(isa_br->isa_intmask));

		printk("isa%d:", isa_br->index);

		isa_fill_devices(isa_br);

		printk("\n");
	}
}
diff --git a/arch/sparc64/kernel/itlb_base.S b/arch/sparc64/kernel/itlb_base.S
new file mode 100644
index 000000000000..b5e32dfa4fbc
--- /dev/null
+++ b/arch/sparc64/kernel/itlb_base.S
@@ -0,0 +1,83 @@
1/* $Id: itlb_base.S,v 1.12 2002/02/09 19:49:30 davem Exp $
2 * itlb_base.S: Front end to ITLB miss replacement strategy.
3 * This is included directly into the trap table.
4 *
5 * Copyright (C) 1996,1998 David S. Miller (davem@redhat.com)
6 * Copyright (C) 1997,1998 Jakub Jelinek (jj@ultra.linux.cz)
7 */
8
9#if PAGE_SHIFT == 13
10/*
11 * To compute vpte offset, we need to do ((addr >> 13) << 3),
12 * which can be optimized to (addr >> 10) if bits 10/11/12 can
13 * be guaranteed to be 0 ... mmu_context.h does guarantee this
14 * by only using 10 bits in the hwcontext value.
15 */
16#define CREATE_VPTE_OFFSET1(r1, r2) \
17 srax r1, 10, r2
18#define CREATE_VPTE_OFFSET2(r1, r2)
19#define CREATE_VPTE_NOP nop
20#else /* PAGE_SHIFT */
21#define CREATE_VPTE_OFFSET1(r1, r2) \
22 srax r1, PAGE_SHIFT, r2
23#define CREATE_VPTE_OFFSET2(r1, r2) \
24 sllx r2, 3, r2
25#define CREATE_VPTE_NOP
26#endif /* PAGE_SHIFT */
27
28
29/* Ways we can get here:
30 *
31 * 1) Nucleus instruction misses from module code.
32 * 2) All user instruction misses.
33 *
34 * All real page faults merge their code paths to the
35 * sparc64_realfault_common label below.
36 */
37
/* NOTE(review): the groups below appear to rely on each "ICACHE line"
 * spanning a fixed number of instructions; the trailing nops and
 * CREATE_VPTE_NOP in line 4 pad the layout.  Confirm instruction
 * counts before inserting or removing instructions here.
 */
38/* ITLB ** ICACHE line 1: Quick user TLB misses */
39 ldxa [%g1 + %g1] ASI_IMMU, %g4 ! Get TAG_ACCESS
40 CREATE_VPTE_OFFSET1(%g4, %g6) ! Create VPTE offset
41 CREATE_VPTE_OFFSET2(%g4, %g6) ! Create VPTE offset
42 ldxa [%g3 + %g6] ASI_P, %g5 ! Load VPTE
431: brgez,pn %g5, 3f ! Not valid, branch out
44 sethi %hi(_PAGE_EXEC), %g4 ! Delay-slot
45 andcc %g5, %g4, %g0 ! Executable?
46 be,pn %xcc, 3f ! Nope, branch.
47 nop ! Delay-slot
482: stxa %g5, [%g0] ASI_ITLB_DATA_IN ! Load PTE into TLB
49 retry ! Trap return
503: rdpr %pstate, %g4 ! Move into alternate globals
51
52/* ITLB ** ICACHE line 2: Real faults */
53 wrpr %g4, PSTATE_AG|PSTATE_MG, %pstate
54 rdpr %tpc, %g5 ! And load faulting VA
55 mov FAULT_CODE_ITLB, %g4 ! It was read from ITLB
56sparc64_realfault_common: ! Called by TL0 dtlb_miss too
57 stb %g4, [%g6 + TI_FAULT_CODE]
58 stx %g5, [%g6 + TI_FAULT_ADDR]
59 ba,pt %xcc, etrap ! Save state
601: rd %pc, %g7 ! ...
61 nop
62
63/* ITLB ** ICACHE line 3: Finish faults + window fixups */
64 call do_sparc64_fault ! Call fault handler
65 add %sp, PTREGS_OFF, %o0! Compute pt_regs arg
66 ba,pt %xcc, rtrap_clr_l6 ! Restore cpu state
67 nop
68winfix_trampoline:
69 rdpr %tpc, %g3 ! Prepare winfixup TNPC
70 or %g3, 0x7c, %g3 ! Compute offset to branch
71 wrpr %g3, %tnpc ! Write it into TNPC
72 done ! Do it to it
73
74/* ITLB ** ICACHE line 4: Unused... */
75 nop
76 nop
77 nop
78 nop
79 CREATE_VPTE_NOP
80
81#undef CREATE_VPTE_OFFSET1
82#undef CREATE_VPTE_OFFSET2
83#undef CREATE_VPTE_NOP
diff --git a/arch/sparc64/kernel/kprobes.c b/arch/sparc64/kernel/kprobes.c
new file mode 100644
index 000000000000..7066d7ba667a
--- /dev/null
+++ b/arch/sparc64/kernel/kprobes.c
@@ -0,0 +1,394 @@
1/* arch/sparc64/kernel/kprobes.c
2 *
3 * Copyright (C) 2004 David S. Miller <davem@davemloft.net>
4 */
5
6#include <linux/config.h>
7#include <linux/kernel.h>
8#include <linux/kprobes.h>
9
10#include <asm/kdebug.h>
11#include <asm/signal.h>
12
13/* We do not have hardware single-stepping on sparc64.
14 * So we implement software single-stepping with breakpoint
15 * traps. The top-level scheme is similar to that used
16 * in the x86 kprobes implementation.
17 *
18 * In the kprobe->ainsn.insn[] array we store the original
19 * instruction at index zero and a break instruction at
20 * index one.
21 *
22 * When we hit a kprobe we:
23 * - Run the pre-handler
24 * - Remember "regs->tnpc" and interrupt level stored in
25 * "regs->tstate" so we can restore them later
26 * - Disable PIL interrupts
27 * - Set regs->tpc to point to kprobe->ainsn.insn[0]
28 * - Set regs->tnpc to point to kprobe->ainsn.insn[1]
29 * - Mark that we are actively in a kprobe
30 *
31 * At this point we wait for the second breakpoint at
32 * kprobe->ainsn.insn[1] to hit. When it does we:
33 * - Run the post-handler
34 * - Set regs->tpc to "remembered" regs->tnpc stored above,
35 * restore the PIL interrupt level in "regs->tstate" as well
36 * - Make any adjustments necessary to regs->tnpc in order
37 * to handle relative branches correctly. See below.
38 * - Mark that we are no longer actively in a kprobe.
39 */
40
/* No extra validation is required on sparc64; always accept the probe. */
41int arch_prepare_kprobe(struct kprobe *p)
42{
43 return 0;
44}
45
/* Save the original instruction in slot 0 and place a second
 * breakpoint in slot 1, so that single-stepping the copied
 * instruction traps straight back into the kprobes code.
 */
46void arch_copy_kprobe(struct kprobe *p)
47{
48 p->ainsn.insn[0] = *p->addr;
49 p->ainsn.insn[1] = BREAKPOINT_INSTRUCTION_2;
50}
51
/* Nothing arch-specific to tear down when a probe is unregistered. */
52void arch_remove_kprobe(struct kprobe *p)
53{
54}
55
56/* kprobe_status settings */
57#define KPROBE_HIT_ACTIVE 0x00000001
58#define KPROBE_HIT_SS 0x00000002
59
60static struct kprobe *current_kprobe;
61static unsigned long current_kprobe_orig_tnpc;
62static unsigned long current_kprobe_orig_tstate_pil;
63static unsigned int kprobe_status;
64
/* Arrange for the faulting context to single-step the probed
 * instruction: remember the original tnpc and PIL state, disable
 * interrupts via TSTATE_PIL, and redirect tpc/tnpc at the copied
 * instruction pair in p->ainsn.insn[].
 */
65static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
66{
67 current_kprobe_orig_tnpc = regs->tnpc;
68 current_kprobe_orig_tstate_pil = (regs->tstate & TSTATE_PIL);
69 regs->tstate |= TSTATE_PIL;
70
71 /* Single-step inline if the original opcode is itself a breakpoint. */
72 if (p->opcode == BREAKPOINT_INSTRUCTION) {
73 regs->tpc = (unsigned long) p->addr;
74 regs->tnpc = current_kprobe_orig_tnpc;
75 } else {
76 regs->tpc = (unsigned long) &p->ainsn.insn[0];
77 regs->tnpc = (unsigned long) &p->ainsn.insn[1];
78 }
79}
80
/* Put the original instruction back at the probe point, flush the
 * I-cache for it, and restore the saved tnpc/PIL so execution
 * resumes as if the probe had never fired.
 */
81static inline void disarm_kprobe(struct kprobe *p, struct pt_regs *regs)
82{
83 *p->addr = p->opcode;
84 flushi(p->addr);
85
86 regs->tpc = (unsigned long) p->addr;
87 regs->tnpc = current_kprobe_orig_tnpc;
88 regs->tstate = ((regs->tstate & ~TSTATE_PIL) |
89 current_kprobe_orig_tstate_pil);
90}
91
/* First-breakpoint (ta 0x70 / DIE_DEBUG) entry point.  Returns 1 when
 * the trap was consumed by kprobes, 0 to let the kernel handle it.
 */
92static int kprobe_handler(struct pt_regs *regs)
93{
94 struct kprobe *p;
95 void *addr = (void *) regs->tpc;
96 int ret = 0;
97
98 preempt_disable();
99
100 if (kprobe_running()) {
101 /* We *are* holding lock here, so this is safe.
102 * Disarm the probe we just hit, and ignore it.
103 */
104 p = get_kprobe(addr);
105 if (p) {
106 if (kprobe_status == KPROBE_HIT_SS) {
107 regs->tstate = ((regs->tstate & ~TSTATE_PIL) |
108 current_kprobe_orig_tstate_pil);
109 unlock_kprobes();
110 goto no_kprobe;
111 }
112 disarm_kprobe(p, regs);
113 ret = 1;
114 } else {
115 p = current_kprobe;
116 if (p->break_handler && p->break_handler(p, regs))
117 goto ss_probe;
118 }
119 /* If it's not ours, can't be delete race, (we hold lock). */
120 goto no_kprobe;
121 }
122
123 lock_kprobes();
124 p = get_kprobe(addr);
125 if (!p) {
126 unlock_kprobes();
127 if (*(u32 *)addr != BREAKPOINT_INSTRUCTION) {
128 /*
129 * The breakpoint instruction was removed right
130 * after we hit it. Another cpu has removed
131 * either a probepoint or a debugger breakpoint
132 * at this address. In either case, no further
133 * handling of this interrupt is appropriate.
134 */
135 ret = 1;
136 }
137 /* Not one of ours: let kernel handle it */
138 goto no_kprobe;
139 }
140
141 kprobe_status = KPROBE_HIT_ACTIVE;
142 current_kprobe = p;
143 if (p->pre_handler && p->pre_handler(p, regs))
/* Returning here (and via ss_probe below) deliberately keeps
 * preemption disabled and the kprobe lock held; both are dropped
 * later in post_kprobe_handler().
 */
144 return 1;
145
146ss_probe:
147 prepare_singlestep(p, regs);
148 kprobe_status = KPROBE_HIT_SS;
149 return 1;
150
151no_kprobe:
152 preempt_enable_no_resched();
153 return ret;
154}
155
156/* If INSN is a relative control transfer instruction,
157 * return the corrected branch destination value.
158 *
159 * The original INSN location was REAL_PC, it actually
160 * executed at PC and produced destination address NPC.
161 */
162static unsigned long relbranch_fixup(u32 insn, unsigned long real_pc,
163 unsigned long pc, unsigned long npc)
164{
165 /* Branch not taken, no mods necessary. */
166 if (npc == pc + 0x4UL)
167 return real_pc + 0x4UL;
168
169 /* The three cases are call, branch w/prediction,
170 * and traditional branch.
 * (SPARC V9 encodings: op=01 is CALL; op=00 with op2=001 is
 *  BPcc, op2=010 is Bicc.)
171 */
172 if ((insn & 0xc0000000) == 0x40000000 ||
173 (insn & 0xc1c00000) == 0x00400000 ||
174 (insn & 0xc1c00000) == 0x00800000) {
175 /* The instruction did all the work for us
176 * already, just apply the offset to the correct
177 * instruction location.
178 */
179 return (real_pc + (npc - pc));
180 }
181
182 return real_pc + 0x4UL;
183}
184
185/* If INSN is an instruction which writes its PC location
186 * into a destination register, fix that up.
187 */
188static void retpc_fixup(struct pt_regs *regs, u32 insn, unsigned long real_pc)
189{
190 unsigned long *slot = NULL;
191
192 /* Simplest case is call, which always uses %o7 */
193 if ((insn & 0xc0000000) == 0x40000000) {
194 slot = &regs->u_regs[UREG_I7];
195 }
196
197 /* Jmpl encodes the register inside of the opcode */
198 if ((insn & 0xc1f80000) == 0x81c00000) {
199 unsigned long rd = ((insn >> 25) & 0x1f);
200
201 if (rd <= 15) {
202 slot = &regs->u_regs[rd];
203 } else {
/* Locals and ins (%l0-%i7) live in the register window
 * saved on the stack; flush windows so the save area is
 * current before patching it.
 */
204 /* Hard case, it goes onto the stack. */
205 flushw_all();
206
207 rd -= 16;
208 slot = (unsigned long *)
209 (regs->u_regs[UREG_FP] + STACK_BIAS);
210 slot += rd;
211 }
212 }
213 if (slot != NULL)
214 *slot = real_pc;
215}
216
217/*
218 * Called after single-stepping. p->addr is the address of the
219 * instruction which has been replaced by the breakpoint
220 * instruction. To avoid the SMP problems that can occur when we
221 * temporarily put back the original opcode to single-step, we
222 * single-stepped a copy of the instruction. The address of this
223 * copy is p->ainsn.insn.
224 *
225 * This function prepares to return from the post-single-step
226 * breakpoint trap.
227 */
228static void resume_execution(struct kprobe *p, struct pt_regs *regs)
229{
230 u32 insn = p->ainsn.insn[0];
231
232 regs->tpc = current_kprobe_orig_tnpc;
233 regs->tnpc = relbranch_fixup(insn,
234 (unsigned long) p->addr,
235 (unsigned long) &p->ainsn.insn[0],
236 regs->tnpc);
237 retpc_fixup(regs, insn, (unsigned long) p->addr);
238
239 regs->tstate = ((regs->tstate & ~TSTATE_PIL) |
240 current_kprobe_orig_tstate_pil);
241}
242
/* Second-breakpoint (ta 0x71 / DIE_DEBUG_2) entry point: run the
 * post-handler, fix up the register state, then drop the kprobe
 * lock and preempt count taken in kprobe_handler().
 */
243static inline int post_kprobe_handler(struct pt_regs *regs)
244{
245 if (!kprobe_running())
246 return 0;
247
248 if (current_kprobe->post_handler)
249 current_kprobe->post_handler(current_kprobe, regs, 0);
250
251 resume_execution(current_kprobe, regs);
252
253 unlock_kprobes();
254 preempt_enable_no_resched();
255
256 return 1;
257}
258
259/* Interrupts disabled, kprobe_lock held. */
/* Give the probe's own fault handler first refusal; otherwise, if the
 * fault happened while single-stepping, restore state and release the
 * lock/preempt count before letting the kernel handle the fault.
 */
260static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
261{
262 if (current_kprobe->fault_handler
263 && current_kprobe->fault_handler(current_kprobe, regs, trapnr))
264 return 1;
265
266 if (kprobe_status & KPROBE_HIT_SS) {
267 resume_execution(current_kprobe, regs);
268
269 unlock_kprobes();
270 preempt_enable_no_resched();
271 }
272 return 0;
273}
274
275/*
276 * Wrapper routine for handling exceptions.
277 */
278int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
279 void *data)
280{
281 struct die_args *args = (struct die_args *)data;
282 switch (val) {
283 case DIE_DEBUG:
284 if (kprobe_handler(args->regs))
285 return NOTIFY_STOP;
286 break;
287 case DIE_DEBUG_2:
288 if (post_kprobe_handler(args->regs))
289 return NOTIFY_STOP;
290 break;
291 case DIE_GPF:
292 if (kprobe_running() &&
293 kprobe_fault_handler(args->regs, args->trapnr))
294 return NOTIFY_STOP;
295 break;
296 case DIE_PAGE_FAULT:
297 if (kprobe_running() &&
298 kprobe_fault_handler(args->regs, args->trapnr))
299 return NOTIFY_STOP;
300 break;
301 default:
302 break;
303 }
304 return NOTIFY_DONE;
305}
306
/* Low-level trap entry for the two kprobe software traps.  User-mode
 * hits are not kprobes; anything the notifier chain does not consume
 * is reported as a bad trap.
 */
307asmlinkage void kprobe_trap(unsigned long trap_level, struct pt_regs *regs)
308{
309 BUG_ON(trap_level != 0x170 && trap_level != 0x171);
310
311 if (user_mode(regs)) {
312 local_irq_enable();
313 bad_trap(regs, trap_level);
314 return;
315 }
316
317 /* trap_level == 0x170 --> ta 0x70
318 * trap_level == 0x171 --> ta 0x71
319 */
320 if (notify_die((trap_level == 0x170) ? DIE_DEBUG : DIE_DEBUG_2,
321 (trap_level == 0x170) ? "debug" : "debug_2",
322 regs, 0, trap_level, SIGTRAP) != NOTIFY_STOP)
323 bad_trap(regs, trap_level);
324}
325
326/* Jprobes support. */
327static struct pt_regs jprobe_saved_regs;
328static struct pt_regs *jprobe_saved_regs_location;
329static struct sparc_stackf jprobe_saved_stack;
330
/* Jprobe pre-handler: snapshot pt_regs and the top stack frame, then
 * redirect execution into the user-supplied jp->entry with interrupts
 * disabled via TSTATE_PIL.
 */
331int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
332{
333 struct jprobe *jp = container_of(p, struct jprobe, kp);
334
335 jprobe_saved_regs_location = regs;
336 memcpy(&jprobe_saved_regs, regs, sizeof(*regs));
337
338 /* Save a whole stack frame, this gets arguments
339 * pushed onto the stack after using up all the
340 * arg registers.
341 */
342 memcpy(&jprobe_saved_stack,
343 (char *) (regs->u_regs[UREG_FP] + STACK_BIAS),
344 sizeof(jprobe_saved_stack));
345
346 regs->tpc = (unsigned long) jp->entry;
347 regs->tnpc = ((unsigned long) jp->entry) + 0x4UL;
348 regs->tstate |= TSTATE_PIL;
349
350 return 1;
351}
352
/* Executed at the end of the jprobe handler: trap back into the
 * kprobes machinery (ta 0x70) so longjmp_break_handler can restore
 * the original context saved in setjmp_pre_handler.
 */
353void jprobe_return(void)
354{
355 preempt_enable_no_resched();
356 __asm__ __volatile__(
357 ".globl jprobe_return_trap_instruction\n"
358"jprobe_return_trap_instruction:\n\t"
359 "ta 0x70");
360}
361
362extern void jprobe_return_trap_instruction(void);
363
364extern void __show_regs(struct pt_regs * regs);
365
/* Break handler: if this breakpoint is the one planted by
 * jprobe_return, restore the saved pt_regs and stack frame and
 * resume the original code path.  Returns 1 when consumed.
 */
366int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
367{
368 u32 *addr = (u32 *) regs->tpc;
369
370 if (addr == (u32 *) jprobe_return_trap_instruction) {
371 if (jprobe_saved_regs_location != regs) {
372 printk("JPROBE: Current regs (%p) does not match "
373 "saved regs (%p).\n",
374 regs, jprobe_saved_regs_location);
375 printk("JPROBE: Saved registers\n");
376 __show_regs(jprobe_saved_regs_location);
377 printk("JPROBE: Current registers\n");
378 __show_regs(regs);
379 BUG();
380 }
381 /* Restore old register state. Do pt_regs
382 * first so that UREG_FP is the original one for
383 * the stack frame restore.
384 */
385 memcpy(regs, &jprobe_saved_regs, sizeof(*regs));
386
387 memcpy((char *) (regs->u_regs[UREG_FP] + STACK_BIAS),
388 &jprobe_saved_stack,
389 sizeof(jprobe_saved_stack));
390
391 return 1;
392 }
393 return 0;
394}
diff --git a/arch/sparc64/kernel/module.c b/arch/sparc64/kernel/module.c
new file mode 100644
index 000000000000..6c83e372f75d
--- /dev/null
+++ b/arch/sparc64/kernel/module.c
@@ -0,0 +1,209 @@
1/* Kernel module help for sparc64.
2 *
3 * Copyright (C) 2001 Rusty Russell.
4 * Copyright (C) 2002 David S. Miller.
5 */
6
7#include <linux/moduleloader.h>
8#include <linux/kernel.h>
9#include <linux/elf.h>
10#include <linux/vmalloc.h>
11#include <linux/fs.h>
12#include <linux/string.h>
13#include <linux/slab.h>
14#include <linux/vmalloc.h>
15#include <linux/mm.h>
16
17#include <asm/processor.h>
18#include <asm/spitfire.h>
19
/* Map SIZE bytes of vmalloc space inside the dedicated module VA
 * window [MODULES_VADDR, MODULES_END).  Returns NULL on bad size or
 * allocation failure.
 */
20static void *module_map(unsigned long size)
21{
22 struct vm_struct *area;
23
24 size = PAGE_ALIGN(size);
25 if (!size || size > MODULES_LEN)
26 return NULL;
27
28 area = __get_vm_area(size, VM_ALLOC, MODULES_VADDR, MODULES_END);
29 if (!area)
30 return NULL;
31
32 return __vmalloc_area(area, GFP_KERNEL, PAGE_KERNEL);
33}
34
/* Allocate zeroed module memory.  Note the mixed contract: returns
 * NULL for size 0 but ERR_PTR(-ENOMEM) on allocation failure.
 */
35void *module_alloc(unsigned long size)
36{
37 void *ret;
38
39 /* We handle the zero case fine, unlike vmalloc */
40 if (size == 0)
41 return NULL;
42
43 ret = module_map(size);
44 if (!ret)
45 ret = ERR_PTR(-ENOMEM);
46 else
47 memset(ret, 0, size);
48
49 return ret;
50}
51
52/* Free memory returned from module_core_alloc/module_init_alloc */
53void module_free(struct module *mod, void *module_region)
54{
55 vfree(module_region);
56 /* FIXME: If module_region == mod->init_region, trim exception
57 table entries. */
58}
59
60/* Make generic code ignore STT_REGISTER dummy undefined symbols. */
/* Locate the symbol table section, then rewrite every undefined
 * STT_REGISTER symbol (sparc's %g-register usage annotations) to
 * SHN_ABS so the generic loader does not fail resolving them.
 */
61int module_frob_arch_sections(Elf_Ehdr *hdr,
62 Elf_Shdr *sechdrs,
63 char *secstrings,
64 struct module *mod)
65{
66 unsigned int symidx;
67 Elf64_Sym *sym;
68 const char *strtab;
69 int i;
70
71 for (symidx = 0; sechdrs[symidx].sh_type != SHT_SYMTAB; symidx++) {
72 if (symidx == hdr->e_shnum-1) {
73 printk("%s: no symtab found.\n", mod->name);
74 return -ENOEXEC;
75 }
76 }
77 sym = (Elf64_Sym *)sechdrs[symidx].sh_addr;
78 strtab = (char *)sechdrs[sechdrs[symidx].sh_link].sh_addr;
79
80 for (i = 1; i < sechdrs[symidx].sh_size / sizeof(Elf_Sym); i++) {
81 if (sym[i].st_shndx == SHN_UNDEF &&
82 ELF64_ST_TYPE(sym[i].st_info) == STT_REGISTER)
83 sym[i].st_shndx = SHN_ABS;
84 }
85 return 0;
86}
87
/* Only RELA relocations are supported on sparc64; plain REL input is
 * rejected outright.
 */
88int apply_relocate(Elf64_Shdr *sechdrs,
89 const char *strtab,
90 unsigned int symindex,
91 unsigned int relsec,
92 struct module *me)
93{
94 printk(KERN_ERR "module %s: non-ADD RELOCATION unsupported\n",
95 me->name);
96 return -ENOEXEC;
97}
98
/* Apply one RELA relocation section to the module image.  Supports
 * the absolute (64/32-bit), branch-displacement, and hi/lo immediate
 * relocation types emitted for sparc64 kernel modules; anything else
 * is -ENOEXEC.
 */
99int apply_relocate_add(Elf64_Shdr *sechdrs,
100 const char *strtab,
101 unsigned int symindex,
102 unsigned int relsec,
103 struct module *me)
104{
105 unsigned int i;
106 Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;
107 Elf64_Sym *sym;
108 u8 *location;
109 u32 *loc32;
110
111 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
112 Elf64_Addr v;
113
114 /* This is where to make the change */
115 location = (u8 *)sechdrs[sechdrs[relsec].sh_info].sh_addr
116 + rel[i].r_offset;
117 loc32 = (u32 *) location;
118
/* The module area is expected to live in the low 32 bits of the
 * address space; the 32-bit patches below depend on that.
 */
119 BUG_ON(((u64)location >> (u64)32) != (u64)0);
120
121 /* This is the symbol it is referring to. Note that all
122 undefined symbols have been resolved. */
123 sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
124 + ELF64_R_SYM(rel[i].r_info);
125 v = sym->st_value + rel[i].r_addend;
126
127 switch (ELF64_R_TYPE(rel[i].r_info) & 0xff) {
128 case R_SPARC_64:
/* Byte-wise stores: the target may not be 8-byte aligned. */
129 location[0] = v >> 56;
130 location[1] = v >> 48;
131 location[2] = v >> 40;
132 location[3] = v >> 32;
133 location[4] = v >> 24;
134 location[5] = v >> 16;
135 location[6] = v >> 8;
136 location[7] = v >> 0;
137 break;
138
139 case R_SPARC_32:
140 location[0] = v >> 24;
141 location[1] = v >> 16;
142 location[2] = v >> 8;
143 location[3] = v >> 0;
144 break;
145
146 case R_SPARC_WDISP30:
147 v -= (Elf64_Addr) location;
148 *loc32 = (*loc32 & ~0x3fffffff) |
149 ((v >> 2) & 0x3fffffff);
150 break;
151
152 case R_SPARC_WDISP22:
153 v -= (Elf64_Addr) location;
154 *loc32 = (*loc32 & ~0x3fffff) |
155 ((v >> 2) & 0x3fffff);
156 break;
157
158 case R_SPARC_WDISP19:
159 v -= (Elf64_Addr) location;
160 *loc32 = (*loc32 & ~0x7ffff) |
161 ((v >> 2) & 0x7ffff);
162 break;
163
164 case R_SPARC_LO10:
165 *loc32 = (*loc32 & ~0x3ff) | (v & 0x3ff);
166 break;
167
168 case R_SPARC_HI22:
169 *loc32 = (*loc32 & ~0x3fffff) |
170 ((v >> 10) & 0x3fffff);
171 break;
172
173 case R_SPARC_OLO10:
/* OLO10 carries an extra addend in the high bits of r_info. */
174 *loc32 = (*loc32 & ~0x1fff) |
175 (((v & 0x3ff) +
176 (ELF64_R_TYPE(rel[i].r_info) >> 8))
177 & 0x1fff);
178 break;
179
180 default:
181 printk(KERN_ERR "module %s: Unknown relocation: %x\n",
182 me->name,
183 (int) (ELF64_R_TYPE(rel[i].r_info) & 0xff));
184 return -ENOEXEC;
185 };
186 }
187 return 0;
188}
189
/* Post-relocation fixup: Cheetah's I-cache is snooped and needs no
 * manual flushing, so only Spitfire-class CPUs get their I-cache
 * tags invalidated here after the module text has been written.
 */
190int module_finalize(const Elf_Ehdr *hdr,
191 const Elf_Shdr *sechdrs,
192 struct module *me)
193{
194 /* Cheetah's I-cache is fully coherent. */
195 if (tlb_type == spitfire) {
196 unsigned long va;
197
198 flushw_all();
/* Spitfire I-cache is 16KB, 32-byte lines; clearing tags over
 * 2*PAGE_SIZE covers it — TODO confirm against spitfire.h.
 */
199 for (va = 0; va < (PAGE_SIZE << 1); va += 32)
200 spitfire_put_icache_tag(va, 0x0);
201 __asm__ __volatile__("flush %g6");
202 }
203
204 return 0;
205}
206
/* No arch-private state to release. */
207void module_arch_cleanup(struct module *mod)
208{
209}
diff --git a/arch/sparc64/kernel/pci.c b/arch/sparc64/kernel/pci.c
new file mode 100644
index 000000000000..bba140d98b1b
--- /dev/null
+++ b/arch/sparc64/kernel/pci.c
@@ -0,0 +1,805 @@
1/* $Id: pci.c,v 1.39 2002/01/05 01:13:43 davem Exp $
2 * pci.c: UltraSparc PCI controller support.
3 *
4 * Copyright (C) 1997, 1998, 1999 David S. Miller (davem@redhat.com)
5 * Copyright (C) 1998, 1999 Eddie C. Dost (ecd@skynet.be)
6 * Copyright (C) 1999 Jakub Jelinek (jj@ultra.linux.cz)
7 */
8
9#include <linux/config.h>
10#include <linux/module.h>
11#include <linux/kernel.h>
12#include <linux/string.h>
13#include <linux/sched.h>
14#include <linux/capability.h>
15#include <linux/errno.h>
16#include <linux/smp_lock.h>
17#include <linux/init.h>
18
19#include <asm/uaccess.h>
20#include <asm/pbm.h>
21#include <asm/pgtable.h>
22#include <asm/irq.h>
23#include <asm/ebus.h>
24#include <asm/isa.h>
25
26unsigned long pci_memspace_mask = 0xffffffffUL;
27
28#ifndef CONFIG_PCI
29/* A "nop" PCI implementation. */
/* With PCI disabled these syscalls silently succeed without touching
 * BUF, preserving the syscall ABI for userland.
 */
30asmlinkage int sys_pciconfig_read(unsigned long bus, unsigned long dfn,
31 unsigned long off, unsigned long len,
32 unsigned char *buf)
33{
34 return 0;
35}
36asmlinkage int sys_pciconfig_write(unsigned long bus, unsigned long dfn,
37 unsigned long off, unsigned long len,
38 unsigned char *buf)
39{
40 return 0;
41}
42#else
43
44/* List of all PCI controllers found in the system. */
45struct pci_controller_info *pci_controller_root = NULL;
46
47/* Each PCI controller found gets a unique index. */
48int pci_num_controllers = 0;
49
50/* At boot time the user can give the kernel a command
51 * line option which controls if and how PCI devices
52 * are reordered at PCI bus probing time.
53 */
54int pci_device_reorder = 0;
55
56volatile int pci_poke_in_progress;
57volatile int pci_poke_cpu = -1;
58volatile int pci_poke_faulted;
59
60static DEFINE_SPINLOCK(pci_poke_lock);
61
/* Raw PCI config-space accessors using physical-bypass loads/stores.
 * Each access is bracketed by pci_poke_{in_progress,cpu,faulted}
 * under pci_poke_lock; presumably the trap code consults these to
 * convert a bus error on the access into pci_poke_faulted rather
 * than an oops — confirm against the controller trap handlers.
 * On a faulted read, *ret is left unmodified.
 */
62void pci_config_read8(u8 *addr, u8 *ret)
63{
64 unsigned long flags;
65 u8 byte;
66
67 spin_lock_irqsave(&pci_poke_lock, flags);
68 pci_poke_cpu = smp_processor_id();
69 pci_poke_in_progress = 1;
70 pci_poke_faulted = 0;
71 __asm__ __volatile__("membar #Sync\n\t"
72 "lduba [%1] %2, %0\n\t"
73 "membar #Sync"
74 : "=r" (byte)
75 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
76 : "memory");
77 pci_poke_in_progress = 0;
78 pci_poke_cpu = -1;
79 if (!pci_poke_faulted)
80 *ret = byte;
81 spin_unlock_irqrestore(&pci_poke_lock, flags);
82}
83
84void pci_config_read16(u16 *addr, u16 *ret)
85{
86 unsigned long flags;
87 u16 word;
88
89 spin_lock_irqsave(&pci_poke_lock, flags);
90 pci_poke_cpu = smp_processor_id();
91 pci_poke_in_progress = 1;
92 pci_poke_faulted = 0;
93 __asm__ __volatile__("membar #Sync\n\t"
94 "lduha [%1] %2, %0\n\t"
95 "membar #Sync"
96 : "=r" (word)
97 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
98 : "memory");
99 pci_poke_in_progress = 0;
100 pci_poke_cpu = -1;
101 if (!pci_poke_faulted)
102 *ret = word;
103 spin_unlock_irqrestore(&pci_poke_lock, flags);
104}
105
106void pci_config_read32(u32 *addr, u32 *ret)
107{
108 unsigned long flags;
109 u32 dword;
110
111 spin_lock_irqsave(&pci_poke_lock, flags);
112 pci_poke_cpu = smp_processor_id();
113 pci_poke_in_progress = 1;
114 pci_poke_faulted = 0;
115 __asm__ __volatile__("membar #Sync\n\t"
116 "lduwa [%1] %2, %0\n\t"
117 "membar #Sync"
118 : "=r" (dword)
119 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
120 : "memory");
121 pci_poke_in_progress = 0;
122 pci_poke_cpu = -1;
123 if (!pci_poke_faulted)
124 *ret = dword;
125 spin_unlock_irqrestore(&pci_poke_lock, flags);
126}
127
/* Writes follow the same protocol; a faulted write is silently
 * dropped (pci_poke_faulted is not even checked).
 */
128void pci_config_write8(u8 *addr, u8 val)
129{
130 unsigned long flags;
131
132 spin_lock_irqsave(&pci_poke_lock, flags);
133 pci_poke_cpu = smp_processor_id();
134 pci_poke_in_progress = 1;
135 pci_poke_faulted = 0;
136 __asm__ __volatile__("membar #Sync\n\t"
137 "stba %0, [%1] %2\n\t"
138 "membar #Sync"
139 : /* no outputs */
140 : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
141 : "memory");
142 pci_poke_in_progress = 0;
143 pci_poke_cpu = -1;
144 spin_unlock_irqrestore(&pci_poke_lock, flags);
145}
146
147void pci_config_write16(u16 *addr, u16 val)
148{
149 unsigned long flags;
150
151 spin_lock_irqsave(&pci_poke_lock, flags);
152 pci_poke_cpu = smp_processor_id();
153 pci_poke_in_progress = 1;
154 pci_poke_faulted = 0;
155 __asm__ __volatile__("membar #Sync\n\t"
156 "stha %0, [%1] %2\n\t"
157 "membar #Sync"
158 : /* no outputs */
159 : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
160 : "memory");
161 pci_poke_in_progress = 0;
162 pci_poke_cpu = -1;
163 spin_unlock_irqrestore(&pci_poke_lock, flags);
164}
165
166void pci_config_write32(u32 *addr, u32 val)
167{
168 unsigned long flags;
169
170 spin_lock_irqsave(&pci_poke_lock, flags);
171 pci_poke_cpu = smp_processor_id();
172 pci_poke_in_progress = 1;
173 pci_poke_faulted = 0;
174 __asm__ __volatile__("membar #Sync\n\t"
175 "stwa %0, [%1] %2\n\t"
176 "membar #Sync"
177 : /* no outputs */
178 : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
179 : "memory");
180 pci_poke_in_progress = 0;
181 pci_poke_cpu = -1;
182 spin_unlock_irqrestore(&pci_poke_lock, flags);
183}
184
185/* Probe for all PCI controllers in the system. */
186extern void sabre_init(int, char *);
187extern void psycho_init(int, char *);
188extern void schizo_init(int, char *);
189extern void schizo_plus_init(int, char *);
190extern void tomatillo_init(int, char *);
191
/* Maps OBP "model"/"compatible" strings to controller init routines. */
192static struct {
193 char *model_name;
194 void (*init)(int, char *);
195} pci_controller_table[] __initdata = {
196 { "SUNW,sabre", sabre_init },
197 { "pci108e,a000", sabre_init },
198 { "pci108e,a001", sabre_init },
199 { "SUNW,psycho", psycho_init },
200 { "pci108e,8000", psycho_init },
201 { "SUNW,schizo", schizo_init },
202 { "pci108e,8001", schizo_init },
203 { "SUNW,schizo+", schizo_plus_init },
204 { "pci108e,8002", schizo_plus_init },
205 { "SUNW,tomatillo", tomatillo_init },
206 { "pci108e,a801", tomatillo_init },
207};
208#define PCI_NUM_CONTROLLER_TYPES (sizeof(pci_controller_table) / \
209 sizeof(pci_controller_table[0]))
210
/* Handler for pci_controller_scan(): initialize a recognized
 * controller node.  Returns 1 if a table entry matched, 0 (after a
 * warning) for unknown hardware.
 */
211static int __init pci_controller_init(char *model_name, int namelen, int node)
212{
213 int i;
214
215 for (i = 0; i < PCI_NUM_CONTROLLER_TYPES; i++) {
216 if (!strncmp(model_name,
217 pci_controller_table[i].model_name,
218 namelen)) {
219 pci_controller_table[i].init(node, model_name);
220 return 1;
221 }
222 }
223 printk("PCI: Warning unknown controller, model name [%s]\n",
224 model_name);
225 printk("PCI: Ignoring controller...\n");
226
227 return 0;
228}
229
/* Handler for pci_controller_scan(): report whether the node matches
 * a known controller type without initializing anything.
 */
230static int __init pci_is_controller(char *model_name, int namelen, int node)
231{
232 int i;
233
234 for (i = 0; i < PCI_NUM_CONTROLLER_TYPES; i++) {
235 if (!strncmp(model_name,
236 pci_controller_table[i].model_name,
237 namelen)) {
238 return 1;
239 }
240 }
241 return 0;
242}
243
/* Walk the OBP tree's "pci" sibling nodes, invoke HANDLER on each
 * node's model/compatible string, and return how many nodes the
 * handler accepted.
 */
244static int __init pci_controller_scan(int (*handler)(char *, int, int))
245{
246 char namebuf[64];
247 int node;
248 int count = 0;
249
250 node = prom_getchild(prom_root_node);
251 while ((node = prom_searchsiblings(node, "pci")) != 0) {
252 int len;
253
254 if ((len = prom_getproperty(node, "model", namebuf, sizeof(namebuf))) > 0 ||
255 (len = prom_getproperty(node, "compatible", namebuf, sizeof(namebuf))) > 0) {
256 int item_len = 0;
257
258 /* Our value may be a multi-valued string in the
259 * case of some compatible properties. For sanity,
260 * only try the first one. */
261
/* item_len ends up as the length of the first NUL-terminated
 * entry in the property value.
 */
262 while (namebuf[item_len] && len) {
263 len--;
264 item_len++;
265 }
266
267 if (handler(namebuf, item_len, node))
268 count++;
269 }
270
271 node = prom_getsibling(node);
272 if (!node)
273 break;
274 }
275
276 return count;
277}
278
279
280/* Is there some PCI controller in the system? */
281int __init pcic_present(void)
282{
283 return pci_controller_scan(pci_is_controller);
284}
285
286/* Find each controller in the system, attach and initialize
287 * software state structure for each and link into the
288 * pci_controller_root. Setup the controller enough such
289 * that bus scanning can be done.
290 */
291static void __init pci_controller_probe(void)
292{
293 printk("PCI: Probing for controllers.\n");
294
295 pci_controller_scan(pci_controller_init);
296}
297
/* Ask every registered controller to scan its bus(es); the scan_bus
 * hook is supplied by the controller-specific init code.
 */
298static void __init pci_scan_each_controller_bus(void)
299{
300 struct pci_controller_info *p;
301
302 for (p = pci_controller_root; p; p = p->next)
303 p->scan_bus(p);
304}
305
306/* Reorder the pci_dev chain, so that onboard devices come first
307 * and then come the pluggable cards.
308 */
309static void __init pci_reorder_devs(void)
310{
311 struct list_head *pci_onboard = &pci_devices;
312 struct list_head *walk = pci_onboard->next;
313
314 while (walk != pci_onboard) {
315 struct pci_dev *pdev = pci_dev_g(walk);
316 struct list_head *walk_next = walk->next;
317
/* Bit 5 of the interrupt number apparently distinguishes onboard
 * interrupts on these controllers — TODO confirm against __irq_ino
 * encoding before relying on this.
 */
318 if (pdev->irq && (__irq_ino(pdev->irq) & 0x20)) {
319 list_del(walk);
320 list_add(walk, pci_onboard);
321 }
322
323 walk = walk_next;
324 }
325}
326
327extern void clock_probe(void);
328extern void power_init(void);
329
/* Subsystem init: probe controllers, scan buses, optionally reorder
 * devices (see pcibios_setup), then bring up the ISA/EBUS bridges
 * and clock/power devices that hang off PCI.
 */
330static int __init pcibios_init(void)
331{
332 pci_controller_probe();
333 if (pci_controller_root == NULL)
334 return 0;
335
336 pci_scan_each_controller_bus();
337
338 if (pci_device_reorder)
339 pci_reorder_devs();
340
341 isa_init();
342 ebus_init();
343 clock_probe();
344 power_init();
345
346 return 0;
347}
348
349subsys_initcall(pcibios_init);
350
351void pcibios_fixup_bus(struct pci_bus *pbus)
352{
353 struct pci_pbm_info *pbm = pbus->sysdata;
354
355 /* Generic PCI bus probing sets these to point at
356 * &io{port,mem}_resource which is wrong for us.
357 */
358 pbus->resource[0] = &pbm->io_space;
359 pbus->resource[1] = &pbm->mem_space;
360}
361
/* Claim RESOURCE of PDEV inside its PBM's I/O or memory window,
 * after letting the controller adjust the address into root-resource
 * coordinates.
 */
362int pci_claim_resource(struct pci_dev *pdev, int resource)
363{
364 struct pci_pbm_info *pbm = pdev->bus->sysdata;
365 struct resource *res = &pdev->resource[resource];
366 struct resource *root;
367
368 if (!pbm)
369 return -EINVAL;
370
371 if (res->flags & IORESOURCE_IO)
372 root = &pbm->io_space;
373 else
374 root = &pbm->mem_space;
375
376 pbm->parent->resource_adjust(pdev, res, root);
377
378 return request_resource(root, res);
379}
380
381/*
382 * Given the PCI bus a device resides on, try to
383 * find an acceptable resource allocation for a
384 * specific device resource..
385 */
386static int pci_assign_bus_resource(const struct pci_bus *bus,
387 struct pci_dev *dev,
388 struct resource *res,
389 unsigned long size,
390 unsigned long min,
391 int resno)
392{
393 unsigned int type_mask;
394 int i;
395
396 type_mask = IORESOURCE_IO | IORESOURCE_MEM;
/* Try each of the bus's resource windows in turn; the first window
 * of matching type with room wins.  Returns 0 on success, -EBUSY
 * when nothing fits.
 */
397 for (i = 0 ; i < 4; i++) {
398 struct resource *r = bus->resource[i];
399 if (!r)
400 continue;
401
402 /* type_mask must match */
403 if ((res->flags ^ r->flags) & type_mask)
404 continue;
405
406 /* Ok, try it out.. */
407 if (allocate_resource(r, res, size, min, -1, size, NULL, NULL) < 0)
408 continue;
409
410 /* PCI config space updated by caller. */
411 return 0;
412 }
413 return -EBUSY;
414}
415
/* Allocate a BAR for PDEV and, on success, push the new address into
 * the device's config space via the controller's
 * base_address_update hook.
 */
416int pci_assign_resource(struct pci_dev *pdev, int resource)
417{
418 struct pcidev_cookie *pcp = pdev->sysdata;
419 struct pci_pbm_info *pbm = pcp->pbm;
420 struct resource *res = &pdev->resource[resource];
421 unsigned long min, size;
422 int err;
423
/* Skip the low 0x400 of I/O space (legacy/reserved ports). */
424 if (res->flags & IORESOURCE_IO)
425 min = pbm->io_space.start + 0x400UL;
426 else
427 min = pbm->mem_space.start;
428
429 size = res->end - res->start + 1;
430
431 err = pci_assign_bus_resource(pdev->bus, pdev, res, size, min, resource);
432
433 if (err < 0) {
434 printk("PCI: Failed to allocate resource %d for %s\n",
435 resource, pci_name(pdev));
436 } else {
437 /* Update PCI config space. */
438 pbm->parent->base_address_update(pdev, resource);
439 }
440
441 return err;
442}
443
444/* Sort resources by alignment */
/* Insertion sort of DEV's unassigned resources into HEAD, ordered by
 * decreasing alignment key: for device BARs the key is the resource
 * size, for bridge windows it is the start address.  Bogus
 * (zero-length) resources are skipped with a warning.
 */
445void pdev_sort_resources(struct pci_dev *dev, struct resource_list *head)
446{
447 int i;
448
449 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
450 struct resource *r;
451 struct resource_list *list, *tmp;
452 unsigned long r_align;
453
454 r = &dev->resource[i];
455 r_align = r->end - r->start;
456
457 if (!(r->flags) || r->parent)
458 continue;
459 if (!r_align) {
460 printk(KERN_WARNING "PCI: Ignore bogus resource %d "
461 "[%lx:%lx] of %s\n",
462 i, r->start, r->end, pci_name(dev));
463 continue;
464 }
465 r_align = (i < PCI_BRIDGE_RESOURCES) ? r_align + 1 : r->start;
466 for (list = head; ; list = list->next) {
467 unsigned long align = 0;
468 struct resource_list *ln = list->next;
469 int idx;
470
471 if (ln) {
472 idx = ln->res - &ln->dev->resource[0];
473 align = (idx < PCI_BRIDGE_RESOURCES) ?
474 ln->res->end - ln->res->start + 1 :
475 ln->res->start;
476 }
477 if (r_align > align) {
478 tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
479 if (!tmp)
480 panic("pdev_sort_resources(): "
481 "kmalloc() failed!\n");
482 tmp->next = ln;
483 tmp->res = r;
484 tmp->dev = dev;
485 list->next = tmp;
486 break;
487 }
488 }
489 }
490}
491
492void pcibios_update_irq(struct pci_dev *pdev, int irq)
493{
494}
495
/* No extra alignment constraints are imposed by this platform beyond
 * what the generic PCI resource code already applies, so this hook is
 * intentionally empty.
 */
void pcibios_align_resource(void *data, struct resource *res,
			    unsigned long size, unsigned long align)
{
}
500
/* Always succeeds: resources and command-register decode bits are set
 * up during the controller probe pass (see pci_assign_unassigned in
 * pci_common.c), leaving nothing to do at enable time.
 */
int pcibios_enable_device(struct pci_dev *pdev, int mask)
{
	return 0;
}
505
506void pcibios_resource_to_bus(struct pci_dev *pdev, struct pci_bus_region *region,
507 struct resource *res)
508{
509 struct pci_pbm_info *pbm = pdev->bus->sysdata;
510 struct resource zero_res, *root;
511
512 zero_res.start = 0;
513 zero_res.end = 0;
514 zero_res.flags = res->flags;
515
516 if (res->flags & IORESOURCE_IO)
517 root = &pbm->io_space;
518 else
519 root = &pbm->mem_space;
520
521 pbm->parent->resource_adjust(pdev, &zero_res, root);
522
523 region->start = res->start - zero_res.start;
524 region->end = res->end - zero_res.start;
525}
526
527void pcibios_bus_to_resource(struct pci_dev *pdev, struct resource *res,
528 struct pci_bus_region *region)
529{
530 struct pci_pbm_info *pbm = pdev->bus->sysdata;
531 struct resource *root;
532
533 res->start = region->start;
534 res->end = region->end;
535
536 if (res->flags & IORESOURCE_IO)
537 root = &pbm->io_space;
538 else
539 root = &pbm->mem_space;
540
541 pbm->parent->resource_adjust(pdev, res, root);
542}
543
544char * __init pcibios_setup(char *str)
545{
546 if (!strcmp(str, "onboardfirst")) {
547 pci_device_reorder = 1;
548 return NULL;
549 }
550 if (!strcmp(str, "noreorder")) {
551 pci_device_reorder = 0;
552 return NULL;
553 }
554 return str;
555}
556
557/* Platform support for /proc/bus/pci/X/Y mmap()s. */
558
/* If the user uses a host-bridge as the PCI device, he may use
 * this to perform a raw mmap() of the I/O or MEM space behind
 * that controller.
 *
 * This can be useful for execution of x86 PCI bios initialization code
 * on a PCI card, like the xfree86 int10 stuff does.
 *
 * Returns zero on success, -ENXIO if the device lacks a controller
 * cookie, -EINVAL if the requested range exceeds the aperture.
 */
static int __pci_mmap_make_offset_bus(struct pci_dev *pdev, struct vm_area_struct *vma,
				      enum pci_mmap_state mmap_state)
{
	struct pcidev_cookie *pcp = pdev->sysdata;
	struct pci_pbm_info *pbm;
	struct pci_controller_info *p;
	unsigned long space_size, user_offset, user_size;

	if (!pcp)
		return -ENXIO;
	pbm = pcp->pbm;
	if (!pbm)
		return -ENXIO;

	p = pbm->parent;
	if (p->pbms_same_domain) {
		unsigned long lowest, highest;

		/* Both PBMs share one domain: the mappable window is
		 * the hull spanning whichever of the two apertures are
		 * populated (flags != 0).
		 */
		lowest = ~0UL; highest = 0UL;
		if (mmap_state == pci_mmap_io) {
			if (p->pbm_A.io_space.flags) {
				lowest = p->pbm_A.io_space.start;
				highest = p->pbm_A.io_space.end + 1;
			}
			if (p->pbm_B.io_space.flags) {
				if (lowest > p->pbm_B.io_space.start)
					lowest = p->pbm_B.io_space.start;
				if (highest < p->pbm_B.io_space.end + 1)
					highest = p->pbm_B.io_space.end + 1;
			}
			space_size = highest - lowest;
		} else {
			if (p->pbm_A.mem_space.flags) {
				lowest = p->pbm_A.mem_space.start;
				highest = p->pbm_A.mem_space.end + 1;
			}
			if (p->pbm_B.mem_space.flags) {
				if (lowest > p->pbm_B.mem_space.start)
					lowest = p->pbm_B.mem_space.start;
				if (highest < p->pbm_B.mem_space.end + 1)
					highest = p->pbm_B.mem_space.end + 1;
			}
			space_size = highest - lowest;
		}
	} else {
		/* Separate domains: only this PBM's aperture counts. */
		if (mmap_state == pci_mmap_io) {
			space_size = (pbm->io_space.end -
				      pbm->io_space.start) + 1;
		} else {
			space_size = (pbm->mem_space.end -
				      pbm->mem_space.start) + 1;
		}
	}

	/* Make sure the request is in range. */
	user_offset = vma->vm_pgoff << PAGE_SHIFT;
	user_size = vma->vm_end - vma->vm_start;

	if (user_offset >= space_size ||
	    (user_offset + user_size) > space_size)
		return -EINVAL;

	/* Rebase the user's offset onto the physical start of the
	 * aperture so vm_pgoff ends up as a physical page offset.
	 */
	if (p->pbms_same_domain) {
		unsigned long lowest = ~0UL;

		if (mmap_state == pci_mmap_io) {
			if (p->pbm_A.io_space.flags)
				lowest = p->pbm_A.io_space.start;
			if (p->pbm_B.io_space.flags &&
			    lowest > p->pbm_B.io_space.start)
				lowest = p->pbm_B.io_space.start;
		} else {
			if (p->pbm_A.mem_space.flags)
				lowest = p->pbm_A.mem_space.start;
			if (p->pbm_B.mem_space.flags &&
			    lowest > p->pbm_B.mem_space.start)
				lowest = p->pbm_B.mem_space.start;
		}
		vma->vm_pgoff = (lowest + user_offset) >> PAGE_SHIFT;
	} else {
		if (mmap_state == pci_mmap_io) {
			vma->vm_pgoff = (pbm->io_space.start +
					 user_offset) >> PAGE_SHIFT;
		} else {
			vma->vm_pgoff = (pbm->mem_space.start +
					 user_offset) >> PAGE_SHIFT;
		}
	}

	return 0;
}
657
/* Adjust vm_pgoff of VMA such that it is the physical page offset corresponding
 * to the 32-bit pci bus offset for DEV requested by the user.
 *
 * Basically, the user finds the base address for his device which he wishes
 * to mmap.  They read the 32-bit value from the config space base register,
 * add whatever PAGE_SIZE multiple offset they wish, and feed this into the
 * offset parameter of mmap on /proc/bus/pci/XXX for that device.
 *
 * Returns negative error code on failure, zero on success.
 */
static int __pci_mmap_make_offset(struct pci_dev *dev, struct vm_area_struct *vma,
				  enum pci_mmap_state mmap_state)
{
	unsigned long user_offset = vma->vm_pgoff << PAGE_SHIFT;
	unsigned long user32 = user_offset & pci_memspace_mask;
	unsigned long largest_base, this_base, addr32;
	int i;

	/* Host bridges instead map the controller aperture itself. */
	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_HOST)
		return __pci_mmap_make_offset_bus(dev, vma, mmap_state);

	/* Figure out which base address this is for. */
	largest_base = 0UL;
	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
		struct resource *rp = &dev->resource[i];

		/* Active? */
		if (!rp->flags)
			continue;

		/* Same type? */
		if (i == PCI_ROM_RESOURCE) {
			/* The ROM only makes sense for MEM mappings. */
			if (mmap_state != pci_mmap_mem)
				continue;
		} else {
			if ((mmap_state == pci_mmap_io &&
			     (rp->flags & IORESOURCE_IO) == 0) ||
			    (mmap_state == pci_mmap_mem &&
			     (rp->flags & IORESOURCE_MEM) == 0))
				continue;
		}

		this_base = rp->start;

		addr32 = (this_base & PAGE_MASK) & pci_memspace_mask;

		/* For I/O, compare only the low 24 bits. */
		if (mmap_state == pci_mmap_io)
			addr32 &= 0xffffff;

		/* Keep the highest BAR whose 32-bit bus address is at
		 * or below the offset the user requested.
		 */
		if (addr32 <= user32 && this_base > largest_base)
			largest_base = this_base;
	}

	if (largest_base == 0UL)
		return -EINVAL;

	/* Now construct the final physical address. */
	if (mmap_state == pci_mmap_io)
		vma->vm_pgoff = (((largest_base & ~0xffffffUL) | user32) >> PAGE_SHIFT);
	else
		vma->vm_pgoff = (((largest_base & ~(pci_memspace_mask)) | user32) >> PAGE_SHIFT);

	return 0;
}
722
/* Set vm_flags of VMA, as appropriate for this architecture, for a pci device
 * mapping: mark the range as direct device I/O (VM_IO | VM_RESERVED).
 */
static void __pci_mmap_set_flags(struct pci_dev *dev, struct vm_area_struct *vma,
				 enum pci_mmap_state mmap_state)
{
	vma->vm_flags |= (VM_IO | VM_RESERVED);
}
731
/* Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
 * device mapping.
 */
static void __pci_mmap_set_pgprot(struct pci_dev *dev, struct vm_area_struct *vma,
				  enum pci_mmap_state mmap_state)
{
	/* Our io_remap_page_range/io_remap_pfn_range takes care of this,
	   do nothing. */
}
741
/* Perform the actual remap of the pages for a PCI device mapping, as appropriate
 * for this architecture.  The region in the process to map is described by vm_start
 * and vm_end members of VMA, the base physical address is found in vm_pgoff.
 * The pci device structure is provided so that architectures may make mapping
 * decisions on a per-device or per-bus basis.
 *
 * Returns a negative error code on failure, zero on success.
 */
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
			enum pci_mmap_state mmap_state,
			int write_combine)
{
	int ret;

	/* Translate the user's mmap offset into a physical page
	 * offset for this device; fails if no BAR matches.
	 */
	ret = __pci_mmap_make_offset(dev, vma, mmap_state);
	if (ret < 0)
		return ret;

	__pci_mmap_set_flags(dev, vma, mmap_state);
	__pci_mmap_set_pgprot(dev, vma, mmap_state);

	ret = io_remap_pfn_range(vma, vma->vm_start,
				 vma->vm_pgoff,
				 vma->vm_end - vma->vm_start,
				 vma->vm_page_prot);
	if (ret)
		return ret;

	vma->vm_flags |= VM_IO;
	return 0;
}
773
774/* Return the domain nuber for this pci bus */
775
776int pci_domain_nr(struct pci_bus *pbus)
777{
778 struct pci_pbm_info *pbm = pbus->sysdata;
779 int ret;
780
781 if (pbm == NULL || pbm->parent == NULL) {
782 ret = -ENXIO;
783 } else {
784 struct pci_controller_info *p = pbm->parent;
785
786 ret = p->index;
787 if (p->pbms_same_domain == 0)
788 ret = ((ret << 1) +
789 ((pbm == &pbm->parent->pbm_B) ? 1 : 0));
790 }
791
792 return ret;
793}
794EXPORT_SYMBOL(pci_domain_nr);
795
/* Prepare a device for Memory-Write-Invalidate. */
int pcibios_prep_mwi(struct pci_dev *dev)
{
	/* We set correct PCI_CACHE_LINE_SIZE register values for every
	 * device probed on this platform.  So there is nothing to check
	 * and this always succeeds.
	 */
	return 0;
}
804
805#endif /* !(CONFIG_PCI) */
diff --git a/arch/sparc64/kernel/pci_common.c b/arch/sparc64/kernel/pci_common.c
new file mode 100644
index 000000000000..58310aacea28
--- /dev/null
+++ b/arch/sparc64/kernel/pci_common.c
@@ -0,0 +1,1040 @@
1/* $Id: pci_common.c,v 1.29 2002/02/01 00:56:03 davem Exp $
2 * pci_common.c: PCI controller common support.
3 *
4 * Copyright (C) 1999 David S. Miller (davem@redhat.com)
5 */
6
7#include <linux/string.h>
8#include <linux/slab.h>
9#include <linux/init.h>
10
11#include <asm/pbm.h>
12
13/* Fix self device of BUS and hook it into BUS->self.
14 * The pci_scan_bus does not do this for the host bridge.
15 */
16void __init pci_fixup_host_bridge_self(struct pci_bus *pbus)
17{
18 struct pci_dev *pdev;
19
20 list_for_each_entry(pdev, &pbus->devices, bus_list) {
21 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_HOST) {
22 pbus->self = pdev;
23 return;
24 }
25 }
26
27 prom_printf("PCI: Critical error, cannot find host bridge PDEV.\n");
28 prom_halt();
29}
30
31/* Find the OBP PROM device tree node for a PCI device.
32 * Return zero if not found.
33 */
34static int __init find_device_prom_node(struct pci_pbm_info *pbm,
35 struct pci_dev *pdev,
36 int bus_prom_node,
37 struct linux_prom_pci_registers *pregs,
38 int *nregs)
39{
40 int node;
41
42 /*
43 * Return the PBM's PROM node in case we are it's PCI device,
44 * as the PBM's reg property is different to standard PCI reg
45 * properties. We would delete this device entry otherwise,
46 * which confuses XFree86's device probing...
47 */
48 if ((pdev->bus->number == pbm->pci_bus->number) && (pdev->devfn == 0) &&
49 (pdev->vendor == PCI_VENDOR_ID_SUN) &&
50 (pdev->device == PCI_DEVICE_ID_SUN_PBM ||
51 pdev->device == PCI_DEVICE_ID_SUN_SCHIZO ||
52 pdev->device == PCI_DEVICE_ID_SUN_TOMATILLO ||
53 pdev->device == PCI_DEVICE_ID_SUN_SABRE ||
54 pdev->device == PCI_DEVICE_ID_SUN_HUMMINGBIRD)) {
55 *nregs = 0;
56 return bus_prom_node;
57 }
58
59 node = prom_getchild(bus_prom_node);
60 while (node != 0) {
61 int err = prom_getproperty(node, "reg",
62 (char *)pregs,
63 sizeof(*pregs) * PROMREG_MAX);
64 if (err == 0 || err == -1)
65 goto do_next_sibling;
66 if (((pregs[0].phys_hi >> 8) & 0xff) == pdev->devfn) {
67 *nregs = err / sizeof(*pregs);
68 return node;
69 }
70
71 do_next_sibling:
72 node = prom_getsibling(node);
73 }
74 return 0;
75}
76
77/* Older versions of OBP on PCI systems encode 64-bit MEM
78 * space assignments incorrectly, this fixes them up. We also
79 * take the opportunity here to hide other kinds of bogus
80 * assignments.
81 */
82static void __init fixup_obp_assignments(struct pci_dev *pdev,
83 struct pcidev_cookie *pcp)
84{
85 int i;
86
87 if (pdev->vendor == PCI_VENDOR_ID_AL &&
88 (pdev->device == PCI_DEVICE_ID_AL_M7101 ||
89 pdev->device == PCI_DEVICE_ID_AL_M1533)) {
90 int i;
91
92 /* Zap all of the normal resources, they are
93 * meaningless and generate bogus resource collision
94 * messages. This is OpenBoot's ill-fated attempt to
95 * represent the implicit resources that these devices
96 * have.
97 */
98 pcp->num_prom_assignments = 0;
99 for (i = 0; i < 6; i++) {
100 pdev->resource[i].start =
101 pdev->resource[i].end =
102 pdev->resource[i].flags = 0;
103 }
104 pdev->resource[PCI_ROM_RESOURCE].start =
105 pdev->resource[PCI_ROM_RESOURCE].end =
106 pdev->resource[PCI_ROM_RESOURCE].flags = 0;
107 return;
108 }
109
110 for (i = 0; i < pcp->num_prom_assignments; i++) {
111 struct linux_prom_pci_registers *ap;
112 int space;
113
114 ap = &pcp->prom_assignments[i];
115 space = ap->phys_hi >> 24;
116 if ((space & 0x3) == 2 &&
117 (space & 0x4) != 0) {
118 ap->phys_hi &= ~(0x7 << 24);
119 ap->phys_hi |= 0x3 << 24;
120 }
121 }
122}
123
/* Fill in the PCI device cookie sysdata for the given
 * PCI device.  This cookie is the means by which one
 * can get to OBP and PCI controller specific information
 * for a PCI device.
 *
 * Devices without a PROM node are removed from the PCI tree;
 * allocation failure halts at the PROM since boot cannot continue.
 */
static void __init pdev_cookie_fillin(struct pci_pbm_info *pbm,
				      struct pci_dev *pdev,
				      int bus_prom_node)
{
	struct linux_prom_pci_registers pregs[PROMREG_MAX];
	struct pcidev_cookie *pcp;
	int device_prom_node, nregs, err;

	device_prom_node = find_device_prom_node(pbm, pdev, bus_prom_node,
						 pregs, &nregs);
	if (device_prom_node == 0) {
		/* If it is not in the OBP device tree then
		 * there must be a damn good reason for it.
		 *
		 * So what we do is delete the device from the
		 * PCI device tree completely.  This scenario
		 * is seen, for example, on CP1500 for the
		 * second EBUS/HappyMeal pair if the external
		 * connector for it is not present.
		 */
		pci_remove_bus_device(pdev);
		return;
	}

	pcp = kmalloc(sizeof(*pcp), GFP_ATOMIC);
	if (pcp == NULL) {
		prom_printf("PCI_COOKIE: Fatal malloc error, aborting...\n");
		prom_halt();
	}
	pcp->pbm = pbm;
	pcp->prom_node = device_prom_node;
	memcpy(pcp->prom_regs, pregs, sizeof(pcp->prom_regs));
	pcp->num_prom_regs = nregs;
	/* NUL-terminate the node name, falling back to the empty
	 * string when the property is missing.
	 */
	err = prom_getproperty(device_prom_node, "name",
			       pcp->prom_name, sizeof(pcp->prom_name));
	if (err > 0)
		pcp->prom_name[err] = 0;
	else
		pcp->prom_name[0] = 0;

	err = prom_getproperty(device_prom_node,
			       "assigned-addresses",
			       (char *)pcp->prom_assignments,
			       sizeof(pcp->prom_assignments));
	if (err == 0 || err == -1)
		pcp->num_prom_assignments = 0;
	else
		pcp->num_prom_assignments =
			(err / sizeof(pcp->prom_assignments[0]));

	if (strcmp(pcp->prom_name, "ebus") == 0) {
		struct linux_prom_ebus_ranges erng[PROM_PCIRNG_MAX];
		int iter;

		/* EBUS is special...  Rebuild the assignment list
		 * from the parent side of its "ranges" property.
		 */
		err = prom_getproperty(device_prom_node, "ranges",
				       (char *)&erng[0], sizeof(erng));
		if (err == 0 || err == -1) {
			prom_printf("EBUS: Fatal error, no range property\n");
			prom_halt();
		}
		err = (err / sizeof(erng[0]));
		for(iter = 0; iter < err; iter++) {
			struct linux_prom_ebus_ranges *ep = &erng[iter];
			struct linux_prom_pci_registers *ap;

			ap = &pcp->prom_assignments[iter];

			ap->phys_hi = ep->parent_phys_hi;
			ap->phys_mid = ep->parent_phys_mid;
			ap->phys_lo = ep->parent_phys_lo;
			ap->size_hi = 0;
			ap->size_lo = ep->size;
		}
		pcp->num_prom_assignments = err;
	}

	fixup_obp_assignments(pdev, pcp);

	pdev->sysdata = pcp;
}
210
/* Attach a pcidev_cookie to every device on PBUS and, recursively,
 * on all of its child buses, matching devices against their OBP
 * nodes starting at PROM_NODE.
 */
void __init pci_fill_in_pbm_cookies(struct pci_bus *pbus,
				    struct pci_pbm_info *pbm,
				    int prom_node)
{
	struct pci_dev *pdev, *pdev_next;
	struct pci_bus *this_pbus, *pbus_next;

	/* This must be _safe because the cookie fillin
	   routine can delete devices from the tree.  */
	list_for_each_entry_safe(pdev, pdev_next, &pbus->devices, bus_list)
		pdev_cookie_fillin(pbm, pdev, prom_node);

	/* Recurse into each child bus, rooted at the PROM node of the
	 * bridge that leads to it.
	 */
	list_for_each_entry_safe(this_pbus, pbus_next, &pbus->children, node) {
		struct pcidev_cookie *pcp = this_pbus->self->sysdata;

		pci_fill_in_pbm_cookies(this_pbus, pbm, pcp->prom_node);
	}
}
229
/* Report a bogus PROM resource assignment for PDEV on the PROM
 * console.  AP and RES may each be NULL when that half of the
 * information is unavailable.  When DO_PROM_HALT is non-zero the
 * machine is dropped into the PROM after reporting.
 */
static void __init bad_assignment(struct pci_dev *pdev,
				  struct linux_prom_pci_registers *ap,
				  struct resource *res,
				  int do_prom_halt)
{
	prom_printf("PCI: Bogus PROM assignment. BUS[%02x] DEVFN[%x]\n",
		    pdev->bus->number, pdev->devfn);
	if (ap)
		prom_printf("PCI: phys[%08x:%08x:%08x] size[%08x:%08x]\n",
			    ap->phys_hi, ap->phys_mid, ap->phys_lo,
			    ap->size_hi, ap->size_lo);
	if (res)
		prom_printf("PCI: RES[%016lx-->%016lx:(%lx)]\n",
			    res->start, res->end, res->flags);
	prom_printf("Please email this information to davem@redhat.com\n");
	if (do_prom_halt)
		prom_halt();
}
248
249static struct resource *
250__init get_root_resource(struct linux_prom_pci_registers *ap,
251 struct pci_pbm_info *pbm)
252{
253 int space = (ap->phys_hi >> 24) & 3;
254
255 switch (space) {
256 case 0:
257 /* Configuration space, silently ignore it. */
258 return NULL;
259
260 case 1:
261 /* 16-bit IO space */
262 return &pbm->io_space;
263
264 case 2:
265 /* 32-bit MEM space */
266 return &pbm->mem_space;
267
268 case 3:
269 /* 64-bit MEM space, these are allocated out of
270 * the 32-bit mem_space range for the PBM, ie.
271 * we just zero out the upper 32-bits.
272 */
273 return &pbm->mem_space;
274
275 default:
276 printk("PCI: What is resource space %x? "
277 "Tell davem@redhat.com about it!\n", space);
278 return NULL;
279 };
280}
281
/* Map a PROM assignment AP onto the pdev->resource[] slot it
 * describes, keyed by the base register offset encoded in the low
 * byte of phys_hi.  Returns NULL (after logging) for an unrecognized
 * register offset.
 */
static struct resource *
__init get_device_resource(struct linux_prom_pci_registers *ap,
			   struct pci_dev *pdev)
{
	struct resource *res;
	int breg = (ap->phys_hi & 0xff);

	switch (breg) {
	case PCI_ROM_ADDRESS:
		/* Unfortunately I have seen several cases where
		 * buggy FCODE uses a space value of '1' (I/O space)
		 * in the register property for the ROM address
		 * so disable this sanity check for now.
		 */
#if 0
		{
			int space = (ap->phys_hi >> 24) & 3;

			/* It had better be MEM space. */
			if (space != 2)
				bad_assignment(pdev, ap, NULL, 0);
		}
#endif
		res = &pdev->resource[PCI_ROM_RESOURCE];
		break;

	case PCI_BASE_ADDRESS_0:
	case PCI_BASE_ADDRESS_1:
	case PCI_BASE_ADDRESS_2:
	case PCI_BASE_ADDRESS_3:
	case PCI_BASE_ADDRESS_4:
	case PCI_BASE_ADDRESS_5:
		/* BAR config offsets are 4 bytes apart from 0x10, so
		 * this yields resource indices 0 through 5.
		 */
		res = &pdev->resource[(breg - PCI_BASE_ADDRESS_0) / 4];
		break;

	default:
		bad_assignment(pdev, ap, NULL, 0);
		res = NULL;
		break;
	};

	return res;
}
325
326static int __init pdev_resource_collisions_expected(struct pci_dev *pdev)
327{
328 if (pdev->vendor != PCI_VENDOR_ID_SUN)
329 return 0;
330
331 if (pdev->device == PCI_DEVICE_ID_SUN_RIO_EBUS ||
332 pdev->device == PCI_DEVICE_ID_SUN_RIO_1394 ||
333 pdev->device == PCI_DEVICE_ID_SUN_RIO_USB)
334 return 1;
335
336 return 0;
337}
338
/* Claim every OBP-assigned resource of PDEV against the appropriate
 * PBM root resource, sanity checking each assignment against the
 * values already decoded into pdev->resource[].
 */
static void __init pdev_record_assignments(struct pci_pbm_info *pbm,
					   struct pci_dev *pdev)
{
	struct pcidev_cookie *pcp = pdev->sysdata;
	int i;

	for (i = 0; i < pcp->num_prom_assignments; i++) {
		struct linux_prom_pci_registers *ap;
		struct resource *root, *res;

		/* The format of this property is specified in
		 * the PCI Bus Binding to IEEE1275-1994.
		 */
		ap = &pcp->prom_assignments[i];
		root = get_root_resource(ap, pbm);
		res = get_device_resource(ap, pdev);
		if (root == NULL || res == NULL ||
		    res->flags == 0)
			continue;

		/* Ok we know which resource this PROM assignment is
		 * for, sanity check it.
		 */
		if ((res->start & 0xffffffffUL) != ap->phys_lo)
			bad_assignment(pdev, ap, res, 1);

		/* If it is a 64-bit MEM space assignment, verify that
		 * the resource is too and that the upper 32-bits match.
		 */
		if (((ap->phys_hi >> 24) & 3) == 3) {
			if (((res->flags & IORESOURCE_MEM) == 0) ||
			    ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK)
			     != PCI_BASE_ADDRESS_MEM_TYPE_64))
				bad_assignment(pdev, ap, res, 1);
			if ((res->start >> 32) != ap->phys_mid)
				bad_assignment(pdev, ap, res, 1);

			/* PBM cannot generate cpu initiated PIOs
			 * to the full 64-bit space.  Therefore the
			 * upper 32-bits better be zero.  If it is
			 * not, just skip it and we will assign it
			 * properly ourselves.
			 */
			if ((res->start >> 32) != 0UL) {
				printk(KERN_ERR "PCI: OBP assigns out of range MEM address "
				       "%016lx for region %ld on device %s\n",
				       res->start, (res - &pdev->resource[0]), pci_name(pdev));
				continue;
			}
		}

		/* Adjust the resource into the physical address space
		 * of this PBM.
		 */
		pbm->parent->resource_adjust(pdev, res, root);

		if (request_resource(root, res) < 0) {
			/* OK, there is some conflict.  But this is fine
			 * since we'll reassign it in the fixup pass.
			 *
			 * We notify the user that OBP made an error if it
			 * is a case we don't expect.
			 */
			if (!pdev_resource_collisions_expected(pdev)) {
				printk(KERN_ERR "PCI: Address space collision on region %ld "
				       "[%016lx:%016lx] of device %s\n",
				       (res - &pdev->resource[0]),
				       res->start, res->end,
				       pci_name(pdev));
			}
		}
	}
}
412
/* Recursively record the OBP resource assignments of every device on
 * PBUS and on all of its child buses.
 */
void __init pci_record_assignments(struct pci_pbm_info *pbm,
				   struct pci_bus *pbus)
{
	struct pci_dev *dev;
	struct pci_bus *bus;

	list_for_each_entry(dev, &pbus->devices, bus_list)
		pdev_record_assignments(pbm, dev);

	list_for_each_entry(bus, &pbus->children, node)
		pci_record_assignments(pbm, bus);
}
425
426/* Return non-zero if PDEV has implicit I/O resources even
427 * though it may not have an I/O base address register
428 * active.
429 */
430static int __init has_implicit_io(struct pci_dev *pdev)
431{
432 int class = pdev->class >> 8;
433
434 if (class == PCI_CLASS_NOT_DEFINED ||
435 class == PCI_CLASS_NOT_DEFINED_VGA ||
436 class == PCI_CLASS_STORAGE_IDE ||
437 (pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
438 return 1;
439
440 return 0;
441}
442
/* Allocate address space for every enabled-but-unassigned resource of
 * PDEV and then enable the matching decode bits in the command
 * register.  Halts at the PROM if an allocation fails.
 */
static void __init pdev_assign_unassigned(struct pci_pbm_info *pbm,
					  struct pci_dev *pdev)
{
	u32 reg;
	u16 cmd;
	int i, io_seen, mem_seen;

	io_seen = mem_seen = 0;
	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		struct resource *root, *res;
		unsigned long size, min, max, align;

		res = &pdev->resource[i];

		/* Note which decode types the device uses, counting
		 * already-assigned resources as well.
		 */
		if (res->flags & IORESOURCE_IO)
			io_seen++;
		else if (res->flags & IORESOURCE_MEM)
			mem_seen++;

		/* If it is already assigned or the resource does
		 * not exist, there is nothing to do.
		 */
		if (res->parent != NULL || res->flags == 0UL)
			continue;

		/* Determine the root we allocate from. */
		if (res->flags & IORESOURCE_IO) {
			root = &pbm->io_space;
			min = root->start + 0x400UL;
			max = root->end;
		} else {
			root = &pbm->mem_space;
			min = root->start;
			max = min + 0x80000000UL;
		}

		/* PCI BARs are naturally aligned to their size. */
		size = res->end - res->start;
		align = size + 1;
		if (allocate_resource(root, res, size + 1, min, max, align, NULL, NULL) < 0) {
			/* uh oh */
			prom_printf("PCI: Failed to allocate resource %d for %s\n",
				    i, pci_name(pdev));
			prom_halt();
		}

		/* Update PCI config space. */
		pbm->parent->base_address_update(pdev, i);
	}

	/* Special case, disable the ROM.  Several devices
	 * act funny (ie. do not respond to memory space writes)
	 * when it is left enabled.  A good example are Qlogic,ISP
	 * adapters.
	 */
	pci_read_config_dword(pdev, PCI_ROM_ADDRESS, &reg);
	reg &= ~PCI_ROM_ADDRESS_ENABLE;
	pci_write_config_dword(pdev, PCI_ROM_ADDRESS, reg);

	/* If we saw I/O or MEM resources, enable appropriate
	 * bits in PCI command register.
	 */
	if (io_seen || mem_seen) {
		pci_read_config_word(pdev, PCI_COMMAND, &cmd);
		if (io_seen || has_implicit_io(pdev))
			cmd |= PCI_COMMAND_IO;
		if (mem_seen)
			cmd |= PCI_COMMAND_MEMORY;
		pci_write_config_word(pdev, PCI_COMMAND, cmd);
	}

	/* If this is a PCI bridge or an IDE controller,
	 * enable bus mastering.  In the former case also
	 * set the cache line size correctly.
	 */
	if (((pdev->class >> 8) == PCI_CLASS_BRIDGE_PCI) ||
	    (((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) &&
	     ((pdev->class & 0x80) != 0))) {
		pci_read_config_word(pdev, PCI_COMMAND, &cmd);
		cmd |= PCI_COMMAND_MASTER;
		pci_write_config_word(pdev, PCI_COMMAND, cmd);

		/* 64-byte cache line, expressed in 32-bit words. */
		if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_PCI)
			pci_write_config_byte(pdev,
					      PCI_CACHE_LINE_SIZE,
					      (64 / sizeof(u32)));
	}
}
530
/* Recursively assign address space to the unassigned resources of
 * every device on PBUS and on all of its child buses.
 */
void __init pci_assign_unassigned(struct pci_pbm_info *pbm,
				  struct pci_bus *pbus)
{
	struct pci_dev *dev;
	struct pci_bus *bus;

	list_for_each_entry(dev, &pbus->devices, bus_list)
		pdev_assign_unassigned(pbm, dev);

	list_for_each_entry(bus, &pbus->children, node)
		pci_assign_unassigned(pbm, bus);
}
543
/* Translate *INTERRUPT (a PROM "interrupts" value) to an INO via the
 * OBP "interrupt-map"/"interrupt-map-mask" properties of the PBM (or
 * of an intervening PCI bridge that carries its own map).  Writes the
 * mapped value back through INTERRUPT and returns 1 on a match, 0
 * otherwise.
 */
static int __init pci_intmap_match(struct pci_dev *pdev, unsigned int *interrupt)
{
	struct linux_prom_pci_intmap bridge_local_intmap[PROM_PCIIMAP_MAX], *intmap;
	struct linux_prom_pci_intmask bridge_local_intmask, *intmask;
	struct pcidev_cookie *dev_pcp = pdev->sysdata;
	struct pci_pbm_info *pbm = dev_pcp->pbm;
	struct linux_prom_pci_registers *pregs = dev_pcp->prom_regs;
	unsigned int hi, mid, lo, irq;
	int i, num_intmap, map_slot;

	intmap = &pbm->pbm_intmap[0];
	intmask = &pbm->pbm_intmask;
	num_intmap = pbm->num_pbm_intmap;
	map_slot = 0;

	/* If we are underneath a PCI bridge, use PROM register
	 * property of the parent bridge which is closest to
	 * the PBM.
	 *
	 * However if that parent bridge has interrupt map/mask
	 * properties of its own we use the PROM register property
	 * of the next child device on the path to PDEV.
	 *
	 * In detail the two cases are (note that the 'X' below is the
	 * 'next child on the path to PDEV' mentioned above):
	 *
	 * 1) PBM --> PCI bus lacking int{map,mask} --> X ... PDEV
	 *
	 *    Here we use regs of 'PCI bus' device.
	 *
	 * 2) PBM --> PCI bus with int{map,mask} --> X ... PDEV
	 *
	 *    Here we use regs of 'X'.  Note that X can be PDEV.
	 */
	if (pdev->bus->number != pbm->pci_first_busno) {
		struct pcidev_cookie *bus_pcp, *regs_pcp;
		struct pci_dev *bus_dev, *regs_dev;
		int plen;

		/* Walk up to the bridge directly below the PBM,
		 * remembering the device one level below it.
		 */
		bus_dev = pdev->bus->self;
		regs_dev = pdev;

		while (bus_dev->bus &&
		       bus_dev->bus->number != pbm->pci_first_busno) {
			regs_dev = bus_dev;
			bus_dev = bus_dev->bus->self;
		}

		regs_pcp = regs_dev->sysdata;
		pregs = regs_pcp->prom_regs;

		bus_pcp = bus_dev->sysdata;

		/* But if the PCI bridge has its own interrupt map
		 * and mask properties, use that and the regs of the
		 * PCI entity at the next level down on the path to the
		 * device.
		 */
		plen = prom_getproperty(bus_pcp->prom_node, "interrupt-map",
					(char *) &bridge_local_intmap[0],
					sizeof(bridge_local_intmap));
		if (plen != -1) {
			intmap = &bridge_local_intmap[0];
			num_intmap = plen / sizeof(struct linux_prom_pci_intmap);
			plen = prom_getproperty(bus_pcp->prom_node,
						"interrupt-map-mask",
						(char *) &bridge_local_intmask,
						sizeof(bridge_local_intmask));
			if (plen == -1) {
				printk("pci_intmap_match: Warning! Bridge has intmap "
				       "but no intmask.\n");
				printk("pci_intmap_match: Trying to recover.\n");
				return 0;
			}

			if (pdev->bus->self != bus_dev)
				map_slot = 1;
		} else {
			pregs = bus_pcp->prom_regs;
			map_slot = 1;
		}
	}

	if (map_slot) {
		/* Swizzle the interrupt pin by the device's slot. */
		*interrupt = ((*interrupt
			       - 1
			       + PCI_SLOT(pdev->devfn)) & 0x3) + 1;
	}

	/* Apply the map mask to the unit address and interrupt, then
	 * search the map for an exactly matching entry.
	 */
	hi = pregs->phys_hi & intmask->phys_hi;
	mid = pregs->phys_mid & intmask->phys_mid;
	lo = pregs->phys_lo & intmask->phys_lo;
	irq = *interrupt & intmask->interrupt;

	for (i = 0; i < num_intmap; i++) {
		if (intmap[i].phys_hi == hi &&
		    intmap[i].phys_mid == mid &&
		    intmap[i].phys_lo == lo &&
		    intmap[i].interrupt == irq) {
			*interrupt = intmap[i].cinterrupt;
			printk("PCI-IRQ: Routing bus[%2x] slot[%2x] map[%d] to INO[%02x]\n",
			       pdev->bus->number, PCI_SLOT(pdev->devfn),
			       map_slot, *interrupt);
			return 1;
		}
	}

	/* We will run this code even if pbm->num_pbm_intmap is zero, just so
	 * we can apply the slot mapping to the PROM interrupt property value.
	 * So do not spit out these warnings in that case.
	 */
	if (num_intmap != 0) {
		/* Print it both to OBP console and kernel one so that if bootup
		 * hangs here the user has the information to report.
		 */
		prom_printf("pci_intmap_match: bus %02x, devfn %02x: ",
			    pdev->bus->number, pdev->devfn);
		prom_printf("IRQ [%08x.%08x.%08x.%08x] not found in interrupt-map\n",
			    pregs->phys_hi, pregs->phys_mid, pregs->phys_lo, *interrupt);
		prom_printf("Please email this information to davem@redhat.com\n");

		printk("pci_intmap_match: bus %02x, devfn %02x: ",
		       pdev->bus->number, pdev->devfn);
		printk("IRQ [%08x.%08x.%08x.%08x] not found in interrupt-map\n",
		       pregs->phys_hi, pregs->phys_mid, pregs->phys_lo, *interrupt);
		printk("Please email this information to davem@redhat.com\n");
	}

	return 0;
}
674
/* Compute and program pdev->irq: use the OBP "interrupts" property
 * when it is already fully specified, otherwise build the IRQ via the
 * interrupt-map or, as a last resort, from bus/slot/line geometry.
 */
static void __init pdev_fixup_irq(struct pci_dev *pdev)
{
	struct pcidev_cookie *pcp = pdev->sysdata;
	struct pci_pbm_info *pbm = pcp->pbm;
	struct pci_controller_info *p = pbm->parent;
	unsigned int portid = pbm->portid;
	unsigned int prom_irq;
	int prom_node = pcp->prom_node;
	int err;

	/* If this is an empty EBUS device, sometimes OBP fails to
	 * give it a valid fully specified interrupts property.
	 * The EBUS hooked up to SunHME on PCI I/O boards of
	 * Ex000 systems is one such case.
	 *
	 * The interrupt is not important so just ignore it.
	 */
	if (pdev->vendor == PCI_VENDOR_ID_SUN &&
	    pdev->device == PCI_DEVICE_ID_SUN_EBUS &&
	    !prom_getchild(prom_node)) {
		pdev->irq = 0;
		return;
	}

	/* No "interrupts" property at all means no IRQ. */
	err = prom_getproperty(prom_node, "interrupts",
			       (char *)&prom_irq, sizeof(prom_irq));
	if (err == 0 || err == -1) {
		pdev->irq = 0;
		return;
	}

	/* Fully specified already? */
	if (((prom_irq & PCI_IRQ_IGN) >> 6) == portid) {
		pdev->irq = p->irq_build(pbm, pdev, prom_irq);
		goto have_irq;
	}

	/* An onboard device? (bit 5 set) */
	if ((prom_irq & PCI_IRQ_INO) & 0x20) {
		pdev->irq = p->irq_build(pbm, pdev, (portid << 6 | prom_irq));
		goto have_irq;
	}

	/* Can we find a matching entry in the interrupt-map? */
	if (pci_intmap_match(pdev, &prom_irq)) {
		pdev->irq = p->irq_build(pbm, pdev, (portid << 6) | prom_irq);
		goto have_irq;
	}

	/* Ok, we have to do it the hard way. */
	{
		unsigned int bus, slot, line;

		bus = (pbm == &pbm->parent->pbm_B) ? (1 << 4) : 0;

		/* If we have a legal interrupt property, use it as
		 * the IRQ line.
		 */
		if (prom_irq > 0 && prom_irq < 5) {
			line = ((prom_irq - 1) & 3);
		} else {
			u8 pci_irq_line;

			/* Else just directly consult PCI config space. */
			pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pci_irq_line);
			line = ((pci_irq_line - 1) & 3);
		}

		/* Now figure out the slot.
		 *
		 * Basically, device number zero on the top-level bus is
		 * always the PCI host controller.  Slot 0 is then device 1.
		 * PBM A supports two external slots (0 and 1), and PBM B
		 * supports 4 external slots (0, 1, 2, and 3).  On-board PCI
		 * devices are wired to device numbers outside of these
		 * ranges. -DaveM
		 */
		if (pdev->bus->number == pbm->pci_first_busno) {
			slot = PCI_SLOT(pdev->devfn) - pbm->pci_first_slot;
		} else {
			struct pci_dev *bus_dev;

			/* Underneath a bridge, use slot number of parent
			 * bridge which is closest to the PBM.
			 */
			bus_dev = pdev->bus->self;
			while (bus_dev->bus &&
			       bus_dev->bus->number != pbm->pci_first_busno)
				bus_dev = bus_dev->bus->self;

			slot = PCI_SLOT(bus_dev->devfn) - pbm->pci_first_slot;
		}
		slot = slot << 2;

		pdev->irq = p->irq_build(pbm, pdev,
					 ((portid << 6) & PCI_IRQ_IGN) |
					 (bus | slot | line));
	}

have_irq:
	/* Mirror the INO into the device's interrupt-line register. */
	pci_write_config_byte(pdev, PCI_INTERRUPT_LINE,
			      pdev->irq & PCI_IRQ_INO);
}
778
/* Recursively fix up pdev->irq for every device on PBUS and on all of
 * its child buses.
 */
void __init pci_fixup_irq(struct pci_pbm_info *pbm,
			  struct pci_bus *pbus)
{
	struct pci_dev *dev;
	struct pci_bus *bus;

	list_for_each_entry(dev, &pbus->devices, bus_list)
		pdev_fixup_irq(dev);

	list_for_each_entry(bus, &pbus->children, node)
		pci_fixup_irq(pbm, bus);
}
791
792static void pdev_setup_busmastering(struct pci_dev *pdev, int is_66mhz)
793{
794 u16 cmd;
795 u8 hdr_type, min_gnt, ltimer;
796
797 pci_read_config_word(pdev, PCI_COMMAND, &cmd);
798 cmd |= PCI_COMMAND_MASTER;
799 pci_write_config_word(pdev, PCI_COMMAND, cmd);
800
801 /* Read it back, if the mastering bit did not
802 * get set, the device does not support bus
803 * mastering so we have nothing to do here.
804 */
805 pci_read_config_word(pdev, PCI_COMMAND, &cmd);
806 if ((cmd & PCI_COMMAND_MASTER) == 0)
807 return;
808
809 /* Set correct cache line size, 64-byte on all
810 * Sparc64 PCI systems. Note that the value is
811 * measured in 32-bit words.
812 */
813 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
814 64 / sizeof(u32));
815
816 pci_read_config_byte(pdev, PCI_HEADER_TYPE, &hdr_type);
817 hdr_type &= ~0x80;
818 if (hdr_type != PCI_HEADER_TYPE_NORMAL)
819 return;
820
821 /* If the latency timer is already programmed with a non-zero
822 * value, assume whoever set it (OBP or whoever) knows what
823 * they are doing.
824 */
825 pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &ltimer);
826 if (ltimer != 0)
827 return;
828
829 /* XXX Since I'm tipping off the min grant value to
830 * XXX choose a suitable latency timer value, I also
831 * XXX considered making use of the max latency value
832 * XXX as well. Unfortunately I've seen too many bogusly
833 * XXX low settings for it to the point where it lacks
834 * XXX any usefulness. In one case, an ethernet card
835 * XXX claimed a min grant of 10 and a max latency of 5.
836 * XXX Now, if I had two such cards on the same bus I
837 * XXX could not set the desired burst period (calculated
838 * XXX from min grant) without violating the max latency
839 * XXX bound. Duh...
840 * XXX
841 * XXX I blame dumb PC bios implementors for stuff like
842 * XXX this, most of them don't even try to do something
843 * XXX sensible with latency timer values and just set some
844 * XXX default value (usually 32) into every device.
845 */
846
847 pci_read_config_byte(pdev, PCI_MIN_GNT, &min_gnt);
848
849 if (min_gnt == 0) {
850 /* If no min_gnt setting then use a default
851 * value.
852 */
853 if (is_66mhz)
854 ltimer = 16;
855 else
856 ltimer = 32;
857 } else {
858 int shift_factor;
859
860 if (is_66mhz)
861 shift_factor = 2;
862 else
863 shift_factor = 3;
864
865 /* Use a default value when the min_gnt value
866 * is erroneously high.
867 */
868 if (((unsigned int) min_gnt << shift_factor) > 512 ||
869 ((min_gnt << shift_factor) & 0xff) == 0) {
870 ltimer = 8 << shift_factor;
871 } else {
872 ltimer = min_gnt << shift_factor;
873 }
874 }
875
876 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, ltimer);
877}
878
879void pci_determine_66mhz_disposition(struct pci_pbm_info *pbm,
880 struct pci_bus *pbus)
881{
882 struct pci_dev *pdev;
883 int all_are_66mhz;
884 u16 status;
885
886 if (pbm->is_66mhz_capable == 0) {
887 all_are_66mhz = 0;
888 goto out;
889 }
890
891 all_are_66mhz = 1;
892 list_for_each_entry(pdev, &pbus->devices, bus_list) {
893 pci_read_config_word(pdev, PCI_STATUS, &status);
894 if (!(status & PCI_STATUS_66MHZ)) {
895 all_are_66mhz = 0;
896 break;
897 }
898 }
899out:
900 pbm->all_devs_66mhz = all_are_66mhz;
901
902 printk("PCI%d(PBM%c): Bus running at %dMHz\n",
903 pbm->parent->index,
904 (pbm == &pbm->parent->pbm_A) ? 'A' : 'B',
905 (all_are_66mhz ? 66 : 33));
906}
907
908void pci_setup_busmastering(struct pci_pbm_info *pbm,
909 struct pci_bus *pbus)
910{
911 struct pci_dev *dev;
912 struct pci_bus *bus;
913 int is_66mhz;
914
915 is_66mhz = pbm->is_66mhz_capable && pbm->all_devs_66mhz;
916
917 list_for_each_entry(dev, &pbus->devices, bus_list)
918 pdev_setup_busmastering(dev, is_66mhz);
919
920 list_for_each_entry(bus, &pbus->children, node)
921 pci_setup_busmastering(pbm, bus);
922}
923
924void pci_register_legacy_regions(struct resource *io_res,
925 struct resource *mem_res)
926{
927 struct resource *p;
928
929 /* VGA Video RAM. */
930 p = kmalloc(sizeof(*p), GFP_KERNEL);
931 if (!p)
932 return;
933
934 memset(p, 0, sizeof(*p));
935 p->name = "Video RAM area";
936 p->start = mem_res->start + 0xa0000UL;
937 p->end = p->start + 0x1ffffUL;
938 p->flags = IORESOURCE_BUSY;
939 request_resource(mem_res, p);
940
941 p = kmalloc(sizeof(*p), GFP_KERNEL);
942 if (!p)
943 return;
944
945 memset(p, 0, sizeof(*p));
946 p->name = "System ROM";
947 p->start = mem_res->start + 0xf0000UL;
948 p->end = p->start + 0xffffUL;
949 p->flags = IORESOURCE_BUSY;
950 request_resource(mem_res, p);
951
952 p = kmalloc(sizeof(*p), GFP_KERNEL);
953 if (!p)
954 return;
955
956 memset(p, 0, sizeof(*p));
957 p->name = "Video ROM";
958 p->start = mem_res->start + 0xc0000UL;
959 p->end = p->start + 0x7fffUL;
960 p->flags = IORESOURCE_BUSY;
961 request_resource(mem_res, p);
962}
963
964/* Generic helper routines for PCI error reporting. */
965void pci_scan_for_target_abort(struct pci_controller_info *p,
966 struct pci_pbm_info *pbm,
967 struct pci_bus *pbus)
968{
969 struct pci_dev *pdev;
970 struct pci_bus *bus;
971
972 list_for_each_entry(pdev, &pbus->devices, bus_list) {
973 u16 status, error_bits;
974
975 pci_read_config_word(pdev, PCI_STATUS, &status);
976 error_bits =
977 (status & (PCI_STATUS_SIG_TARGET_ABORT |
978 PCI_STATUS_REC_TARGET_ABORT));
979 if (error_bits) {
980 pci_write_config_word(pdev, PCI_STATUS, error_bits);
981 printk("PCI%d(PBM%c): Device [%s] saw Target Abort [%016x]\n",
982 p->index, ((pbm == &p->pbm_A) ? 'A' : 'B'),
983 pci_name(pdev), status);
984 }
985 }
986
987 list_for_each_entry(bus, &pbus->children, node)
988 pci_scan_for_target_abort(p, pbm, bus);
989}
990
991void pci_scan_for_master_abort(struct pci_controller_info *p,
992 struct pci_pbm_info *pbm,
993 struct pci_bus *pbus)
994{
995 struct pci_dev *pdev;
996 struct pci_bus *bus;
997
998 list_for_each_entry(pdev, &pbus->devices, bus_list) {
999 u16 status, error_bits;
1000
1001 pci_read_config_word(pdev, PCI_STATUS, &status);
1002 error_bits =
1003 (status & (PCI_STATUS_REC_MASTER_ABORT));
1004 if (error_bits) {
1005 pci_write_config_word(pdev, PCI_STATUS, error_bits);
1006 printk("PCI%d(PBM%c): Device [%s] received Master Abort [%016x]\n",
1007 p->index, ((pbm == &p->pbm_A) ? 'A' : 'B'),
1008 pci_name(pdev), status);
1009 }
1010 }
1011
1012 list_for_each_entry(bus, &pbus->children, node)
1013 pci_scan_for_master_abort(p, pbm, bus);
1014}
1015
1016void pci_scan_for_parity_error(struct pci_controller_info *p,
1017 struct pci_pbm_info *pbm,
1018 struct pci_bus *pbus)
1019{
1020 struct pci_dev *pdev;
1021 struct pci_bus *bus;
1022
1023 list_for_each_entry(pdev, &pbus->devices, bus_list) {
1024 u16 status, error_bits;
1025
1026 pci_read_config_word(pdev, PCI_STATUS, &status);
1027 error_bits =
1028 (status & (PCI_STATUS_PARITY |
1029 PCI_STATUS_DETECTED_PARITY));
1030 if (error_bits) {
1031 pci_write_config_word(pdev, PCI_STATUS, error_bits);
1032 printk("PCI%d(PBM%c): Device [%s] saw Parity Error [%016x]\n",
1033 p->index, ((pbm == &p->pbm_A) ? 'A' : 'B'),
1034 pci_name(pdev), status);
1035 }
1036 }
1037
1038 list_for_each_entry(bus, &pbus->children, node)
1039 pci_scan_for_parity_error(p, pbm, bus);
1040}
diff --git a/arch/sparc64/kernel/pci_impl.h b/arch/sparc64/kernel/pci_impl.h
new file mode 100644
index 000000000000..6c3205962544
--- /dev/null
+++ b/arch/sparc64/kernel/pci_impl.h
@@ -0,0 +1,49 @@
1/* $Id: pci_impl.h,v 1.9 2001/06/13 06:34:30 davem Exp $
2 * pci_impl.h: Helper definitions for PCI controller support.
3 *
4 * Copyright (C) 1999 David S. Miller (davem@redhat.com)
5 */
6
7#ifndef PCI_IMPL_H
8#define PCI_IMPL_H
9
10#include <linux/types.h>
11#include <linux/spinlock.h>
12#include <asm/io.h>
13
14extern struct pci_controller_info *pci_controller_root;
15
16extern int pci_num_controllers;
17
18/* PCI bus scanning and fixup support. */
19extern void pci_fixup_host_bridge_self(struct pci_bus *pbus);
20extern void pci_fill_in_pbm_cookies(struct pci_bus *pbus,
21 struct pci_pbm_info *pbm,
22 int prom_node);
23extern void pci_record_assignments(struct pci_pbm_info *pbm,
24 struct pci_bus *pbus);
25extern void pci_assign_unassigned(struct pci_pbm_info *pbm,
26 struct pci_bus *pbus);
27extern void pci_fixup_irq(struct pci_pbm_info *pbm,
28 struct pci_bus *pbus);
29extern void pci_determine_66mhz_disposition(struct pci_pbm_info *pbm,
30 struct pci_bus *pbus);
31extern void pci_setup_busmastering(struct pci_pbm_info *pbm,
32 struct pci_bus *pbus);
33extern void pci_register_legacy_regions(struct resource *io_res,
34 struct resource *mem_res);
35
36/* Error reporting support. */
37extern void pci_scan_for_target_abort(struct pci_controller_info *, struct pci_pbm_info *, struct pci_bus *);
38extern void pci_scan_for_master_abort(struct pci_controller_info *, struct pci_pbm_info *, struct pci_bus *);
39extern void pci_scan_for_parity_error(struct pci_controller_info *, struct pci_pbm_info *, struct pci_bus *);
40
41/* Configuration space access. */
42extern void pci_config_read8(u8 *addr, u8 *ret);
43extern void pci_config_read16(u16 *addr, u16 *ret);
44extern void pci_config_read32(u32 *addr, u32 *ret);
45extern void pci_config_write8(u8 *addr, u8 val);
46extern void pci_config_write16(u16 *addr, u16 val);
47extern void pci_config_write32(u32 *addr, u32 val);
48
49#endif /* !(PCI_IMPL_H) */
diff --git a/arch/sparc64/kernel/pci_iommu.c b/arch/sparc64/kernel/pci_iommu.c
new file mode 100644
index 000000000000..292983413ae2
--- /dev/null
+++ b/arch/sparc64/kernel/pci_iommu.c
@@ -0,0 +1,855 @@
1/* $Id: pci_iommu.c,v 1.17 2001/12/17 07:05:09 davem Exp $
2 * pci_iommu.c: UltraSparc PCI controller IOM/STC support.
3 *
4 * Copyright (C) 1999 David S. Miller (davem@redhat.com)
5 * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
6 */
7
8#include <linux/kernel.h>
9#include <linux/sched.h>
10#include <linux/mm.h>
11
12#include <asm/pbm.h>
13
14#include "iommu_common.h"
15
16#define PCI_STC_CTXMATCH_ADDR(STC, CTX) \
17 ((STC)->strbuf_ctxmatch_base + ((CTX) << 3))
18
19/* Accessing IOMMU and Streaming Buffer registers.
20 * REG parameter is a physical address. All registers
21 * are 64-bits in size.
22 */
23#define pci_iommu_read(__reg) \
24({ u64 __ret; \
25 __asm__ __volatile__("ldxa [%1] %2, %0" \
26 : "=r" (__ret) \
27 : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
28 : "memory"); \
29 __ret; \
30})
31#define pci_iommu_write(__reg, __val) \
32 __asm__ __volatile__("stxa %0, [%1] %2" \
33 : /* no outputs */ \
34 : "r" (__val), "r" (__reg), \
35 "i" (ASI_PHYS_BYPASS_EC_E))
36
37/* Must be invoked under the IOMMU lock. */
38static void __iommu_flushall(struct pci_iommu *iommu)
39{
40 unsigned long tag;
41 int entry;
42
43 tag = iommu->iommu_flush + (0xa580UL - 0x0210UL);
44 for (entry = 0; entry < 16; entry++) {
45 pci_iommu_write(tag, 0);
46 tag += 8;
47 }
48
49 /* Ensure completion of previous PIO writes. */
50 (void) pci_iommu_read(iommu->write_complete_reg);
51
52 /* Now update everyone's flush point. */
53 for (entry = 0; entry < PBM_NCLUSTERS; entry++) {
54 iommu->alloc_info[entry].flush =
55 iommu->alloc_info[entry].next;
56 }
57}
58
59#define IOPTE_CONSISTENT(CTX) \
60 (IOPTE_VALID | IOPTE_CACHE | \
61 (((CTX) << 47) & IOPTE_CONTEXT))
62
63#define IOPTE_STREAMING(CTX) \
64 (IOPTE_CONSISTENT(CTX) | IOPTE_STBUF)
65
66/* Existing mappings are never marked invalid, instead they
67 * are pointed to a dummy page.
68 */
69#define IOPTE_IS_DUMMY(iommu, iopte) \
70 ((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)
71
72static void inline iopte_make_dummy(struct pci_iommu *iommu, iopte_t *iopte)
73{
74 unsigned long val = iopte_val(*iopte);
75
76 val &= ~IOPTE_PAGE;
77 val |= iommu->dummy_page_pa;
78
79 iopte_val(*iopte) = val;
80}
81
82void pci_iommu_table_init(struct pci_iommu *iommu, int tsbsize)
83{
84 int i;
85
86 tsbsize /= sizeof(iopte_t);
87
88 for (i = 0; i < tsbsize; i++)
89 iopte_make_dummy(iommu, &iommu->page_table[i]);
90}
91
92static iopte_t *alloc_streaming_cluster(struct pci_iommu *iommu, unsigned long npages)
93{
94 iopte_t *iopte, *limit, *first;
95 unsigned long cnum, ent, flush_point;
96
97 cnum = 0;
98 while ((1UL << cnum) < npages)
99 cnum++;
100 iopte = (iommu->page_table +
101 (cnum << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));
102
103 if (cnum == 0)
104 limit = (iommu->page_table +
105 iommu->lowest_consistent_map);
106 else
107 limit = (iopte +
108 (1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));
109
110 iopte += ((ent = iommu->alloc_info[cnum].next) << cnum);
111 flush_point = iommu->alloc_info[cnum].flush;
112
113 first = iopte;
114 for (;;) {
115 if (IOPTE_IS_DUMMY(iommu, iopte)) {
116 if ((iopte + (1 << cnum)) >= limit)
117 ent = 0;
118 else
119 ent = ent + 1;
120 iommu->alloc_info[cnum].next = ent;
121 if (ent == flush_point)
122 __iommu_flushall(iommu);
123 break;
124 }
125 iopte += (1 << cnum);
126 ent++;
127 if (iopte >= limit) {
128 iopte = (iommu->page_table +
129 (cnum <<
130 (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));
131 ent = 0;
132 }
133 if (ent == flush_point)
134 __iommu_flushall(iommu);
135 if (iopte == first)
136 goto bad;
137 }
138
139 /* I've got your streaming cluster right here buddy boy... */
140 return iopte;
141
142bad:
143 printk(KERN_EMERG "pci_iommu: alloc_streaming_cluster of npages(%ld) failed!\n",
144 npages);
145 return NULL;
146}
147
148static void free_streaming_cluster(struct pci_iommu *iommu, dma_addr_t base,
149 unsigned long npages, unsigned long ctx)
150{
151 unsigned long cnum, ent;
152
153 cnum = 0;
154 while ((1UL << cnum) < npages)
155 cnum++;
156
157 ent = (base << (32 - IO_PAGE_SHIFT + PBM_LOGCLUSTERS - iommu->page_table_sz_bits))
158 >> (32 + PBM_LOGCLUSTERS + cnum - iommu->page_table_sz_bits);
159
160 /* If the global flush might not have caught this entry,
161 * adjust the flush point such that we will flush before
162 * ever trying to reuse it.
163 */
164#define between(X,Y,Z) (((Z) - (Y)) >= ((X) - (Y)))
165 if (between(ent, iommu->alloc_info[cnum].next, iommu->alloc_info[cnum].flush))
166 iommu->alloc_info[cnum].flush = ent;
167#undef between
168}
169
170/* We allocate consistent mappings from the end of cluster zero. */
171static iopte_t *alloc_consistent_cluster(struct pci_iommu *iommu, unsigned long npages)
172{
173 iopte_t *iopte;
174
175 iopte = iommu->page_table + (1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS));
176 while (iopte > iommu->page_table) {
177 iopte--;
178 if (IOPTE_IS_DUMMY(iommu, iopte)) {
179 unsigned long tmp = npages;
180
181 while (--tmp) {
182 iopte--;
183 if (!IOPTE_IS_DUMMY(iommu, iopte))
184 break;
185 }
186 if (tmp == 0) {
187 u32 entry = (iopte - iommu->page_table);
188
189 if (entry < iommu->lowest_consistent_map)
190 iommu->lowest_consistent_map = entry;
191 return iopte;
192 }
193 }
194 }
195 return NULL;
196}
197
198/* Allocate and map kernel buffer of size SIZE using consistent mode
199 * DMA for PCI device PDEV. Return non-NULL cpu-side address if
200 * successful and set *DMA_ADDRP to the PCI side dma address.
201 */
202void *pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
203{
204 struct pcidev_cookie *pcp;
205 struct pci_iommu *iommu;
206 iopte_t *iopte;
207 unsigned long flags, order, first_page, ctx;
208 void *ret;
209 int npages;
210
211 size = IO_PAGE_ALIGN(size);
212 order = get_order(size);
213 if (order >= 10)
214 return NULL;
215
216 first_page = __get_free_pages(GFP_ATOMIC, order);
217 if (first_page == 0UL)
218 return NULL;
219 memset((char *)first_page, 0, PAGE_SIZE << order);
220
221 pcp = pdev->sysdata;
222 iommu = pcp->pbm->iommu;
223
224 spin_lock_irqsave(&iommu->lock, flags);
225 iopte = alloc_consistent_cluster(iommu, size >> IO_PAGE_SHIFT);
226 if (iopte == NULL) {
227 spin_unlock_irqrestore(&iommu->lock, flags);
228 free_pages(first_page, order);
229 return NULL;
230 }
231
232 *dma_addrp = (iommu->page_table_map_base +
233 ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
234 ret = (void *) first_page;
235 npages = size >> IO_PAGE_SHIFT;
236 ctx = 0;
237 if (iommu->iommu_ctxflush)
238 ctx = iommu->iommu_cur_ctx++;
239 first_page = __pa(first_page);
240 while (npages--) {
241 iopte_val(*iopte) = (IOPTE_CONSISTENT(ctx) |
242 IOPTE_WRITE |
243 (first_page & IOPTE_PAGE));
244 iopte++;
245 first_page += IO_PAGE_SIZE;
246 }
247
248 {
249 int i;
250 u32 daddr = *dma_addrp;
251
252 npages = size >> IO_PAGE_SHIFT;
253 for (i = 0; i < npages; i++) {
254 pci_iommu_write(iommu->iommu_flush, daddr);
255 daddr += IO_PAGE_SIZE;
256 }
257 }
258
259 spin_unlock_irqrestore(&iommu->lock, flags);
260
261 return ret;
262}
263
264/* Free and unmap a consistent DMA translation. */
265void pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
266{
267 struct pcidev_cookie *pcp;
268 struct pci_iommu *iommu;
269 iopte_t *iopte;
270 unsigned long flags, order, npages, i, ctx;
271
272 npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
273 pcp = pdev->sysdata;
274 iommu = pcp->pbm->iommu;
275 iopte = iommu->page_table +
276 ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
277
278 spin_lock_irqsave(&iommu->lock, flags);
279
280 if ((iopte - iommu->page_table) ==
281 iommu->lowest_consistent_map) {
282 iopte_t *walk = iopte + npages;
283 iopte_t *limit;
284
285 limit = (iommu->page_table +
286 (1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));
287 while (walk < limit) {
288 if (!IOPTE_IS_DUMMY(iommu, walk))
289 break;
290 walk++;
291 }
292 iommu->lowest_consistent_map =
293 (walk - iommu->page_table);
294 }
295
296 /* Data for consistent mappings cannot enter the streaming
297 * buffers, so we only need to update the TSB. We flush
298 * the IOMMU here as well to prevent conflicts with the
299 * streaming mapping deferred tlb flush scheme.
300 */
301
302 ctx = 0;
303 if (iommu->iommu_ctxflush)
304 ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
305
306 for (i = 0; i < npages; i++, iopte++)
307 iopte_make_dummy(iommu, iopte);
308
309 if (iommu->iommu_ctxflush) {
310 pci_iommu_write(iommu->iommu_ctxflush, ctx);
311 } else {
312 for (i = 0; i < npages; i++) {
313 u32 daddr = dvma + (i << IO_PAGE_SHIFT);
314
315 pci_iommu_write(iommu->iommu_flush, daddr);
316 }
317 }
318
319 spin_unlock_irqrestore(&iommu->lock, flags);
320
321 order = get_order(size);
322 if (order < 10)
323 free_pages((unsigned long)cpu, order);
324}
325
326/* Map a single buffer at PTR of SZ bytes for PCI DMA
327 * in streaming mode.
328 */
329dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction)
330{
331 struct pcidev_cookie *pcp;
332 struct pci_iommu *iommu;
333 struct pci_strbuf *strbuf;
334 iopte_t *base;
335 unsigned long flags, npages, oaddr;
336 unsigned long i, base_paddr, ctx;
337 u32 bus_addr, ret;
338 unsigned long iopte_protection;
339
340 pcp = pdev->sysdata;
341 iommu = pcp->pbm->iommu;
342 strbuf = &pcp->pbm->stc;
343
344 if (direction == PCI_DMA_NONE)
345 BUG();
346
347 oaddr = (unsigned long)ptr;
348 npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
349 npages >>= IO_PAGE_SHIFT;
350
351 spin_lock_irqsave(&iommu->lock, flags);
352
353 base = alloc_streaming_cluster(iommu, npages);
354 if (base == NULL)
355 goto bad;
356 bus_addr = (iommu->page_table_map_base +
357 ((base - iommu->page_table) << IO_PAGE_SHIFT));
358 ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
359 base_paddr = __pa(oaddr & IO_PAGE_MASK);
360 ctx = 0;
361 if (iommu->iommu_ctxflush)
362 ctx = iommu->iommu_cur_ctx++;
363 if (strbuf->strbuf_enabled)
364 iopte_protection = IOPTE_STREAMING(ctx);
365 else
366 iopte_protection = IOPTE_CONSISTENT(ctx);
367 if (direction != PCI_DMA_TODEVICE)
368 iopte_protection |= IOPTE_WRITE;
369
370 for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
371 iopte_val(*base) = iopte_protection | base_paddr;
372
373 spin_unlock_irqrestore(&iommu->lock, flags);
374
375 return ret;
376
377bad:
378 spin_unlock_irqrestore(&iommu->lock, flags);
379 return PCI_DMA_ERROR_CODE;
380}
381
382/* Unmap a single streaming mode DMA translation. */
383void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
384{
385 struct pcidev_cookie *pcp;
386 struct pci_iommu *iommu;
387 struct pci_strbuf *strbuf;
388 iopte_t *base;
389 unsigned long flags, npages, i, ctx;
390
391 if (direction == PCI_DMA_NONE)
392 BUG();
393
394 pcp = pdev->sysdata;
395 iommu = pcp->pbm->iommu;
396 strbuf = &pcp->pbm->stc;
397
398 npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
399 npages >>= IO_PAGE_SHIFT;
400 base = iommu->page_table +
401 ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
402#ifdef DEBUG_PCI_IOMMU
403 if (IOPTE_IS_DUMMY(iommu, base))
404 printk("pci_unmap_single called on non-mapped region %08x,%08x from %016lx\n",
405 bus_addr, sz, __builtin_return_address(0));
406#endif
407 bus_addr &= IO_PAGE_MASK;
408
409 spin_lock_irqsave(&iommu->lock, flags);
410
411 /* Record the context, if any. */
412 ctx = 0;
413 if (iommu->iommu_ctxflush)
414 ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
415
416 /* Step 1: Kick data out of streaming buffers if necessary. */
417 if (strbuf->strbuf_enabled) {
418 u32 vaddr = bus_addr;
419
420 PCI_STC_FLUSHFLAG_INIT(strbuf);
421 if (strbuf->strbuf_ctxflush &&
422 iommu->iommu_ctxflush) {
423 unsigned long matchreg, flushreg;
424
425 flushreg = strbuf->strbuf_ctxflush;
426 matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
427 do {
428 pci_iommu_write(flushreg, ctx);
429 } while(((long)pci_iommu_read(matchreg)) < 0L);
430 } else {
431 for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
432 pci_iommu_write(strbuf->strbuf_pflush, vaddr);
433 }
434
435 pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
436 (void) pci_iommu_read(iommu->write_complete_reg);
437 while (!PCI_STC_FLUSHFLAG_SET(strbuf))
438 membar("#LoadLoad");
439 }
440
441 /* Step 2: Clear out first TSB entry. */
442 iopte_make_dummy(iommu, base);
443
444 free_streaming_cluster(iommu, bus_addr - iommu->page_table_map_base,
445 npages, ctx);
446
447 spin_unlock_irqrestore(&iommu->lock, flags);
448}
449
450#define SG_ENT_PHYS_ADDRESS(SG) \
451 (__pa(page_address((SG)->page)) + (SG)->offset)
452
453static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
454 int nused, int nelems, unsigned long iopte_protection)
455{
456 struct scatterlist *dma_sg = sg;
457 struct scatterlist *sg_end = sg + nelems;
458 int i;
459
460 for (i = 0; i < nused; i++) {
461 unsigned long pteval = ~0UL;
462 u32 dma_npages;
463
464 dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
465 dma_sg->dma_length +
466 ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
467 do {
468 unsigned long offset;
469 signed int len;
470
471 /* If we are here, we know we have at least one
472 * more page to map. So walk forward until we
473 * hit a page crossing, and begin creating new
474 * mappings from that spot.
475 */
476 for (;;) {
477 unsigned long tmp;
478
479 tmp = SG_ENT_PHYS_ADDRESS(sg);
480 len = sg->length;
481 if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
482 pteval = tmp & IO_PAGE_MASK;
483 offset = tmp & (IO_PAGE_SIZE - 1UL);
484 break;
485 }
486 if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
487 pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
488 offset = 0UL;
489 len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
490 break;
491 }
492 sg++;
493 }
494
495 pteval = iopte_protection | (pteval & IOPTE_PAGE);
496 while (len > 0) {
497 *iopte++ = __iopte(pteval);
498 pteval += IO_PAGE_SIZE;
499 len -= (IO_PAGE_SIZE - offset);
500 offset = 0;
501 dma_npages--;
502 }
503
504 pteval = (pteval & IOPTE_PAGE) + len;
505 sg++;
506
507 /* Skip over any tail mappings we've fully mapped,
508 * adjusting pteval along the way. Stop when we
509 * detect a page crossing event.
510 */
511 while (sg < sg_end &&
512 (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
513 (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
514 ((pteval ^
515 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
516 pteval += sg->length;
517 sg++;
518 }
519 if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
520 pteval = ~0UL;
521 } while (dma_npages != 0);
522 dma_sg++;
523 }
524}
525
526/* Map a set of buffers described by SGLIST with NELEMS array
527 * elements in streaming mode for PCI DMA.
528 * When making changes here, inspect the assembly output. I was having
529 * hard time to kepp this routine out of using stack slots for holding variables.
530 */
531int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
532{
533 struct pcidev_cookie *pcp;
534 struct pci_iommu *iommu;
535 struct pci_strbuf *strbuf;
536 unsigned long flags, ctx, npages, iopte_protection;
537 iopte_t *base;
538 u32 dma_base;
539 struct scatterlist *sgtmp;
540 int used;
541
542 /* Fast path single entry scatterlists. */
543 if (nelems == 1) {
544 sglist->dma_address =
545 pci_map_single(pdev,
546 (page_address(sglist->page) + sglist->offset),
547 sglist->length, direction);
548 sglist->dma_length = sglist->length;
549 return 1;
550 }
551
552 pcp = pdev->sysdata;
553 iommu = pcp->pbm->iommu;
554 strbuf = &pcp->pbm->stc;
555
556 if (direction == PCI_DMA_NONE)
557 BUG();
558
559 /* Step 1: Prepare scatter list. */
560
561 npages = prepare_sg(sglist, nelems);
562
563 /* Step 2: Allocate a cluster. */
564
565 spin_lock_irqsave(&iommu->lock, flags);
566
567 base = alloc_streaming_cluster(iommu, npages);
568 if (base == NULL)
569 goto bad;
570 dma_base = iommu->page_table_map_base + ((base - iommu->page_table) << IO_PAGE_SHIFT);
571
572 /* Step 3: Normalize DMA addresses. */
573 used = nelems;
574
575 sgtmp = sglist;
576 while (used && sgtmp->dma_length) {
577 sgtmp->dma_address += dma_base;
578 sgtmp++;
579 used--;
580 }
581 used = nelems - used;
582
583 /* Step 4: Choose a context if necessary. */
584 ctx = 0;
585 if (iommu->iommu_ctxflush)
586 ctx = iommu->iommu_cur_ctx++;
587
588 /* Step 5: Create the mappings. */
589 if (strbuf->strbuf_enabled)
590 iopte_protection = IOPTE_STREAMING(ctx);
591 else
592 iopte_protection = IOPTE_CONSISTENT(ctx);
593 if (direction != PCI_DMA_TODEVICE)
594 iopte_protection |= IOPTE_WRITE;
595 fill_sg (base, sglist, used, nelems, iopte_protection);
596#ifdef VERIFY_SG
597 verify_sglist(sglist, nelems, base, npages);
598#endif
599
600 spin_unlock_irqrestore(&iommu->lock, flags);
601
602 return used;
603
604bad:
605 spin_unlock_irqrestore(&iommu->lock, flags);
606 return PCI_DMA_ERROR_CODE;
607}
608
609/* Unmap a set of streaming mode DMA translations. */
610void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
611{
612 struct pcidev_cookie *pcp;
613 struct pci_iommu *iommu;
614 struct pci_strbuf *strbuf;
615 iopte_t *base;
616 unsigned long flags, ctx, i, npages;
617 u32 bus_addr;
618
619 if (direction == PCI_DMA_NONE)
620 BUG();
621
622 pcp = pdev->sysdata;
623 iommu = pcp->pbm->iommu;
624 strbuf = &pcp->pbm->stc;
625
626 bus_addr = sglist->dma_address & IO_PAGE_MASK;
627
628 for (i = 1; i < nelems; i++)
629 if (sglist[i].dma_length == 0)
630 break;
631 i--;
632 npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) - bus_addr) >> IO_PAGE_SHIFT;
633
634 base = iommu->page_table +
635 ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
636
637#ifdef DEBUG_PCI_IOMMU
638 if (IOPTE_IS_DUMMY(iommu, base))
639 printk("pci_unmap_sg called on non-mapped region %016lx,%d from %016lx\n", sglist->dma_address, nelems, __builtin_return_address(0));
640#endif
641
642 spin_lock_irqsave(&iommu->lock, flags);
643
644 /* Record the context, if any. */
645 ctx = 0;
646 if (iommu->iommu_ctxflush)
647 ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
648
649 /* Step 1: Kick data out of streaming buffers if necessary. */
650 if (strbuf->strbuf_enabled) {
651 u32 vaddr = (u32) bus_addr;
652
653 PCI_STC_FLUSHFLAG_INIT(strbuf);
654 if (strbuf->strbuf_ctxflush &&
655 iommu->iommu_ctxflush) {
656 unsigned long matchreg, flushreg;
657
658 flushreg = strbuf->strbuf_ctxflush;
659 matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
660 do {
661 pci_iommu_write(flushreg, ctx);
662 } while(((long)pci_iommu_read(matchreg)) < 0L);
663 } else {
664 for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
665 pci_iommu_write(strbuf->strbuf_pflush, vaddr);
666 }
667
668 pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
669 (void) pci_iommu_read(iommu->write_complete_reg);
670 while (!PCI_STC_FLUSHFLAG_SET(strbuf))
671 membar("#LoadLoad");
672 }
673
674 /* Step 2: Clear out first TSB entry. */
675 iopte_make_dummy(iommu, base);
676
677 free_streaming_cluster(iommu, bus_addr - iommu->page_table_map_base,
678 npages, ctx);
679
680 spin_unlock_irqrestore(&iommu->lock, flags);
681}
682
683/* Make physical memory consistent for a single
684 * streaming mode DMA translation after a transfer.
685 */
686void pci_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
687{
688 struct pcidev_cookie *pcp;
689 struct pci_iommu *iommu;
690 struct pci_strbuf *strbuf;
691 unsigned long flags, ctx, npages;
692
693 pcp = pdev->sysdata;
694 iommu = pcp->pbm->iommu;
695 strbuf = &pcp->pbm->stc;
696
697 if (!strbuf->strbuf_enabled)
698 return;
699
700 spin_lock_irqsave(&iommu->lock, flags);
701
702 npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
703 npages >>= IO_PAGE_SHIFT;
704 bus_addr &= IO_PAGE_MASK;
705
706 /* Step 1: Record the context, if any. */
707 ctx = 0;
708 if (iommu->iommu_ctxflush &&
709 strbuf->strbuf_ctxflush) {
710 iopte_t *iopte;
711
712 iopte = iommu->page_table +
713 ((bus_addr - iommu->page_table_map_base)>>IO_PAGE_SHIFT);
714 ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
715 }
716
717 /* Step 2: Kick data out of streaming buffers. */
718 PCI_STC_FLUSHFLAG_INIT(strbuf);
719 if (iommu->iommu_ctxflush &&
720 strbuf->strbuf_ctxflush) {
721 unsigned long matchreg, flushreg;
722
723 flushreg = strbuf->strbuf_ctxflush;
724 matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
725 do {
726 pci_iommu_write(flushreg, ctx);
727 } while(((long)pci_iommu_read(matchreg)) < 0L);
728 } else {
729 unsigned long i;
730
731 for (i = 0; i < npages; i++, bus_addr += IO_PAGE_SIZE)
732 pci_iommu_write(strbuf->strbuf_pflush, bus_addr);
733 }
734
735 /* Step 3: Perform flush synchronization sequence. */
736 pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
737 (void) pci_iommu_read(iommu->write_complete_reg);
738 while (!PCI_STC_FLUSHFLAG_SET(strbuf))
739 membar("#LoadLoad");
740
741 spin_unlock_irqrestore(&iommu->lock, flags);
742}
743
744/* Make physical memory consistent for a set of streaming
745 * mode DMA translations after a transfer.
746 */
747void pci_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
748{
749 struct pcidev_cookie *pcp;
750 struct pci_iommu *iommu;
751 struct pci_strbuf *strbuf;
752 unsigned long flags, ctx;
753
754 pcp = pdev->sysdata;
755 iommu = pcp->pbm->iommu;
756 strbuf = &pcp->pbm->stc;
757
758 if (!strbuf->strbuf_enabled)
759 return;
760
761 spin_lock_irqsave(&iommu->lock, flags);
762
763 /* Step 1: Record the context, if any. */
764 ctx = 0;
765 if (iommu->iommu_ctxflush &&
766 strbuf->strbuf_ctxflush) {
767 iopte_t *iopte;
768
769 iopte = iommu->page_table +
770 ((sglist[0].dma_address - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
771 ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
772 }
773
774 /* Step 2: Kick data out of streaming buffers. */
775 PCI_STC_FLUSHFLAG_INIT(strbuf);
776 if (iommu->iommu_ctxflush &&
777 strbuf->strbuf_ctxflush) {
778 unsigned long matchreg, flushreg;
779
780 flushreg = strbuf->strbuf_ctxflush;
781 matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
782 do {
783 pci_iommu_write(flushreg, ctx);
784 } while (((long)pci_iommu_read(matchreg)) < 0L);
785 } else {
786 unsigned long i, npages;
787 u32 bus_addr;
788
789 bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
790
791 for(i = 1; i < nelems; i++)
792 if (!sglist[i].dma_length)
793 break;
794 i--;
795 npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) - bus_addr) >> IO_PAGE_SHIFT;
796 for (i = 0; i < npages; i++, bus_addr += IO_PAGE_SIZE)
797 pci_iommu_write(strbuf->strbuf_pflush, bus_addr);
798 }
799
800 /* Step 3: Perform flush synchronization sequence. */
801 pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
802 (void) pci_iommu_read(iommu->write_complete_reg);
803 while (!PCI_STC_FLUSHFLAG_SET(strbuf))
804 membar("#LoadLoad");
805
806 spin_unlock_irqrestore(&iommu->lock, flags);
807}
808
809static void ali_sound_dma_hack(struct pci_dev *pdev, int set_bit)
810{
811 struct pci_dev *ali_isa_bridge;
812 u8 val;
813
814 /* ALI sound chips generate 31-bits of DMA, a special register
815 * determines what bit 31 is emitted as.
816 */
817 ali_isa_bridge = pci_get_device(PCI_VENDOR_ID_AL,
818 PCI_DEVICE_ID_AL_M1533,
819 NULL);
820
821 pci_read_config_byte(ali_isa_bridge, 0x7e, &val);
822 if (set_bit)
823 val |= 0x01;
824 else
825 val &= ~0x01;
826 pci_write_config_byte(ali_isa_bridge, 0x7e, val);
827 pci_dev_put(ali_isa_bridge);
828}
829
830int pci_dma_supported(struct pci_dev *pdev, u64 device_mask)
831{
832 struct pcidev_cookie *pcp = pdev->sysdata;
833 u64 dma_addr_mask;
834
835 if (pdev == NULL) {
836 dma_addr_mask = 0xffffffff;
837 } else {
838 struct pci_iommu *iommu = pcp->pbm->iommu;
839
840 dma_addr_mask = iommu->dma_addr_mask;
841
842 if (pdev->vendor == PCI_VENDOR_ID_AL &&
843 pdev->device == PCI_DEVICE_ID_AL_M5451 &&
844 device_mask == 0x7fffffff) {
845 ali_sound_dma_hack(pdev,
846 (dma_addr_mask & 0x80000000) != 0);
847 return 1;
848 }
849 }
850
851 if (device_mask >= (1UL << 32UL))
852 return 0;
853
854 return (device_mask & dma_addr_mask) == dma_addr_mask;
855}
diff --git a/arch/sparc64/kernel/pci_psycho.c b/arch/sparc64/kernel/pci_psycho.c
new file mode 100644
index 000000000000..3567fa879e1f
--- /dev/null
+++ b/arch/sparc64/kernel/pci_psycho.c
@@ -0,0 +1,1560 @@
1/* $Id: pci_psycho.c,v 1.33 2002/02/01 00:58:33 davem Exp $
2 * pci_psycho.c: PSYCHO/U2P specific PCI controller support.
3 *
4 * Copyright (C) 1997, 1998, 1999 David S. Miller (davem@caipfs.rutgers.edu)
5 * Copyright (C) 1998, 1999 Eddie C. Dost (ecd@skynet.be)
6 * Copyright (C) 1999 Jakub Jelinek (jakub@redhat.com)
7 */
8
9#include <linux/kernel.h>
10#include <linux/types.h>
11#include <linux/pci.h>
12#include <linux/init.h>
13#include <linux/slab.h>
14#include <linux/interrupt.h>
15
16#include <asm/pbm.h>
17#include <asm/iommu.h>
18#include <asm/irq.h>
19#include <asm/starfire.h>
20
21#include "pci_impl.h"
22#include "iommu_common.h"
23
24/* All PSYCHO registers are 64-bits. The following accessor
25 * routines are how they are accessed. The REG parameter
26 * is a physical address.
27 */
28#define psycho_read(__reg) \
29({ u64 __ret; \
30 __asm__ __volatile__("ldxa [%1] %2, %0" \
31 : "=r" (__ret) \
32 : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
33 : "memory"); \
34 __ret; \
35})
36#define psycho_write(__reg, __val) \
37 __asm__ __volatile__("stxa %0, [%1] %2" \
38 : /* no outputs */ \
39 : "r" (__val), "r" (__reg), \
40 "i" (ASI_PHYS_BYPASS_EC_E) \
41 : "memory")
42
43/* Misc. PSYCHO PCI controller register offsets and definitions. */
44#define PSYCHO_CONTROL 0x0010UL
45#define PSYCHO_CONTROL_IMPL 0xf000000000000000UL /* Implementation of this PSYCHO*/
46#define PSYCHO_CONTROL_VER 0x0f00000000000000UL /* Version of this PSYCHO */
47#define PSYCHO_CONTROL_MID 0x00f8000000000000UL /* UPA Module ID of PSYCHO */
48#define PSYCHO_CONTROL_IGN 0x0007c00000000000UL /* Interrupt Group Number */
49#define PSYCHO_CONTROL_RESV 0x00003ffffffffff0UL /* Reserved */
50#define PSYCHO_CONTROL_APCKEN 0x0000000000000008UL /* Address Parity Check Enable */
51#define PSYCHO_CONTROL_APERR 0x0000000000000004UL /* Incoming System Addr Parerr */
52#define PSYCHO_CONTROL_IAP 0x0000000000000002UL /* Invert UPA Parity */
53#define PSYCHO_CONTROL_MODE 0x0000000000000001UL /* PSYCHO clock mode */
54#define PSYCHO_PCIA_CTRL 0x2000UL
55#define PSYCHO_PCIB_CTRL 0x4000UL
56#define PSYCHO_PCICTRL_RESV1 0xfffffff000000000UL /* Reserved */
57#define PSYCHO_PCICTRL_SBH_ERR 0x0000000800000000UL /* Streaming byte hole error */
58#define PSYCHO_PCICTRL_SERR 0x0000000400000000UL /* SERR signal asserted */
59#define PSYCHO_PCICTRL_SPEED 0x0000000200000000UL /* PCI speed (1 is U2P clock) */
60#define PSYCHO_PCICTRL_RESV2 0x00000001ffc00000UL /* Reserved */
61#define PSYCHO_PCICTRL_ARB_PARK 0x0000000000200000UL /* PCI arbitration parking */
62#define PSYCHO_PCICTRL_RESV3 0x00000000001ff800UL /* Reserved */
63#define PSYCHO_PCICTRL_SBH_INT 0x0000000000000400UL /* Streaming byte hole int enab */
64#define PSYCHO_PCICTRL_WEN 0x0000000000000200UL /* Power Mgmt Wake Enable */
65#define PSYCHO_PCICTRL_EEN 0x0000000000000100UL /* PCI Error Interrupt Enable */
66#define PSYCHO_PCICTRL_RESV4 0x00000000000000c0UL /* Reserved */
67#define PSYCHO_PCICTRL_AEN 0x000000000000003fUL /* PCI DVMA Arbitration Enable */
68
69/* U2P Programmer's Manual, page 13-55, configuration space
70 * address format:
71 *
72 * 32 24 23 16 15 11 10 8 7 2 1 0
73 * ---------------------------------------------------------
74 * |0 0 0 0 0 0 0 0 1| bus | device | function | reg | 0 0 |
75 * ---------------------------------------------------------
76 */
77#define PSYCHO_CONFIG_BASE(PBM) \
78 ((PBM)->config_space | (1UL << 24))
79#define PSYCHO_CONFIG_ENCODE(BUS, DEVFN, REG) \
80 (((unsigned long)(BUS) << 16) | \
81 ((unsigned long)(DEVFN) << 8) | \
82 ((unsigned long)(REG)))
83
84static void *psycho_pci_config_mkaddr(struct pci_pbm_info *pbm,
85 unsigned char bus,
86 unsigned int devfn,
87 int where)
88{
89 if (!pbm)
90 return NULL;
91 return (void *)
92 (PSYCHO_CONFIG_BASE(pbm) |
93 PSYCHO_CONFIG_ENCODE(bus, devfn, where));
94}
95
96static int psycho_out_of_range(struct pci_pbm_info *pbm,
97 unsigned char bus,
98 unsigned char devfn)
99{
100 return ((pbm->parent == 0) ||
101 ((pbm == &pbm->parent->pbm_B) &&
102 (bus == pbm->pci_first_busno) &&
103 PCI_SLOT(devfn) > 8) ||
104 ((pbm == &pbm->parent->pbm_A) &&
105 (bus == pbm->pci_first_busno) &&
106 PCI_SLOT(devfn) > 8));
107}
108
109/* PSYCHO PCI configuration space accessors. */
110
111static int psycho_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
112 int where, int size, u32 *value)
113{
114 struct pci_pbm_info *pbm = bus_dev->sysdata;
115 unsigned char bus = bus_dev->number;
116 u32 *addr;
117 u16 tmp16;
118 u8 tmp8;
119
120 switch (size) {
121 case 1:
122 *value = 0xff;
123 break;
124 case 2:
125 *value = 0xffff;
126 break;
127 case 4:
128 *value = 0xffffffff;
129 break;
130 }
131
132 addr = psycho_pci_config_mkaddr(pbm, bus, devfn, where);
133 if (!addr)
134 return PCIBIOS_SUCCESSFUL;
135
136 if (psycho_out_of_range(pbm, bus, devfn))
137 return PCIBIOS_SUCCESSFUL;
138 switch (size) {
139 case 1:
140 pci_config_read8((u8 *)addr, &tmp8);
141 *value = (u32) tmp8;
142 break;
143
144 case 2:
145 if (where & 0x01) {
146 printk("pci_read_config_word: misaligned reg [%x]\n",
147 where);
148 return PCIBIOS_SUCCESSFUL;
149 }
150 pci_config_read16((u16 *)addr, &tmp16);
151 *value = (u32) tmp16;
152 break;
153
154 case 4:
155 if (where & 0x03) {
156 printk("pci_read_config_dword: misaligned reg [%x]\n",
157 where);
158 return PCIBIOS_SUCCESSFUL;
159 }
160 pci_config_read32(addr, value);
161 break;
162 }
163 return PCIBIOS_SUCCESSFUL;
164}
165
166static int psycho_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
167 int where, int size, u32 value)
168{
169 struct pci_pbm_info *pbm = bus_dev->sysdata;
170 unsigned char bus = bus_dev->number;
171 u32 *addr;
172
173 addr = psycho_pci_config_mkaddr(pbm, bus, devfn, where);
174 if (!addr)
175 return PCIBIOS_SUCCESSFUL;
176
177 if (psycho_out_of_range(pbm, bus, devfn))
178 return PCIBIOS_SUCCESSFUL;
179
180 switch (size) {
181 case 1:
182 pci_config_write8((u8 *)addr, value);
183 break;
184
185 case 2:
186 if (where & 0x01) {
187 printk("pci_write_config_word: misaligned reg [%x]\n",
188 where);
189 return PCIBIOS_SUCCESSFUL;
190 }
191 pci_config_write16((u16 *)addr, value);
192 break;
193
194 case 4:
195 if (where & 0x03) {
196 printk("pci_write_config_dword: misaligned reg [%x]\n",
197 where);
198 return PCIBIOS_SUCCESSFUL;
199 }
200 pci_config_write32(addr, value);
201 }
202 return PCIBIOS_SUCCESSFUL;
203}
204
205static struct pci_ops psycho_ops = {
206 .read = psycho_read_pci_cfg,
207 .write = psycho_write_pci_cfg,
208};
209
210/* PSYCHO interrupt mapping support. */
211#define PSYCHO_IMAP_A_SLOT0 0x0c00UL
212#define PSYCHO_IMAP_B_SLOT0 0x0c20UL
213static unsigned long psycho_pcislot_imap_offset(unsigned long ino)
214{
215 unsigned int bus = (ino & 0x10) >> 4;
216 unsigned int slot = (ino & 0x0c) >> 2;
217
218 if (bus == 0)
219 return PSYCHO_IMAP_A_SLOT0 + (slot * 8);
220 else
221 return PSYCHO_IMAP_B_SLOT0 + (slot * 8);
222}
223
224#define PSYCHO_IMAP_SCSI 0x1000UL
225#define PSYCHO_IMAP_ETH 0x1008UL
226#define PSYCHO_IMAP_BPP 0x1010UL
227#define PSYCHO_IMAP_AU_REC 0x1018UL
228#define PSYCHO_IMAP_AU_PLAY 0x1020UL
229#define PSYCHO_IMAP_PFAIL 0x1028UL
230#define PSYCHO_IMAP_KMS 0x1030UL
231#define PSYCHO_IMAP_FLPY 0x1038UL
232#define PSYCHO_IMAP_SHW 0x1040UL
233#define PSYCHO_IMAP_KBD 0x1048UL
234#define PSYCHO_IMAP_MS 0x1050UL
235#define PSYCHO_IMAP_SER 0x1058UL
236#define PSYCHO_IMAP_TIM0 0x1060UL
237#define PSYCHO_IMAP_TIM1 0x1068UL
238#define PSYCHO_IMAP_UE 0x1070UL
239#define PSYCHO_IMAP_CE 0x1078UL
240#define PSYCHO_IMAP_A_ERR 0x1080UL
241#define PSYCHO_IMAP_B_ERR 0x1088UL
242#define PSYCHO_IMAP_PMGMT 0x1090UL
243#define PSYCHO_IMAP_GFX 0x1098UL
244#define PSYCHO_IMAP_EUPA 0x10a0UL
245
246static unsigned long __onboard_imap_off[] = {
247/*0x20*/ PSYCHO_IMAP_SCSI,
248/*0x21*/ PSYCHO_IMAP_ETH,
249/*0x22*/ PSYCHO_IMAP_BPP,
250/*0x23*/ PSYCHO_IMAP_AU_REC,
251/*0x24*/ PSYCHO_IMAP_AU_PLAY,
252/*0x25*/ PSYCHO_IMAP_PFAIL,
253/*0x26*/ PSYCHO_IMAP_KMS,
254/*0x27*/ PSYCHO_IMAP_FLPY,
255/*0x28*/ PSYCHO_IMAP_SHW,
256/*0x29*/ PSYCHO_IMAP_KBD,
257/*0x2a*/ PSYCHO_IMAP_MS,
258/*0x2b*/ PSYCHO_IMAP_SER,
259/*0x2c*/ PSYCHO_IMAP_TIM0,
260/*0x2d*/ PSYCHO_IMAP_TIM1,
261/*0x2e*/ PSYCHO_IMAP_UE,
262/*0x2f*/ PSYCHO_IMAP_CE,
263/*0x30*/ PSYCHO_IMAP_A_ERR,
264/*0x31*/ PSYCHO_IMAP_B_ERR,
265/*0x32*/ PSYCHO_IMAP_PMGMT
266};
267#define PSYCHO_ONBOARD_IRQ_BASE 0x20
268#define PSYCHO_ONBOARD_IRQ_LAST 0x32
269#define psycho_onboard_imap_offset(__ino) \
270 __onboard_imap_off[(__ino) - PSYCHO_ONBOARD_IRQ_BASE]
271
272#define PSYCHO_ICLR_A_SLOT0 0x1400UL
273#define PSYCHO_ICLR_SCSI 0x1800UL
274
275#define psycho_iclr_offset(ino) \
276 ((ino & 0x20) ? (PSYCHO_ICLR_SCSI + (((ino) & 0x1f) << 3)) : \
277 (PSYCHO_ICLR_A_SLOT0 + (((ino) & 0x1f)<<3)))
278
279/* PCI PSYCHO INO number to Sparc PIL level. */
280static unsigned char psycho_pil_table[] = {
281/*0x00*/0, 0, 0, 0, /* PCI A slot 0 Int A, B, C, D */
282/*0x04*/0, 0, 0, 0, /* PCI A slot 1 Int A, B, C, D */
283/*0x08*/0, 0, 0, 0, /* PCI A slot 2 Int A, B, C, D */
284/*0x0c*/0, 0, 0, 0, /* PCI A slot 3 Int A, B, C, D */
285/*0x10*/0, 0, 0, 0, /* PCI B slot 0 Int A, B, C, D */
286/*0x14*/0, 0, 0, 0, /* PCI B slot 1 Int A, B, C, D */
287/*0x18*/0, 0, 0, 0, /* PCI B slot 2 Int A, B, C, D */
288/*0x1c*/0, 0, 0, 0, /* PCI B slot 3 Int A, B, C, D */
289/*0x20*/4, /* SCSI */
290/*0x21*/5, /* Ethernet */
291/*0x22*/8, /* Parallel Port */
292/*0x23*/13, /* Audio Record */
293/*0x24*/14, /* Audio Playback */
294/*0x25*/15, /* PowerFail */
295/*0x26*/4, /* second SCSI */
296/*0x27*/11, /* Floppy */
297/*0x28*/4, /* Spare Hardware */
298/*0x29*/9, /* Keyboard */
299/*0x2a*/4, /* Mouse */
300/*0x2b*/12, /* Serial */
301/*0x2c*/10, /* Timer 0 */
302/*0x2d*/11, /* Timer 1 */
303/*0x2e*/15, /* Uncorrectable ECC */
304/*0x2f*/15, /* Correctable ECC */
305/*0x30*/15, /* PCI Bus A Error */
306/*0x31*/15, /* PCI Bus B Error */
307/*0x32*/15, /* Power Management */
308};
309
310static int __init psycho_ino_to_pil(struct pci_dev *pdev, unsigned int ino)
311{
312 int ret;
313
314 ret = psycho_pil_table[ino];
315 if (ret == 0 && pdev == NULL) {
316 ret = 4;
317 } else if (ret == 0) {
318 switch ((pdev->class >> 16) & 0xff) {
319 case PCI_BASE_CLASS_STORAGE:
320 ret = 4;
321 break;
322
323 case PCI_BASE_CLASS_NETWORK:
324 ret = 6;
325 break;
326
327 case PCI_BASE_CLASS_DISPLAY:
328 ret = 9;
329 break;
330
331 case PCI_BASE_CLASS_MULTIMEDIA:
332 case PCI_BASE_CLASS_MEMORY:
333 case PCI_BASE_CLASS_BRIDGE:
334 case PCI_BASE_CLASS_SERIAL:
335 ret = 10;
336 break;
337
338 default:
339 ret = 4;
340 break;
341 };
342 }
343
344 return ret;
345}
346
/* Build (or look up) the IRQ bucket for the given INO on this PBM
 * and return the corresponding IRQ cookie.  Halts via the PROM on
 * an INO outside the known onboard range.
 */
static unsigned int __init psycho_irq_build(struct pci_pbm_info *pbm,
					    struct pci_dev *pdev,
					    unsigned int ino)
{
	struct ino_bucket *bucket;
	unsigned long imap, iclr;
	unsigned long imap_off, iclr_off;
	int pil, inofixup = 0;

	ino &= PCI_IRQ_INO;
	if (ino < PSYCHO_ONBOARD_IRQ_BASE) {
		/* PCI slot */
		imap_off = psycho_pcislot_imap_offset(ino);
	} else {
		/* Onboard device */
		if (ino > PSYCHO_ONBOARD_IRQ_LAST) {
			prom_printf("psycho_irq_build: Wacky INO [%x]\n", ino);
			prom_halt();
		}
		imap_off = psycho_onboard_imap_offset(ino);
	}

	/* Now build the IRQ bucket. */
	pil = psycho_ino_to_pil(pdev, ino);

	if (PIL_RESERVED(pil))
		BUG();

	imap = pbm->controller_regs + imap_off;
	/* NOTE(review): the +4 presumably addresses the low 32-bit half
	 * of the 64-bit IMAP/ICLR registers — confirm against the IRQ
	 * bucket code in irq.c.
	 */
	imap += 4;

	iclr_off = psycho_iclr_offset(ino);
	iclr = pbm->controller_regs + iclr_off;
	iclr += 4;

	/* PCI-slot interrupts (INO bit 5 clear) keep their low two
	 * INO bits (INTA..INTD line) as the fixup value.
	 */
	if ((ino & 0x20) == 0)
		inofixup = ino & 0x03;

	bucket = __bucket(build_irq(pil, inofixup, iclr, imap));
	bucket->flags |= IBF_PCI;

	return __irq(bucket);
}
390
391/* PSYCHO error handling support. */
392enum psycho_error_type {
393 UE_ERR, CE_ERR, PCI_ERR
394};
395
396/* Helper function of IOMMU error checking, which checks out
397 * the state of the streaming buffers. The IOMMU lock is
398 * held when this is called.
399 *
400 * For the PCI error case we know which PBM (and thus which
401 * streaming buffer) caused the error, but for the uncorrectable
402 * error case we do not. So we always check both streaming caches.
403 */
404#define PSYCHO_STRBUF_CONTROL_A 0x2800UL
405#define PSYCHO_STRBUF_CONTROL_B 0x4800UL
406#define PSYCHO_STRBUF_CTRL_LPTR 0x00000000000000f0UL /* LRU Lock Pointer */
407#define PSYCHO_STRBUF_CTRL_LENAB 0x0000000000000008UL /* LRU Lock Enable */
408#define PSYCHO_STRBUF_CTRL_RRDIS 0x0000000000000004UL /* Rerun Disable */
409#define PSYCHO_STRBUF_CTRL_DENAB 0x0000000000000002UL /* Diagnostic Mode Enable */
410#define PSYCHO_STRBUF_CTRL_ENAB 0x0000000000000001UL /* Streaming Buffer Enable */
411#define PSYCHO_STRBUF_FLUSH_A 0x2808UL
412#define PSYCHO_STRBUF_FLUSH_B 0x4808UL
413#define PSYCHO_STRBUF_FSYNC_A 0x2810UL
414#define PSYCHO_STRBUF_FSYNC_B 0x4810UL
415#define PSYCHO_STC_DATA_A 0xb000UL
416#define PSYCHO_STC_DATA_B 0xc000UL
417#define PSYCHO_STC_ERR_A 0xb400UL
418#define PSYCHO_STC_ERR_B 0xc400UL
419#define PSYCHO_STCERR_WRITE 0x0000000000000002UL /* Write Error */
420#define PSYCHO_STCERR_READ 0x0000000000000001UL /* Read Error */
421#define PSYCHO_STC_TAG_A 0xb800UL
422#define PSYCHO_STC_TAG_B 0xc800UL
423#define PSYCHO_STCTAG_PPN 0x0fffffff00000000UL /* Physical Page Number */
424#define PSYCHO_STCTAG_VPN 0x00000000ffffe000UL /* Virtual Page Number */
425#define PSYCHO_STCTAG_VALID 0x0000000000000002UL /* Valid */
426#define PSYCHO_STCTAG_WRITE 0x0000000000000001UL /* Writable */
427#define PSYCHO_STC_LINE_A 0xb900UL
428#define PSYCHO_STC_LINE_B 0xc900UL
429#define PSYCHO_STCLINE_LINDX 0x0000000001e00000UL /* LRU Index */
430#define PSYCHO_STCLINE_SPTR 0x00000000001f8000UL /* Dirty Data Start Pointer */
431#define PSYCHO_STCLINE_LADDR 0x0000000000007f00UL /* Line Address */
432#define PSYCHO_STCLINE_EPTR 0x00000000000000fcUL /* Dirty Data End Pointer */
433#define PSYCHO_STCLINE_VALID 0x0000000000000002UL /* Valid */
434#define PSYCHO_STCLINE_FOFN 0x0000000000000001UL /* Fetch Outstanding / Flush Necessary */
435
436static DEFINE_SPINLOCK(stc_buf_lock);
437static unsigned long stc_error_buf[128];
438static unsigned long stc_tag_buf[16];
439static unsigned long stc_line_buf[16];
440
/* Snapshot, clear, and log the streaming cache (STC) diagnostic
 * state of one PBM.  Called with the IOMMU lock held (see the
 * helper-block comment above); serializes on stc_buf_lock because
 * the snapshot buffers are file-level statics.
 */
static void __psycho_check_one_stc(struct pci_controller_info *p,
				   struct pci_pbm_info *pbm,
				   int is_pbm_a)
{
	struct pci_strbuf *strbuf = &pbm->stc;
	unsigned long regbase = p->pbm_A.controller_regs;
	unsigned long err_base, tag_base, line_base;
	u64 control;
	int i;

	/* Select the diagnostic register bank for this PBM. */
	if (is_pbm_a) {
		err_base = regbase + PSYCHO_STC_ERR_A;
		tag_base = regbase + PSYCHO_STC_TAG_A;
		line_base = regbase + PSYCHO_STC_LINE_A;
	} else {
		err_base = regbase + PSYCHO_STC_ERR_B;
		tag_base = regbase + PSYCHO_STC_TAG_B;
		line_base = regbase + PSYCHO_STC_LINE_B;
	}

	spin_lock(&stc_buf_lock);

	/* This is __REALLY__ dangerous. When we put the
	 * streaming buffer into diagnostic mode to probe
	 * it's tags and error status, we _must_ clear all
	 * of the line tag valid bits before re-enabling
	 * the streaming buffer. If any dirty data lives
	 * in the STC when we do this, we will end up
	 * invalidating it before it has a chance to reach
	 * main memory.
	 */
	control = psycho_read(strbuf->strbuf_control);
	psycho_write(strbuf->strbuf_control,
		     (control | PSYCHO_STRBUF_CTRL_DENAB));
	/* Snapshot then clear the 128 per-entry error words. */
	for (i = 0; i < 128; i++) {
		unsigned long val;

		val = psycho_read(err_base + (i * 8UL));
		psycho_write(err_base + (i * 8UL), 0UL);
		stc_error_buf[i] = val;
	}
	/* Snapshot then clear the 16 tag/line register pairs. */
	for (i = 0; i < 16; i++) {
		stc_tag_buf[i] = psycho_read(tag_base + (i * 8UL));
		stc_line_buf[i] = psycho_read(line_base + (i * 8UL));
		psycho_write(tag_base + (i * 8UL), 0UL);
		psycho_write(line_base + (i * 8UL), 0UL);
	}

	/* OK, state is logged, exit diagnostic mode. */
	psycho_write(strbuf->strbuf_control, control);

	/* Decode: each of the 16 lines owns 8 consecutive error
	 * words; only lines with at least one non-zero error word
	 * get their tag/line state printed.
	 */
	for (i = 0; i < 16; i++) {
		int j, saw_error, first, last;

		saw_error = 0;
		first = i * 8;
		last = first + 8;
		for (j = first; j < last; j++) {
			unsigned long errval = stc_error_buf[j];
			if (errval != 0) {
				saw_error++;
				printk("PSYCHO%d(PBM%c): STC_ERR(%d)[wr(%d)rd(%d)]\n",
				       p->index,
				       (is_pbm_a ? 'A' : 'B'),
				       j,
				       (errval & PSYCHO_STCERR_WRITE) ? 1 : 0,
				       (errval & PSYCHO_STCERR_READ) ? 1 : 0);
			}
		}
		if (saw_error != 0) {
			unsigned long tagval = stc_tag_buf[i];
			unsigned long lineval = stc_line_buf[i];
			printk("PSYCHO%d(PBM%c): STC_TAG(%d)[PA(%016lx)VA(%08lx)V(%d)W(%d)]\n",
			       p->index,
			       (is_pbm_a ? 'A' : 'B'),
			       i,
			       ((tagval & PSYCHO_STCTAG_PPN) >> 19UL),
			       (tagval & PSYCHO_STCTAG_VPN),
			       ((tagval & PSYCHO_STCTAG_VALID) ? 1 : 0),
			       ((tagval & PSYCHO_STCTAG_WRITE) ? 1 : 0));
			printk("PSYCHO%d(PBM%c): STC_LINE(%d)[LIDX(%lx)SP(%lx)LADDR(%lx)EP(%lx)"
			       "V(%d)FOFN(%d)]\n",
			       p->index,
			       (is_pbm_a ? 'A' : 'B'),
			       i,
			       ((lineval & PSYCHO_STCLINE_LINDX) >> 21UL),
			       ((lineval & PSYCHO_STCLINE_SPTR) >> 15UL),
			       ((lineval & PSYCHO_STCLINE_LADDR) >> 8UL),
			       ((lineval & PSYCHO_STCLINE_EPTR) >> 2UL),
			       ((lineval & PSYCHO_STCLINE_VALID) ? 1 : 0),
			       ((lineval & PSYCHO_STCLINE_FOFN) ? 1 : 0));
		}
	}

	spin_unlock(&stc_buf_lock);
}
537
538static void __psycho_check_stc_error(struct pci_controller_info *p,
539 unsigned long afsr,
540 unsigned long afar,
541 enum psycho_error_type type)
542{
543 struct pci_pbm_info *pbm;
544
545 pbm = &p->pbm_A;
546 if (pbm->stc.strbuf_enabled)
547 __psycho_check_one_stc(p, pbm, 1);
548
549 pbm = &p->pbm_B;
550 if (pbm->stc.strbuf_enabled)
551 __psycho_check_one_stc(p, pbm, 0);
552}
553
554/* When an Uncorrectable Error or a PCI Error happens, we
555 * interrogate the IOMMU state to see if it is the cause.
556 */
557#define PSYCHO_IOMMU_CONTROL 0x0200UL
558#define PSYCHO_IOMMU_CTRL_RESV 0xfffffffff9000000UL /* Reserved */
559#define PSYCHO_IOMMU_CTRL_XLTESTAT 0x0000000006000000UL /* Translation Error Status */
560#define PSYCHO_IOMMU_CTRL_XLTEERR 0x0000000001000000UL /* Translation Error encountered */
561#define PSYCHO_IOMMU_CTRL_LCKEN 0x0000000000800000UL /* Enable translation locking */
562#define PSYCHO_IOMMU_CTRL_LCKPTR 0x0000000000780000UL /* Translation lock pointer */
563#define PSYCHO_IOMMU_CTRL_TSBSZ 0x0000000000070000UL /* TSB Size */
564#define PSYCHO_IOMMU_TSBSZ_1K 0x0000000000000000UL /* TSB Table 1024 8-byte entries */
565#define PSYCHO_IOMMU_TSBSZ_2K 0x0000000000010000UL /* TSB Table 2048 8-byte entries */
566#define PSYCHO_IOMMU_TSBSZ_4K 0x0000000000020000UL /* TSB Table 4096 8-byte entries */
567#define PSYCHO_IOMMU_TSBSZ_8K 0x0000000000030000UL /* TSB Table 8192 8-byte entries */
568#define PSYCHO_IOMMU_TSBSZ_16K 0x0000000000040000UL /* TSB Table 16k 8-byte entries */
569#define PSYCHO_IOMMU_TSBSZ_32K 0x0000000000050000UL /* TSB Table 32k 8-byte entries */
570#define PSYCHO_IOMMU_TSBSZ_64K 0x0000000000060000UL /* TSB Table 64k 8-byte entries */
571#define PSYCHO_IOMMU_TSBSZ_128K 0x0000000000070000UL /* TSB Table 128k 8-byte entries */
572#define PSYCHO_IOMMU_CTRL_RESV2 0x000000000000fff8UL /* Reserved */
573#define PSYCHO_IOMMU_CTRL_TBWSZ 0x0000000000000004UL /* Assumed page size, 0=8k 1=64k */
574#define PSYCHO_IOMMU_CTRL_DENAB 0x0000000000000002UL /* Diagnostic mode enable */
575#define PSYCHO_IOMMU_CTRL_ENAB 0x0000000000000001UL /* IOMMU Enable */
576#define PSYCHO_IOMMU_TSBBASE 0x0208UL
577#define PSYCHO_IOMMU_FLUSH 0x0210UL
578#define PSYCHO_IOMMU_TAG 0xa580UL
579#define PSYCHO_IOMMU_TAG_ERRSTS (0x3UL << 23UL)
580#define PSYCHO_IOMMU_TAG_ERR (0x1UL << 22UL)
581#define PSYCHO_IOMMU_TAG_WRITE (0x1UL << 21UL)
582#define PSYCHO_IOMMU_TAG_STREAM (0x1UL << 20UL)
583#define PSYCHO_IOMMU_TAG_SIZE (0x1UL << 19UL)
584#define PSYCHO_IOMMU_TAG_VPAGE 0x7ffffUL
585#define PSYCHO_IOMMU_DATA 0xa600UL
586#define PSYCHO_IOMMU_DATA_VALID (1UL << 30UL)
587#define PSYCHO_IOMMU_DATA_CACHE (1UL << 28UL)
588#define PSYCHO_IOMMU_DATA_PPAGE 0xfffffffUL
/* Interrogate the IOMMU after a UE or PCI error to see whether a
 * translation error caused it.  If so, log the error type, dump
 * and clear the 16 TLB diagnostic entries, then fall through to
 * checking the streaming caches.
 */
static void psycho_check_iommu_error(struct pci_controller_info *p,
				     unsigned long afsr,
				     unsigned long afar,
				     enum psycho_error_type type)
{
	struct pci_iommu *iommu = p->pbm_A.iommu;
	unsigned long iommu_tag[16];
	unsigned long iommu_data[16];
	unsigned long flags;
	u64 control;
	int i;

	spin_lock_irqsave(&iommu->lock, flags);
	control = psycho_read(iommu->iommu_control);
	if (control & PSYCHO_IOMMU_CTRL_XLTEERR) {
		char *type_string;

		/* Clear the error encountered bit. */
		control &= ~PSYCHO_IOMMU_CTRL_XLTEERR;
		psycho_write(iommu->iommu_control, control);

		/* Decode the two-bit translation error status field. */
		switch((control & PSYCHO_IOMMU_CTRL_XLTESTAT) >> 25UL) {
		case 0:
			type_string = "Protection Error";
			break;
		case 1:
			type_string = "Invalid Error";
			break;
		case 2:
			type_string = "TimeOut Error";
			break;
		case 3:
		default:
			type_string = "ECC Error";
			break;
		};
		printk("PSYCHO%d: IOMMU Error, type[%s]\n",
		       p->index, type_string);

		/* Put the IOMMU into diagnostic mode and probe
		 * it's TLB for entries with error status.
		 *
		 * It is very possible for another DVMA to occur
		 * while we do this probe, and corrupt the system
		 * further. But we are so screwed at this point
		 * that we are likely to crash hard anyways, so
		 * get as much diagnostic information to the
		 * console as we can.
		 */
		psycho_write(iommu->iommu_control,
			     control | PSYCHO_IOMMU_CTRL_DENAB);
		for (i = 0; i < 16; i++) {
			unsigned long base = p->pbm_A.controller_regs;

			iommu_tag[i] =
				psycho_read(base + PSYCHO_IOMMU_TAG + (i * 8UL));
			iommu_data[i] =
				psycho_read(base + PSYCHO_IOMMU_DATA + (i * 8UL));

			/* Now clear out the entry. */
			psycho_write(base + PSYCHO_IOMMU_TAG + (i * 8UL), 0);
			psycho_write(base + PSYCHO_IOMMU_DATA + (i * 8UL), 0);
		}

		/* Leave diagnostic mode. */
		psycho_write(iommu->iommu_control, control);

		/* Report every snapshotted TLB entry that carries the
		 * error bit, with its decoded tag and data fields.
		 */
		for (i = 0; i < 16; i++) {
			unsigned long tag, data;

			tag = iommu_tag[i];
			if (!(tag & PSYCHO_IOMMU_TAG_ERR))
				continue;

			data = iommu_data[i];
			switch((tag & PSYCHO_IOMMU_TAG_ERRSTS) >> 23UL) {
			case 0:
				type_string = "Protection Error";
				break;
			case 1:
				type_string = "Invalid Error";
				break;
			case 2:
				type_string = "TimeOut Error";
				break;
			case 3:
			default:
				type_string = "ECC Error";
				break;
			};
			printk("PSYCHO%d: IOMMU TAG(%d)[error(%s) wr(%d) str(%d) sz(%dK) vpg(%08lx)]\n",
			       p->index, i, type_string,
			       ((tag & PSYCHO_IOMMU_TAG_WRITE) ? 1 : 0),
			       ((tag & PSYCHO_IOMMU_TAG_STREAM) ? 1 : 0),
			       ((tag & PSYCHO_IOMMU_TAG_SIZE) ? 64 : 8),
			       (tag & PSYCHO_IOMMU_TAG_VPAGE) << IOMMU_PAGE_SHIFT);
			printk("PSYCHO%d: IOMMU DATA(%d)[valid(%d) cache(%d) ppg(%016lx)]\n",
			       p->index, i,
			       ((data & PSYCHO_IOMMU_DATA_VALID) ? 1 : 0),
			       ((data & PSYCHO_IOMMU_DATA_CACHE) ? 1 : 0),
			       (data & PSYCHO_IOMMU_DATA_PPAGE) << IOMMU_PAGE_SHIFT);
		}
	}
	/* Always check the streaming caches as well. */
	__psycho_check_stc_error(p, afsr, afar, type);
	spin_unlock_irqrestore(&iommu->lock, flags);
}
695
696/* Uncorrectable Errors. Cause of the error and the address are
697 * recorded in the UE_AFSR and UE_AFAR of PSYCHO. They are errors
698 * relating to UPA interface transactions.
699 */
700#define PSYCHO_UE_AFSR 0x0030UL
701#define PSYCHO_UEAFSR_PPIO 0x8000000000000000UL /* Primary PIO is cause */
702#define PSYCHO_UEAFSR_PDRD 0x4000000000000000UL /* Primary DVMA read is cause */
703#define PSYCHO_UEAFSR_PDWR 0x2000000000000000UL /* Primary DVMA write is cause */
704#define PSYCHO_UEAFSR_SPIO 0x1000000000000000UL /* Secondary PIO is cause */
705#define PSYCHO_UEAFSR_SDRD 0x0800000000000000UL /* Secondary DVMA read is cause */
706#define PSYCHO_UEAFSR_SDWR 0x0400000000000000UL /* Secondary DVMA write is cause*/
707#define PSYCHO_UEAFSR_RESV1 0x03ff000000000000UL /* Reserved */
708#define PSYCHO_UEAFSR_BMSK 0x0000ffff00000000UL /* Bytemask of failed transfer */
709#define PSYCHO_UEAFSR_DOFF 0x00000000e0000000UL /* Doubleword Offset */
710#define PSYCHO_UEAFSR_MID 0x000000001f000000UL /* UPA MID causing the fault */
711#define PSYCHO_UEAFSR_BLK 0x0000000000800000UL /* Trans was block operation */
712#define PSYCHO_UEAFSR_RESV2 0x00000000007fffffUL /* Reserved */
713#define PSYCHO_UE_AFAR 0x0038UL
714
/* Uncorrectable error interrupt handler.  Latches and clears the
 * UE AFSR/AFAR, logs primary and secondary causes, then probes the
 * IOMMU for related translation errors.  Returns IRQ_NONE when no
 * primary/secondary UE bit was set (not our interrupt).
 */
static irqreturn_t psycho_ue_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct pci_controller_info *p = dev_id;
	unsigned long afsr_reg = p->pbm_A.controller_regs + PSYCHO_UE_AFSR;
	unsigned long afar_reg = p->pbm_A.controller_regs + PSYCHO_UE_AFAR;
	unsigned long afsr, afar, error_bits;
	int reported;

	/* Latch uncorrectable error status. */
	afar = psycho_read(afar_reg);
	afsr = psycho_read(afsr_reg);

	/* Clear the primary/secondary error status bits. */
	error_bits = afsr &
		(PSYCHO_UEAFSR_PPIO | PSYCHO_UEAFSR_PDRD | PSYCHO_UEAFSR_PDWR |
		 PSYCHO_UEAFSR_SPIO | PSYCHO_UEAFSR_SDRD | PSYCHO_UEAFSR_SDWR);
	if (!error_bits)
		return IRQ_NONE;
	/* Writing the set bits back clears them (W1C register). */
	psycho_write(afsr_reg, error_bits);

	/* Log the error. */
	printk("PSYCHO%d: Uncorrectable Error, primary error type[%s]\n",
	       p->index,
	       (((error_bits & PSYCHO_UEAFSR_PPIO) ?
		 "PIO" :
		 ((error_bits & PSYCHO_UEAFSR_PDRD) ?
		  "DMA Read" :
		  ((error_bits & PSYCHO_UEAFSR_PDWR) ?
		   "DMA Write" : "???")))));
	printk("PSYCHO%d: bytemask[%04lx] dword_offset[%lx] UPA_MID[%02lx] was_block(%d)\n",
	       p->index,
	       (afsr & PSYCHO_UEAFSR_BMSK) >> 32UL,
	       (afsr & PSYCHO_UEAFSR_DOFF) >> 29UL,
	       (afsr & PSYCHO_UEAFSR_MID) >> 24UL,
	       ((afsr & PSYCHO_UEAFSR_BLK) ? 1 : 0));
	printk("PSYCHO%d: UE AFAR [%016lx]\n", p->index, afar);
	printk("PSYCHO%d: UE Secondary errors [", p->index);
	reported = 0;
	if (afsr & PSYCHO_UEAFSR_SPIO) {
		reported++;
		printk("(PIO)");
	}
	if (afsr & PSYCHO_UEAFSR_SDRD) {
		reported++;
		printk("(DMA Read)");
	}
	if (afsr & PSYCHO_UEAFSR_SDWR) {
		reported++;
		printk("(DMA Write)");
	}
	if (!reported)
		printk("(none)");
	printk("]\n");

	/* Interrogate IOMMU for error status. */
	psycho_check_iommu_error(p, afsr, afar, UE_ERR);

	return IRQ_HANDLED;
}
774
/* Correctable Errors. */
#define PSYCHO_CE_AFSR	0x0040UL
#define  PSYCHO_CEAFSR_PPIO	0x8000000000000000UL /* Primary PIO is cause         */
#define  PSYCHO_CEAFSR_PDRD	0x4000000000000000UL /* Primary DVMA read is cause   */
#define  PSYCHO_CEAFSR_PDWR	0x2000000000000000UL /* Primary DVMA write is cause  */
#define  PSYCHO_CEAFSR_SPIO	0x1000000000000000UL /* Secondary PIO is cause       */
#define  PSYCHO_CEAFSR_SDRD	0x0800000000000000UL /* Secondary DVMA read is cause */
#define  PSYCHO_CEAFSR_SDWR	0x0400000000000000UL /* Secondary DVMA write is cause*/
#define  PSYCHO_CEAFSR_RESV1	0x0300000000000000UL /* Reserved                     */
#define  PSYCHO_CEAFSR_ESYND	0x00ff000000000000UL /* Syndrome Bits                */
#define  PSYCHO_CEAFSR_BMSK	0x0000ffff00000000UL /* Bytemask of failed transfer  */
#define  PSYCHO_CEAFSR_DOFF	0x00000000e0000000UL /* Double Offset                */
#define  PSYCHO_CEAFSR_MID	0x000000001f000000UL /* UPA MID causing the fault    */
#define  PSYCHO_CEAFSR_BLK	0x0000000000800000UL /* Trans was block operation    */
#define  PSYCHO_CEAFSR_RESV2	0x00000000007fffffUL /* Reserved                     */
/* Bug fixed: this was defined as 0x0040UL, the same offset as the CE
 * AFSR, so the CE handler printed the AFSR value as the AFAR.  The CE
 * AFAR follows its AFSR, mirroring the UE pair at 0x0030/0x0038.
 */
#define PSYCHO_CE_AFAR	0x0048UL
791
/* Correctable error interrupt handler.  Latches and clears the CE
 * AFSR/AFAR and logs primary/secondary causes and the ECC syndrome.
 * Returns IRQ_NONE when no primary/secondary CE bit was set.
 */
static irqreturn_t psycho_ce_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct pci_controller_info *p = dev_id;
	unsigned long afsr_reg = p->pbm_A.controller_regs + PSYCHO_CE_AFSR;
	/* afar_reg comes from PSYCHO_CE_AFAR; see register defs above. */
	unsigned long afar_reg = p->pbm_A.controller_regs + PSYCHO_CE_AFAR;
	unsigned long afsr, afar, error_bits;
	int reported;

	/* Latch error status. */
	afar = psycho_read(afar_reg);
	afsr = psycho_read(afsr_reg);

	/* Clear primary/secondary error status bits. */
	error_bits = afsr &
		(PSYCHO_CEAFSR_PPIO | PSYCHO_CEAFSR_PDRD | PSYCHO_CEAFSR_PDWR |
		 PSYCHO_CEAFSR_SPIO | PSYCHO_CEAFSR_SDRD | PSYCHO_CEAFSR_SDWR);
	if (!error_bits)
		return IRQ_NONE;
	/* Writing the set bits back clears them (W1C register). */
	psycho_write(afsr_reg, error_bits);

	/* Log the error. */
	printk("PSYCHO%d: Correctable Error, primary error type[%s]\n",
	       p->index,
	       (((error_bits & PSYCHO_CEAFSR_PPIO) ?
		 "PIO" :
		 ((error_bits & PSYCHO_CEAFSR_PDRD) ?
		  "DMA Read" :
		  ((error_bits & PSYCHO_CEAFSR_PDWR) ?
		   "DMA Write" : "???")))));

	/* XXX Use syndrome and afar to print out module string just like
	 * XXX UDB CE trap handler does... -DaveM
	 */
	printk("PSYCHO%d: syndrome[%02lx] bytemask[%04lx] dword_offset[%lx] "
	       "UPA_MID[%02lx] was_block(%d)\n",
	       p->index,
	       (afsr & PSYCHO_CEAFSR_ESYND) >> 48UL,
	       (afsr & PSYCHO_CEAFSR_BMSK) >> 32UL,
	       (afsr & PSYCHO_CEAFSR_DOFF) >> 29UL,
	       (afsr & PSYCHO_CEAFSR_MID) >> 24UL,
	       ((afsr & PSYCHO_CEAFSR_BLK) ? 1 : 0));
	printk("PSYCHO%d: CE AFAR [%016lx]\n", p->index, afar);
	printk("PSYCHO%d: CE Secondary errors [", p->index);
	reported = 0;
	if (afsr & PSYCHO_CEAFSR_SPIO) {
		reported++;
		printk("(PIO)");
	}
	if (afsr & PSYCHO_CEAFSR_SDRD) {
		reported++;
		printk("(DMA Read)");
	}
	if (afsr & PSYCHO_CEAFSR_SDWR) {
		reported++;
		printk("(DMA Write)");
	}
	if (!reported)
		printk("(none)");
	printk("]\n");

	return IRQ_HANDLED;
}
854
855/* PCI Errors. They are signalled by the PCI bus module since they
856 * are associated with a specific bus segment.
857 */
858#define PSYCHO_PCI_AFSR_A 0x2010UL
859#define PSYCHO_PCI_AFSR_B 0x4010UL
860#define PSYCHO_PCIAFSR_PMA 0x8000000000000000UL /* Primary Master Abort Error */
861#define PSYCHO_PCIAFSR_PTA 0x4000000000000000UL /* Primary Target Abort Error */
862#define PSYCHO_PCIAFSR_PRTRY 0x2000000000000000UL /* Primary Excessive Retries */
863#define PSYCHO_PCIAFSR_PPERR 0x1000000000000000UL /* Primary Parity Error */
864#define PSYCHO_PCIAFSR_SMA 0x0800000000000000UL /* Secondary Master Abort Error */
865#define PSYCHO_PCIAFSR_STA 0x0400000000000000UL /* Secondary Target Abort Error */
866#define PSYCHO_PCIAFSR_SRTRY 0x0200000000000000UL /* Secondary Excessive Retries */
867#define PSYCHO_PCIAFSR_SPERR 0x0100000000000000UL /* Secondary Parity Error */
868#define PSYCHO_PCIAFSR_RESV1 0x00ff000000000000UL /* Reserved */
869#define PSYCHO_PCIAFSR_BMSK 0x0000ffff00000000UL /* Bytemask of failed transfer */
870#define PSYCHO_PCIAFSR_BLK 0x0000000080000000UL /* Trans was block operation */
871#define PSYCHO_PCIAFSR_RESV2 0x0000000040000000UL /* Reserved */
872#define PSYCHO_PCIAFSR_MID 0x000000003e000000UL /* MID causing the error */
873#define PSYCHO_PCIAFSR_RESV3 0x0000000001ffffffUL /* Reserved */
874#define PSYCHO_PCI_AFAR_A 0x2018UL
875#define PSYCHO_PCI_AFAR_B 0x4018UL
876
/* Secondary PCI error check, used when the AFSR showed no error
 * bits: look at the per-PBM control register (SBH/SERR) and the
 * bridge's PCI_STATUS for error conditions, logging and clearing
 * any found.  Returns IRQ_HANDLED if anything was reported.
 */
static irqreturn_t psycho_pcierr_intr_other(struct pci_pbm_info *pbm, int is_pbm_a)
{
	unsigned long csr_reg, csr, csr_error_bits;
	irqreturn_t ret = IRQ_NONE;
	u16 stat;

	if (is_pbm_a) {
		csr_reg = pbm->controller_regs + PSYCHO_PCIA_CTRL;
	} else {
		csr_reg = pbm->controller_regs + PSYCHO_PCIB_CTRL;
	}
	csr = psycho_read(csr_reg);
	csr_error_bits =
		csr & (PSYCHO_PCICTRL_SBH_ERR | PSYCHO_PCICTRL_SERR);
	if (csr_error_bits) {
		/* Clear the errors.  */
		psycho_write(csr_reg, csr);

		/* Log 'em.  */
		if (csr_error_bits & PSYCHO_PCICTRL_SBH_ERR)
			printk("%s: PCI streaming byte hole error asserted.\n",
			       pbm->name);
		if (csr_error_bits & PSYCHO_PCICTRL_SERR)
			printk("%s: PCI SERR signal asserted.\n", pbm->name);
		ret = IRQ_HANDLED;
	}
	/* NOTE(review): assumes pbm->pci_bus->self is non-NULL here —
	 * confirm that each PBM bus always has a self device.
	 */
	pci_read_config_word(pbm->pci_bus->self, PCI_STATUS, &stat);
	if (stat & (PCI_STATUS_PARITY |
		    PCI_STATUS_SIG_TARGET_ABORT |
		    PCI_STATUS_REC_TARGET_ABORT |
		    PCI_STATUS_REC_MASTER_ABORT |
		    PCI_STATUS_SIG_SYSTEM_ERROR)) {
		printk("%s: PCI bus error, PCI_STATUS[%04x]\n",
		       pbm->name, stat);
		/* Status bits are write-one-to-clear. */
		pci_write_config_word(pbm->pci_bus->self, PCI_STATUS, 0xffff);
		ret = IRQ_HANDLED;
	}
	return ret;
}
916
/* PCI error interrupt handler for one PSYCHO PBM.
 *
 * Latches the AFAR/AFSR pair, clears and logs the primary and
 * secondary error causes, then scans the PBM's PCI bus for devices
 * whose config-space status registers recorded the matching error
 * type.  If the AFSR shows nothing, falls through to
 * psycho_pcierr_intr_other() for the non-AFSR error classes.
 */
static irqreturn_t psycho_pcierr_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct pci_pbm_info *pbm = dev_id;
	struct pci_controller_info *p = pbm->parent;
	unsigned long afsr_reg, afar_reg;
	unsigned long afsr, afar, error_bits;
	int is_pbm_a, reported;

	/* Both PBMs' registers hang off pbm_A.controller_regs; only
	 * the AFSR/AFAR offsets differ between PBM A and PBM B.
	 */
	is_pbm_a = (pbm == &pbm->parent->pbm_A);
	if (is_pbm_a) {
		afsr_reg = p->pbm_A.controller_regs + PSYCHO_PCI_AFSR_A;
		afar_reg = p->pbm_A.controller_regs + PSYCHO_PCI_AFAR_A;
	} else {
		afsr_reg = p->pbm_A.controller_regs + PSYCHO_PCI_AFSR_B;
		afar_reg = p->pbm_A.controller_regs + PSYCHO_PCI_AFAR_B;
	}

	/* Latch error status. */
	afar = psycho_read(afar_reg);
	afsr = psycho_read(afsr_reg);

	/* Clear primary/secondary error status bits. */
	error_bits = afsr &
		(PSYCHO_PCIAFSR_PMA | PSYCHO_PCIAFSR_PTA |
		 PSYCHO_PCIAFSR_PRTRY | PSYCHO_PCIAFSR_PPERR |
		 PSYCHO_PCIAFSR_SMA | PSYCHO_PCIAFSR_STA |
		 PSYCHO_PCIAFSR_SRTRY | PSYCHO_PCIAFSR_SPERR);
	if (!error_bits)
		/* Nothing in the AFSR; check the PBM control and PCI
		 * status registers for the other error classes.
		 */
		return psycho_pcierr_intr_other(pbm, is_pbm_a);
	/* Writing the latched bits back clears them. */
	psycho_write(afsr_reg, error_bits);

	/* Log the error. */
	printk("PSYCHO%d(PBM%c): PCI Error, primary error type[%s]\n",
	       p->index, (is_pbm_a ? 'A' : 'B'),
	       (((error_bits & PSYCHO_PCIAFSR_PMA) ?
		 "Master Abort" :
		 ((error_bits & PSYCHO_PCIAFSR_PTA) ?
		  "Target Abort" :
		  ((error_bits & PSYCHO_PCIAFSR_PRTRY) ?
		   "Excessive Retries" :
		   ((error_bits & PSYCHO_PCIAFSR_PPERR) ?
		    "Parity Error" : "???"))))));
	printk("PSYCHO%d(PBM%c): bytemask[%04lx] UPA_MID[%02lx] was_block(%d)\n",
	       p->index, (is_pbm_a ? 'A' : 'B'),
	       (afsr & PSYCHO_PCIAFSR_BMSK) >> 32UL,
	       (afsr & PSYCHO_PCIAFSR_MID) >> 25UL,
	       (afsr & PSYCHO_PCIAFSR_BLK) ? 1 : 0);
	printk("PSYCHO%d(PBM%c): PCI AFAR [%016lx]\n",
	       p->index, (is_pbm_a ? 'A' : 'B'), afar);
	printk("PSYCHO%d(PBM%c): PCI Secondary errors [",
	       p->index, (is_pbm_a ? 'A' : 'B'));
	reported = 0;
	if (afsr & PSYCHO_PCIAFSR_SMA) {
		reported++;
		printk("(Master Abort)");
	}
	if (afsr & PSYCHO_PCIAFSR_STA) {
		reported++;
		printk("(Target Abort)");
	}
	if (afsr & PSYCHO_PCIAFSR_SRTRY) {
		reported++;
		printk("(Excessive Retries)");
	}
	if (afsr & PSYCHO_PCIAFSR_SPERR) {
		reported++;
		printk("(Parity Error)");
	}
	if (!reported)
		printk("(none)");
	printk("]\n");

	/* For the error types shown, scan PBM's PCI bus for devices
	 * which have logged that error type.
	 */

	/* If we see a Target Abort, this could be the result of an
	 * IOMMU translation error of some sort.  It is extremely
	 * useful to log this information as usually it indicates
	 * a bug in the IOMMU support code or a PCI device driver.
	 */
	if (error_bits & (PSYCHO_PCIAFSR_PTA | PSYCHO_PCIAFSR_STA)) {
		psycho_check_iommu_error(p, afsr, afar, PCI_ERR);
		pci_scan_for_target_abort(p, pbm, pbm->pci_bus);
	}
	if (error_bits & (PSYCHO_PCIAFSR_PMA | PSYCHO_PCIAFSR_SMA))
		pci_scan_for_master_abort(p, pbm, pbm->pci_bus);

	/* For excessive retries, PSYCHO/PBM will abort the device
	 * and there is no way to specifically check for excessive
	 * retries in the config space status registers.  So what
	 * we hope is that we'll catch it via the master/target
	 * abort events.
	 */

	if (error_bits & (PSYCHO_PCIAFSR_PPERR | PSYCHO_PCIAFSR_SPERR))
		pci_scan_for_parity_error(p, pbm, pbm->pci_bus);

	return IRQ_HANDLED;
}
1017
1018/* XXX What about PowerFail/PowerManagement??? -DaveM */
1019#define PSYCHO_ECC_CTRL 0x0020
1020#define PSYCHO_ECCCTRL_EE 0x8000000000000000UL /* Enable ECC Checking */
1021#define PSYCHO_ECCCTRL_UE 0x4000000000000000UL /* Enable UE Interrupts */
1022#define PSYCHO_ECCCTRL_CE 0x2000000000000000UL /* Enable CE Interrupts */
1023#define PSYCHO_UE_INO 0x2e
1024#define PSYCHO_CE_INO 0x2f
1025#define PSYCHO_PCIERR_A_INO 0x30
1026#define PSYCHO_PCIERR_B_INO 0x31
/* Build and register the UE, CE and per-PBM PCI error interrupt
 * handlers, then enable the corresponding error reporting bits in
 * the ECC control and PBM control registers.  Any request_irq()
 * failure is fatal (prom_halt).  Must be called only after the PCI
 * bus scan, since the PCI error handlers dereference pbm->pci_bus.
 */
static void __init psycho_register_error_handlers(struct pci_controller_info *p)
{
	struct pci_pbm_info *pbm = &p->pbm_A; /* arbitrary */
	unsigned long base = p->pbm_A.controller_regs;
	unsigned int irq, portid = pbm->portid;
	u64 tmp;

	/* Build IRQs and register handlers. */
	irq = psycho_irq_build(pbm, NULL, (portid << 6) | PSYCHO_UE_INO);
	if (request_irq(irq, psycho_ue_intr,
			SA_SHIRQ, "PSYCHO UE", p) < 0) {
		prom_printf("PSYCHO%d: Cannot register UE interrupt.\n",
			    p->index);
		prom_halt();
	}

	irq = psycho_irq_build(pbm, NULL, (portid << 6) | PSYCHO_CE_INO);
	if (request_irq(irq, psycho_ce_intr,
			SA_SHIRQ, "PSYCHO CE", p) < 0) {
		prom_printf("PSYCHO%d: Cannot register CE interrupt.\n",
			    p->index);
		prom_halt();
	}

	/* (pbm already points at pbm_A; re-set for symmetry with the
	 * PBM B registration below.)
	 */
	pbm = &p->pbm_A;
	irq = psycho_irq_build(pbm, NULL, (portid << 6) | PSYCHO_PCIERR_A_INO);
	if (request_irq(irq, psycho_pcierr_intr,
			SA_SHIRQ, "PSYCHO PCIERR", &p->pbm_A) < 0) {
		prom_printf("PSYCHO%d(PBMA): Cannot register PciERR interrupt.\n",
			    p->index);
		prom_halt();
	}

	pbm = &p->pbm_B;
	irq = psycho_irq_build(pbm, NULL, (portid << 6) | PSYCHO_PCIERR_B_INO);
	if (request_irq(irq, psycho_pcierr_intr,
			SA_SHIRQ, "PSYCHO PCIERR", &p->pbm_B) < 0) {
		prom_printf("PSYCHO%d(PBMB): Cannot register PciERR interrupt.\n",
			    p->index);
		prom_halt();
	}

	/* Enable UE and CE interrupts for controller. */
	psycho_write(base + PSYCHO_ECC_CTRL,
		     (PSYCHO_ECCCTRL_EE |
		      PSYCHO_ECCCTRL_UE |
		      PSYCHO_ECCCTRL_CE));

	/* Enable PCI Error interrupts and clear error
	 * bits for each PBM.
	 */
	tmp = psycho_read(base + PSYCHO_PCIA_CTRL);
	tmp |= (PSYCHO_PCICTRL_SERR |
		PSYCHO_PCICTRL_SBH_ERR |
		PSYCHO_PCICTRL_EEN);
	/* Streaming byte hole interrupt generation stays masked. */
	tmp &= ~(PSYCHO_PCICTRL_SBH_INT);
	psycho_write(base + PSYCHO_PCIA_CTRL, tmp);

	tmp = psycho_read(base + PSYCHO_PCIB_CTRL);
	tmp |= (PSYCHO_PCICTRL_SERR |
		PSYCHO_PCICTRL_SBH_ERR |
		PSYCHO_PCICTRL_EEN);
	tmp &= ~(PSYCHO_PCICTRL_SBH_INT);
	psycho_write(base + PSYCHO_PCIB_CTRL, tmp);
}
1092
1093/* PSYCHO boot time probing and initialization. */
1094static void __init psycho_resource_adjust(struct pci_dev *pdev,
1095 struct resource *res,
1096 struct resource *root)
1097{
1098 res->start += root->start;
1099 res->end += root->start;
1100}
1101
/* Write an assigned resource back into a device's PCI base address
 * register (or expansion ROM register).  Invoked by the common
 * sparc64 PCI layer via p->base_address_update.
 */
static void __init psycho_base_address_update(struct pci_dev *pdev, int resource)
{
	struct pcidev_cookie *pcp = pdev->sysdata;
	struct pci_pbm_info *pbm = pcp->pbm;
	struct resource *res, *root;
	u32 reg;
	int where, size, is_64bit;

	res = &pdev->resource[resource];
	if (resource < 6) {
		/* Standard BARs: config offset 0x10 + 4 * n. */
		where = PCI_BASE_ADDRESS_0 + (resource * 4);
	} else if (resource == PCI_ROM_RESOURCE) {
		where = pdev->rom_base_reg;
	} else {
		/* Somebody might have asked allocation of a non-standard resource */
		return;
	}

	is_64bit = 0;
	if (res->flags & IORESOURCE_IO)
		root = &pbm->io_space;
	else {
		root = &pbm->mem_space;
		if ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK)
		    == PCI_BASE_ADDRESS_MEM_TYPE_64)
			is_64bit = 1;
	}

	/* size is length-minus-one (resource ends are inclusive) and is
	 * used below as a bit mask — presumably relying on BAR regions
	 * being power-of-two sized and naturally aligned; TODO confirm.
	 */
	size = res->end - res->start;
	pci_read_config_dword(pdev, where, &reg);
	/* Keep the register's low control/type bits, replace the address
	 * bits with the PBM-relative offset of the assigned resource.
	 */
	reg = ((reg & size) |
	       (((u32)(res->start - root->start)) & ~size));
	if (resource == PCI_ROM_RESOURCE) {
		reg |= PCI_ROM_ADDRESS_ENABLE;
		res->flags |= IORESOURCE_ROM_ENABLE;
	}
	pci_write_config_dword(pdev, where, reg);

	/* This knows that the upper 32-bits of the address
	 * must be zero.  Our PCI common layer enforces this.
	 */
	if (is_64bit)
		pci_write_config_dword(pdev, where + 4, 0);
}
1146
1147static void __init pbm_config_busmastering(struct pci_pbm_info *pbm)
1148{
1149 u8 *addr;
1150
1151 /* Set cache-line size to 64 bytes, this is actually
1152 * a nop but I do it for completeness.
1153 */
1154 addr = psycho_pci_config_mkaddr(pbm, pbm->pci_first_busno,
1155 0, PCI_CACHE_LINE_SIZE);
1156 pci_config_write8(addr, 64 / sizeof(u32));
1157
1158 /* Set PBM latency timer to 64 PCI clocks. */
1159 addr = psycho_pci_config_mkaddr(pbm, pbm->pci_first_busno,
1160 0, PCI_LATENCY_TIMER);
1161 pci_config_write8(addr, 64);
1162}
1163
1164static void __init pbm_scan_bus(struct pci_controller_info *p,
1165 struct pci_pbm_info *pbm)
1166{
1167 struct pcidev_cookie *cookie = kmalloc(sizeof(*cookie), GFP_KERNEL);
1168
1169 if (!cookie) {
1170 prom_printf("PSYCHO: Critical allocation failure.\n");
1171 prom_halt();
1172 }
1173
1174 /* All we care about is the PBM. */
1175 memset(cookie, 0, sizeof(*cookie));
1176 cookie->pbm = pbm;
1177
1178 pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno,
1179 p->pci_ops,
1180 pbm);
1181 pci_fixup_host_bridge_self(pbm->pci_bus);
1182 pbm->pci_bus->self->sysdata = cookie;
1183
1184 pci_fill_in_pbm_cookies(pbm->pci_bus, pbm, pbm->prom_node);
1185 pci_record_assignments(pbm, pbm->pci_bus);
1186 pci_assign_unassigned(pbm, pbm->pci_bus);
1187 pci_fixup_irq(pbm, pbm->pci_bus);
1188 pci_determine_66mhz_disposition(pbm, pbm->pci_bus);
1189 pci_setup_busmastering(pbm, pbm->pci_bus);
1190}
1191
/* Scan both PBMs' PCI busses.  PBM B is configured and scanned
 * before PBM A.  NOTE(review): PBM A is marked 66MHz-capable while
 * PBM B is not — presumably a PSYCHO hardware property; confirm
 * against the chip documentation.
 */
static void __init psycho_scan_bus(struct pci_controller_info *p)
{
	pbm_config_busmastering(&p->pbm_B);
	p->pbm_B.is_66mhz_capable = 0;
	pbm_config_busmastering(&p->pbm_A);
	p->pbm_A.is_66mhz_capable = 1;
	pbm_scan_bus(p, &p->pbm_B);
	pbm_scan_bus(p, &p->pbm_A);

	/* After the PCI bus scan is complete, we can register
	 * the error interrupt handlers.  (They dereference
	 * pbm->pci_bus, which only exists after the scan.)
	 */
	psycho_register_error_handlers(p);
}
1206
/* Initialize the PSYCHO IOMMU: set up the software state shared by
 * both PBMs, invalidate the hardware TLB, allocate the IOPTE page
 * table, and enable translation.  Allocation failures are fatal
 * (prom_halt).  Also hooks up Starfire IRQ translation when running
 * on that platform.
 */
static void __init psycho_iommu_init(struct pci_controller_info *p)
{
	struct pci_iommu *iommu = p->pbm_A.iommu;
	unsigned long tsbbase, i;
	u64 control;

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->iommu_cur_ctx = 0;

	/* Register addresses. */
	iommu->iommu_control = p->pbm_A.controller_regs + PSYCHO_IOMMU_CONTROL;
	iommu->iommu_tsbbase = p->pbm_A.controller_regs + PSYCHO_IOMMU_TSBBASE;
	iommu->iommu_flush = p->pbm_A.controller_regs + PSYCHO_IOMMU_FLUSH;
	/* PSYCHO's IOMMU lacks ctx flushing. */
	iommu->iommu_ctxflush = 0;

	/* We use the main control register of PSYCHO as the write
	 * completion register.
	 */
	iommu->write_complete_reg = p->pbm_A.controller_regs + PSYCHO_CONTROL;

	/*
	 * Invalidate TLB Entries.
	 * Diagnostic mode must be enabled to write the TAG/DATA
	 * entries directly; all 16 entries are zeroed.
	 */
	control = psycho_read(p->pbm_A.controller_regs + PSYCHO_IOMMU_CONTROL);
	control |= PSYCHO_IOMMU_CTRL_DENAB;
	psycho_write(p->pbm_A.controller_regs + PSYCHO_IOMMU_CONTROL, control);
	for(i = 0; i < 16; i++) {
		psycho_write(p->pbm_A.controller_regs + PSYCHO_IOMMU_TAG + (i * 8UL), 0);
		psycho_write(p->pbm_A.controller_regs + PSYCHO_IOMMU_DATA + (i * 8UL), 0);
	}

	/* Leave diag mode enabled for full-flushing done
	 * in pci_iommu.c
	 */

	/* One zeroed page used as a harmless DMA target. */
	iommu->dummy_page = __get_free_pages(GFP_KERNEL, 0);
	if (!iommu->dummy_page) {
		prom_printf("PSYCHO_IOMMU: Error, gfp(dummy_page) failed.\n");
		prom_halt();
	}
	memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
	iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);

	/* Using assumed page size 8K with 128K entries we need 1MB iommu page
	 * table (128K ioptes * 8 bytes per iopte).  This is
	 * page order 7 on UltraSparc.
	 */
	tsbbase = __get_free_pages(GFP_KERNEL, get_order(IO_TSB_SIZE));
	if (!tsbbase) {
		prom_printf("PSYCHO_IOMMU: Error, gfp(tsb) failed.\n");
		prom_halt();
	}
	iommu->page_table = (iopte_t *)tsbbase;
	/* 2^17 == 128K entries. */
	iommu->page_table_sz_bits = 17;
	iommu->page_table_map_base = 0xc0000000;
	iommu->dma_addr_mask = 0xffffffff;
	pci_iommu_table_init(iommu, IO_TSB_SIZE);

	/* We start with no consistent mappings. */
	iommu->lowest_consistent_map =
		1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS);

	for (i = 0; i < PBM_NCLUSTERS; i++) {
		iommu->alloc_info[i].flush = 0;
		iommu->alloc_info[i].next = 0;
	}

	/* Point the hardware at the table, then enable translation
	 * with the 128K TSB size selected.
	 */
	psycho_write(p->pbm_A.controller_regs + PSYCHO_IOMMU_TSBBASE, __pa(tsbbase));

	control = psycho_read(p->pbm_A.controller_regs + PSYCHO_IOMMU_CONTROL);
	control &= ~(PSYCHO_IOMMU_CTRL_TSBSZ | PSYCHO_IOMMU_CTRL_TBWSZ);
	control |= (PSYCHO_IOMMU_TSBSZ_128K | PSYCHO_IOMMU_CTRL_ENAB);
	psycho_write(p->pbm_A.controller_regs + PSYCHO_IOMMU_CONTROL, control);

	/* If necessary, hook us up for starfire IRQ translations. */
	if(this_is_starfire)
		p->starfire_cookie = starfire_hookup(p->pbm_A.portid);
	else
		p->starfire_cookie = NULL;
}
1289
1290#define PSYCHO_IRQ_RETRY 0x1a00UL
1291#define PSYCHO_PCIA_DIAG 0x2020UL
1292#define PSYCHO_PCIB_DIAG 0x4020UL
1293#define PSYCHO_PCIDIAG_RESV 0xffffffffffffff80UL /* Reserved */
1294#define PSYCHO_PCIDIAG_DRETRY 0x0000000000000040UL /* Disable retry limit */
1295#define PSYCHO_PCIDIAG_DISYNC 0x0000000000000020UL /* Disable DMA wr / irq sync */
1296#define PSYCHO_PCIDIAG_DDWSYNC 0x0000000000000010UL /* Disable DMA wr / PIO rd sync */
1297#define PSYCHO_PCIDIAG_IDDPAR 0x0000000000000008UL /* Invert DMA data parity */
1298#define PSYCHO_PCIDIAG_IPDPAR 0x0000000000000004UL /* Invert PIO data parity */
1299#define PSYCHO_PCIDIAG_IPAPAR 0x0000000000000002UL /* Invert PIO address parity */
1300#define PSYCHO_PCIDIAG_LPBACK 0x0000000000000001UL /* Enable loopback mode */
1301
/* One-time controller-level hardware setup: raise the IRQ retry
 * limit, enable the PCI arbiter on both PBMs, and apply the U2P
 * erratum 1243770 workaround.
 */
static void psycho_controller_hwinit(struct pci_controller_info *p)
{
	u64 tmp;

	/* PROM sets the IRQ retry value too low, increase it. */
	psycho_write(p->pbm_A.controller_regs + PSYCHO_IRQ_RETRY, 0xff);

	/* Enable arbiter for all PCI slots. */
	tmp = psycho_read(p->pbm_A.controller_regs + PSYCHO_PCIA_CTRL);
	tmp |= PSYCHO_PCICTRL_AEN;
	psycho_write(p->pbm_A.controller_regs + PSYCHO_PCIA_CTRL, tmp);

	tmp = psycho_read(p->pbm_A.controller_regs + PSYCHO_PCIB_CTRL);
	tmp |= PSYCHO_PCICTRL_AEN;
	psycho_write(p->pbm_A.controller_regs + PSYCHO_PCIB_CTRL, tmp);

	/* Disable DMA write / PIO read synchronization on
	 * both PCI bus segments.
	 * [ U2P Erratum 1243770, STP2223BGA data sheet ]
	 */
	tmp = psycho_read(p->pbm_A.controller_regs + PSYCHO_PCIA_DIAG);
	tmp |= PSYCHO_PCIDIAG_DDWSYNC;
	psycho_write(p->pbm_A.controller_regs + PSYCHO_PCIA_DIAG, tmp);

	tmp = psycho_read(p->pbm_A.controller_regs + PSYCHO_PCIB_DIAG);
	tmp |= PSYCHO_PCIDIAG_DDWSYNC;
	psycho_write(p->pbm_A.controller_regs + PSYCHO_PCIB_DIAG, tmp);
}
1330
/* Name a PBM and register its I/O and MEM windows with the global
 * resource trees, including the legacy PC-style regions.
 *
 * NOTE(review): the request_resource() return values are ignored, so
 * a conflicting registration would go undetected — confirm whether
 * that is acceptable at this early boot stage.
 */
static void __init pbm_register_toplevel_resources(struct pci_controller_info *p,
						   struct pci_pbm_info *pbm)
{
	char *name = pbm->name;

	/* Both resources share the PBM's name buffer. */
	sprintf(name, "PSYCHO%d PBM%c",
		p->index,
		(pbm == &p->pbm_A ? 'A' : 'B'));
	pbm->io_space.name = pbm->mem_space.name = name;

	request_resource(&ioport_resource, &pbm->io_space);
	request_resource(&iomem_resource, &pbm->mem_space);
	pci_register_legacy_regions(&pbm->io_space,
				    &pbm->mem_space);
}
1346
/* Set up and enable one PBM's streaming buffer: record its register
 * addresses, carve out a 64-byte-aligned flush-flag word, and turn
 * the buffer on while clearing any LRU locking OBP may have left
 * enabled.
 */
static void psycho_pbm_strbuf_init(struct pci_controller_info *p,
				   struct pci_pbm_info *pbm,
				   int is_pbm_a)
{
	unsigned long base = pbm->controller_regs;
	u64 control;

	if (is_pbm_a) {
		pbm->stc.strbuf_control = base + PSYCHO_STRBUF_CONTROL_A;
		pbm->stc.strbuf_pflush = base + PSYCHO_STRBUF_FLUSH_A;
		pbm->stc.strbuf_fsync = base + PSYCHO_STRBUF_FSYNC_A;
	} else {
		pbm->stc.strbuf_control = base + PSYCHO_STRBUF_CONTROL_B;
		pbm->stc.strbuf_pflush = base + PSYCHO_STRBUF_FLUSH_B;
		pbm->stc.strbuf_fsync = base + PSYCHO_STRBUF_FSYNC_B;
	}
	/* PSYCHO's streaming buffer lacks ctx flushing. */
	pbm->stc.strbuf_ctxflush = 0;
	pbm->stc.strbuf_ctxmatch_base = 0;

	/* Round the embedded flush-flag buffer up to the next
	 * 64-byte boundary.
	 */
	pbm->stc.strbuf_flushflag = (volatile unsigned long *)
		((((unsigned long)&pbm->stc.__flushflag_buf[0])
		  + 63UL)
		 & ~63UL);
	pbm->stc.strbuf_flushflag_pa = (unsigned long)
		__pa(pbm->stc.strbuf_flushflag);

	/* Enable the streaming buffer.  We have to be careful
	 * just in case OBP left it with LRU locking enabled.
	 *
	 * It is possible to control if PBM will be rerun on
	 * line misses.  Currently I just retain whatever setting
	 * OBP left us with.  All checks so far show it having
	 * a value of zero.
	 */
#undef PSYCHO_STRBUF_RERUN_ENABLE
#undef PSYCHO_STRBUF_RERUN_DISABLE
	control = psycho_read(pbm->stc.strbuf_control);
	control |= PSYCHO_STRBUF_CTRL_ENAB;
	control &= ~(PSYCHO_STRBUF_CTRL_LENAB | PSYCHO_STRBUF_CTRL_LPTR);
#ifdef PSYCHO_STRBUF_RERUN_ENABLE
	control &= ~(PSYCHO_STRBUF_CTRL_RRDIS);
#else
#ifdef PSYCHO_STRBUF_RERUN_DISABLE
	control |= PSYCHO_STRBUF_CTRL_RRDIS;
#endif
#endif
	psycho_write(pbm->stc.strbuf_control, control);

	pbm->stc.strbuf_enabled = 1;
}
1398
1399#define PSYCHO_IOSPACE_A 0x002000000UL
1400#define PSYCHO_IOSPACE_B 0x002010000UL
1401#define PSYCHO_IOSPACE_SIZE 0x00000ffffUL
1402#define PSYCHO_MEMSPACE_A 0x100000000UL
1403#define PSYCHO_MEMSPACE_B 0x180000000UL
1404#define PSYCHO_MEMSPACE_SIZE 0x07fffffffUL
1405
/* Initialize one PBM from its PROM node: set up its I/O and MEM
 * resource windows, read the "ranges", "interrupt-map"(-mask) and
 * "bus-range" properties, and bring up its streaming buffer.
 * A missing bus-range or interrupt-map-mask is fatal (prom_halt).
 */
static void psycho_pbm_init(struct pci_controller_info *p,
			    int prom_node, int is_pbm_a)
{
	unsigned int busrange[2];
	struct pci_pbm_info *pbm;
	int err;

	/* The two PBMs expose disjoint I/O and MEM windows at fixed
	 * offsets from the controller register base.
	 */
	if (is_pbm_a) {
		pbm = &p->pbm_A;
		pbm->pci_first_slot = 1;
		pbm->io_space.start = pbm->controller_regs + PSYCHO_IOSPACE_A;
		pbm->mem_space.start = pbm->controller_regs + PSYCHO_MEMSPACE_A;
	} else {
		pbm = &p->pbm_B;
		pbm->pci_first_slot = 2;
		pbm->io_space.start = pbm->controller_regs + PSYCHO_IOSPACE_B;
		pbm->mem_space.start = pbm->controller_regs + PSYCHO_MEMSPACE_B;
	}

	pbm->chip_type = PBM_CHIP_TYPE_PSYCHO;
	pbm->chip_version =
		prom_getintdefault(prom_node, "version#", 0);
	pbm->chip_revision =
		prom_getintdefault(prom_node, "module-revision#", 0);

	pbm->io_space.end = pbm->io_space.start + PSYCHO_IOSPACE_SIZE;
	pbm->io_space.flags = IORESOURCE_IO;
	pbm->mem_space.end = pbm->mem_space.start + PSYCHO_MEMSPACE_SIZE;
	pbm->mem_space.flags = IORESOURCE_MEM;
	pbm_register_toplevel_resources(p, pbm);

	pbm->parent = p;
	pbm->prom_node = prom_node;
	prom_getstring(prom_node, "name",
		       pbm->prom_name,
		       sizeof(pbm->prom_name));

	/* prom_getproperty() returns the property length in bytes,
	 * or -1 when the property does not exist.
	 */
	err = prom_getproperty(prom_node, "ranges",
			       (char *)pbm->pbm_ranges,
			       sizeof(pbm->pbm_ranges));
	if (err != -1)
		pbm->num_pbm_ranges =
			(err / sizeof(struct linux_prom_pci_ranges));
	else
		pbm->num_pbm_ranges = 0;

	err = prom_getproperty(prom_node, "interrupt-map",
			       (char *)pbm->pbm_intmap,
			       sizeof(pbm->pbm_intmap));
	if (err != -1) {
		pbm->num_pbm_intmap = (err / sizeof(struct linux_prom_pci_intmap));
		/* An interrupt-map without its mask is unusable. */
		err = prom_getproperty(prom_node, "interrupt-map-mask",
				       (char *)&pbm->pbm_intmask,
				       sizeof(pbm->pbm_intmask));
		if (err == -1) {
			prom_printf("PSYCHO-PBM: Fatal error, no "
				    "interrupt-map-mask.\n");
			prom_halt();
		}
	} else {
		pbm->num_pbm_intmap = 0;
		memset(&pbm->pbm_intmask, 0, sizeof(pbm->pbm_intmask));
	}

	err = prom_getproperty(prom_node, "bus-range",
			       (char *)&busrange[0],
			       sizeof(busrange));
	if (err == 0 || err == -1) {
		prom_printf("PSYCHO-PBM: Fatal error, no bus-range.\n");
		prom_halt();
	}
	pbm->pci_first_busno = busrange[0];
	pbm->pci_last_busno = busrange[1];

	psycho_pbm_strbuf_init(p, pbm, is_pbm_a);
}
1482
1483#define PSYCHO_CONFIGSPACE 0x001000000UL
1484
/* Top-level PSYCHO probe entry point, called once per PROM node.
 *
 * Each PSYCHO chip has two PBMs but appears as two separate PROM
 * nodes sharing one UPA portid; the first call allocates and wires
 * up the controller, the second call (matched by portid) only
 * initializes the remaining PBM.  Allocation or missing-property
 * failures are fatal (prom_halt).
 */
void __init psycho_init(int node, char *model_name)
{
	struct linux_prom64_registers pr_regs[3];
	struct pci_controller_info *p;
	struct pci_iommu *iommu;
	u32 upa_portid;
	int is_pbm_a, err;

	upa_portid = prom_getintdefault(node, "upa-portid", 0xff);

	/* Second node of an already-seen chip?  Just init its PBM.
	 * (prom_node == 0 means that PBM slot is still unused.)
	 */
	for(p = pci_controller_root; p; p = p->next) {
		if (p->pbm_A.portid == upa_portid) {
			is_pbm_a = (p->pbm_A.prom_node == 0);
			psycho_pbm_init(p, node, is_pbm_a);
			return;
		}
	}

	/* First sighting: allocate the controller and its IOMMU.
	 * GFP_ATOMIC because this runs in early boot context.
	 */
	p = kmalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
	if (!p) {
		prom_printf("PSYCHO: Fatal memory allocation error.\n");
		prom_halt();
	}
	memset(p, 0, sizeof(*p));
	iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
	if (!iommu) {
		prom_printf("PSYCHO: Fatal memory allocation error.\n");
		prom_halt();
	}
	memset(iommu, 0, sizeof(*iommu));
	/* Both PBMs share the one IOMMU. */
	p->pbm_A.iommu = p->pbm_B.iommu = iommu;

	p->next = pci_controller_root;
	pci_controller_root = p;

	p->pbm_A.portid = upa_portid;
	p->pbm_B.portid = upa_portid;
	p->index = pci_num_controllers++;
	p->pbms_same_domain = 0;
	/* Controller method table. */
	p->scan_bus = psycho_scan_bus;
	p->irq_build = psycho_irq_build;
	p->base_address_update = psycho_base_address_update;
	p->resource_adjust = psycho_resource_adjust;
	p->pci_ops = &psycho_ops;

	err = prom_getproperty(node, "reg",
			       (char *)&pr_regs[0],
			       sizeof(pr_regs));
	if (err == 0 || err == -1) {
		prom_printf("PSYCHO: Fatal error, no reg property.\n");
		prom_halt();
	}

	/* reg[2] holds the shared controller register block. */
	p->pbm_A.controller_regs = pr_regs[2].phys_addr;
	p->pbm_B.controller_regs = pr_regs[2].phys_addr;
	printk("PCI: Found PSYCHO, control regs at %016lx\n",
	       p->pbm_A.controller_regs);

	p->pbm_A.config_space = p->pbm_B.config_space =
		(pr_regs[2].phys_addr + PSYCHO_CONFIGSPACE);
	printk("PSYCHO: Shared PCI config space at %016lx\n",
	       p->pbm_A.config_space);

	/*
	 * Psycho's PCI MEM space is mapped to a 2GB aligned area, so
	 * we need to adjust our MEM space mask.
	 */
	pci_memspace_mask = 0x7fffffffUL;

	psycho_controller_hwinit(p);

	psycho_iommu_init(p);

	/* Which PBM this node represents is encoded in its register
	 * physical address.
	 */
	is_pbm_a = ((pr_regs[0].phys_addr & 0x6000) == 0x2000);
	psycho_pbm_init(p, node, is_pbm_a);
}
diff --git a/arch/sparc64/kernel/pci_sabre.c b/arch/sparc64/kernel/pci_sabre.c
new file mode 100644
index 000000000000..5525d1ec4af8
--- /dev/null
+++ b/arch/sparc64/kernel/pci_sabre.c
@@ -0,0 +1,1702 @@
1/* $Id: pci_sabre.c,v 1.42 2002/01/23 11:27:32 davem Exp $
2 * pci_sabre.c: Sabre specific PCI controller support.
3 *
4 * Copyright (C) 1997, 1998, 1999 David S. Miller (davem@caipfs.rutgers.edu)
5 * Copyright (C) 1998, 1999 Eddie C. Dost (ecd@skynet.be)
6 * Copyright (C) 1999 Jakub Jelinek (jakub@redhat.com)
7 */
8
9#include <linux/kernel.h>
10#include <linux/types.h>
11#include <linux/pci.h>
12#include <linux/init.h>
13#include <linux/slab.h>
14#include <linux/interrupt.h>
15
16#include <asm/apb.h>
17#include <asm/pbm.h>
18#include <asm/iommu.h>
19#include <asm/irq.h>
20#include <asm/smp.h>
21#include <asm/oplib.h>
22
23#include "pci_impl.h"
24#include "iommu_common.h"
25
26/* All SABRE registers are 64-bits. The following accessor
27 * routines are how they are accessed. The REG parameter
28 * is a physical address.
29 */
30#define sabre_read(__reg) \
31({ u64 __ret; \
32 __asm__ __volatile__("ldxa [%1] %2, %0" \
33 : "=r" (__ret) \
34 : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
35 : "memory"); \
36 __ret; \
37})
38#define sabre_write(__reg, __val) \
39 __asm__ __volatile__("stxa %0, [%1] %2" \
40 : /* no outputs */ \
41 : "r" (__val), "r" (__reg), \
42 "i" (ASI_PHYS_BYPASS_EC_E) \
43 : "memory")
44
45/* SABRE PCI controller register offsets and definitions. */
46#define SABRE_UE_AFSR 0x0030UL
47#define SABRE_UEAFSR_PDRD 0x4000000000000000UL /* Primary PCI DMA Read */
48#define SABRE_UEAFSR_PDWR 0x2000000000000000UL /* Primary PCI DMA Write */
49#define SABRE_UEAFSR_SDRD 0x0800000000000000UL /* Secondary PCI DMA Read */
50#define SABRE_UEAFSR_SDWR 0x0400000000000000UL /* Secondary PCI DMA Write */
51#define SABRE_UEAFSR_SDTE 0x0200000000000000UL /* Secondary DMA Translation Error */
52#define SABRE_UEAFSR_PDTE 0x0100000000000000UL /* Primary DMA Translation Error */
53#define SABRE_UEAFSR_BMSK 0x0000ffff00000000UL /* Bytemask */
54#define SABRE_UEAFSR_OFF 0x00000000e0000000UL /* Offset (AFAR bits [5:3] */
55#define SABRE_UEAFSR_BLK 0x0000000000800000UL /* Was block operation */
56#define SABRE_UECE_AFAR 0x0038UL
57#define SABRE_CE_AFSR 0x0040UL
58#define SABRE_CEAFSR_PDRD 0x4000000000000000UL /* Primary PCI DMA Read */
59#define SABRE_CEAFSR_PDWR 0x2000000000000000UL /* Primary PCI DMA Write */
60#define SABRE_CEAFSR_SDRD 0x0800000000000000UL /* Secondary PCI DMA Read */
61#define SABRE_CEAFSR_SDWR 0x0400000000000000UL /* Secondary PCI DMA Write */
62#define SABRE_CEAFSR_ESYND 0x00ff000000000000UL /* ECC Syndrome */
63#define SABRE_CEAFSR_BMSK 0x0000ffff00000000UL /* Bytemask */
64#define SABRE_CEAFSR_OFF 0x00000000e0000000UL /* Offset */
65#define SABRE_CEAFSR_BLK 0x0000000000800000UL /* Was block operation */
66#define SABRE_UECE_AFAR_ALIAS 0x0048UL /* Aliases to 0x0038 */
67#define SABRE_IOMMU_CONTROL 0x0200UL
68#define SABRE_IOMMUCTRL_ERRSTS 0x0000000006000000UL /* Error status bits */
69#define SABRE_IOMMUCTRL_ERR 0x0000000001000000UL /* Error present in IOTLB */
70#define SABRE_IOMMUCTRL_LCKEN 0x0000000000800000UL /* IOTLB lock enable */
71#define SABRE_IOMMUCTRL_LCKPTR 0x0000000000780000UL /* IOTLB lock pointer */
72#define SABRE_IOMMUCTRL_TSBSZ 0x0000000000070000UL /* TSB Size */
73#define SABRE_IOMMU_TSBSZ_1K 0x0000000000000000
74#define SABRE_IOMMU_TSBSZ_2K 0x0000000000010000
75#define SABRE_IOMMU_TSBSZ_4K 0x0000000000020000
76#define SABRE_IOMMU_TSBSZ_8K 0x0000000000030000
77#define SABRE_IOMMU_TSBSZ_16K 0x0000000000040000
78#define SABRE_IOMMU_TSBSZ_32K 0x0000000000050000
79#define SABRE_IOMMU_TSBSZ_64K 0x0000000000060000
80#define SABRE_IOMMU_TSBSZ_128K 0x0000000000070000
81#define SABRE_IOMMUCTRL_TBWSZ 0x0000000000000004UL /* TSB assumed page size */
82#define SABRE_IOMMUCTRL_DENAB 0x0000000000000002UL /* Diagnostic Mode Enable */
83#define SABRE_IOMMUCTRL_ENAB 0x0000000000000001UL /* IOMMU Enable */
84#define SABRE_IOMMU_TSBBASE 0x0208UL
85#define SABRE_IOMMU_FLUSH 0x0210UL
86#define SABRE_IMAP_A_SLOT0 0x0c00UL
87#define SABRE_IMAP_B_SLOT0 0x0c20UL
88#define SABRE_IMAP_SCSI 0x1000UL
89#define SABRE_IMAP_ETH 0x1008UL
90#define SABRE_IMAP_BPP 0x1010UL
91#define SABRE_IMAP_AU_REC 0x1018UL
92#define SABRE_IMAP_AU_PLAY 0x1020UL
93#define SABRE_IMAP_PFAIL 0x1028UL
94#define SABRE_IMAP_KMS 0x1030UL
95#define SABRE_IMAP_FLPY 0x1038UL
96#define SABRE_IMAP_SHW 0x1040UL
97#define SABRE_IMAP_KBD 0x1048UL
98#define SABRE_IMAP_MS 0x1050UL
99#define SABRE_IMAP_SER 0x1058UL
100#define SABRE_IMAP_UE 0x1070UL
101#define SABRE_IMAP_CE 0x1078UL
102#define SABRE_IMAP_PCIERR 0x1080UL
103#define SABRE_IMAP_GFX 0x1098UL
104#define SABRE_IMAP_EUPA 0x10a0UL
105#define SABRE_ICLR_A_SLOT0 0x1400UL
106#define SABRE_ICLR_B_SLOT0 0x1480UL
107#define SABRE_ICLR_SCSI 0x1800UL
108#define SABRE_ICLR_ETH 0x1808UL
109#define SABRE_ICLR_BPP 0x1810UL
110#define SABRE_ICLR_AU_REC 0x1818UL
111#define SABRE_ICLR_AU_PLAY 0x1820UL
112#define SABRE_ICLR_PFAIL 0x1828UL
113#define SABRE_ICLR_KMS 0x1830UL
114#define SABRE_ICLR_FLPY 0x1838UL
115#define SABRE_ICLR_SHW 0x1840UL
116#define SABRE_ICLR_KBD 0x1848UL
117#define SABRE_ICLR_MS 0x1850UL
118#define SABRE_ICLR_SER 0x1858UL
119#define SABRE_ICLR_UE 0x1870UL
120#define SABRE_ICLR_CE 0x1878UL
121#define SABRE_ICLR_PCIERR 0x1880UL
122#define SABRE_WRSYNC 0x1c20UL
123#define SABRE_PCICTRL 0x2000UL
124#define SABRE_PCICTRL_MRLEN 0x0000001000000000UL /* Use MemoryReadLine for block loads/stores */
125#define SABRE_PCICTRL_SERR 0x0000000400000000UL /* Set when SERR asserted on PCI bus */
126#define SABRE_PCICTRL_ARBPARK 0x0000000000200000UL /* Bus Parking 0=Ultra-IIi 1=prev-bus-owner */
127#define SABRE_PCICTRL_CPUPRIO 0x0000000000100000UL /* Ultra-IIi granted every other bus cycle */
128#define SABRE_PCICTRL_ARBPRIO 0x00000000000f0000UL /* Slot which is granted every other bus cycle */
129#define SABRE_PCICTRL_ERREN 0x0000000000000100UL /* PCI Error Interrupt Enable */
130#define SABRE_PCICTRL_RTRYWE 0x0000000000000080UL /* DMA Flow Control 0=wait-if-possible 1=retry */
131#define SABRE_PCICTRL_AEN 0x000000000000000fUL /* Slot PCI arbitration enables */
132#define SABRE_PIOAFSR 0x2010UL
133#define SABRE_PIOAFSR_PMA 0x8000000000000000UL /* Primary Master Abort */
134#define SABRE_PIOAFSR_PTA 0x4000000000000000UL /* Primary Target Abort */
135#define SABRE_PIOAFSR_PRTRY 0x2000000000000000UL /* Primary Excessive Retries */
136#define SABRE_PIOAFSR_PPERR 0x1000000000000000UL /* Primary Parity Error */
137#define SABRE_PIOAFSR_SMA 0x0800000000000000UL /* Secondary Master Abort */
138#define SABRE_PIOAFSR_STA 0x0400000000000000UL /* Secondary Target Abort */
139#define SABRE_PIOAFSR_SRTRY 0x0200000000000000UL /* Secondary Excessive Retries */
140#define SABRE_PIOAFSR_SPERR 0x0100000000000000UL /* Secondary Parity Error */
141#define SABRE_PIOAFSR_BMSK 0x0000ffff00000000UL /* Byte Mask */
142#define SABRE_PIOAFSR_BLK 0x0000000080000000UL /* Was Block Operation */
143#define SABRE_PIOAFAR 0x2018UL
144#define SABRE_PCIDIAG 0x2020UL
145#define SABRE_PCIDIAG_DRTRY 0x0000000000000040UL /* Disable PIO Retry Limit */
146#define SABRE_PCIDIAG_IPAPAR 0x0000000000000008UL /* Invert PIO Address Parity */
147#define SABRE_PCIDIAG_IPDPAR 0x0000000000000004UL /* Invert PIO Data Parity */
148#define SABRE_PCIDIAG_IDDPAR 0x0000000000000002UL /* Invert DMA Data Parity */
149#define SABRE_PCIDIAG_ELPBK 0x0000000000000001UL /* Loopback Enable - not supported */
150#define SABRE_PCITASR 0x2028UL
151#define SABRE_PCITASR_EF 0x0000000000000080UL /* Respond to 0xe0000000-0xffffffff */
152#define SABRE_PCITASR_CD 0x0000000000000040UL /* Respond to 0xc0000000-0xdfffffff */
153#define SABRE_PCITASR_AB 0x0000000000000020UL /* Respond to 0xa0000000-0xbfffffff */
154#define SABRE_PCITASR_89 0x0000000000000010UL /* Respond to 0x80000000-0x9fffffff */
155#define SABRE_PCITASR_67 0x0000000000000008UL /* Respond to 0x60000000-0x7fffffff */
156#define SABRE_PCITASR_45 0x0000000000000004UL /* Respond to 0x40000000-0x5fffffff */
157#define SABRE_PCITASR_23 0x0000000000000002UL /* Respond to 0x20000000-0x3fffffff */
158#define SABRE_PCITASR_01 0x0000000000000001UL /* Respond to 0x00000000-0x1fffffff */
159#define SABRE_PIOBUF_DIAG 0x5000UL
160#define SABRE_DMABUF_DIAGLO 0x5100UL
161#define SABRE_DMABUF_DIAGHI 0x51c0UL
162#define SABRE_IMAP_GFX_ALIAS 0x6000UL /* Aliases to 0x1098 */
163#define SABRE_IMAP_EUPA_ALIAS 0x8000UL /* Aliases to 0x10a0 */
164#define SABRE_IOMMU_VADIAG 0xa400UL
165#define SABRE_IOMMU_TCDIAG 0xa408UL
166#define SABRE_IOMMU_TAG 0xa580UL
167#define SABRE_IOMMUTAG_ERRSTS 0x0000000001800000UL /* Error status bits */
168#define SABRE_IOMMUTAG_ERR 0x0000000000400000UL /* Error present */
169#define SABRE_IOMMUTAG_WRITE 0x0000000000200000UL /* Page is writable */
170#define SABRE_IOMMUTAG_STREAM 0x0000000000100000UL /* Streamable bit - unused */
171#define SABRE_IOMMUTAG_SIZE 0x0000000000080000UL /* 0=8k 1=16k */
172#define SABRE_IOMMUTAG_VPN 0x000000000007ffffUL /* Virtual Page Number [31:13] */
173#define SABRE_IOMMU_DATA 0xa600UL
174#define SABRE_IOMMUDATA_VALID 0x0000000040000000UL /* Valid */
175#define SABRE_IOMMUDATA_USED 0x0000000020000000UL /* Used (for LRU algorithm) */
176#define SABRE_IOMMUDATA_CACHE 0x0000000010000000UL /* Cacheable */
177#define SABRE_IOMMUDATA_PPN 0x00000000001fffffUL /* Physical Page Number [33:13] */
178#define SABRE_PCI_IRQSTATE 0xa800UL
179#define SABRE_OBIO_IRQSTATE 0xa808UL
180#define SABRE_FFBCFG 0xf000UL
181#define SABRE_FFBCFG_SPRQS 0x000000000f000000 /* Slave P_RQST queue size */
182#define SABRE_FFBCFG_ONEREAD 0x0000000000004000 /* Slave supports one outstanding read */
183#define SABRE_MCCTRL0 0xf010UL
184#define SABRE_MCCTRL0_RENAB 0x0000000080000000 /* Refresh Enable */
185#define SABRE_MCCTRL0_EENAB 0x0000000010000000 /* Enable all ECC functions */
186#define SABRE_MCCTRL0_11BIT 0x0000000000001000 /* Enable 11-bit column addressing */
187#define SABRE_MCCTRL0_DPP 0x0000000000000f00 /* DIMM Pair Present Bits */
188#define SABRE_MCCTRL0_RINTVL 0x00000000000000ff /* Refresh Interval */
189#define SABRE_MCCTRL1 0xf018UL
190#define SABRE_MCCTRL1_AMDC 0x0000000038000000 /* Advance Memdata Clock */
191#define SABRE_MCCTRL1_ARDC 0x0000000007000000 /* Advance DRAM Read Data Clock */
192#define SABRE_MCCTRL1_CSR 0x0000000000e00000 /* CAS to RAS delay for CBR refresh */
193#define SABRE_MCCTRL1_CASRW 0x00000000001c0000 /* CAS length for read/write */
194#define SABRE_MCCTRL1_RCD 0x0000000000038000 /* RAS to CAS delay */
195#define SABRE_MCCTRL1_CP 0x0000000000007000 /* CAS Precharge */
196#define SABRE_MCCTRL1_RP 0x0000000000000e00 /* RAS Precharge */
197#define SABRE_MCCTRL1_RAS 0x00000000000001c0 /* Length of RAS for refresh */
198#define SABRE_MCCTRL1_CASRW2 0x0000000000000038 /* Must be same as CASRW */
199#define SABRE_MCCTRL1_RSC 0x0000000000000007 /* RAS after CAS hold time */
200#define SABRE_RESETCTRL 0xf020UL
201
202#define SABRE_CONFIGSPACE 0x001000000UL
203#define SABRE_IOSPACE 0x002000000UL
204#define SABRE_IOSPACE_SIZE 0x000ffffffUL
205#define SABRE_MEMSPACE 0x100000000UL
206#define SABRE_MEMSPACE_SIZE 0x07fffffffUL
207
208/* UltraSparc-IIi Programmer's Manual, page 325, PCI
209 * configuration space address format:
210 *
211 * 32 24 23 16 15 11 10 8 7 2 1 0
212 * ---------------------------------------------------------
213 * |0 0 0 0 0 0 0 0 1| bus | device | function | reg | 0 0 |
214 * ---------------------------------------------------------
215 */
216#define SABRE_CONFIG_BASE(PBM) \
217 ((PBM)->config_space | (1UL << 24))
218#define SABRE_CONFIG_ENCODE(BUS, DEVFN, REG) \
219 (((unsigned long)(BUS) << 16) | \
220 ((unsigned long)(DEVFN) << 8) | \
221 ((unsigned long)(REG)))
222
223static int hummingbird_p;
224static struct pci_bus *sabre_root_bus;
225
226static void *sabre_pci_config_mkaddr(struct pci_pbm_info *pbm,
227 unsigned char bus,
228 unsigned int devfn,
229 int where)
230{
231 if (!pbm)
232 return NULL;
233 return (void *)
234 (SABRE_CONFIG_BASE(pbm) |
235 SABRE_CONFIG_ENCODE(bus, devfn, where));
236}
237
238static int sabre_out_of_range(unsigned char devfn)
239{
240 if (hummingbird_p)
241 return 0;
242
243 return (((PCI_SLOT(devfn) == 0) && (PCI_FUNC(devfn) > 0)) ||
244 ((PCI_SLOT(devfn) == 1) && (PCI_FUNC(devfn) > 1)) ||
245 (PCI_SLOT(devfn) > 1));
246}
247
248static int __sabre_out_of_range(struct pci_pbm_info *pbm,
249 unsigned char bus,
250 unsigned char devfn)
251{
252 if (hummingbird_p)
253 return 0;
254
255 return ((pbm->parent == 0) ||
256 ((pbm == &pbm->parent->pbm_B) &&
257 (bus == pbm->pci_first_busno) &&
258 PCI_SLOT(devfn) > 8) ||
259 ((pbm == &pbm->parent->pbm_A) &&
260 (bus == pbm->pci_first_busno) &&
261 PCI_SLOT(devfn) > 8));
262}
263
264static int __sabre_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
265 int where, int size, u32 *value)
266{
267 struct pci_pbm_info *pbm = bus_dev->sysdata;
268 unsigned char bus = bus_dev->number;
269 u32 *addr;
270 u16 tmp16;
271 u8 tmp8;
272
273 switch (size) {
274 case 1:
275 *value = 0xff;
276 break;
277 case 2:
278 *value = 0xffff;
279 break;
280 case 4:
281 *value = 0xffffffff;
282 break;
283 }
284
285 addr = sabre_pci_config_mkaddr(pbm, bus, devfn, where);
286 if (!addr)
287 return PCIBIOS_SUCCESSFUL;
288
289 if (__sabre_out_of_range(pbm, bus, devfn))
290 return PCIBIOS_SUCCESSFUL;
291
292 switch (size) {
293 case 1:
294 pci_config_read8((u8 *) addr, &tmp8);
295 *value = tmp8;
296 break;
297
298 case 2:
299 if (where & 0x01) {
300 printk("pci_read_config_word: misaligned reg [%x]\n",
301 where);
302 return PCIBIOS_SUCCESSFUL;
303 }
304 pci_config_read16((u16 *) addr, &tmp16);
305 *value = tmp16;
306 break;
307
308 case 4:
309 if (where & 0x03) {
310 printk("pci_read_config_dword: misaligned reg [%x]\n",
311 where);
312 return PCIBIOS_SUCCESSFUL;
313 }
314 pci_config_read32(addr, value);
315 break;
316 }
317
318 return PCIBIOS_SUCCESSFUL;
319}
320
/* Top-level config space read entry point (pci_ops.read).
 *
 * Bus 0 / slot 0 is the Sabre host controller itself, whose registers
 * must be accessed at their natural size; byte and dword reads to it
 * are synthesized from the access sizes the hardware accepts.
 * Always returns PCIBIOS_SUCCESSFUL.
 */
static int sabre_read_pci_cfg(struct pci_bus *bus, unsigned int devfn,
			      int where, int size, u32 *value)
{
	/* Impossible (slot, function) combinations read as all-ones. */
	if (!bus->number && sabre_out_of_range(devfn)) {
		switch (size) {
		case 1:
			*value = 0xff;
			break;
		case 2:
			*value = 0xffff;
			break;
		case 4:
			*value = 0xffffffff;
			break;
		}
		return PCIBIOS_SUCCESSFUL;
	}

	/* Anything other than the controller itself goes straight
	 * to the low-level accessor.
	 */
	if (bus->number || PCI_SLOT(devfn))
		return __sabre_read_pci_cfg(bus, devfn, where, size, value);

	/* When accessing PCI config space of the PCI controller itself (bus
	 * 0, device slot 0, function 0) there are restrictions.  Each
	 * register must be accessed as its natural size.  Thus, for example
	 * the Vendor ID must be accessed as a 16-bit quantity.
	 */

	switch (size) {
	case 1:
		/* The first 8 bytes hold 16-bit registers: read the
		 * containing word and extract the requested byte.
		 */
		if (where < 8) {
			u32 tmp32;
			u16 tmp16;

			__sabre_read_pci_cfg(bus, devfn, where & ~1, 2, &tmp32);
			tmp16 = (u16) tmp32;
			if (where & 1)
				*value = tmp16 >> 8;
			else
				*value = tmp16 & 0xff;
		} else
			return __sabre_read_pci_cfg(bus, devfn, where, 1, value);
		break;

	case 2:
		if (where < 8)
			return __sabre_read_pci_cfg(bus, devfn, where, 2, value);
		else {
			/* Past offset 8 the registers are byte-sized:
			 * assemble the word from two byte reads.
			 * NOTE(review): assumes 'where' is even here — the
			 * misaligned case is not rejected; confirm callers.
			 */
			u32 tmp32;
			u8 tmp8;

			__sabre_read_pci_cfg(bus, devfn, where, 1, &tmp32);
			tmp8 = (u8) tmp32;
			*value = tmp8;
			__sabre_read_pci_cfg(bus, devfn, where + 1, 1, &tmp32);
			tmp8 = (u8) tmp32;
			*value |= tmp8 << 8;
		}
		break;

	case 4: {
		/* 32-bit reads recurse as two 16-bit reads so each half
		 * obeys the natural-size rules above.
		 */
		u32 tmp32;
		u16 tmp16;

		sabre_read_pci_cfg(bus, devfn, where, 2, &tmp32);
		tmp16 = (u16) tmp32;
		*value = tmp16;
		sabre_read_pci_cfg(bus, devfn, where + 2, 2, &tmp32);
		tmp16 = (u16) tmp32;
		*value |= tmp16 << 16;
		break;
	}
	}
	return PCIBIOS_SUCCESSFUL;
}
395
396static int __sabre_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
397 int where, int size, u32 value)
398{
399 struct pci_pbm_info *pbm = bus_dev->sysdata;
400 unsigned char bus = bus_dev->number;
401 u32 *addr;
402
403 addr = sabre_pci_config_mkaddr(pbm, bus, devfn, where);
404 if (!addr)
405 return PCIBIOS_SUCCESSFUL;
406
407 if (__sabre_out_of_range(pbm, bus, devfn))
408 return PCIBIOS_SUCCESSFUL;
409
410 switch (size) {
411 case 1:
412 pci_config_write8((u8 *) addr, value);
413 break;
414
415 case 2:
416 if (where & 0x01) {
417 printk("pci_write_config_word: misaligned reg [%x]\n",
418 where);
419 return PCIBIOS_SUCCESSFUL;
420 }
421 pci_config_write16((u16 *) addr, value);
422 break;
423
424 case 4:
425 if (where & 0x03) {
426 printk("pci_write_config_dword: misaligned reg [%x]\n",
427 where);
428 return PCIBIOS_SUCCESSFUL;
429 }
430 pci_config_write32(addr, value);
431 break;
432 }
433
434 return PCIBIOS_SUCCESSFUL;
435}
436
437static int sabre_write_pci_cfg(struct pci_bus *bus, unsigned int devfn,
438 int where, int size, u32 value)
439{
440 if (bus->number)
441 return __sabre_write_pci_cfg(bus, devfn, where, size, value);
442
443 if (sabre_out_of_range(devfn))
444 return PCIBIOS_SUCCESSFUL;
445
446 switch (size) {
447 case 1:
448 if (where < 8) {
449 u32 tmp32;
450 u16 tmp16;
451
452 __sabre_read_pci_cfg(bus, devfn, where & ~1, 2, &tmp32);
453 tmp16 = (u16) tmp32;
454 if (where & 1) {
455 value &= 0x00ff;
456 value |= tmp16 << 8;
457 } else {
458 value &= 0xff00;
459 value |= tmp16;
460 }
461 tmp32 = (u32) tmp16;
462 return __sabre_write_pci_cfg(bus, devfn, where & ~1, 2, tmp32);
463 } else
464 return __sabre_write_pci_cfg(bus, devfn, where, 1, value);
465 break;
466 case 2:
467 if (where < 8)
468 return __sabre_write_pci_cfg(bus, devfn, where, 2, value);
469 else {
470 __sabre_write_pci_cfg(bus, devfn, where, 1, value & 0xff);
471 __sabre_write_pci_cfg(bus, devfn, where + 1, 1, value >> 8);
472 }
473 break;
474 case 4:
475 sabre_write_pci_cfg(bus, devfn, where, 2, value & 0xffff);
476 sabre_write_pci_cfg(bus, devfn, where + 2, 2, value >> 16);
477 break;
478 }
479 return PCIBIOS_SUCCESSFUL;
480}
481
/* Config space accessors handed to the generic PCI layer. */
static struct pci_ops sabre_ops = {
	.read =		sabre_read_pci_cfg,
	.write =	sabre_write_pci_cfg,
};
486
487static unsigned long sabre_pcislot_imap_offset(unsigned long ino)
488{
489 unsigned int bus = (ino & 0x10) >> 4;
490 unsigned int slot = (ino & 0x0c) >> 2;
491
492 if (bus == 0)
493 return SABRE_IMAP_A_SLOT0 + (slot * 8);
494 else
495 return SABRE_IMAP_B_SLOT0 + (slot * 8);
496}
497
/* IMAP register offsets for on-board devices, indexed by
 * (INO - 0x20); entries of 0 are reserved INOs with no IMAP.
 */
static unsigned long __onboard_imap_off[] = {
/*0x20*/ SABRE_IMAP_SCSI,
/*0x21*/ SABRE_IMAP_ETH,
/*0x22*/ SABRE_IMAP_BPP,
/*0x23*/ SABRE_IMAP_AU_REC,
/*0x24*/ SABRE_IMAP_AU_PLAY,
/*0x25*/ SABRE_IMAP_PFAIL,
/*0x26*/ SABRE_IMAP_KMS,
/*0x27*/ SABRE_IMAP_FLPY,
/*0x28*/ SABRE_IMAP_SHW,
/*0x29*/ SABRE_IMAP_KBD,
/*0x2a*/ SABRE_IMAP_MS,
/*0x2b*/ SABRE_IMAP_SER,
/*0x2c*/ 0 /* reserved */,
/*0x2d*/ 0 /* reserved */,
/*0x2e*/ SABRE_IMAP_UE,
/*0x2f*/ SABRE_IMAP_CE,
/*0x30*/ SABRE_IMAP_PCIERR,
};
517#define SABRE_ONBOARD_IRQ_BASE 0x20
518#define SABRE_ONBOARD_IRQ_LAST 0x30
519#define sabre_onboard_imap_offset(__ino) \
520 __onboard_imap_off[(__ino) - SABRE_ONBOARD_IRQ_BASE]
521
522#define sabre_iclr_offset(ino) \
523 ((ino & 0x20) ? (SABRE_ICLR_SCSI + (((ino) & 0x1f) << 3)) : \
524 (SABRE_ICLR_A_SLOT0 + (((ino) & 0x1f)<<3)))
525
526/* PCI SABRE INO number to Sparc PIL level. */
/* Indexed by INO.  A zero entry means "no fixed PIL": for PCI slot
 * interrupts sabre_ino_to_pil() derives the level from the device's
 * PCI base class instead.
 */
static unsigned char sabre_pil_table[] = {
/*0x00*/0, 0, 0, 0,	/* PCI A slot 0  Int A, B, C, D */
/*0x04*/0, 0, 0, 0,	/* PCI A slot 1  Int A, B, C, D */
/*0x08*/0, 0, 0, 0,	/* PCI A slot 2  Int A, B, C, D */
/*0x0c*/0, 0, 0, 0,	/* PCI A slot 3  Int A, B, C, D */
/*0x10*/0, 0, 0, 0,	/* PCI B slot 0  Int A, B, C, D */
/*0x14*/0, 0, 0, 0,	/* PCI B slot 1  Int A, B, C, D */
/*0x18*/0, 0, 0, 0,	/* PCI B slot 2  Int A, B, C, D */
/*0x1c*/0, 0, 0, 0,	/* PCI B slot 3  Int A, B, C, D */
/*0x20*/4,		/* SCSI				*/
/*0x21*/5,		/* Ethernet			*/
/*0x22*/8,		/* Parallel Port		*/
/*0x23*/13,		/* Audio Record			*/
/*0x24*/14,		/* Audio Playback		*/
/*0x25*/15,		/* PowerFail			*/
/*0x26*/4,		/* second SCSI			*/
/*0x27*/11,		/* Floppy			*/
/*0x28*/4,		/* Spare Hardware		*/
/*0x29*/9,		/* Keyboard			*/
/*0x2a*/4,		/* Mouse			*/
/*0x2b*/12,		/* Serial			*/
/*0x2c*/10,		/* Timer 0			*/
/*0x2d*/11,		/* Timer 1			*/
/*0x2e*/15,		/* Uncorrectable ECC		*/
/*0x2f*/15,		/* Correctable ECC		*/
/*0x30*/15,		/* PCI Bus A Error		*/
/*0x31*/15,		/* PCI Bus B Error		*/
/*0x32*/15,		/* Power Management		*/
};
556
557static int __init sabre_ino_to_pil(struct pci_dev *pdev, unsigned int ino)
558{
559 int ret;
560
561 if (pdev &&
562 pdev->vendor == PCI_VENDOR_ID_SUN &&
563 pdev->device == PCI_DEVICE_ID_SUN_RIO_USB)
564 return 9;
565
566 ret = sabre_pil_table[ino];
567 if (ret == 0 && pdev == NULL) {
568 ret = 4;
569 } else if (ret == 0) {
570 switch ((pdev->class >> 16) & 0xff) {
571 case PCI_BASE_CLASS_STORAGE:
572 ret = 4;
573 break;
574
575 case PCI_BASE_CLASS_NETWORK:
576 ret = 6;
577 break;
578
579 case PCI_BASE_CLASS_DISPLAY:
580 ret = 9;
581 break;
582
583 case PCI_BASE_CLASS_MULTIMEDIA:
584 case PCI_BASE_CLASS_MEMORY:
585 case PCI_BASE_CLASS_BRIDGE:
586 case PCI_BASE_CLASS_SERIAL:
587 ret = 10;
588 break;
589
590 default:
591 ret = 4;
592 break;
593 };
594 }
595 return ret;
596}
597
/* Build an IRQ for the given Sabre interrupt number (INO), returning
 * the cookie used by the generic IRQ layer.  Locates the IMAP/ICLR
 * register pair for the INO, picks a PIL, and flags the bucket for
 * the PCI-specific handling (including DMA write-sync for devices
 * behind a second-level bridge).
 */
static unsigned int __init sabre_irq_build(struct pci_pbm_info *pbm,
					   struct pci_dev *pdev,
					   unsigned int ino)
{
	struct ino_bucket *bucket;
	unsigned long imap, iclr;
	unsigned long imap_off, iclr_off;
	int pil, inofixup = 0;

	ino &= PCI_IRQ_INO;
	if (ino < SABRE_ONBOARD_IRQ_BASE) {
		/* PCI slot */
		imap_off = sabre_pcislot_imap_offset(ino);
	} else {
		/* onboard device */
		if (ino > SABRE_ONBOARD_IRQ_LAST) {
			prom_printf("sabre_irq_build: Wacky INO [%x]\n", ino);
			prom_halt();
		}
		imap_off = sabre_onboard_imap_offset(ino);
	}

	/* Now build the IRQ bucket. */
	pil = sabre_ino_to_pil(pdev, ino);

	if (PIL_RESERVED(pil))
		BUG();

	/* NOTE(review): the +4 offsets below appear to select the low
	 * 32 bits of the 64-bit IMAP/ICLR registers — confirm against
	 * the UltraSPARC-IIi manual.
	 */
	imap = pbm->controller_regs + imap_off;
	imap += 4;

	iclr_off = sabre_iclr_offset(ino);
	iclr = pbm->controller_regs + iclr_off;
	iclr += 4;

	/* PCI slot interrupts (bit 5 clear) carry the INT line (A-D)
	 * in the low two INO bits.
	 */
	if ((ino & 0x20) == 0)
		inofixup = ino & 0x03;

	bucket = __bucket(build_irq(pil, inofixup, iclr, imap));
	bucket->flags |= IBF_PCI;

	if (pdev) {
		struct pcidev_cookie *pcp = pdev->sysdata;

		/* When a device lives behind a bridge deeper in the
		 * PCI bus topology than APB, a special sequence must
		 * run to make sure all pending DMA transfers at the
		 * time of IRQ delivery are visible in the coherency
		 * domain by the cpu.  This sequence is to perform
		 * a read on the far side of the non-APB bridge, then
		 * perform a read of Sabre's DMA write-sync register.
		 *
		 * Currently, the PCI_CONFIG register for the device
		 * is used for this read from the far side of the bridge.
		 */
		if (pdev->bus->number != pcp->pbm->pci_first_busno) {
			bucket->flags |= IBF_DMA_SYNC;
			bucket->synctab_ent = dma_sync_reg_table_entry++;
			dma_sync_reg_table[bucket->synctab_ent] =
				(unsigned long) sabre_pci_config_mkaddr(
					pcp->pbm,
					pdev->bus->number, pdev->devfn, PCI_COMMAND);
		}
	}
	return __irq(bucket);
}
664
665/* SABRE error handling support. */
/* SABRE error handling support. */

/* Interrogate the IOMMU for an error after a UE/PCI error interrupt.
 * If the IOMMU control register flags an error, clear it, then enter
 * diagnostic mode, snapshot and reset all 16 IOTLB tag/data entries,
 * and log any entry that recorded an error.
 *
 * NOTE(review): the afsr/afar parameters are currently unused here;
 * they are kept for interface symmetry with the callers.
 */
static void sabre_check_iommu_error(struct pci_controller_info *p,
				    unsigned long afsr,
				    unsigned long afar)
{
	struct pci_iommu *iommu = p->pbm_A.iommu;
	unsigned long iommu_tag[16];
	unsigned long iommu_data[16];
	unsigned long flags;
	u64 control;
	int i;

	spin_lock_irqsave(&iommu->lock, flags);
	control = sabre_read(iommu->iommu_control);
	if (control & SABRE_IOMMUCTRL_ERR) {
		char *type_string;

		/* Clear the error encountered bit.
		 * NOTE: On Sabre this is write 1 to clear,
		 * which is different from Psycho.
		 */
		sabre_write(iommu->iommu_control, control);
		switch((control & SABRE_IOMMUCTRL_ERRSTS) >> 25UL) {
		case 1:
			type_string = "Invalid Error";
			break;
		case 3:
			type_string = "ECC Error";
			break;
		default:
			type_string = "Unknown";
			break;
		};
		printk("SABRE%d: IOMMU Error, type[%s]\n",
		       p->index, type_string);

		/* Enter diagnostic mode and probe for error'd
		 * entries in the IOTLB.
		 */
		control &= ~(SABRE_IOMMUCTRL_ERRSTS | SABRE_IOMMUCTRL_ERR);
		sabre_write(iommu->iommu_control,
			    (control | SABRE_IOMMUCTRL_DENAB));
		/* Snapshot then zero each IOTLB tag/data pair. */
		for (i = 0; i < 16; i++) {
			unsigned long base = p->pbm_A.controller_regs;

			iommu_tag[i] =
				sabre_read(base + SABRE_IOMMU_TAG + (i * 8UL));
			iommu_data[i] =
				sabre_read(base + SABRE_IOMMU_DATA + (i * 8UL));
			sabre_write(base + SABRE_IOMMU_TAG + (i * 8UL), 0);
			sabre_write(base + SABRE_IOMMU_DATA + (i * 8UL), 0);
		}
		/* Leave diagnostic mode (restore the cleaned control value). */
		sabre_write(iommu->iommu_control, control);

		/* Log every snapshot entry whose tag had its error bit set. */
		for (i = 0; i < 16; i++) {
			unsigned long tag, data;

			tag = iommu_tag[i];
			if (!(tag & SABRE_IOMMUTAG_ERR))
				continue;

			data = iommu_data[i];
			switch((tag & SABRE_IOMMUTAG_ERRSTS) >> 23UL) {
			case 1:
				type_string = "Invalid Error";
				break;
			case 3:
				type_string = "ECC Error";
				break;
			default:
				type_string = "Unknown";
				break;
			};
			printk("SABRE%d: IOMMU TAG(%d)[RAW(%016lx)error(%s)wr(%d)sz(%dK)vpg(%08lx)]\n",
			       p->index, i, tag, type_string,
			       ((tag & SABRE_IOMMUTAG_WRITE) ? 1 : 0),
			       ((tag & SABRE_IOMMUTAG_SIZE) ? 64 : 8),
			       ((tag & SABRE_IOMMUTAG_VPN) << IOMMU_PAGE_SHIFT));
			printk("SABRE%d: IOMMU DATA(%d)[RAW(%016lx)valid(%d)used(%d)cache(%d)ppg(%016lx)\n",
			       p->index, i, data,
			       ((data & SABRE_IOMMUDATA_VALID) ? 1 : 0),
			       ((data & SABRE_IOMMUDATA_USED) ? 1 : 0),
			       ((data & SABRE_IOMMUDATA_CACHE) ? 1 : 0),
			       ((data & SABRE_IOMMUDATA_PPN) << IOMMU_PAGE_SHIFT));
		}
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}
753
/* Interrupt handler for uncorrectable (UE) errors.  Latches the
 * AFSR/AFAR registers, clears the latched primary/secondary error
 * bits by writing them back, logs the details, and asks the IOMMU
 * whether it contributed.  Returns IRQ_NONE when no UE bits were set.
 */
static irqreturn_t sabre_ue_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct pci_controller_info *p = dev_id;
	unsigned long afsr_reg = p->pbm_A.controller_regs + SABRE_UE_AFSR;
	unsigned long afar_reg = p->pbm_A.controller_regs + SABRE_UECE_AFAR;
	unsigned long afsr, afar, error_bits;
	int reported;

	/* Latch uncorrectable error status. */
	afar = sabre_read(afar_reg);
	afsr = sabre_read(afsr_reg);

	/* Clear the primary/secondary error status bits. */
	error_bits = afsr &
		(SABRE_UEAFSR_PDRD | SABRE_UEAFSR_PDWR |
		 SABRE_UEAFSR_SDRD | SABRE_UEAFSR_SDWR |
		 SABRE_UEAFSR_SDTE | SABRE_UEAFSR_PDTE);
	if (!error_bits)
		return IRQ_NONE;
	sabre_write(afsr_reg, error_bits);

	/* Log the error. */
	printk("SABRE%d: Uncorrectable Error, primary error type[%s%s]\n",
	       p->index,
	       ((error_bits & SABRE_UEAFSR_PDRD) ?
		"DMA Read" :
		((error_bits & SABRE_UEAFSR_PDWR) ?
		 "DMA Write" : "???")),
	       ((error_bits & SABRE_UEAFSR_PDTE) ?
		":Translation Error" : ""));
	printk("SABRE%d: bytemask[%04lx] dword_offset[%lx] was_block(%d)\n",
	       p->index,
	       (afsr & SABRE_UEAFSR_BMSK) >> 32UL,
	       (afsr & SABRE_UEAFSR_OFF) >> 29UL,
	       ((afsr & SABRE_UEAFSR_BLK) ? 1 : 0));
	printk("SABRE%d: UE AFAR [%016lx]\n", p->index, afar);
	printk("SABRE%d: UE Secondary errors [", p->index);
	reported = 0;
	if (afsr & SABRE_UEAFSR_SDRD) {
		reported++;
		printk("(DMA Read)");
	}
	if (afsr & SABRE_UEAFSR_SDWR) {
		reported++;
		printk("(DMA Write)");
	}
	if (afsr & SABRE_UEAFSR_SDTE) {
		reported++;
		printk("(Translation Error)");
	}
	if (!reported)
		printk("(none)");
	printk("]\n");

	/* Interrogate IOMMU for error status. */
	sabre_check_iommu_error(p, afsr, afar);

	return IRQ_HANDLED;
}
813
/* Interrupt handler for correctable (CE) ECC errors.  Mirrors
 * sabre_ue_intr(): latch AFSR/AFAR, clear the latched error bits by
 * writing them back, and log syndrome/bytemask details.  Returns
 * IRQ_NONE when no CE bits were set.
 */
static irqreturn_t sabre_ce_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct pci_controller_info *p = dev_id;
	unsigned long afsr_reg = p->pbm_A.controller_regs + SABRE_CE_AFSR;
	unsigned long afar_reg = p->pbm_A.controller_regs + SABRE_UECE_AFAR;
	unsigned long afsr, afar, error_bits;
	int reported;

	/* Latch error status. */
	afar = sabre_read(afar_reg);
	afsr = sabre_read(afsr_reg);

	/* Clear primary/secondary error status bits. */
	error_bits = afsr &
		(SABRE_CEAFSR_PDRD | SABRE_CEAFSR_PDWR |
		 SABRE_CEAFSR_SDRD | SABRE_CEAFSR_SDWR);
	if (!error_bits)
		return IRQ_NONE;
	sabre_write(afsr_reg, error_bits);

	/* Log the error. */
	printk("SABRE%d: Correctable Error, primary error type[%s]\n",
	       p->index,
	       ((error_bits & SABRE_CEAFSR_PDRD) ?
		"DMA Read" :
		((error_bits & SABRE_CEAFSR_PDWR) ?
		 "DMA Write" : "???")));

	/* XXX Use syndrome and afar to print out module string just like
	 * XXX UDB CE trap handler does... -DaveM
	 */
	printk("SABRE%d: syndrome[%02lx] bytemask[%04lx] dword_offset[%lx] "
	       "was_block(%d)\n",
	       p->index,
	       (afsr & SABRE_CEAFSR_ESYND) >> 48UL,
	       (afsr & SABRE_CEAFSR_BMSK) >> 32UL,
	       (afsr & SABRE_CEAFSR_OFF) >> 29UL,
	       ((afsr & SABRE_CEAFSR_BLK) ? 1 : 0));
	printk("SABRE%d: CE AFAR [%016lx]\n", p->index, afar);
	printk("SABRE%d: CE Secondary errors [", p->index);
	reported = 0;
	if (afsr & SABRE_CEAFSR_SDRD) {
		reported++;
		printk("(DMA Read)");
	}
	if (afsr & SABRE_CEAFSR_SDWR) {
		reported++;
		printk("(DMA Write)");
	}
	if (!reported)
		printk("(none)");
	printk("]\n");

	return IRQ_HANDLED;
}
869
870static irqreturn_t sabre_pcierr_intr_other(struct pci_controller_info *p)
871{
872 unsigned long csr_reg, csr, csr_error_bits;
873 irqreturn_t ret = IRQ_NONE;
874 u16 stat;
875
876 csr_reg = p->pbm_A.controller_regs + SABRE_PCICTRL;
877 csr = sabre_read(csr_reg);
878 csr_error_bits =
879 csr & SABRE_PCICTRL_SERR;
880 if (csr_error_bits) {
881 /* Clear the errors. */
882 sabre_write(csr_reg, csr);
883
884 /* Log 'em. */
885 if (csr_error_bits & SABRE_PCICTRL_SERR)
886 printk("SABRE%d: PCI SERR signal asserted.\n",
887 p->index);
888 ret = IRQ_HANDLED;
889 }
890 pci_read_config_word(sabre_root_bus->self,
891 PCI_STATUS, &stat);
892 if (stat & (PCI_STATUS_PARITY |
893 PCI_STATUS_SIG_TARGET_ABORT |
894 PCI_STATUS_REC_TARGET_ABORT |
895 PCI_STATUS_REC_MASTER_ABORT |
896 PCI_STATUS_SIG_SYSTEM_ERROR)) {
897 printk("SABRE%d: PCI bus error, PCI_STATUS[%04x]\n",
898 p->index, stat);
899 pci_write_config_word(sabre_root_bus->self,
900 PCI_STATUS, 0xffff);
901 ret = IRQ_HANDLED;
902 }
903 return ret;
904}
905
/* Interrupt handler for PCI bus errors (master/target aborts,
 * excessive retries, parity errors).  Latches the PIO AFSR/AFAR,
 * clears the latched bits by writing them back, logs the details,
 * and scans both buses for devices that recorded the matching error.
 * When no PIO error bits are set, falls through to
 * sabre_pcierr_intr_other() for SERR/status-register checks.
 */
static irqreturn_t sabre_pcierr_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct pci_controller_info *p = dev_id;
	unsigned long afsr_reg, afar_reg;
	unsigned long afsr, afar, error_bits;
	int reported;

	afsr_reg = p->pbm_A.controller_regs + SABRE_PIOAFSR;
	afar_reg = p->pbm_A.controller_regs + SABRE_PIOAFAR;

	/* Latch error status. */
	afar = sabre_read(afar_reg);
	afsr = sabre_read(afsr_reg);

	/* Clear primary/secondary error status bits. */
	error_bits = afsr &
		(SABRE_PIOAFSR_PMA | SABRE_PIOAFSR_PTA |
		 SABRE_PIOAFSR_PRTRY | SABRE_PIOAFSR_PPERR |
		 SABRE_PIOAFSR_SMA | SABRE_PIOAFSR_STA |
		 SABRE_PIOAFSR_SRTRY | SABRE_PIOAFSR_SPERR);
	if (!error_bits)
		return sabre_pcierr_intr_other(p);
	sabre_write(afsr_reg, error_bits);

	/* Log the error. */
	printk("SABRE%d: PCI Error, primary error type[%s]\n",
	       p->index,
	       (((error_bits & SABRE_PIOAFSR_PMA) ?
		 "Master Abort" :
		 ((error_bits & SABRE_PIOAFSR_PTA) ?
		  "Target Abort" :
		  ((error_bits & SABRE_PIOAFSR_PRTRY) ?
		   "Excessive Retries" :
		   ((error_bits & SABRE_PIOAFSR_PPERR) ?
		    "Parity Error" : "???"))))));
	printk("SABRE%d: bytemask[%04lx] was_block(%d)\n",
	       p->index,
	       (afsr & SABRE_PIOAFSR_BMSK) >> 32UL,
	       (afsr & SABRE_PIOAFSR_BLK) ? 1 : 0);
	printk("SABRE%d: PCI AFAR [%016lx]\n", p->index, afar);
	printk("SABRE%d: PCI Secondary errors [", p->index);
	reported = 0;
	if (afsr & SABRE_PIOAFSR_SMA) {
		reported++;
		printk("(Master Abort)");
	}
	if (afsr & SABRE_PIOAFSR_STA) {
		reported++;
		printk("(Target Abort)");
	}
	if (afsr & SABRE_PIOAFSR_SRTRY) {
		reported++;
		printk("(Excessive Retries)");
	}
	if (afsr & SABRE_PIOAFSR_SPERR) {
		reported++;
		printk("(Parity Error)");
	}
	if (!reported)
		printk("(none)");
	printk("]\n");

	/* For the error types shown, scan both PCI buses for devices
	 * which have logged that error type.
	 */

	/* If we see a Target Abort, this could be the result of an
	 * IOMMU translation error of some sort.  It is extremely
	 * useful to log this information as usually it indicates
	 * a bug in the IOMMU support code or a PCI device driver.
	 */
	if (error_bits & (SABRE_PIOAFSR_PTA | SABRE_PIOAFSR_STA)) {
		sabre_check_iommu_error(p, afsr, afar);
		pci_scan_for_target_abort(p, &p->pbm_A, p->pbm_A.pci_bus);
		pci_scan_for_target_abort(p, &p->pbm_B, p->pbm_B.pci_bus);
	}
	if (error_bits & (SABRE_PIOAFSR_PMA | SABRE_PIOAFSR_SMA)) {
		pci_scan_for_master_abort(p, &p->pbm_A, p->pbm_A.pci_bus);
		pci_scan_for_master_abort(p, &p->pbm_B, p->pbm_B.pci_bus);
	}
	/* For excessive retries, SABRE/PBM will abort the device
	 * and there is no way to specifically check for excessive
	 * retries in the config space status registers.  So what
	 * we hope is that we'll catch it via the master/target
	 * abort events.
	 */

	if (error_bits & (SABRE_PIOAFSR_PPERR | SABRE_PIOAFSR_SPERR)) {
		pci_scan_for_parity_error(p, &p->pbm_A, p->pbm_A.pci_bus);
		pci_scan_for_parity_error(p, &p->pbm_B, p->pbm_B.pci_bus);
	}

	return IRQ_HANDLED;
}
1000
1001/* XXX What about PowerFail/PowerManagement??? -DaveM */
1002#define SABRE_UE_INO 0x2e
1003#define SABRE_CE_INO 0x2f
1004#define SABRE_PCIERR_INO 0x30
/* Register the UE, CE, and PCI error interrupt handlers for this
 * SABRE.  Stale AFSR bits are cleared before each request_irq() to
 * avoid spurious interrupts, and error reporting (ERREN) is only
 * switched on after all three handlers are in place.  Any failure
 * to register a handler halts the machine via the PROM.
 */
1005static void __init sabre_register_error_handlers(struct pci_controller_info *p)
1006{
1007	struct pci_pbm_info *pbm = &p->pbm_A; /* arbitrary */
1008	unsigned long base = pbm->controller_regs;
1009	unsigned long irq, portid = pbm->portid;
1010	u64 tmp;
1011
1012	/* We clear the error bits in the appropriate AFSR before
1013	 * registering the handler so that we don't get spurious
1014	 * interrupts.
1015	 */
1016	sabre_write(base + SABRE_UE_AFSR,
1017		    (SABRE_UEAFSR_PDRD | SABRE_UEAFSR_PDWR |
1018		     SABRE_UEAFSR_SDRD | SABRE_UEAFSR_SDWR |
1019		     SABRE_UEAFSR_SDTE | SABRE_UEAFSR_PDTE));
1020	irq = sabre_irq_build(pbm, NULL, (portid << 6) | SABRE_UE_INO);
1021	if (request_irq(irq, sabre_ue_intr,
1022			SA_SHIRQ, "SABRE UE", p) < 0) {
1023		prom_printf("SABRE%d: Cannot register UE interrupt.\n",
1024			    p->index);
1025		prom_halt();
1026	}
1027
1028	sabre_write(base + SABRE_CE_AFSR,
1029		    (SABRE_CEAFSR_PDRD | SABRE_CEAFSR_PDWR |
1030		     SABRE_CEAFSR_SDRD | SABRE_CEAFSR_SDWR));
1031	irq = sabre_irq_build(pbm, NULL, (portid << 6) | SABRE_CE_INO);
1032	if (request_irq(irq, sabre_ce_intr,
1033			SA_SHIRQ, "SABRE CE", p) < 0) {
1034		prom_printf("SABRE%d: Cannot register CE interrupt.\n",
1035			    p->index);
1036		prom_halt();
1037	}
1038
1039	irq = sabre_irq_build(pbm, NULL, (portid << 6) | SABRE_PCIERR_INO);
1040	if (request_irq(irq, sabre_pcierr_intr,
1041			SA_SHIRQ, "SABRE PCIERR", p) < 0) {
1042		prom_printf("SABRE%d: Cannot register PciERR interrupt.\n",
1043			    p->index);
1044		prom_halt();
1045	}
1046
	/* All handlers installed; now enable error interrupt
	 * generation in the PCI control register.
	 */
1047	tmp = sabre_read(base + SABRE_PCICTRL);
1048	tmp |= SABRE_PCICTRL_ERREN;
1049	sabre_write(base + SABRE_PCICTRL, tmp);
1050}
1051
1052static void __init sabre_resource_adjust(struct pci_dev *pdev,
1053 struct resource *res,
1054 struct resource *root)
1055{
1056 struct pci_pbm_info *pbm = pdev->bus->sysdata;
1057 unsigned long base;
1058
1059 if (res->flags & IORESOURCE_IO)
1060 base = pbm->controller_regs + SABRE_IOSPACE;
1061 else
1062 base = pbm->controller_regs + SABRE_MEMSPACE;
1063
1064 res->start += base;
1065 res->end += base;
1066}
1067
/* Write an assigned BAR value back into the device's config space.
 * Called by the PCI common layer after resource assignment; the
 * bus-relative value is recovered by subtracting the controller's
 * IO/MEM space base (the inverse of sabre_resource_adjust).
 */
1068static void __init sabre_base_address_update(struct pci_dev *pdev, int resource)
1069{
1070	struct pcidev_cookie *pcp = pdev->sysdata;
1071	struct pci_pbm_info *pbm = pcp->pbm;
1072	struct resource *res;
1073	unsigned long base;
1074	u32 reg;
1075	int where, size, is_64bit;
1076
1077	res = &pdev->resource[resource];
1078	if (resource < 6) {
1079		where = PCI_BASE_ADDRESS_0 + (resource * 4);
1080	} else if (resource == PCI_ROM_RESOURCE) {
1081		where = pdev->rom_base_reg;
1082	} else {
1083		/* Somebody might have asked allocation of a non-standard resource */
1084		return;
1085	}
1086
1087	is_64bit = 0;
1088	if (res->flags & IORESOURCE_IO)
1089		base = pbm->controller_regs + SABRE_IOSPACE;
1090	else {
1091		base = pbm->controller_regs + SABRE_MEMSPACE;
1092		if ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK)
1093		    == PCI_BASE_ADDRESS_MEM_TYPE_64)
1094			is_64bit = 1;
1095	}
1096
	/* Keep the BAR's read-only low bits (the size/type encoding
	 * within SIZE) and merge in the new bus-relative address.
	 */
1097	size = res->end - res->start;
1098	pci_read_config_dword(pdev, where, &reg);
1099	reg = ((reg & size) |
1100	       (((u32)(res->start - base)) & ~size));
1101	if (resource == PCI_ROM_RESOURCE) {
1102		reg |= PCI_ROM_ADDRESS_ENABLE;
1103		res->flags |= IORESOURCE_ROM_ENABLE;
1104	}
1105	pci_write_config_dword(pdev, where, reg);
1106
1107	/* This knows that the upper 32-bits of the address
1108	 * must be zero.  Our PCI common layer enforces this.
1109	 */
1110	if (is_64bit)
1111		pci_write_config_dword(pdev, where + 4, 0);
1112}
1113
/* One-time setup of any APB (Simba) bridges found directly on the
 * SABRE bus: enable IO/MEM decoding, bus mastering and error
 * signalling, clear stale status bits, set latency timers, and
 * enable error forwarding in the bridge control register.  Uses the
 * raw sabre config accessors since this runs before per-device
 * cookies are attached to the bridge pci_devs.
 */
1114static void __init apb_init(struct pci_controller_info *p, struct pci_bus *sabre_bus)
1115{
1116	struct pci_dev *pdev;
1117
1118	list_for_each_entry(pdev, &sabre_bus->devices, bus_list) {
1119
1120		if (pdev->vendor == PCI_VENDOR_ID_SUN &&
1121		    pdev->device == PCI_DEVICE_ID_SUN_SIMBA) {
1122			u32 word32;
1123			u16 word16;
1124
1125			sabre_read_pci_cfg(pdev->bus, pdev->devfn,
1126					   PCI_COMMAND, 2, &word32);
1127			word16 = (u16) word32;
1128			word16 |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY |
1129				PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY |
1130				PCI_COMMAND_IO;
1131			word32 = (u32) word16;
1132			sabre_write_pci_cfg(pdev->bus, pdev->devfn,
1133					    PCI_COMMAND, 2, word32);
1134
1135			/* Status register bits are "write 1 to clear". */
1136			sabre_write_pci_cfg(pdev->bus, pdev->devfn,
1137					    PCI_STATUS, 2, 0xffff);
1138			sabre_write_pci_cfg(pdev->bus, pdev->devfn,
1139					    PCI_SEC_STATUS, 2, 0xffff);
1140
1141			/* Use a primary/seconday latency timer value
1142			 * of 64.
1143			 */
1144			sabre_write_pci_cfg(pdev->bus, pdev->devfn,
1145					    PCI_LATENCY_TIMER, 1, 64);
1146			sabre_write_pci_cfg(pdev->bus, pdev->devfn,
1147					    PCI_SEC_LATENCY_TIMER, 1, 64);
1148
1149			/* Enable reporting/forwarding of master aborts,
1150			 * parity, and SERR.
1151			 */
1152			sabre_write_pci_cfg(pdev->bus, pdev->devfn,
1153					    PCI_BRIDGE_CONTROL, 1,
1154					    (PCI_BRIDGE_CTL_PARITY |
1155					     PCI_BRIDGE_CTL_SERR |
1156					     PCI_BRIDGE_CTL_MASTER_ABORT));
1157		}
1158	}
1159}
1160
1161static struct pcidev_cookie *alloc_bridge_cookie(struct pci_pbm_info *pbm)
1162{
1163 struct pcidev_cookie *cookie = kmalloc(sizeof(*cookie), GFP_KERNEL);
1164
1165 if (!cookie) {
1166 prom_printf("SABRE: Critical allocation failure.\n");
1167 prom_halt();
1168 }
1169
1170 /* All we care about is the PBM. */
1171 memset(cookie, 0, sizeof(*cookie));
1172 cookie->pbm = pbm;
1173
1174 return cookie;
1175}
1176
/* Probe the PCI hierarchy under this SABRE, attach PBM cookies, and
 * run the common per-segment fixups.  Two layouts are handled: the
 * normal case where APB (Simba) bridges provide two child segments
 * (PBM A and B), and the Hummingbird case where devices sit directly
 * on the SABRE bus.  Error interrupts are registered last, after the
 * bus is quiesced by the scan.  Only a single SABRE is supported.
 */
1177static void __init sabre_scan_bus(struct pci_controller_info *p)
1178{
1179	static int once;
1180	struct pci_bus *sabre_bus, *pbus;
1181	struct pci_pbm_info *pbm;
1182	struct pcidev_cookie *cookie;
1183	int sabres_scanned;
1184
1185	/* The APB bridge speaks to the Sabre host PCI bridge
1186	 * at 66Mhz, but the front side of APB runs at 33Mhz
1187	 * for both segments.
1188	 */
1189	p->pbm_A.is_66mhz_capable = 0;
1190	p->pbm_B.is_66mhz_capable = 0;
1191
1192	/* This driver has not been verified to handle
1193	 * multiple SABREs yet, so trap this.
1194	 *
1195	 * Also note that the SABRE host bridge is hardwired
1196	 * to live at bus 0.
1197	 */
1198	if (once != 0) {
1199		prom_printf("SABRE: Multiple controllers unsupported.\n");
1200		prom_halt();
1201	}
1202	once++;
1203
1204	cookie = alloc_bridge_cookie(&p->pbm_A);
1205
1206	sabre_bus = pci_scan_bus(p->pci_first_busno,
1207				 p->pci_ops,
1208				 &p->pbm_A);
1209	pci_fixup_host_bridge_self(sabre_bus);
1210	sabre_bus->self->sysdata = cookie;
1211
1212	sabre_root_bus = sabre_bus;
1213
1214	apb_init(p, sabre_bus);
1215
1216	sabres_scanned = 0;
1217
	/* Walk the child buses; those matching PBM A/B's first bus
	 * number get the full per-PBM fixup treatment.
	 */
1218	list_for_each_entry(pbus, &sabre_bus->children, node) {
1219
1220		if (pbus->number == p->pbm_A.pci_first_busno) {
1221			pbm = &p->pbm_A;
1222		} else if (pbus->number == p->pbm_B.pci_first_busno) {
1223			pbm = &p->pbm_B;
1224		} else
1225			continue;
1226
1227		cookie = alloc_bridge_cookie(pbm);
1228		pbus->self->sysdata = cookie;
1229
1230		sabres_scanned++;
1231
1232		pbus->sysdata = pbm;
1233		pbm->pci_bus = pbus;
1234		pci_fill_in_pbm_cookies(pbus, pbm, pbm->prom_node);
1235		pci_record_assignments(pbm, pbus);
1236		pci_assign_unassigned(pbm, pbus);
1237		pci_fixup_irq(pbm, pbus);
1238		pci_determine_66mhz_disposition(pbm, pbus);
1239		pci_setup_busmastering(pbm, pbus);
1240	}
1241
1242	if (!sabres_scanned) {
1243		/* Hummingbird, no APBs. */
1244		pbm = &p->pbm_A;
1245		sabre_bus->sysdata = pbm;
1246		pbm->pci_bus = sabre_bus;
1247		pci_fill_in_pbm_cookies(sabre_bus, pbm, pbm->prom_node);
1248		pci_record_assignments(pbm, sabre_bus);
1249		pci_assign_unassigned(pbm, sabre_bus);
1250		pci_fixup_irq(pbm, sabre_bus);
1251		pci_determine_66mhz_disposition(pbm, sabre_bus);
1252		pci_setup_busmastering(pbm, sabre_bus);
1253	}
1254
1255	sabre_register_error_handlers(p);
1256}
1257
/* Initialize the SABRE IOMMU: record register offsets in the shared
 * pci_iommu, invalidate the 16 hardware TLB entries via diagnostic
 * mode, allocate the dummy page and TSB, then enable translation
 * with the requested TSB size (64K or 128K entries) over the DVMA
 * window starting at DVMA_OFFSET.  Allocation failures are fatal.
 */
1258static void __init sabre_iommu_init(struct pci_controller_info *p,
1259				    int tsbsize, unsigned long dvma_offset,
1260				    u32 dma_mask)
1261{
1262	struct pci_iommu *iommu = p->pbm_A.iommu;
1263	unsigned long tsbbase, i, order;
1264	u64 control;
1265
1266	/* Setup initial software IOMMU state. */
1267	spin_lock_init(&iommu->lock);
1268	iommu->iommu_cur_ctx = 0;
1269
1270	/* Register addresses. */
1271	iommu->iommu_control  = p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL;
1272	iommu->iommu_tsbbase  = p->pbm_A.controller_regs + SABRE_IOMMU_TSBBASE;
1273	iommu->iommu_flush    = p->pbm_A.controller_regs + SABRE_IOMMU_FLUSH;
1274	iommu->write_complete_reg = p->pbm_A.controller_regs + SABRE_WRSYNC;
1275	/* Sabre's IOMMU lacks ctx flushing. */
1276	iommu->iommu_ctxflush = 0;
1277
1278	/* Invalidate TLB Entries. */
1279	control = sabre_read(p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL);
1280	control |= SABRE_IOMMUCTRL_DENAB;
1281	sabre_write(p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL, control);
1282
1283	for(i = 0; i < 16; i++) {
1284		sabre_write(p->pbm_A.controller_regs + SABRE_IOMMU_TAG + (i * 8UL), 0);
1285		sabre_write(p->pbm_A.controller_regs + SABRE_IOMMU_DATA + (i * 8UL), 0);
1286	}
1287
1288	/* Leave diag mode enabled for full-flushing done
1289	 * in pci_iommu.c
1290	 */
1291
1292	iommu->dummy_page = __get_free_pages(GFP_KERNEL, 0);
	/* NOTE(review): the failure message below says "PSYCHO_IOMMU"
	 * inside the SABRE driver -- looks like a copy/paste leftover;
	 * confirm before relying on it when grepping logs.
	 */
1293	if (!iommu->dummy_page) {
1294		prom_printf("PSYCHO_IOMMU: Error, gfp(dummy_page) failed.\n");
1295		prom_halt();
1296	}
1297	memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
1298	iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);
1299
	/* TSB entries are 8 bytes; tsbsize is in units of 1K entries. */
1300	tsbbase = __get_free_pages(GFP_KERNEL, order = get_order(tsbsize * 1024 * 8));
1301	if (!tsbbase) {
1302		prom_printf("SABRE_IOMMU: Error, gfp(tsb) failed.\n");
1303		prom_halt();
1304	}
1305	iommu->page_table = (iopte_t *)tsbbase;
1306	iommu->page_table_map_base = dvma_offset;
1307	iommu->dma_addr_mask = dma_mask;
1308	pci_iommu_table_init(iommu, PAGE_SIZE << order);
1309
1310	sabre_write(p->pbm_A.controller_regs + SABRE_IOMMU_TSBBASE, __pa(tsbbase));
1311
1312	control = sabre_read(p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL);
1313	control &= ~(SABRE_IOMMUCTRL_TSBSZ | SABRE_IOMMUCTRL_TBWSZ);
1314	control |= SABRE_IOMMUCTRL_ENAB;
1315	switch(tsbsize) {
1316	case 64:
1317		control |= SABRE_IOMMU_TSBSZ_64K;
1318		iommu->page_table_sz_bits = 16;
1319		break;
1320	case 128:
1321		control |= SABRE_IOMMU_TSBSZ_128K;
1322		iommu->page_table_sz_bits = 17;
1323		break;
1324	default:
1325		prom_printf("iommu_init: Illegal TSB size %d\n", tsbsize);
1326		prom_halt();
1327		break;
1328	}
1329	sabre_write(p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL, control);
1330
1331	/* We start with no consistent mappings. */
1332	iommu->lowest_consistent_map =
1333		1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS);
1334
1335	for (i = 0; i < PBM_NCLUSTERS; i++) {
1336		iommu->alloc_info[i].flush = 0;
1337		iommu->alloc_info[i].next = 0;
1338	}
1339}
1340
1341static void __init pbm_register_toplevel_resources(struct pci_controller_info *p,
1342 struct pci_pbm_info *pbm)
1343{
1344 char *name = pbm->name;
1345 unsigned long ibase = p->pbm_A.controller_regs + SABRE_IOSPACE;
1346 unsigned long mbase = p->pbm_A.controller_regs + SABRE_MEMSPACE;
1347 unsigned int devfn;
1348 unsigned long first, last, i;
1349 u8 *addr, map;
1350
1351 sprintf(name, "SABRE%d PBM%c",
1352 p->index,
1353 (pbm == &p->pbm_A ? 'A' : 'B'));
1354 pbm->io_space.name = pbm->mem_space.name = name;
1355
1356 devfn = PCI_DEVFN(1, (pbm == &p->pbm_A) ? 0 : 1);
1357 addr = sabre_pci_config_mkaddr(pbm, 0, devfn, APB_IO_ADDRESS_MAP);
1358 map = 0;
1359 pci_config_read8(addr, &map);
1360
1361 first = 8;
1362 last = 0;
1363 for (i = 0; i < 8; i++) {
1364 if ((map & (1 << i)) != 0) {
1365 if (first > i)
1366 first = i;
1367 if (last < i)
1368 last = i;
1369 }
1370 }
1371 pbm->io_space.start = ibase + (first << 21UL);
1372 pbm->io_space.end = ibase + (last << 21UL) + ((1 << 21UL) - 1);
1373 pbm->io_space.flags = IORESOURCE_IO;
1374
1375 addr = sabre_pci_config_mkaddr(pbm, 0, devfn, APB_MEM_ADDRESS_MAP);
1376 map = 0;
1377 pci_config_read8(addr, &map);
1378
1379 first = 8;
1380 last = 0;
1381 for (i = 0; i < 8; i++) {
1382 if ((map & (1 << i)) != 0) {
1383 if (first > i)
1384 first = i;
1385 if (last < i)
1386 last = i;
1387 }
1388 }
1389 pbm->mem_space.start = mbase + (first << 29UL);
1390 pbm->mem_space.end = mbase + (last << 29UL) + ((1 << 29UL) - 1);
1391 pbm->mem_space.flags = IORESOURCE_MEM;
1392
1393 if (request_resource(&ioport_resource, &pbm->io_space) < 0) {
1394 prom_printf("Cannot register PBM-%c's IO space.\n",
1395 (pbm == &p->pbm_A ? 'A' : 'B'));
1396 prom_halt();
1397 }
1398 if (request_resource(&iomem_resource, &pbm->mem_space) < 0) {
1399 prom_printf("Cannot register PBM-%c's MEM space.\n",
1400 (pbm == &p->pbm_A ? 'A' : 'B'));
1401 prom_halt();
1402 }
1403
1404 /* Register legacy regions if this PBM covers that area. */
1405 if (pbm->io_space.start == ibase &&
1406 pbm->mem_space.start == mbase)
1407 pci_register_legacy_regions(&pbm->io_space,
1408 &pbm->mem_space);
1409}
1410
/* Discover the APB ("SUNW,simba") PBM nodes beneath the SABRE PROM
 * node and populate each pci_pbm_info from the PROM properties
 * (bus-range, ranges, interrupt-map and interrupt-map-mask).  If no
 * simba nodes are found this is a Hummingbird system: a single PBM
 * is synthesized directly from the SABRE node, with hacked-up
 * top-level IO/MEM resources covering the whole space up to the
 * start of the DVMA window (dma_begin).
 */
1411static void __init sabre_pbm_init(struct pci_controller_info *p, int sabre_node, u32 dma_begin)
1412{
1413	struct pci_pbm_info *pbm;
1414	char namebuf[128];
1415	u32 busrange[2];
1416	int node, simbas_found;
1417
1418	simbas_found = 0;
1419	node = prom_getchild(sabre_node);
1420	while ((node = prom_searchsiblings(node, "pci")) != 0) {
1421		int err;
1422
1423		err = prom_getproperty(node, "model", namebuf, sizeof(namebuf));
1424		if ((err <= 0) || strncmp(namebuf, "SUNW,simba", err))
1425			goto next_pci;
1426
1427		err = prom_getproperty(node, "bus-range",
1428				       (char *)&busrange[0], sizeof(busrange));
1429		if (err == 0 || err == -1) {
1430			prom_printf("APB: Error, cannot get PCI bus-range.\n");
1431			prom_halt();
1432		}
1433
1434		simbas_found++;
		/* A simba whose first bus is 1 is PBM B; otherwise A. */
1435		if (busrange[0] == 1)
1436			pbm = &p->pbm_B;
1437		else
1438			pbm = &p->pbm_A;
1439		pbm->chip_type = PBM_CHIP_TYPE_SABRE;
1440		pbm->parent = p;
1441		pbm->prom_node = node;
1442		pbm->pci_first_slot = 1;
1443		pbm->pci_first_busno = busrange[0];
1444		pbm->pci_last_busno = busrange[1];
1445
1446		prom_getstring(node, "name", pbm->prom_name, sizeof(pbm->prom_name));
1447		err = prom_getproperty(node, "ranges",
1448				       (char *)pbm->pbm_ranges,
1449				       sizeof(pbm->pbm_ranges));
1450		if (err != -1)
1451			pbm->num_pbm_ranges =
1452				(err / sizeof(struct linux_prom_pci_ranges));
1453		else
1454			pbm->num_pbm_ranges = 0;
1455
1456		err = prom_getproperty(node, "interrupt-map",
1457				       (char *)pbm->pbm_intmap,
1458				       sizeof(pbm->pbm_intmap));
1459		if (err != -1) {
1460			pbm->num_pbm_intmap = (err / sizeof(struct linux_prom_pci_intmap));
			/* An interrupt-map without its mask is unusable. */
1461			err = prom_getproperty(node, "interrupt-map-mask",
1462					       (char *)&pbm->pbm_intmask,
1463					       sizeof(pbm->pbm_intmask));
1464			if (err == -1) {
1465				prom_printf("APB: Fatal error, no interrupt-map-mask.\n");
1466				prom_halt();
1467			}
1468		} else {
1469			pbm->num_pbm_intmap = 0;
1470			memset(&pbm->pbm_intmask, 0, sizeof(pbm->pbm_intmask));
1471		}
1472
1473		pbm_register_toplevel_resources(p, pbm);
1474
1475	next_pci:
1476		node = prom_getsibling(node);
1477		if (!node)
1478			break;
1479	}
1480	if (simbas_found == 0) {
1481		int err;
1482
1483		/* No APBs underneath, probably this is a hummingbird
1484		 * system.
1485		 */
1486		pbm = &p->pbm_A;
1487		pbm->parent = p;
1488		pbm->prom_node = sabre_node;
1489		pbm->pci_first_busno = p->pci_first_busno;
1490		pbm->pci_last_busno = p->pci_last_busno;
1491
1492		prom_getstring(sabre_node, "name", pbm->prom_name, sizeof(pbm->prom_name));
1493		err = prom_getproperty(sabre_node, "ranges",
1494				       (char *) pbm->pbm_ranges,
1495				       sizeof(pbm->pbm_ranges));
1496		if (err != -1)
1497			pbm->num_pbm_ranges =
1498				(err / sizeof(struct linux_prom_pci_ranges));
1499		else
1500			pbm->num_pbm_ranges = 0;
1501
1502		err = prom_getproperty(sabre_node, "interrupt-map",
1503				       (char *) pbm->pbm_intmap,
1504				       sizeof(pbm->pbm_intmap));
1505
1506		if (err != -1) {
1507			pbm->num_pbm_intmap = (err / sizeof(struct linux_prom_pci_intmap));
1508			err = prom_getproperty(sabre_node, "interrupt-map-mask",
1509					       (char *)&pbm->pbm_intmask,
1510					       sizeof(pbm->pbm_intmask));
1511			if (err == -1) {
1512				prom_printf("Hummingbird: Fatal error, no interrupt-map-mask.\n");
1513				prom_halt();
1514			}
1515		} else {
1516			pbm->num_pbm_intmap = 0;
1517			memset(&pbm->pbm_intmask, 0, sizeof(pbm->pbm_intmask));
1518		}
1519
1520
1521		sprintf(pbm->name, "SABRE%d PBM%c", p->index,
1522			(pbm == &p->pbm_A ? 'A' : 'B'));
1523		pbm->io_space.name = pbm->mem_space.name = pbm->name;
1524
1525		/* Hack up top-level resources. */
1526		pbm->io_space.start = p->pbm_A.controller_regs + SABRE_IOSPACE;
1527		pbm->io_space.end = pbm->io_space.start + (1UL << 24) - 1UL;
1528		pbm->io_space.flags = IORESOURCE_IO;
1529
1530		pbm->mem_space.start = p->pbm_A.controller_regs + SABRE_MEMSPACE;
1531		pbm->mem_space.end = pbm->mem_space.start + (unsigned long)dma_begin - 1UL;
1532		pbm->mem_space.flags = IORESOURCE_MEM;
1533
1534		if (request_resource(&ioport_resource, &pbm->io_space) < 0) {
1535			prom_printf("Cannot register Hummingbird's IO space.\n");
1536			prom_halt();
1537		}
1538		if (request_resource(&iomem_resource, &pbm->mem_space) < 0) {
1539			prom_printf("Cannot register Hummingbird's MEM space.\n");
1540			prom_halt();
1541		}
1542
1543		pci_register_legacy_regions(&pbm->io_space,
1544					    &pbm->mem_space);
1545	}
1546}
1547
/* Top-level SABRE probe entry point, called with the PROM node of a
 * detected SABRE host bridge.  Detects Hummingbird variants, sets up
 * controller and IOMMU state, maps the register and config spaces,
 * clears pending interrupts, sizes the IOMMU from the "virtual-dma"
 * property, and finally probes the PBMs (APBs) underneath.  All
 * failures are fatal and halt via the PROM.
 */
1548void __init sabre_init(int pnode, char *model_name)
1549{
1550	struct linux_prom64_registers pr_regs[2];
1551	struct pci_controller_info *p;
1552	struct pci_iommu *iommu;
1553	int tsbsize, err;
1554	u32 busrange[2];
1555	u32 vdma[2];
1556	u32 upa_portid, dma_mask;
1557	u64 clear_irq;
1558
	/* Hummingbird is flagged by model name, by the "compatible"
	 * property, or as a last resort by the CPU being an
	 * UltraSPARC-IIe.
	 */
1559	hummingbird_p = 0;
1560	if (!strcmp(model_name, "pci108e,a001"))
1561		hummingbird_p = 1;
1562	else if (!strcmp(model_name, "SUNW,sabre")) {
1563		char compat[64];
1564
1565		if (prom_getproperty(pnode, "compatible",
1566				     compat, sizeof(compat)) > 0 &&
1567		    !strcmp(compat, "pci108e,a001")) {
1568			hummingbird_p = 1;
1569		} else {
1570			int cpu_node;
1571
1572			/* Of course, Sun has to encode things a thousand
1573			 * different ways, inconsistently.
1574			 */
1575			cpu_find_by_instance(0, &cpu_node, NULL);
1576			if (prom_getproperty(cpu_node, "name",
1577					     compat, sizeof(compat)) > 0 &&
1578			    !strcmp(compat, "SUNW,UltraSPARC-IIe"))
1579				hummingbird_p = 1;
1580		}
1581	}
1582
1583	p = kmalloc(sizeof(*p), GFP_ATOMIC);
1584	if (!p) {
1585		prom_printf("SABRE: Error, kmalloc(pci_controller_info) failed.\n");
1586		prom_halt();
1587	}
1588	memset(p, 0, sizeof(*p));
1589
	/* Both PBMs share the single SABRE IOMMU. */
1590	iommu = kmalloc(sizeof(*iommu), GFP_ATOMIC);
1591	if (!iommu) {
1592		prom_printf("SABRE: Error, kmalloc(pci_iommu) failed.\n");
1593		prom_halt();
1594	}
1595	memset(iommu, 0, sizeof(*iommu));
1596	p->pbm_A.iommu = p->pbm_B.iommu = iommu;
1597
1598	upa_portid = prom_getintdefault(pnode, "upa-portid", 0xff);
1599
1600	p->next = pci_controller_root;
1601	pci_controller_root = p;
1602
1603	p->pbm_A.portid = upa_portid;
1604	p->pbm_B.portid = upa_portid;
1605	p->index = pci_num_controllers++;
1606	p->pbms_same_domain = 1;
1607	p->scan_bus = sabre_scan_bus;
1608	p->irq_build = sabre_irq_build;
1609	p->base_address_update = sabre_base_address_update;
1610	p->resource_adjust = sabre_resource_adjust;
1611	p->pci_ops = &sabre_ops;
1612
1613	/*
1614	 * Map in SABRE register set and report the presence of this SABRE.
1615	 */
1616	err = prom_getproperty(pnode, "reg",
1617			       (char *)&pr_regs[0], sizeof(pr_regs));
1618	if(err == 0 || err == -1) {
1619		prom_printf("SABRE: Error, cannot get U2P registers "
1620			    "from PROM.\n");
1621		prom_halt();
1622	}
1623
1624	/*
1625	 * First REG in property is base of entire SABRE register space.
1626	 */
1627	p->pbm_A.controller_regs = pr_regs[0].phys_addr;
1628	p->pbm_B.controller_regs = pr_regs[0].phys_addr;
1629	pci_dma_wsync = p->pbm_A.controller_regs + SABRE_WRSYNC;
1630
1631	printk("PCI: Found SABRE, main regs at %016lx, wsync at %016lx\n",
1632	       p->pbm_A.controller_regs, pci_dma_wsync);
1633
1634	/* Clear interrupts */
1635
1636	/* PCI first */
1637	for (clear_irq = SABRE_ICLR_A_SLOT0; clear_irq < SABRE_ICLR_B_SLOT0 + 0x80; clear_irq += 8)
1638		sabre_write(p->pbm_A.controller_regs + clear_irq, 0x0UL);
1639
1640	/* Then OBIO */
1641	for (clear_irq = SABRE_ICLR_SCSI; clear_irq < SABRE_ICLR_SCSI + 0x80; clear_irq += 8)
1642		sabre_write(p->pbm_A.controller_regs + clear_irq, 0x0UL);
1643
1644	/* Error interrupts are enabled later after the bus scan. */
1645	sabre_write(p->pbm_A.controller_regs + SABRE_PCICTRL,
1646		    (SABRE_PCICTRL_MRLEN | SABRE_PCICTRL_SERR |
1647		     SABRE_PCICTRL_ARBPARK | SABRE_PCICTRL_AEN));
1648
1649	/* Now map in PCI config space for entire SABRE. */
1650	p->pbm_A.config_space = p->pbm_B.config_space =
1651		(p->pbm_A.controller_regs + SABRE_CONFIGSPACE);
1652	printk("SABRE: Shared PCI config space at %016lx\n",
1653	       p->pbm_A.config_space);
1654
1655	err = prom_getproperty(pnode, "virtual-dma",
1656			       (char *)&vdma[0], sizeof(vdma));
1657	if(err == 0 || err == -1) {
1658		prom_printf("SABRE: Error, cannot get virtual-dma property "
1659			    "from PROM.\n");
1660		prom_halt();
1661	}
1662
	/* Size the IOMMU TSB from the DVMA window size; vdma[0] is
	 * the window base, vdma[1] its length.
	 */
1663	dma_mask = vdma[0];
1664	switch(vdma[1]) {
1665		case 0x20000000:
1666			dma_mask |= 0x1fffffff;
1667			tsbsize = 64;
1668			break;
1669		case 0x40000000:
1670			dma_mask |= 0x3fffffff;
1671			tsbsize = 128;
1672			break;
1673
1674		case 0x80000000:
1675			dma_mask |= 0x7fffffff;
1676			tsbsize = 128;
1677			break;
1678		default:
1679			prom_printf("SABRE: strange virtual-dma size.\n");
1680			prom_halt();
1681	}
1682
1683	sabre_iommu_init(p, tsbsize, vdma[0], dma_mask);
1684
1685	printk("SABRE: DVMA at %08x [%08x]\n", vdma[0], vdma[1]);
1686
1687	err = prom_getproperty(pnode, "bus-range",
1688			       (char *)&busrange[0], sizeof(busrange));
1689	if(err == 0 || err == -1) {
1690		prom_printf("SABRE: Error, cannot get PCI bus-range "
1691			    " from PROM.\n");
1692		prom_halt();
1693	}
1694
1695	p->pci_first_busno = busrange[0];
1696	p->pci_last_busno = busrange[1];
1697
1698	/*
1699	 * Look for APB underneath.
1700	 */
1701	sabre_pbm_init(p, pnode, vdma[0]);
1702}
diff --git a/arch/sparc64/kernel/pci_schizo.c b/arch/sparc64/kernel/pci_schizo.c
new file mode 100644
index 000000000000..e93fcadc3722
--- /dev/null
+++ b/arch/sparc64/kernel/pci_schizo.c
@@ -0,0 +1,2187 @@
1/* $Id: pci_schizo.c,v 1.24 2002/01/23 11:27:32 davem Exp $
2 * pci_schizo.c: SCHIZO/TOMATILLO specific PCI controller support.
3 *
4 * Copyright (C) 2001, 2002, 2003 David S. Miller (davem@redhat.com)
5 */
6
7#include <linux/kernel.h>
8#include <linux/types.h>
9#include <linux/pci.h>
10#include <linux/init.h>
11#include <linux/slab.h>
12#include <linux/interrupt.h>
13
14#include <asm/pbm.h>
15#include <asm/iommu.h>
16#include <asm/irq.h>
17#include <asm/upa.h>
18
19#include "pci_impl.h"
20#include "iommu_common.h"
21
22/* All SCHIZO registers are 64-bits.  The following accessor
23 * routines are how they are accessed.  The REG parameter
24 * is a physical address.
25 */
/* 64-bit load through the physical-address-bypass ASI; the "memory"
 * clobber keeps the compiler from reordering register accesses.
 */
26#define schizo_read(__reg) \
27({	u64 __ret; \
28	__asm__ __volatile__("ldxa [%1] %2, %0" \
29			     : "=r" (__ret) \
30			     : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
31			     : "memory"); \
32	__ret; \
33})
/* 64-bit store through the physical-address-bypass ASI. */
34#define schizo_write(__reg, __val) \
35	__asm__ __volatile__("stxa %0, [%1] %2" \
36			     : /* no outputs */ \
37			     : "r" (__val), "r" (__reg), \
38			       "i" (ASI_PHYS_BYPASS_EC_E) \
39			     : "memory")
40
41/* This is a convention that at least Excalibur and Merlin
42 * follow. I suppose the SCHIZO used in Starcat and friends
43 * will do similar.
44 *
45 * The only way I could see this changing is if the newlink
46 * block requires more space in Schizo's address space than
47 * they predicted, thus requiring an address space reorg when
48 * the newer Schizo is taped out.
49 */
50
51/* Streaming buffer control register. */
52#define SCHIZO_STRBUF_CTRL_LPTR 0x00000000000000f0UL /* LRU Lock Pointer */
53#define SCHIZO_STRBUF_CTRL_LENAB 0x0000000000000008UL /* LRU Lock Enable */
54#define SCHIZO_STRBUF_CTRL_RRDIS 0x0000000000000004UL /* Rerun Disable */
55#define SCHIZO_STRBUF_CTRL_DENAB 0x0000000000000002UL /* Diagnostic Mode Enable */
56#define SCHIZO_STRBUF_CTRL_ENAB 0x0000000000000001UL /* Streaming Buffer Enable */
57
58/* IOMMU control register. */
59#define SCHIZO_IOMMU_CTRL_RESV 0xfffffffff9000000UL /* Reserved */
60#define SCHIZO_IOMMU_CTRL_XLTESTAT 0x0000000006000000UL /* Translation Error Status */
61#define SCHIZO_IOMMU_CTRL_XLTEERR 0x0000000001000000UL /* Translation Error encountered */
62#define SCHIZO_IOMMU_CTRL_LCKEN 0x0000000000800000UL /* Enable translation locking */
63#define SCHIZO_IOMMU_CTRL_LCKPTR 0x0000000000780000UL /* Translation lock pointer */
64#define SCHIZO_IOMMU_CTRL_TSBSZ 0x0000000000070000UL /* TSB Size */
65#define SCHIZO_IOMMU_TSBSZ_1K 0x0000000000000000UL /* TSB Table 1024 8-byte entries */
66#define SCHIZO_IOMMU_TSBSZ_2K 0x0000000000010000UL /* TSB Table 2048 8-byte entries */
67#define SCHIZO_IOMMU_TSBSZ_4K 0x0000000000020000UL /* TSB Table 4096 8-byte entries */
68#define SCHIZO_IOMMU_TSBSZ_8K 0x0000000000030000UL /* TSB Table 8192 8-byte entries */
69#define SCHIZO_IOMMU_TSBSZ_16K 0x0000000000040000UL /* TSB Table 16k 8-byte entries */
70#define SCHIZO_IOMMU_TSBSZ_32K 0x0000000000050000UL /* TSB Table 32k 8-byte entries */
71#define SCHIZO_IOMMU_TSBSZ_64K 0x0000000000060000UL /* TSB Table 64k 8-byte entries */
72#define SCHIZO_IOMMU_TSBSZ_128K 0x0000000000070000UL /* TSB Table 128k 8-byte entries */
73#define SCHIZO_IOMMU_CTRL_RESV2 0x000000000000fff8UL /* Reserved */
74#define SCHIZO_IOMMU_CTRL_TBWSZ 0x0000000000000004UL /* Assumed page size, 0=8k 1=64k */
75#define SCHIZO_IOMMU_CTRL_DENAB 0x0000000000000002UL /* Diagnostic mode enable */
76#define SCHIZO_IOMMU_CTRL_ENAB 0x0000000000000001UL /* IOMMU Enable */
77
78/* Schizo config space address format is nearly identical to
79 * that of PSYCHO:
80 *
81 * 32 24 23 16 15 11 10 8 7 2 1 0
82 * ---------------------------------------------------------
83 * |0 0 0 0 0 0 0 0 0| bus | device | function | reg | 0 0 |
84 * ---------------------------------------------------------
85 */
86#define SCHIZO_CONFIG_BASE(PBM) ((PBM)->config_space)
87#define SCHIZO_CONFIG_ENCODE(BUS, DEVFN, REG) \
88 (((unsigned long)(BUS) << 16) | \
89 ((unsigned long)(DEVFN) << 8) | \
90 ((unsigned long)(REG)))
91
92static void *schizo_pci_config_mkaddr(struct pci_pbm_info *pbm,
93 unsigned char bus,
94 unsigned int devfn,
95 int where)
96{
97 if (!pbm)
98 return NULL;
99 bus -= pbm->pci_first_busno;
100 return (void *)
101 (SCHIZO_CONFIG_BASE(pbm) |
102 SCHIZO_CONFIG_ENCODE(bus, devfn, where));
103}
104
105/* Just make sure the bus number is in range. */
106static int schizo_out_of_range(struct pci_pbm_info *pbm,
107 unsigned char bus,
108 unsigned char devfn)
109{
110 if (bus < pbm->pci_first_busno ||
111 bus > pbm->pci_last_busno)
112 return 1;
113 return 0;
114}
115
116/* SCHIZO PCI configuration space accessors. */
117
/* Config-space read accessor hooked into the generic PCI layer.
 * On any rejected access (no address, bus out of range, misaligned
 * register offset) the all-ones default stored below is returned
 * together with PCIBIOS_SUCCESSFUL, mimicking a PCI master abort.
 */
118static int schizo_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
119			       int where, int size, u32 *value)
120{
121	struct pci_pbm_info *pbm = bus_dev->sysdata;
122	unsigned char bus = bus_dev->number;
123	u32 *addr;
124	u16 tmp16;
125	u8 tmp8;
126
	/* Pre-load the all-ones "no device" value for SIZE bytes. */
127	switch (size) {
128	case 1:
129		*value = 0xff;
130		break;
131	case 2:
132		*value = 0xffff;
133		break;
134	case 4:
135		*value = 0xffffffff;
136		break;
137	}
138
139	addr = schizo_pci_config_mkaddr(pbm, bus, devfn, where);
140	if (!addr)
141		return PCIBIOS_SUCCESSFUL;
142
143	if (schizo_out_of_range(pbm, bus, devfn))
144		return PCIBIOS_SUCCESSFUL;
145	switch (size) {
146	case 1:
147		pci_config_read8((u8 *)addr, &tmp8);
148		*value = tmp8;
149		break;
150
151	case 2:
152		if (where & 0x01) {
153			printk("pci_read_config_word: misaligned reg [%x]\n",
154			       where);
155			return PCIBIOS_SUCCESSFUL;
156		}
157		pci_config_read16((u16 *)addr, &tmp16);
158		*value = tmp16;
159		break;
160
161	case 4:
162		if (where & 0x03) {
163			printk("pci_read_config_dword: misaligned reg [%x]\n",
164			       where);
165			return PCIBIOS_SUCCESSFUL;
166		}
167		pci_config_read32(addr, value);
168		break;
169	}
170	return PCIBIOS_SUCCESSFUL;
171}
172
/* Config-space write accessor hooked into the generic PCI layer.
 * Rejected accesses (no address, bus out of range, misaligned
 * register offset) are silently dropped and still report
 * PCIBIOS_SUCCESSFUL, per PCI convention.
 */
173static int schizo_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
174				int where, int size, u32 value)
175{
176	struct pci_pbm_info *pbm = bus_dev->sysdata;
177	unsigned char bus = bus_dev->number;
178	u32 *addr;
179
180	addr = schizo_pci_config_mkaddr(pbm, bus, devfn, where);
181	if (!addr)
182		return PCIBIOS_SUCCESSFUL;
183
184	if (schizo_out_of_range(pbm, bus, devfn))
185		return PCIBIOS_SUCCESSFUL;
186
187	switch (size) {
188	case 1:
189		pci_config_write8((u8 *)addr, value);
190		break;
191
192	case 2:
193		if (where & 0x01) {
194			printk("pci_write_config_word: misaligned reg [%x]\n",
195			       where);
196			return PCIBIOS_SUCCESSFUL;
197		}
198		pci_config_write16((u16 *)addr, value);
199		break;
200
201	case 4:
202		if (where & 0x03) {
203			printk("pci_write_config_dword: misaligned reg [%x]\n",
204			       where);
205			return PCIBIOS_SUCCESSFUL;
206		}
207
208		pci_config_write32(addr, value);
209	}
210	return PCIBIOS_SUCCESSFUL;
211}
212
/* Config accessors handed to the generic PCI layer for SCHIZO buses. */
213static struct pci_ops schizo_ops = {
214	.read =		schizo_read_pci_cfg,
215	.write =	schizo_write_pci_cfg,
216};
217
218/* SCHIZO interrupt mapping support. Unlike Psycho, for this controller the
219 * imap/iclr registers are per-PBM.
220 */
221#define SCHIZO_IMAP_BASE 0x1000UL
222#define SCHIZO_ICLR_BASE 0x1400UL
223
224static unsigned long schizo_imap_offset(unsigned long ino)
225{
226 return SCHIZO_IMAP_BASE + (ino * 8UL);
227}
228
229static unsigned long schizo_iclr_offset(unsigned long ino)
230{
231 return SCHIZO_ICLR_BASE + (ino * 8UL);
232}
233
/* PCI SCHIZO INO number to Sparc PIL level. This table only matters for
 * INOs which will not have an associated PCI device struct, ie. onboard
 * EBUS devices and PCI controller internal error interrupts.
 *
 * A zero entry means "no fixed PIL": schizo_ino_to_pil() will derive a
 * PIL from the PCI device's class code (or fall back to 4) instead.
 */
static unsigned char schizo_pil_table[] = {
/*0x00*/0, 0, 0, 0,	/* PCI slot 0  Int A, B, C, D	*/
/*0x04*/0, 0, 0, 0,	/* PCI slot 1  Int A, B, C, D	*/
/*0x08*/0, 0, 0, 0,	/* PCI slot 2  Int A, B, C, D	*/
/*0x0c*/0, 0, 0, 0,	/* PCI slot 3  Int A, B, C, D	*/
/*0x10*/0, 0, 0, 0,	/* PCI slot 4  Int A, B, C, D	*/
/*0x14*/0, 0, 0, 0,	/* PCI slot 5  Int A, B, C, D	*/
/*0x18*/4,		/* SCSI				*/
/*0x19*/4,		/* second SCSI			*/
/*0x1a*/0,		/* UNKNOWN			*/
/*0x1b*/0,		/* UNKNOWN			*/
/*0x1c*/8,		/* Parallel			*/
/*0x1d*/5,		/* Ethernet			*/
/*0x1e*/8,		/* Firewire-1394		*/
/*0x1f*/9,		/* USB				*/
/*0x20*/13,		/* Audio Record			*/
/*0x21*/14,		/* Audio Playback		*/
/*0x22*/12,		/* Serial			*/
/*0x23*/4,		/* EBUS I2C 			*/
/*0x24*/10,		/* RTC Clock			*/
/*0x25*/11,		/* Floppy			*/
/*0x26*/0,		/* UNKNOWN			*/
/*0x27*/0,		/* UNKNOWN			*/
/*0x28*/0,		/* UNKNOWN			*/
/*0x29*/0,		/* UNKNOWN			*/
/*0x2a*/10,		/* UPA 1			*/
/*0x2b*/10,		/* UPA 2			*/
/*0x2c*/0,		/* UNKNOWN			*/
/*0x2d*/0,		/* UNKNOWN			*/
/*0x2e*/0,		/* UNKNOWN			*/
/*0x2f*/0,		/* UNKNOWN			*/
/*0x30*/15,		/* Uncorrectable ECC		*/
/*0x31*/15,		/* Correctable ECC		*/
/*0x32*/15,		/* PCI Bus A Error		*/
/*0x33*/15,		/* PCI Bus B Error		*/
/*0x34*/15,		/* Safari Bus Error		*/
/*0x35*/0,		/* Reserved			*/
/*0x36*/0,		/* Reserved			*/
/*0x37*/0,		/* Reserved			*/
/*0x38*/0,		/* Reserved for NewLink		*/
/*0x39*/0,		/* Reserved for NewLink		*/
/*0x3a*/0,		/* Reserved for NewLink		*/
/*0x3b*/0,		/* Reserved for NewLink		*/
/*0x3c*/0,		/* Reserved for NewLink		*/
/*0x3d*/0,		/* Reserved for NewLink		*/
/*0x3e*/0,		/* Reserved for NewLink		*/
/*0x3f*/0,		/* Reserved for NewLink		*/
};
286
287static int __init schizo_ino_to_pil(struct pci_dev *pdev, unsigned int ino)
288{
289 int ret;
290
291 if (pdev &&
292 pdev->vendor == PCI_VENDOR_ID_SUN &&
293 pdev->device == PCI_DEVICE_ID_SUN_RIO_USB)
294 return 9;
295
296 ret = schizo_pil_table[ino];
297 if (ret == 0 && pdev == NULL) {
298 ret = 4;
299 } else if (ret == 0) {
300 switch ((pdev->class >> 16) & 0xff) {
301 case PCI_BASE_CLASS_STORAGE:
302 ret = 4;
303 break;
304
305 case PCI_BASE_CLASS_NETWORK:
306 ret = 6;
307 break;
308
309 case PCI_BASE_CLASS_DISPLAY:
310 ret = 9;
311 break;
312
313 case PCI_BASE_CLASS_MULTIMEDIA:
314 case PCI_BASE_CLASS_MEMORY:
315 case PCI_BASE_CLASS_BRIDGE:
316 case PCI_BASE_CLASS_SERIAL:
317 ret = 10;
318 break;
319
320 default:
321 ret = 4;
322 break;
323 };
324 }
325
326 return ret;
327}
328
/* Construct (or look up) the IRQ for INO @ino on @pbm, returning the
 * cookie produced by build_irq().  @pdev may be NULL for controller
 * internal interrupts; it is only consulted for the PIL choice.
 */
static unsigned int schizo_irq_build(struct pci_pbm_info *pbm,
				     struct pci_dev *pdev,
				     unsigned int ino)
{
	struct ino_bucket *bucket;
	unsigned long imap, iclr;
	unsigned long imap_off, iclr_off;
	int pil, ign_fixup;

	ino &= PCI_IRQ_INO;
	imap_off = schizo_imap_offset(ino);

	/* Now build the IRQ bucket. */
	pil = schizo_ino_to_pil(pdev, ino);

	if (PIL_RESERVED(pil))
		BUG();

	imap = pbm->pbm_regs + imap_off;
	/* +4: address the low 32 bits of the 64-bit register, which is
	 * what the 32-bit upa_readl/upa_writel accessors operate on.
	 */
	imap += 4;

	iclr_off = schizo_iclr_offset(ino);
	iclr = pbm->pbm_regs + iclr_off;
	iclr += 4;

	/* On Schizo, no inofixup occurs.  This is because each
	 * INO has it's own IMAP register.  On Psycho and Sabre
	 * there is only one IMAP register for each PCI slot even
	 * though four different INOs can be generated by each
	 * PCI slot.
	 *
	 * But, for JBUS variants (essentially, Tomatillo), we have
	 * to fixup the lowest bit of the interrupt group number.
	 */
	ign_fixup = 0;
	if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO) {
		if (pbm->portid & 1)
			ign_fixup = (1 << 6);
	}

	bucket = __bucket(build_irq(pil, ign_fixup, iclr, imap));
	/* Mark the bucket so generic IRQ code knows this is a PCI IRQ. */
	bucket->flags |= IBF_PCI;

	return __irq(bucket);
}
374
/* SCHIZO error handling support. */
enum schizo_error_type {
	UE_ERR, CE_ERR, PCI_ERR, SAFARI_ERR
};

/* Scratch buffers used when dumping streaming-cache diagnostic state;
 * shared across all controllers, hence guarded by stc_buf_lock.
 */
static DEFINE_SPINLOCK(stc_buf_lock);
static unsigned long stc_error_buf[128];
static unsigned long stc_tag_buf[16];
static unsigned long stc_line_buf[16];

/* INOs for the controller-internal error interrupts. */
#define SCHIZO_UE_INO		0x30 /* Uncorrectable ECC error */
#define SCHIZO_CE_INO		0x31 /* Correctable ECC error */
#define SCHIZO_PCIERR_A_INO	0x32 /* PBM A PCI bus error */
#define SCHIZO_PCIERR_B_INO	0x33 /* PBM B PCI bus error */
#define SCHIZO_SERR_INO		0x34 /* Safari interface error */
390
391struct pci_pbm_info *pbm_for_ino(struct pci_controller_info *p, u32 ino)
392{
393 ino &= IMAP_INO;
394 if (p->pbm_A.ino_bitmap & (1UL << ino))
395 return &p->pbm_A;
396 if (p->pbm_B.ino_bitmap & (1UL << ino))
397 return &p->pbm_B;
398
399 printk("PCI%d: No ino_bitmap entry for ino[%x], bitmaps "
400 "PBM_A[%016lx] PBM_B[%016lx]",
401 p->index, ino,
402 p->pbm_A.ino_bitmap,
403 p->pbm_B.ino_bitmap);
404 printk("PCI%d: Using PBM_A, report this problem immediately.\n",
405 p->index);
406
407 return &p->pbm_A;
408}
409
/* For chip-wide error interrupts (UE/CE/Safari), the same event is
 * signalled on both PBMs; after one PBM's handler has run we must also
 * idle the ICLR of the sibling PBM or the line stays asserted.
 */
static void schizo_clear_other_err_intr(struct pci_controller_info *p, int irq)
{
	struct pci_pbm_info *pbm;
	struct ino_bucket *bucket;
	unsigned long iclr;

	/* Do not clear the interrupt for the other PCI bus.
	 *
	 * This "ACK both PBM IRQs" only needs to be performed
	 * for chip-wide error interrupts.
	 */
	if ((irq & IMAP_INO) == SCHIZO_PCIERR_A_INO ||
	    (irq & IMAP_INO) == SCHIZO_PCIERR_B_INO)
		return;

	/* Flip to the PBM we did NOT just handle. */
	pbm = pbm_for_ino(p, irq);
	if (pbm == &p->pbm_A)
		pbm = &p->pbm_B;
	else
		pbm = &p->pbm_A;

	irq = schizo_irq_build(pbm, NULL,
			       (pbm->portid << 6) | (irq & IMAP_INO));
	bucket = __bucket(irq);
	iclr = bucket->iclr;

	/* Retire the interrupt on the sibling PBM. */
	upa_writel(ICLR_IDLE, iclr);
}
438
/* Streaming cache (STC) diagnostic register areas and field masks. */
#define SCHIZO_STC_ERR	0xb800UL /* --> 0xba00 */
#define SCHIZO_STC_TAG	0xba00UL /* --> 0xba80 */
#define SCHIZO_STC_LINE	0xbb00UL /* --> 0xbb80 */

#define SCHIZO_STCERR_WRITE	 0x2UL
#define SCHIZO_STCERR_READ	 0x1UL

#define SCHIZO_STCTAG_PPN	 0x3fffffff00000000UL
#define SCHIZO_STCTAG_VPN	 0x00000000ffffe000UL
#define SCHIZO_STCTAG_VALID	 0x8000000000000000UL
#define SCHIZO_STCTAG_READ	 0x4000000000000000UL

#define SCHIZO_STCLINE_LINDX	 0x0000000007800000UL
#define SCHIZO_STCLINE_SPTR	 0x000000000007e000UL
#define SCHIZO_STCLINE_LADDR	 0x0000000000001fc0UL
#define SCHIZO_STCLINE_EPTR	 0x000000000000003fUL
#define SCHIZO_STCLINE_VALID	 0x0000000000600000UL
#define SCHIZO_STCLINE_FOFN	 0x0000000000180000UL

/* Snapshot, clear, and print the streaming cache error state of one
 * PBM.  Takes stc_buf_lock to serialize use of the shared dump
 * buffers; the controller is briefly put into diagnostic mode while
 * the registers are sampled.
 */
static void __schizo_check_stc_error_pbm(struct pci_pbm_info *pbm,
					 enum schizo_error_type type)
{
	struct pci_strbuf *strbuf = &pbm->stc;
	unsigned long regbase = pbm->pbm_regs;
	unsigned long err_base, tag_base, line_base;
	u64 control;
	int i;

	err_base = regbase + SCHIZO_STC_ERR;
	tag_base = regbase + SCHIZO_STC_TAG;
	line_base = regbase + SCHIZO_STC_LINE;

	spin_lock(&stc_buf_lock);

	/* This is __REALLY__ dangerous.  When we put the
	 * streaming buffer into diagnostic mode to probe
	 * it's tags and error status, we _must_ clear all
	 * of the line tag valid bits before re-enabling
	 * the streaming buffer.  If any dirty data lives
	 * in the STC when we do this, we will end up
	 * invalidating it before it has a chance to reach
	 * main memory.
	 */
	control = schizo_read(strbuf->strbuf_control);
	schizo_write(strbuf->strbuf_control,
		     (control | SCHIZO_STRBUF_CTRL_DENAB));
	/* Sample-and-clear all 128 error entries... */
	for (i = 0; i < 128; i++) {
		unsigned long val;

		val = schizo_read(err_base + (i * 8UL));
		schizo_write(err_base + (i * 8UL), 0UL);
		stc_error_buf[i] = val;
	}
	/* ...and the 16 tag/line pairs. */
	for (i = 0; i < 16; i++) {
		stc_tag_buf[i] = schizo_read(tag_base + (i * 8UL));
		stc_line_buf[i] = schizo_read(line_base + (i * 8UL));
		schizo_write(tag_base + (i * 8UL), 0UL);
		schizo_write(line_base + (i * 8UL), 0UL);
	}

	/* OK, state is logged, exit diagnostic mode. */
	schizo_write(strbuf->strbuf_control, control);

	/* Each tag/line entry covers 8 consecutive error entries. */
	for (i = 0; i < 16; i++) {
		int j, saw_error, first, last;

		saw_error = 0;
		first = i * 8;
		last = first + 8;
		for (j = first; j < last; j++) {
			unsigned long errval = stc_error_buf[j];
			if (errval != 0) {
				saw_error++;
				printk("%s: STC_ERR(%d)[wr(%d)rd(%d)]\n",
				       pbm->name,
				       j,
				       (errval & SCHIZO_STCERR_WRITE) ? 1 : 0,
				       (errval & SCHIZO_STCERR_READ) ? 1 : 0);
			}
		}
		if (saw_error != 0) {
			unsigned long tagval = stc_tag_buf[i];
			unsigned long lineval = stc_line_buf[i];
			printk("%s: STC_TAG(%d)[PA(%016lx)VA(%08lx)V(%d)R(%d)]\n",
			       pbm->name,
			       i,
			       ((tagval & SCHIZO_STCTAG_PPN) >> 19UL),
			       (tagval & SCHIZO_STCTAG_VPN),
			       ((tagval & SCHIZO_STCTAG_VALID) ? 1 : 0),
			       ((tagval & SCHIZO_STCTAG_READ) ? 1 : 0));

			/* XXX Should spit out per-bank error information... -DaveM */
			printk("%s: STC_LINE(%d)[LIDX(%lx)SP(%lx)LADDR(%lx)EP(%lx)"
			       "V(%d)FOFN(%d)]\n",
			       pbm->name,
			       i,
			       ((lineval & SCHIZO_STCLINE_LINDX) >> 23UL),
			       ((lineval & SCHIZO_STCLINE_SPTR) >> 13UL),
			       ((lineval & SCHIZO_STCLINE_LADDR) >> 6UL),
			       ((lineval & SCHIZO_STCLINE_EPTR) >> 0UL),
			       ((lineval & SCHIZO_STCLINE_VALID) ? 1 : 0),
			       ((lineval & SCHIZO_STCLINE_FOFN) ? 1 : 0));
		}
	}

	spin_unlock(&stc_buf_lock);
}
546
547/* IOMMU is per-PBM in Schizo, so interrogate both for anonymous
548 * controller level errors.
549 */
550
/* IOMMU diagnostic register areas and TLB tag/data field masks. */
#define SCHIZO_IOMMU_TAG	0xa580UL
#define SCHIZO_IOMMU_DATA	0xa600UL

#define SCHIZO_IOMMU_TAG_CTXT	0x0000001ffe000000UL
#define SCHIZO_IOMMU_TAG_ERRSTS	0x0000000001800000UL
#define SCHIZO_IOMMU_TAG_ERR	0x0000000000400000UL
#define SCHIZO_IOMMU_TAG_WRITE	0x0000000000200000UL
#define SCHIZO_IOMMU_TAG_STREAM	0x0000000000100000UL
#define SCHIZO_IOMMU_TAG_SIZE	0x0000000000080000UL
#define SCHIZO_IOMMU_TAG_VPAGE	0x000000000007ffffUL

#define SCHIZO_IOMMU_DATA_VALID	0x0000000100000000UL
#define SCHIZO_IOMMU_DATA_CACHE	0x0000000040000000UL
#define SCHIZO_IOMMU_DATA_PPAGE	0x000000003fffffffUL

/* Check one PBM's IOMMU for a translation error; if found, dump the
 * 16 TLB entries via diagnostic mode and print any flagged ones.
 * Also triggers an STC dump when the streaming cache is enabled.
 */
static void schizo_check_iommu_error_pbm(struct pci_pbm_info *pbm,
					 enum schizo_error_type type)
{
	struct pci_iommu *iommu = pbm->iommu;
	unsigned long iommu_tag[16];
	unsigned long iommu_data[16];
	unsigned long flags;
	u64 control;
	int i;

	spin_lock_irqsave(&iommu->lock, flags);
	control = schizo_read(iommu->iommu_control);
	if (control & SCHIZO_IOMMU_CTRL_XLTEERR) {
		unsigned long base;
		char *type_string;

		/* Clear the error encountered bit. */
		control &= ~SCHIZO_IOMMU_CTRL_XLTEERR;
		schizo_write(iommu->iommu_control, control);

		switch((control & SCHIZO_IOMMU_CTRL_XLTESTAT) >> 25UL) {
		case 0:
			type_string = "Protection Error";
			break;
		case 1:
			type_string = "Invalid Error";
			break;
		case 2:
			type_string = "TimeOut Error";
			break;
		case 3:
		default:
			type_string = "ECC Error";
			break;
		};
		printk("%s: IOMMU Error, type[%s]\n",
		       pbm->name, type_string);

		/* Put the IOMMU into diagnostic mode and probe
		 * it's TLB for entries with error status.
		 *
		 * It is very possible for another DVMA to occur
		 * while we do this probe, and corrupt the system
		 * further.  But we are so screwed at this point
		 * that we are likely to crash hard anyways, so
		 * get as much diagnostic information to the
		 * console as we can.
		 */
		schizo_write(iommu->iommu_control,
			     control | SCHIZO_IOMMU_CTRL_DENAB);

		base = pbm->pbm_regs;

		/* Snapshot and clear all 16 TLB entries. */
		for (i = 0; i < 16; i++) {
			iommu_tag[i] =
				schizo_read(base + SCHIZO_IOMMU_TAG + (i * 8UL));
			iommu_data[i] =
				schizo_read(base + SCHIZO_IOMMU_DATA + (i * 8UL));

			/* Now clear out the entry. */
			schizo_write(base + SCHIZO_IOMMU_TAG + (i * 8UL), 0);
			schizo_write(base + SCHIZO_IOMMU_DATA + (i * 8UL), 0);
		}

		/* Leave diagnostic mode. */
		schizo_write(iommu->iommu_control, control);

		/* Report only the entries whose tag carries the error bit. */
		for (i = 0; i < 16; i++) {
			unsigned long tag, data;

			tag = iommu_tag[i];
			if (!(tag & SCHIZO_IOMMU_TAG_ERR))
				continue;

			data = iommu_data[i];
			switch((tag & SCHIZO_IOMMU_TAG_ERRSTS) >> 23UL) {
			case 0:
				type_string = "Protection Error";
				break;
			case 1:
				type_string = "Invalid Error";
				break;
			case 2:
				type_string = "TimeOut Error";
				break;
			case 3:
			default:
				type_string = "ECC Error";
				break;
			};
			printk("%s: IOMMU TAG(%d)[error(%s) ctx(%x) wr(%d) str(%d) "
			       "sz(%dK) vpg(%08lx)]\n",
			       pbm->name, i, type_string,
			       (int)((tag & SCHIZO_IOMMU_TAG_CTXT) >> 25UL),
			       ((tag & SCHIZO_IOMMU_TAG_WRITE) ? 1 : 0),
			       ((tag & SCHIZO_IOMMU_TAG_STREAM) ? 1 : 0),
			       ((tag & SCHIZO_IOMMU_TAG_SIZE) ? 64 : 8),
			       (tag & SCHIZO_IOMMU_TAG_VPAGE) << IOMMU_PAGE_SHIFT);
			printk("%s: IOMMU DATA(%d)[valid(%d) cache(%d) ppg(%016lx)]\n",
			       pbm->name, i,
			       ((data & SCHIZO_IOMMU_DATA_VALID) ? 1 : 0),
			       ((data & SCHIZO_IOMMU_DATA_CACHE) ? 1 : 0),
			       (data & SCHIZO_IOMMU_DATA_PPAGE) << IOMMU_PAGE_SHIFT);
		}
	}
	if (pbm->stc.strbuf_enabled)
		__schizo_check_stc_error_pbm(pbm, type);
	spin_unlock_irqrestore(&iommu->lock, flags);
}
675
/* The IOMMU is per-PBM on Schizo, so an anonymous controller-level
 * error requires interrogating both PBMs.
 */
static void schizo_check_iommu_error(struct pci_controller_info *p,
				     enum schizo_error_type type)
{
	schizo_check_iommu_error_pbm(&p->pbm_A, type);
	schizo_check_iommu_error_pbm(&p->pbm_B, type);
}
682
/* Uncorrectable ECC error status gathering. */
#define SCHIZO_UE_AFSR	0x10030UL
#define SCHIZO_UE_AFAR	0x10038UL

#define SCHIZO_UEAFSR_PPIO	0x8000000000000000UL /* Safari */
#define SCHIZO_UEAFSR_PDRD	0x4000000000000000UL /* Safari/Tomatillo */
#define SCHIZO_UEAFSR_PDWR	0x2000000000000000UL /* Safari */
#define SCHIZO_UEAFSR_SPIO	0x1000000000000000UL /* Safari */
#define SCHIZO_UEAFSR_SDMA	0x0800000000000000UL /* Safari/Tomatillo */
#define SCHIZO_UEAFSR_ERRPNDG	0x0300000000000000UL /* Safari */
#define SCHIZO_UEAFSR_BMSK	0x000003ff00000000UL /* Safari */
#define SCHIZO_UEAFSR_QOFF	0x00000000c0000000UL /* Safari/Tomatillo */
#define SCHIZO_UEAFSR_AID	0x000000001f000000UL /* Safari/Tomatillo */
#define SCHIZO_UEAFSR_PARTIAL	0x0000000000800000UL /* Safari */
#define SCHIZO_UEAFSR_OWNEDIN	0x0000000000400000UL /* Safari */
#define SCHIZO_UEAFSR_MTAGSYND	0x00000000000f0000UL /* Safari */
#define SCHIZO_UEAFSR_MTAG	0x000000000000e000UL /* Safari */
#define SCHIZO_UEAFSR_ECCSYND	0x00000000000001ffUL /* Safari */

/* Uncorrectable ECC error interrupt: latch AFSR/AFAR, log the details,
 * then interrogate the IOMMUs since a UE often corrupts DMA state.
 * Returns IRQ_NONE when no primary/secondary UE bits are pending.
 */
static irqreturn_t schizo_ue_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct pci_controller_info *p = dev_id;
	unsigned long afsr_reg = p->pbm_B.controller_regs + SCHIZO_UE_AFSR;
	unsigned long afar_reg = p->pbm_B.controller_regs + SCHIZO_UE_AFAR;
	unsigned long afsr, afar, error_bits;
	int reported, limit;

	/* Latch uncorrectable error status. */
	afar = schizo_read(afar_reg);

	/* If either of the error pending bits are set in the
	 * AFSR, the error status is being actively updated by
	 * the hardware and we must re-read to get a clean value.
	 */
	limit = 1000;
	do {
		afsr = schizo_read(afsr_reg);
	} while ((afsr & SCHIZO_UEAFSR_ERRPNDG) != 0 && --limit);

	/* Clear the primary/secondary error status bits. */
	error_bits = afsr &
		(SCHIZO_UEAFSR_PPIO | SCHIZO_UEAFSR_PDRD | SCHIZO_UEAFSR_PDWR |
		 SCHIZO_UEAFSR_SPIO | SCHIZO_UEAFSR_SDMA);
	if (!error_bits)
		return IRQ_NONE;
	/* Write-one-to-clear the bits we observed. */
	schizo_write(afsr_reg, error_bits);

	/* Log the error. */
	printk("PCI%d: Uncorrectable Error, primary error type[%s]\n",
	       p->index,
	       (((error_bits & SCHIZO_UEAFSR_PPIO) ?
		 "PIO" :
		 ((error_bits & SCHIZO_UEAFSR_PDRD) ?
		  "DMA Read" :
		  ((error_bits & SCHIZO_UEAFSR_PDWR) ?
		   "DMA Write" : "???")))));
	printk("PCI%d: bytemask[%04lx] qword_offset[%lx] SAFARI_AID[%02lx]\n",
	       p->index,
	       (afsr & SCHIZO_UEAFSR_BMSK) >> 32UL,
	       (afsr & SCHIZO_UEAFSR_QOFF) >> 30UL,
	       (afsr & SCHIZO_UEAFSR_AID) >> 24UL);
	printk("PCI%d: partial[%d] owned_in[%d] mtag[%lx] mtag_synd[%lx] ecc_sync[%lx]\n",
	       p->index,
	       (afsr & SCHIZO_UEAFSR_PARTIAL) ? 1 : 0,
	       (afsr & SCHIZO_UEAFSR_OWNEDIN) ? 1 : 0,
	       (afsr & SCHIZO_UEAFSR_MTAG) >> 13UL,
	       (afsr & SCHIZO_UEAFSR_MTAGSYND) >> 16UL,
	       (afsr & SCHIZO_UEAFSR_ECCSYND) >> 0UL);
	printk("PCI%d: UE AFAR [%016lx]\n", p->index, afar);
	printk("PCI%d: UE Secondary errors [", p->index);
	reported = 0;
	if (afsr & SCHIZO_UEAFSR_SPIO) {
		reported++;
		printk("(PIO)");
	}
	if (afsr & SCHIZO_UEAFSR_SDMA) {
		reported++;
		printk("(DMA)");
	}
	if (!reported)
		printk("(none)");
	printk("]\n");

	/* Interrogate IOMMU for error status. */
	schizo_check_iommu_error(p, UE_ERR);

	/* Chip-wide interrupt: also idle the sibling PBM's ICLR. */
	schizo_clear_other_err_intr(p, irq);

	return IRQ_HANDLED;
}
773
774#define SCHIZO_CE_AFSR 0x10040UL
775#define SCHIZO_CE_AFAR 0x10048UL
776
777#define SCHIZO_CEAFSR_PPIO 0x8000000000000000UL
778#define SCHIZO_CEAFSR_PDRD 0x4000000000000000UL
779#define SCHIZO_CEAFSR_PDWR 0x2000000000000000UL
780#define SCHIZO_CEAFSR_SPIO 0x1000000000000000UL
781#define SCHIZO_CEAFSR_SDMA 0x0800000000000000UL
782#define SCHIZO_CEAFSR_ERRPNDG 0x0300000000000000UL
783#define SCHIZO_CEAFSR_BMSK 0x000003ff00000000UL
784#define SCHIZO_CEAFSR_QOFF 0x00000000c0000000UL
785#define SCHIZO_CEAFSR_AID 0x000000001f000000UL
786#define SCHIZO_CEAFSR_PARTIAL 0x0000000000800000UL
787#define SCHIZO_CEAFSR_OWNEDIN 0x0000000000400000UL
788#define SCHIZO_CEAFSR_MTAGSYND 0x00000000000f0000UL
789#define SCHIZO_CEAFSR_MTAG 0x000000000000e000UL
790#define SCHIZO_CEAFSR_ECCSYND 0x00000000000001ffUL
791
792static irqreturn_t schizo_ce_intr(int irq, void *dev_id, struct pt_regs *regs)
793{
794 struct pci_controller_info *p = dev_id;
795 unsigned long afsr_reg = p->pbm_B.controller_regs + SCHIZO_CE_AFSR;
796 unsigned long afar_reg = p->pbm_B.controller_regs + SCHIZO_CE_AFAR;
797 unsigned long afsr, afar, error_bits;
798 int reported, limit;
799
800 /* Latch error status. */
801 afar = schizo_read(afar_reg);
802
803 /* If either of the error pending bits are set in the
804 * AFSR, the error status is being actively updated by
805 * the hardware and we must re-read to get a clean value.
806 */
807 limit = 1000;
808 do {
809 afsr = schizo_read(afsr_reg);
810 } while ((afsr & SCHIZO_UEAFSR_ERRPNDG) != 0 && --limit);
811
812 /* Clear primary/secondary error status bits. */
813 error_bits = afsr &
814 (SCHIZO_CEAFSR_PPIO | SCHIZO_CEAFSR_PDRD | SCHIZO_CEAFSR_PDWR |
815 SCHIZO_CEAFSR_SPIO | SCHIZO_CEAFSR_SDMA);
816 if (!error_bits)
817 return IRQ_NONE;
818 schizo_write(afsr_reg, error_bits);
819
820 /* Log the error. */
821 printk("PCI%d: Correctable Error, primary error type[%s]\n",
822 p->index,
823 (((error_bits & SCHIZO_CEAFSR_PPIO) ?
824 "PIO" :
825 ((error_bits & SCHIZO_CEAFSR_PDRD) ?
826 "DMA Read" :
827 ((error_bits & SCHIZO_CEAFSR_PDWR) ?
828 "DMA Write" : "???")))));
829
830 /* XXX Use syndrome and afar to print out module string just like
831 * XXX UDB CE trap handler does... -DaveM
832 */
833 printk("PCI%d: bytemask[%04lx] qword_offset[%lx] SAFARI_AID[%02lx]\n",
834 p->index,
835 (afsr & SCHIZO_UEAFSR_BMSK) >> 32UL,
836 (afsr & SCHIZO_UEAFSR_QOFF) >> 30UL,
837 (afsr & SCHIZO_UEAFSR_AID) >> 24UL);
838 printk("PCI%d: partial[%d] owned_in[%d] mtag[%lx] mtag_synd[%lx] ecc_sync[%lx]\n",
839 p->index,
840 (afsr & SCHIZO_UEAFSR_PARTIAL) ? 1 : 0,
841 (afsr & SCHIZO_UEAFSR_OWNEDIN) ? 1 : 0,
842 (afsr & SCHIZO_UEAFSR_MTAG) >> 13UL,
843 (afsr & SCHIZO_UEAFSR_MTAGSYND) >> 16UL,
844 (afsr & SCHIZO_UEAFSR_ECCSYND) >> 0UL);
845 printk("PCI%d: CE AFAR [%016lx]\n", p->index, afar);
846 printk("PCI%d: CE Secondary errors [", p->index);
847 reported = 0;
848 if (afsr & SCHIZO_CEAFSR_SPIO) {
849 reported++;
850 printk("(PIO)");
851 }
852 if (afsr & SCHIZO_CEAFSR_SDMA) {
853 reported++;
854 printk("(DMA)");
855 }
856 if (!reported)
857 printk("(none)");
858 printk("]\n");
859
860 schizo_clear_other_err_intr(p, irq);
861
862 return IRQ_HANDLED;
863}
864
/* PCI bus error AFSR/AFAR registers and their field masks. */
#define SCHIZO_PCI_AFSR	0x2010UL
#define SCHIZO_PCI_AFAR	0x2018UL

#define SCHIZO_PCIAFSR_PMA	0x8000000000000000UL /* Schizo/Tomatillo */
#define SCHIZO_PCIAFSR_PTA	0x4000000000000000UL /* Schizo/Tomatillo */
#define SCHIZO_PCIAFSR_PRTRY	0x2000000000000000UL /* Schizo/Tomatillo */
#define SCHIZO_PCIAFSR_PPERR	0x1000000000000000UL /* Schizo/Tomatillo */
#define SCHIZO_PCIAFSR_PTTO	0x0800000000000000UL /* Schizo/Tomatillo */
#define SCHIZO_PCIAFSR_PUNUS	0x0400000000000000UL /* Schizo */
#define SCHIZO_PCIAFSR_SMA	0x0200000000000000UL /* Schizo/Tomatillo */
#define SCHIZO_PCIAFSR_STA	0x0100000000000000UL /* Schizo/Tomatillo */
#define SCHIZO_PCIAFSR_SRTRY	0x0080000000000000UL /* Schizo/Tomatillo */
#define SCHIZO_PCIAFSR_SPERR	0x0040000000000000UL /* Schizo/Tomatillo */
#define SCHIZO_PCIAFSR_STTO	0x0020000000000000UL /* Schizo/Tomatillo */
#define SCHIZO_PCIAFSR_SUNUS	0x0010000000000000UL /* Schizo */
#define SCHIZO_PCIAFSR_BMSK	0x000003ff00000000UL /* Schizo/Tomatillo */
#define SCHIZO_PCIAFSR_BLK	0x0000000080000000UL /* Schizo/Tomatillo */
#define SCHIZO_PCIAFSR_CFG	0x0000000040000000UL /* Schizo/Tomatillo */
#define SCHIZO_PCIAFSR_MEM	0x0000000020000000UL /* Schizo/Tomatillo */
#define SCHIZO_PCIAFSR_IO	0x0000000010000000UL /* Schizo/Tomatillo */
885
/* PCI control/status register and its field masks.
 *
 * Bug fix: SCHIZO_PCICTRL_ARB_PRIO shifted the plain int constant
 * 0x1ff left by 52 bits; shifting an int by >= its width is undefined
 * behavior in C and cannot produce the intended 64-bit mask.  The
 * constant must be unsigned long (0x1ffUL) before the shift, matching
 * every other mask in this group.
 */
#define SCHIZO_PCI_CTRL		(0x2000UL)
#define SCHIZO_PCICTRL_BUS_UNUS	(1UL << 63UL) /* Safari */
#define SCHIZO_PCICTRL_ARB_PRIO (0x1ffUL << 52UL) /* Tomatillo */
#define SCHIZO_PCICTRL_ESLCK	(1UL << 51UL) /* Safari */
#define SCHIZO_PCICTRL_ERRSLOT	(7UL << 48UL) /* Safari */
#define SCHIZO_PCICTRL_TTO_ERR	(1UL << 38UL) /* Safari/Tomatillo */
#define SCHIZO_PCICTRL_RTRY_ERR	(1UL << 37UL) /* Safari/Tomatillo */
#define SCHIZO_PCICTRL_DTO_ERR	(1UL << 36UL) /* Safari/Tomatillo */
#define SCHIZO_PCICTRL_SBH_ERR	(1UL << 35UL) /* Safari */
#define SCHIZO_PCICTRL_SERR	(1UL << 34UL) /* Safari/Tomatillo */
#define SCHIZO_PCICTRL_PCISPD	(1UL << 33UL) /* Safari */
#define SCHIZO_PCICTRL_MRM_PREF	(1UL << 30UL) /* Tomatillo */
#define SCHIZO_PCICTRL_RDO_PREF	(1UL << 29UL) /* Tomatillo */
#define SCHIZO_PCICTRL_RDL_PREF	(1UL << 28UL) /* Tomatillo */
#define SCHIZO_PCICTRL_PTO	(3UL << 24UL) /* Safari/Tomatillo */
#define SCHIZO_PCICTRL_PTO_SHIFT	24UL
#define SCHIZO_PCICTRL_TRWSW	(7UL << 21UL) /* Tomatillo */
#define SCHIZO_PCICTRL_F_TGT_A	(1UL << 20UL) /* Tomatillo */
#define SCHIZO_PCICTRL_S_DTO_INT (1UL << 19UL) /* Safari */
#define SCHIZO_PCICTRL_F_TGT_RT	(1UL << 19UL) /* Tomatillo */
#define SCHIZO_PCICTRL_SBH_INT	(1UL << 18UL) /* Safari */
#define SCHIZO_PCICTRL_T_DTO_INT (1UL << 18UL) /* Tomatillo */
#define SCHIZO_PCICTRL_EEN	(1UL << 17UL) /* Safari/Tomatillo */
#define SCHIZO_PCICTRL_PARK	(1UL << 16UL) /* Safari/Tomatillo */
#define SCHIZO_PCICTRL_PCIRST	(1UL << 8UL)  /* Safari */
#define SCHIZO_PCICTRL_ARB_S	(0x3fUL << 0UL) /* Safari */
#define SCHIZO_PCICTRL_ARB_T	(0xffUL << 0UL) /* Tomatillo */
913
/* Secondary PCI-error sources: decode the PBM control/status register
 * error bits and the bridge's PCI_STATUS word when the AFSR showed no
 * primary/secondary error.  Returns IRQ_HANDLED if anything was found
 * and cleared, IRQ_NONE otherwise.
 */
static irqreturn_t schizo_pcierr_intr_other(struct pci_pbm_info *pbm)
{
	unsigned long csr_reg, csr, csr_error_bits;
	irqreturn_t ret = IRQ_NONE;
	u16 stat;

	csr_reg = pbm->pbm_regs + SCHIZO_PCI_CTRL;
	csr = schizo_read(csr_reg);
	csr_error_bits =
		csr & (SCHIZO_PCICTRL_BUS_UNUS |
		       SCHIZO_PCICTRL_TTO_ERR |
		       SCHIZO_PCICTRL_RTRY_ERR |
		       SCHIZO_PCICTRL_DTO_ERR |
		       SCHIZO_PCICTRL_SBH_ERR |
		       SCHIZO_PCICTRL_SERR);
	if (csr_error_bits) {
		/* Clear the errors.  */
		schizo_write(csr_reg, csr);

		/* Log 'em.  */
		if (csr_error_bits & SCHIZO_PCICTRL_BUS_UNUS)
			printk("%s: Bus unusable error asserted.\n",
			       pbm->name);
		if (csr_error_bits & SCHIZO_PCICTRL_TTO_ERR)
			printk("%s: PCI TRDY# timeout error asserted.\n",
			       pbm->name);
		if (csr_error_bits & SCHIZO_PCICTRL_RTRY_ERR)
			printk("%s: PCI excessive retry error asserted.\n",
			       pbm->name);
		if (csr_error_bits & SCHIZO_PCICTRL_DTO_ERR)
			printk("%s: PCI discard timeout error asserted.\n",
			       pbm->name);
		if (csr_error_bits & SCHIZO_PCICTRL_SBH_ERR)
			printk("%s: PCI streaming byte hole error asserted.\n",
			       pbm->name);
		if (csr_error_bits & SCHIZO_PCICTRL_SERR)
			printk("%s: PCI SERR signal asserted.\n",
			       pbm->name);
		ret = IRQ_HANDLED;
	}
	/* Also check the bridge's own PCI status word for bus errors. */
	pci_read_config_word(pbm->pci_bus->self, PCI_STATUS, &stat);
	if (stat & (PCI_STATUS_PARITY |
		    PCI_STATUS_SIG_TARGET_ABORT |
		    PCI_STATUS_REC_TARGET_ABORT |
		    PCI_STATUS_REC_MASTER_ABORT |
		    PCI_STATUS_SIG_SYSTEM_ERROR)) {
		printk("%s: PCI bus error, PCI_STATUS[%04x]\n",
		       pbm->name, stat);
		/* PCI_STATUS error bits are write-one-to-clear. */
		pci_write_config_word(pbm->pci_bus->self, PCI_STATUS, 0xffff);
		ret = IRQ_HANDLED;
	}
	return ret;
}
967
/* Per-PBM PCI bus error interrupt: latch and decode the PCI AFSR/AFAR,
 * log the cause, then scan the bus and IOMMU for matching device-level
 * error state.  Falls back to schizo_pcierr_intr_other() when the AFSR
 * shows no pending error bits.
 */
static irqreturn_t schizo_pcierr_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct pci_pbm_info *pbm = dev_id;
	struct pci_controller_info *p = pbm->parent;
	unsigned long afsr_reg, afar_reg, base;
	unsigned long afsr, afar, error_bits;
	int reported;

	base = pbm->pbm_regs;

	afsr_reg = base + SCHIZO_PCI_AFSR;
	afar_reg = base + SCHIZO_PCI_AFAR;

	/* Latch error status. */
	afar = schizo_read(afar_reg);
	afsr = schizo_read(afsr_reg);

	/* Clear primary/secondary error status bits. */
	error_bits = afsr &
		(SCHIZO_PCIAFSR_PMA | SCHIZO_PCIAFSR_PTA |
		 SCHIZO_PCIAFSR_PRTRY | SCHIZO_PCIAFSR_PPERR |
		 SCHIZO_PCIAFSR_PTTO | SCHIZO_PCIAFSR_PUNUS |
		 SCHIZO_PCIAFSR_SMA | SCHIZO_PCIAFSR_STA |
		 SCHIZO_PCIAFSR_SRTRY | SCHIZO_PCIAFSR_SPERR |
		 SCHIZO_PCIAFSR_STTO | SCHIZO_PCIAFSR_SUNUS);
	if (!error_bits)
		return schizo_pcierr_intr_other(pbm);
	/* Write-one-to-clear the bits we observed. */
	schizo_write(afsr_reg, error_bits);

	/* Log the error. */
	printk("%s: PCI Error, primary error type[%s]\n",
	       pbm->name,
	       (((error_bits & SCHIZO_PCIAFSR_PMA) ?
		 "Master Abort" :
		 ((error_bits & SCHIZO_PCIAFSR_PTA) ?
		  "Target Abort" :
		  ((error_bits & SCHIZO_PCIAFSR_PRTRY) ?
		   "Excessive Retries" :
		   ((error_bits & SCHIZO_PCIAFSR_PPERR) ?
		    "Parity Error" :
		    ((error_bits & SCHIZO_PCIAFSR_PTTO) ?
		     "Timeout" :
		     ((error_bits & SCHIZO_PCIAFSR_PUNUS) ?
		      "Bus Unusable" : "???"))))))));
	printk("%s: bytemask[%04lx] was_block(%d) space(%s)\n",
	       pbm->name,
	       (afsr & SCHIZO_PCIAFSR_BMSK) >> 32UL,
	       (afsr & SCHIZO_PCIAFSR_BLK) ? 1 : 0,
	       ((afsr & SCHIZO_PCIAFSR_CFG) ?
		"Config" :
		((afsr & SCHIZO_PCIAFSR_MEM) ?
		 "Memory" :
		 ((afsr & SCHIZO_PCIAFSR_IO) ?
		  "I/O" : "???"))));
	printk("%s: PCI AFAR [%016lx]\n",
	       pbm->name, afar);
	printk("%s: PCI Secondary errors [",
	       pbm->name);
	reported = 0;
	if (afsr & SCHIZO_PCIAFSR_SMA) {
		reported++;
		printk("(Master Abort)");
	}
	if (afsr & SCHIZO_PCIAFSR_STA) {
		reported++;
		printk("(Target Abort)");
	}
	if (afsr & SCHIZO_PCIAFSR_SRTRY) {
		reported++;
		printk("(Excessive Retries)");
	}
	if (afsr & SCHIZO_PCIAFSR_SPERR) {
		reported++;
		printk("(Parity Error)");
	}
	if (afsr & SCHIZO_PCIAFSR_STTO) {
		reported++;
		printk("(Timeout)");
	}
	if (afsr & SCHIZO_PCIAFSR_SUNUS) {
		reported++;
		printk("(Bus Unusable)");
	}
	if (!reported)
		printk("(none)");
	printk("]\n");

	/* For the error types shown, scan PBM's PCI bus for devices
	 * which have logged that error type.
	 */

	/* If we see a Target Abort, this could be the result of an
	 * IOMMU translation error of some sort.  It is extremely
	 * useful to log this information as usually it indicates
	 * a bug in the IOMMU support code or a PCI device driver.
	 */
	if (error_bits & (SCHIZO_PCIAFSR_PTA | SCHIZO_PCIAFSR_STA)) {
		schizo_check_iommu_error(p, PCI_ERR);
		pci_scan_for_target_abort(p, pbm, pbm->pci_bus);
	}
	if (error_bits & (SCHIZO_PCIAFSR_PMA | SCHIZO_PCIAFSR_SMA))
		pci_scan_for_master_abort(p, pbm, pbm->pci_bus);

	/* For excessive retries, PSYCHO/PBM will abort the device
	 * and there is no way to specifically check for excessive
	 * retries in the config space status registers.  So what
	 * we hope is that we'll catch it via the master/target
	 * abort events.
	 */

	if (error_bits & (SCHIZO_PCIAFSR_PPERR | SCHIZO_PCIAFSR_SPERR))
		pci_scan_for_parity_error(p, pbm, pbm->pci_bus);

	/* No-op for PCIERR INOs, but kept for uniformity with the
	 * chip-wide error handlers.
	 */
	schizo_clear_other_err_intr(p, irq);

	return IRQ_HANDLED;
}
1085
/* Safari (system bus) error log register and its error-cause bits.
 * Bit positions are shared between the Safari (Schizo) and JBUS
 * (Tomatillo) variants; each bit's comment notes which chip uses it.
 */
#define SCHIZO_SAFARI_ERRLOG	0x10018UL

#define SAFARI_ERRLOG_ERROUT	0x8000000000000000UL

#define BUS_ERROR_BADCMD	0x4000000000000000UL /* Schizo/Tomatillo */
#define BUS_ERROR_SSMDIS	0x2000000000000000UL /* Safari */
#define BUS_ERROR_BADMA		0x1000000000000000UL /* Safari */
#define BUS_ERROR_BADMB		0x0800000000000000UL /* Safari */
#define BUS_ERROR_BADMC		0x0400000000000000UL /* Safari */
#define BUS_ERROR_SNOOP_GR	0x0000000000200000UL /* Tomatillo */
#define BUS_ERROR_SNOOP_PCI	0x0000000000100000UL /* Tomatillo */
#define BUS_ERROR_SNOOP_RD	0x0000000000080000UL /* Tomatillo */
#define BUS_ERROR_SNOOP_RDS	0x0000000000020000UL /* Tomatillo */
#define BUS_ERROR_SNOOP_RDSA	0x0000000000010000UL /* Tomatillo */
#define BUS_ERROR_SNOOP_OWN	0x0000000000008000UL /* Tomatillo */
#define BUS_ERROR_SNOOP_RDO	0x0000000000004000UL /* Tomatillo */
#define BUS_ERROR_CPU1PS	0x0000000000002000UL /* Safari */
#define BUS_ERROR_WDATA_PERR	0x0000000000002000UL /* Tomatillo */
#define BUS_ERROR_CPU1PB	0x0000000000001000UL /* Safari */
#define BUS_ERROR_CTRL_PERR	0x0000000000001000UL /* Tomatillo */
#define BUS_ERROR_CPU0PS	0x0000000000000800UL /* Safari */
#define BUS_ERROR_SNOOP_ERR	0x0000000000000800UL /* Tomatillo */
#define BUS_ERROR_CPU0PB	0x0000000000000400UL /* Safari */
#define BUS_ERROR_JBUS_ILL_B	0x0000000000000400UL /* Tomatillo */
#define BUS_ERROR_CIQTO		0x0000000000000200UL /* Safari */
#define BUS_ERROR_LPQTO		0x0000000000000100UL /* Safari */
#define BUS_ERROR_JBUS_ILL_C	0x0000000000000100UL /* Tomatillo */
#define BUS_ERROR_SFPQTO	0x0000000000000080UL /* Safari */
#define BUS_ERROR_UFPQTO	0x0000000000000040UL /* Safari */
#define BUS_ERROR_RD_PERR	0x0000000000000040UL /* Tomatillo */
#define BUS_ERROR_APERR		0x0000000000000020UL /* Safari/Tomatillo */
#define BUS_ERROR_UNMAP		0x0000000000000010UL /* Safari/Tomatillo */
#define BUS_ERROR_BUSERR	0x0000000000000004UL /* Safari/Tomatillo */
#define BUS_ERROR_TIMEOUT	0x0000000000000002UL /* Safari/Tomatillo */
#define BUS_ERROR_ILL		0x0000000000000001UL /* Safari */
1121
/* We only expect UNMAP errors here.  The rest of the Safari errors
 * are marked fatal and thus cause a system reset.
 *
 * Handler: latch and acknowledge the Safari error log, then for UNMAP
 * errors interrogate the IOMMUs; anything else is merely reported.
 */
static irqreturn_t schizo_safarierr_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct pci_controller_info *p = dev_id;
	u64 errlog;

	errlog = schizo_read(p->pbm_B.controller_regs + SCHIZO_SAFARI_ERRLOG);
	/* Clear ERROUT to acknowledge the logged error. */
	schizo_write(p->pbm_B.controller_regs + SCHIZO_SAFARI_ERRLOG,
		     errlog & ~(SAFARI_ERRLOG_ERROUT));

	if (!(errlog & BUS_ERROR_UNMAP)) {
		printk("PCI%d: Unexpected Safari/JBUS error interrupt, errlog[%016lx]\n",
		       p->index, errlog);

		schizo_clear_other_err_intr(p, irq);
		return IRQ_HANDLED;
	}

	printk("PCI%d: Safari/JBUS interrupt, UNMAPPED error, interrogating IOMMUs.\n",
	       p->index);
	schizo_check_iommu_error(p, SAFARI_ERR);

	/* Chip-wide interrupt: also idle the sibling PBM's ICLR. */
	schizo_clear_other_err_intr(p, irq);
	return IRQ_HANDLED;
}
1149
/* Nearly identical to PSYCHO equivalents... */
/* ECC checking and Safari error/IRQ enable control registers. */
#define SCHIZO_ECC_CTRL		0x10020UL
#define  SCHIZO_ECCCTRL_EE	 0x8000000000000000UL /* Enable ECC Checking */
#define  SCHIZO_ECCCTRL_UE	 0x4000000000000000UL /* Enable UE Interrupts */
#define  SCHIZO_ECCCTRL_CE	 0x2000000000000000UL /* Enable CE INterrupts */

#define SCHIZO_SAFARI_ERRCTRL	0x10008UL
#define  SCHIZO_SAFERRCTRL_EN	 0x8000000000000000UL
#define SCHIZO_SAFARI_IRQCTRL	0x10010UL
#define  SCHIZO_SAFIRQCTRL_EN	 0x8000000000000000UL
1160
1161/* How the Tomatillo IRQs are routed around is pure guesswork here.
1162 *
1163 * All the Tomatillo devices I see in prtconf dumps seem to have only
1164 * a single PCI bus unit attached to it. It would seem they are seperate
1165 * devices because their PortID (ie. JBUS ID) values are all different
1166 * and thus the registers are mapped to totally different locations.
1167 *
1168 * However, two Tomatillo's look "similar" in that the only difference
1169 * in their PortID is the lowest bit.
1170 *
1171 * So if we were to ignore this lower bit, it certainly looks like two
1172 * PCI bus units of the same Tomatillo. I still have not really
1173 * figured this out...
1174 */
/* Register UE/CE/PCIERR/SERR interrupt handlers for a Tomatillo
 * controller, then enable error detection and reporting in the ECC
 * control registers, each PBM's PCI control/AFSR registers, and the
 * JBUS (Safari) error/irq control registers.
 *
 * Any handler registration failure is fatal (prom_halt), since the
 * chip would otherwise raise error interrupts nobody services.
 */
1175static void __init tomatillo_register_error_handlers(struct pci_controller_info *p)
1176{
1177 struct pci_pbm_info *pbm;
1178 unsigned int irq;
1179 struct ino_bucket *bucket;
1180 u64 tmp, err_mask, err_no_mask;
1181
1182 /* Build IRQs and register handlers. */
1183 pbm = pbm_for_ino(p, SCHIZO_UE_INO);
1184 irq = schizo_irq_build(pbm, NULL, (pbm->portid << 6) | SCHIZO_UE_INO);
1185 if (request_irq(irq, schizo_ue_intr,
1186 SA_SHIRQ, "TOMATILLO UE", p) < 0) {
1187 prom_printf("%s: Cannot register UE interrupt.\n",
1188 pbm->name);
1189 prom_halt();
1190 }
 /* NOTE(review): this copies the INO's IMAP contents into the
  * per-INO imap register at offset +4 -- presumably routing the
  * error INO; confirm against Tomatillo documentation.
  */
1191 bucket = __bucket(irq);
1192 tmp = upa_readl(bucket->imap);
1193 upa_writel(tmp, (pbm->pbm_regs +
1194 schizo_imap_offset(SCHIZO_UE_INO) + 4));
1195
1196 pbm = pbm_for_ino(p, SCHIZO_CE_INO);
1197 irq = schizo_irq_build(pbm, NULL, (pbm->portid << 6) | SCHIZO_CE_INO);
1198 if (request_irq(irq, schizo_ce_intr,
1199 SA_SHIRQ, "TOMATILLO CE", p) < 0) {
1200 prom_printf("%s: Cannot register CE interrupt.\n",
1201 pbm->name);
1202 prom_halt();
1203 }
1204 bucket = __bucket(irq);
1205 tmp = upa_readl(bucket->imap);
1206 upa_writel(tmp, (pbm->pbm_regs +
1207 schizo_imap_offset(SCHIZO_CE_INO) + 4));
1208
1209 pbm = pbm_for_ino(p, SCHIZO_PCIERR_A_INO);
1210 irq = schizo_irq_build(pbm, NULL, ((pbm->portid << 6) |
1211 SCHIZO_PCIERR_A_INO));
1212 if (request_irq(irq, schizo_pcierr_intr,
1213 SA_SHIRQ, "TOMATILLO PCIERR", pbm) < 0) {
1214 prom_printf("%s: Cannot register PBM A PciERR interrupt.\n",
1215 pbm->name);
1216 prom_halt();
1217 }
1218 bucket = __bucket(irq);
1219 tmp = upa_readl(bucket->imap);
1220 upa_writel(tmp, (pbm->pbm_regs +
1221 schizo_imap_offset(SCHIZO_PCIERR_A_INO) + 4));
1222
1223 pbm = pbm_for_ino(p, SCHIZO_PCIERR_B_INO);
1224 irq = schizo_irq_build(pbm, NULL, ((pbm->portid << 6) |
1225 SCHIZO_PCIERR_B_INO));
1226 if (request_irq(irq, schizo_pcierr_intr,
1227 SA_SHIRQ, "TOMATILLO PCIERR", pbm) < 0) {
1228 prom_printf("%s: Cannot register PBM B PciERR interrupt.\n",
1229 pbm->name);
1230 prom_halt();
1231 }
1232 bucket = __bucket(irq);
1233 tmp = upa_readl(bucket->imap);
1234 upa_writel(tmp, (pbm->pbm_regs +
1235 schizo_imap_offset(SCHIZO_PCIERR_B_INO) + 4));
1236
1237 pbm = pbm_for_ino(p, SCHIZO_SERR_INO);
1238 irq = schizo_irq_build(pbm, NULL, (pbm->portid << 6) | SCHIZO_SERR_INO);
1239 if (request_irq(irq, schizo_safarierr_intr,
1240 SA_SHIRQ, "TOMATILLO SERR", p) < 0) {
1241 prom_printf("%s: Cannot register SafariERR interrupt.\n",
1242 pbm->name);
1243 prom_halt();
1244 }
1245 bucket = __bucket(irq);
1246 tmp = upa_readl(bucket->imap);
1247 upa_writel(tmp, (pbm->pbm_regs +
1248 schizo_imap_offset(SCHIZO_SERR_INO) + 4));
1249
1250 /* Enable UE and CE interrupts for controller. */
1251 schizo_write(p->pbm_A.controller_regs + SCHIZO_ECC_CTRL,
1252 (SCHIZO_ECCCTRL_EE |
1253 SCHIZO_ECCCTRL_UE |
1254 SCHIZO_ECCCTRL_CE));
1255
1256 schizo_write(p->pbm_B.controller_regs + SCHIZO_ECC_CTRL,
1257 (SCHIZO_ECCCTRL_EE |
1258 SCHIZO_ECCCTRL_UE |
1259 SCHIZO_ECCCTRL_CE));
1260
1261 /* Enable PCI Error interrupts and clear error
1262 * bits.
1263 */
1264 err_mask = (SCHIZO_PCICTRL_BUS_UNUS |
1265 SCHIZO_PCICTRL_TTO_ERR |
1266 SCHIZO_PCICTRL_RTRY_ERR |
1267 SCHIZO_PCICTRL_SERR |
1268 SCHIZO_PCICTRL_EEN);
1269
 /* Discard-timeout errors are explicitly left disabled. */
1270 err_no_mask = SCHIZO_PCICTRL_DTO_ERR;
1271
1272 tmp = schizo_read(p->pbm_A.pbm_regs + SCHIZO_PCI_CTRL);
1273 tmp |= err_mask;
1274 tmp &= ~err_no_mask;
1275 schizo_write(p->pbm_A.pbm_regs + SCHIZO_PCI_CTRL, tmp);
1276
1277 tmp = schizo_read(p->pbm_B.pbm_regs + SCHIZO_PCI_CTRL);
1278 tmp |= err_mask;
1279 tmp &= ~err_no_mask;
1280 schizo_write(p->pbm_B.pbm_regs + SCHIZO_PCI_CTRL, tmp);
1281
 /* Clear any latched primary/secondary error status in the AFSRs. */
1282 err_mask = (SCHIZO_PCIAFSR_PMA | SCHIZO_PCIAFSR_PTA |
1283 SCHIZO_PCIAFSR_PRTRY | SCHIZO_PCIAFSR_PPERR |
1284 SCHIZO_PCIAFSR_PTTO |
1285 SCHIZO_PCIAFSR_SMA | SCHIZO_PCIAFSR_STA |
1286 SCHIZO_PCIAFSR_SRTRY | SCHIZO_PCIAFSR_SPERR |
1287 SCHIZO_PCIAFSR_STTO);
1288
1289 schizo_write(p->pbm_A.pbm_regs + SCHIZO_PCI_AFSR, err_mask);
1290 schizo_write(p->pbm_B.pbm_regs + SCHIZO_PCI_AFSR, err_mask);
1291
 /* Enable detection of these JBUS error conditions... */
1292 err_mask = (BUS_ERROR_BADCMD | BUS_ERROR_SNOOP_GR |
1293 BUS_ERROR_SNOOP_PCI | BUS_ERROR_SNOOP_RD |
1294 BUS_ERROR_SNOOP_RDS | BUS_ERROR_SNOOP_RDSA |
1295 BUS_ERROR_SNOOP_OWN | BUS_ERROR_SNOOP_RDO |
1296 BUS_ERROR_WDATA_PERR | BUS_ERROR_CTRL_PERR |
1297 BUS_ERROR_SNOOP_ERR | BUS_ERROR_JBUS_ILL_B |
1298 BUS_ERROR_JBUS_ILL_C | BUS_ERROR_RD_PERR |
1299 BUS_ERROR_APERR | BUS_ERROR_UNMAP |
1300 BUS_ERROR_BUSERR | BUS_ERROR_TIMEOUT);
1301
1302 schizo_write(p->pbm_A.controller_regs + SCHIZO_SAFARI_ERRCTRL,
1303 (SCHIZO_SAFERRCTRL_EN | err_mask));
1304 schizo_write(p->pbm_B.controller_regs + SCHIZO_SAFARI_ERRCTRL,
1305 (SCHIZO_SAFERRCTRL_EN | err_mask));
1306
 /* ...but only unmapped errors generate an interrupt. */
1307 schizo_write(p->pbm_A.controller_regs + SCHIZO_SAFARI_IRQCTRL,
1308 (SCHIZO_SAFIRQCTRL_EN | (BUS_ERROR_UNMAP)));
1309 schizo_write(p->pbm_B.controller_regs + SCHIZO_SAFARI_IRQCTRL,
1310 (SCHIZO_SAFIRQCTRL_EN | (BUS_ERROR_UNMAP)));
1311}
1312
1313static void __init schizo_register_error_handlers(struct pci_controller_info *p)
1314{
1315 struct pci_pbm_info *pbm;
1316 unsigned int irq;
1317 struct ino_bucket *bucket;
1318 u64 tmp, err_mask, err_no_mask;
1319
1320 /* Build IRQs and register handlers. */
1321 pbm = pbm_for_ino(p, SCHIZO_UE_INO);
1322 irq = schizo_irq_build(pbm, NULL, (pbm->portid << 6) | SCHIZO_UE_INO);
1323 if (request_irq(irq, schizo_ue_intr,
1324 SA_SHIRQ, "SCHIZO UE", p) < 0) {
1325 prom_printf("%s: Cannot register UE interrupt.\n",
1326 pbm->name);
1327 prom_halt();
1328 }
1329 bucket = __bucket(irq);
1330 tmp = upa_readl(bucket->imap);
1331 upa_writel(tmp, (pbm->pbm_regs + schizo_imap_offset(SCHIZO_UE_INO) + 4));
1332
1333 pbm = pbm_for_ino(p, SCHIZO_CE_INO);
1334 irq = schizo_irq_build(pbm, NULL, (pbm->portid << 6) | SCHIZO_CE_INO);
1335 if (request_irq(irq, schizo_ce_intr,
1336 SA_SHIRQ, "SCHIZO CE", p) < 0) {
1337 prom_printf("%s: Cannot register CE interrupt.\n",
1338 pbm->name);
1339 prom_halt();
1340 }
1341 bucket = __bucket(irq);
1342 tmp = upa_readl(bucket->imap);
1343 upa_writel(tmp, (pbm->pbm_regs + schizo_imap_offset(SCHIZO_CE_INO) + 4));
1344
1345 pbm = pbm_for_ino(p, SCHIZO_PCIERR_A_INO);
1346 irq = schizo_irq_build(pbm, NULL, (pbm->portid << 6) | SCHIZO_PCIERR_A_INO);
1347 if (request_irq(irq, schizo_pcierr_intr,
1348 SA_SHIRQ, "SCHIZO PCIERR", pbm) < 0) {
1349 prom_printf("%s: Cannot register PBM A PciERR interrupt.\n",
1350 pbm->name);
1351 prom_halt();
1352 }
1353 bucket = __bucket(irq);
1354 tmp = upa_readl(bucket->imap);
1355 upa_writel(tmp, (pbm->pbm_regs + schizo_imap_offset(SCHIZO_PCIERR_A_INO) + 4));
1356
1357 pbm = pbm_for_ino(p, SCHIZO_PCIERR_B_INO);
1358 irq = schizo_irq_build(pbm, NULL, (pbm->portid << 6) | SCHIZO_PCIERR_B_INO);
1359 if (request_irq(irq, schizo_pcierr_intr,
1360 SA_SHIRQ, "SCHIZO PCIERR", &p->pbm_B) < 0) {
1361 prom_printf("%s: Cannot register PBM B PciERR interrupt.\n",
1362 pbm->name);
1363 prom_halt();
1364 }
1365 bucket = __bucket(irq);
1366 tmp = upa_readl(bucket->imap);
1367 upa_writel(tmp, (pbm->pbm_regs + schizo_imap_offset(SCHIZO_PCIERR_B_INO) + 4));
1368
1369 pbm = pbm_for_ino(p, SCHIZO_SERR_INO);
1370 irq = schizo_irq_build(pbm, NULL, (pbm->portid << 6) | SCHIZO_SERR_INO);
1371 if (request_irq(irq, schizo_safarierr_intr,
1372 SA_SHIRQ, "SCHIZO SERR", p) < 0) {
1373 prom_printf("%s: Cannot register SafariERR interrupt.\n",
1374 pbm->name);
1375 prom_halt();
1376 }
1377 bucket = __bucket(irq);
1378 tmp = upa_readl(bucket->imap);
1379 upa_writel(tmp, (pbm->pbm_regs + schizo_imap_offset(SCHIZO_SERR_INO) + 4));
1380
1381 /* Enable UE and CE interrupts for controller. */
1382 schizo_write(p->pbm_A.controller_regs + SCHIZO_ECC_CTRL,
1383 (SCHIZO_ECCCTRL_EE |
1384 SCHIZO_ECCCTRL_UE |
1385 SCHIZO_ECCCTRL_CE));
1386
1387 err_mask = (SCHIZO_PCICTRL_BUS_UNUS |
1388 SCHIZO_PCICTRL_ESLCK |
1389 SCHIZO_PCICTRL_TTO_ERR |
1390 SCHIZO_PCICTRL_RTRY_ERR |
1391 SCHIZO_PCICTRL_SBH_ERR |
1392 SCHIZO_PCICTRL_SERR |
1393 SCHIZO_PCICTRL_EEN);
1394
1395 err_no_mask = (SCHIZO_PCICTRL_DTO_ERR |
1396 SCHIZO_PCICTRL_SBH_INT);
1397
1398 /* Enable PCI Error interrupts and clear error
1399 * bits for each PBM.
1400 */
1401 tmp = schizo_read(p->pbm_A.pbm_regs + SCHIZO_PCI_CTRL);
1402 tmp |= err_mask;
1403 tmp &= ~err_no_mask;
1404 schizo_write(p->pbm_A.pbm_regs + SCHIZO_PCI_CTRL, tmp);
1405
1406 schizo_write(p->pbm_A.pbm_regs + SCHIZO_PCI_AFSR,
1407 (SCHIZO_PCIAFSR_PMA | SCHIZO_PCIAFSR_PTA |
1408 SCHIZO_PCIAFSR_PRTRY | SCHIZO_PCIAFSR_PPERR |
1409 SCHIZO_PCIAFSR_PTTO | SCHIZO_PCIAFSR_PUNUS |
1410 SCHIZO_PCIAFSR_SMA | SCHIZO_PCIAFSR_STA |
1411 SCHIZO_PCIAFSR_SRTRY | SCHIZO_PCIAFSR_SPERR |
1412 SCHIZO_PCIAFSR_STTO | SCHIZO_PCIAFSR_SUNUS));
1413
1414 tmp = schizo_read(p->pbm_B.pbm_regs + SCHIZO_PCI_CTRL);
1415 tmp |= err_mask;
1416 tmp &= ~err_no_mask;
1417 schizo_write(p->pbm_B.pbm_regs + SCHIZO_PCI_CTRL, tmp);
1418
1419 schizo_write(p->pbm_B.pbm_regs + SCHIZO_PCI_AFSR,
1420 (SCHIZO_PCIAFSR_PMA | SCHIZO_PCIAFSR_PTA |
1421 SCHIZO_PCIAFSR_PRTRY | SCHIZO_PCIAFSR_PPERR |
1422 SCHIZO_PCIAFSR_PTTO | SCHIZO_PCIAFSR_PUNUS |
1423 SCHIZO_PCIAFSR_SMA | SCHIZO_PCIAFSR_STA |
1424 SCHIZO_PCIAFSR_SRTRY | SCHIZO_PCIAFSR_SPERR |
1425 SCHIZO_PCIAFSR_STTO | SCHIZO_PCIAFSR_SUNUS));
1426
1427 /* Make all Safari error conditions fatal except unmapped
1428 * errors which we make generate interrupts.
1429 */
1430 err_mask = (BUS_ERROR_BADCMD | BUS_ERROR_SSMDIS |
1431 BUS_ERROR_BADMA | BUS_ERROR_BADMB |
1432 BUS_ERROR_BADMC |
1433 BUS_ERROR_CPU1PS | BUS_ERROR_CPU1PB |
1434 BUS_ERROR_CPU0PS | BUS_ERROR_CPU0PB |
1435 BUS_ERROR_CIQTO |
1436 BUS_ERROR_LPQTO | BUS_ERROR_SFPQTO |
1437 BUS_ERROR_UFPQTO | BUS_ERROR_APERR |
1438 BUS_ERROR_BUSERR | BUS_ERROR_TIMEOUT |
1439 BUS_ERROR_ILL);
1440#if 1
1441 /* XXX Something wrong with some Excalibur systems
1442 * XXX Sun is shipping. The behavior on a 2-cpu
1443 * XXX machine is that both CPU1 parity error bits
1444 * XXX are set and are immediately set again when
1445 * XXX their error status bits are cleared. Just
1446 * XXX ignore them for now. -DaveM
1447 */
1448 err_mask &= ~(BUS_ERROR_CPU1PS | BUS_ERROR_CPU1PB |
1449 BUS_ERROR_CPU0PS | BUS_ERROR_CPU0PB);
1450#endif
1451
1452 schizo_write(p->pbm_A.controller_regs + SCHIZO_SAFARI_ERRCTRL,
1453 (SCHIZO_SAFERRCTRL_EN | err_mask));
1454
1455 schizo_write(p->pbm_A.controller_regs + SCHIZO_SAFARI_IRQCTRL,
1456 (SCHIZO_SAFIRQCTRL_EN | (BUS_ERROR_UNMAP)));
1457}
1458
/* Program the standard PCI config-space bus-mastering parameters
 * (cache line size and latency timer) of the PBM's host bridge,
 * i.e. device 0 on the PBM's first bus number.
 */
1459static void __init pbm_config_busmastering(struct pci_pbm_info *pbm)
1460{
1461 u8 *addr;
1462
1463 /* Set cache-line size to 64 bytes, this is actually
1464 * a nop but I do it for completeness.
1465 */
1466 addr = schizo_pci_config_mkaddr(pbm, pbm->pci_first_busno,
1467 0, PCI_CACHE_LINE_SIZE);
 /* The register is in units of 32-bit words: 64 bytes == 16 dwords. */
1468 pci_config_write8(addr, 64 / sizeof(u32));
1469
1470 /* Set PBM latency timer to 64 PCI clocks. */
1471 addr = schizo_pci_config_mkaddr(pbm, pbm->pci_first_busno,
1472 0, PCI_LATENCY_TIMER);
1473 pci_config_write8(addr, 64);
1474}
1475
/* Scan one PBM's PCI bus and run the common sparc64 PCI fixups on
 * the resulting tree: cookie setup, resource recording/assignment,
 * IRQ routing, 66MHz determination and bus-master setup.
 *
 * The cookie allocation is mandatory; failure halts the machine.
 */
1476static void __init pbm_scan_bus(struct pci_controller_info *p,
1477 struct pci_pbm_info *pbm)
1478{
1479 struct pcidev_cookie *cookie = kmalloc(sizeof(*cookie), GFP_KERNEL);
1480
1481 if (!cookie) {
1482 prom_printf("%s: Critical allocation failure.\n", pbm->name);
1483 prom_halt();
1484 }
1485
1486 /* All we care about is the PBM. */
1487 memset(cookie, 0, sizeof(*cookie));
1488 cookie->pbm = pbm;
1489
1490 pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno,
1491 p->pci_ops,
1492 pbm);
 /* Attach the cookie to the host bridge "self" device created above. */
1493 pci_fixup_host_bridge_self(pbm->pci_bus);
1494 pbm->pci_bus->self->sysdata = cookie;
1495
1496 pci_fill_in_pbm_cookies(pbm->pci_bus, pbm, pbm->prom_node);
1497 pci_record_assignments(pbm, pbm->pci_bus);
1498 pci_assign_unassigned(pbm, pbm->pci_bus);
1499 pci_fixup_irq(pbm, pbm->pci_bus);
1500 pci_determine_66mhz_disposition(pbm, pbm->pci_bus);
1501 pci_setup_busmastering(pbm, pbm->pci_bus);
1502}
1503
/* Configure and scan both PBMs of a controller (B first, then A),
 * then register the chip-type-specific error handlers.  A controller
 * for which only one PBM node was found is ignored entirely.
 */
1504static void __init __schizo_scan_bus(struct pci_controller_info *p,
1505 int chip_type)
1506{
1507 if (!p->pbm_B.prom_node || !p->pbm_A.prom_node) {
1508 printk("PCI: Only one PCI bus module of controller found.\n");
1509 printk("PCI: Ignoring entire controller.\n");
1510 return;
1511 }
1512
1513 pbm_config_busmastering(&p->pbm_B);
1514 p->pbm_B.is_66mhz_capable =
1515 prom_getbool(p->pbm_B.prom_node, "66mhz-capable");
1516 pbm_config_busmastering(&p->pbm_A);
1517 p->pbm_A.is_66mhz_capable =
1518 prom_getbool(p->pbm_A.prom_node, "66mhz-capable");
1519 pbm_scan_bus(p, &p->pbm_B);
1520 pbm_scan_bus(p, &p->pbm_A);
1521
1522 /* After the PCI bus scan is complete, we can register
1523 * the error interrupt handlers.
1524 */
1525 if (chip_type == PBM_CHIP_TYPE_TOMATILLO)
1526 tomatillo_register_error_handlers(p);
1527 else
1528 schizo_register_error_handlers(p);
1529}
1530
/* scan_bus entry point for plain SCHIZO and SCHIZO+ controllers. */
1531static void __init schizo_scan_bus(struct pci_controller_info *p)
1532{
1533 __schizo_scan_bus(p, PBM_CHIP_TYPE_SCHIZO);
1534}
1535
/* scan_bus entry point for TOMATILLO controllers. */
1536static void __init tomatillo_scan_bus(struct pci_controller_info *p)
1537{
1538 __schizo_scan_bus(p, PBM_CHIP_TYPE_TOMATILLO);
1539}
1540
/* Write a device's assigned resource back into the corresponding
 * PCI base address register (or the expansion ROM register).
 *
 * The value written is the resource start made PBM-relative by
 * subtracting the root (IO or MEM) space base, merged with the
 * read-only size/type bits already latched in the register.
 */
1541static void __init schizo_base_address_update(struct pci_dev *pdev, int resource)
1542{
1543 struct pcidev_cookie *pcp = pdev->sysdata;
1544 struct pci_pbm_info *pbm = pcp->pbm;
1545 struct resource *res, *root;
1546 u32 reg;
1547 int where, size, is_64bit;
1548
1549 res = &pdev->resource[resource];
 /* Resources 0-5 map to the six standard BARs. */
1550 if (resource < 6) {
1551 where = PCI_BASE_ADDRESS_0 + (resource * 4);
1552 } else if (resource == PCI_ROM_RESOURCE) {
1553 where = pdev->rom_base_reg;
1554 } else {
1555 /* Somebody might have asked allocation of a non-standard resource */
1556 return;
1557 }
1558
1559 is_64bit = 0;
1560 if (res->flags & IORESOURCE_IO)
1561 root = &pbm->io_space;
1562 else {
1563 root = &pbm->mem_space;
1564 if ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK)
1565 == PCI_BASE_ADDRESS_MEM_TYPE_64)
1566 is_64bit = 1;
1567 }
1568
 /* "size" is used as a mask: low bits preserved from the old
  * register value, high bits replaced with the new address.
  */
1569 size = res->end - res->start;
1570 pci_read_config_dword(pdev, where, &reg);
1571 reg = ((reg & size) |
1572 (((u32)(res->start - root->start)) & ~size));
1573 if (resource == PCI_ROM_RESOURCE) {
1574 reg |= PCI_ROM_ADDRESS_ENABLE;
1575 res->flags |= IORESOURCE_ROM_ENABLE;
1576 }
1577 pci_write_config_dword(pdev, where, reg);
1578
1579 /* This knows that the upper 32-bits of the address
1580 * must be zero. Our PCI common layer enforces this.
1581 */
1582 if (is_64bit)
1583 pci_write_config_dword(pdev, where + 4, 0);
1584}
1585
1586static void __init schizo_resource_adjust(struct pci_dev *pdev,
1587 struct resource *res,
1588 struct resource *root)
1589{
1590 res->start += root->start;
1591 res->end += root->start;
1592}
1593
1594/* Use ranges property to determine where PCI MEM, I/O, and Config
1595 * space are for this PCI bus module.
1596 */
1597static void schizo_determine_mem_io_space(struct pci_pbm_info *pbm)
1598{
1599 int i, saw_cfg, saw_mem, saw_io;
1600
1601 saw_cfg = saw_mem = saw_io = 0;
1602 for (i = 0; i < pbm->num_pbm_ranges; i++) {
1603 struct linux_prom_pci_ranges *pr = &pbm->pbm_ranges[i];
1604 unsigned long a;
1605 int type;
1606
 /* Bits 24-25 of child_phys_hi encode the PCI address space
  * type; the parent address is the 64-bit system base.
  */
1607 type = (pr->child_phys_hi >> 24) & 0x3;
1608 a = (((unsigned long)pr->parent_phys_hi << 32UL) |
1609 ((unsigned long)pr->parent_phys_lo << 0UL));
1610
1611 switch (type) {
1612 case 0:
1613 /* PCI config space, 16MB */
1614 pbm->config_space = a;
1615 saw_cfg = 1;
1616 break;
1617
1618 case 1:
1619 /* 16-bit IO space, 16MB */
1620 pbm->io_space.start = a;
1621 pbm->io_space.end = a + ((16UL*1024UL*1024UL) - 1UL);
1622 pbm->io_space.flags = IORESOURCE_IO;
1623 saw_io = 1;
1624 break;
1625
1626 case 2:
1627 /* 32-bit MEM space, 2GB */
1628 pbm->mem_space.start = a;
1629 pbm->mem_space.end = a + (0x80000000UL - 1UL);
1630 pbm->mem_space.flags = IORESOURCE_MEM;
1631 saw_mem = 1;
1632 break;
1633
 /* Type 3 (64-bit MEM) is deliberately ignored here. */
1634 default:
1635 break;
1636 };
1637 }
1638
 /* All three spaces are required; report the first one missing. */
1639 if (!saw_cfg || !saw_io || !saw_mem) {
1640 prom_printf("%s: Fatal error, missing %s PBM range.\n",
1641 pbm->name,
1642 ((!saw_cfg ?
1643 "CFG" :
1644 (!saw_io ?
1645 "IO" : "MEM"))));
1646 prom_halt();
1647 }
1648
1649 printk("%s: PCI CFG[%lx] IO[%lx] MEM[%lx]\n",
1650 pbm->name,
1651 pbm->config_space,
1652 pbm->io_space.start,
1653 pbm->mem_space.start);
1654}
1655
/* Register the PBM's IO and MEM windows with the global resource
 * trees and carve out the legacy (VGA/ISA) regions.
 *
 * NOTE(review): the request_resource() return values are ignored
 * here -- presumably conflicts cannot occur for freshly-probed
 * host bridge windows, but that is not checked.
 */
1656static void __init pbm_register_toplevel_resources(struct pci_controller_info *p,
1657 struct pci_pbm_info *pbm)
1658{
1659 pbm->io_space.name = pbm->mem_space.name = pbm->name;
1660
1661 request_resource(&ioport_resource, &pbm->io_space);
1662 request_resource(&iomem_resource, &pbm->mem_space);
1663 pci_register_legacy_regions(&pbm->io_space,
1664 &pbm->mem_space);
1665}
1666
1667#define SCHIZO_STRBUF_CONTROL (0x02800UL)
1668#define SCHIZO_STRBUF_FLUSH (0x02808UL)
1669#define SCHIZO_STRBUF_FSYNC (0x02810UL)
1670#define SCHIZO_STRBUF_CTXFLUSH (0x02818UL)
1671#define SCHIZO_STRBUF_CTXMATCH (0x10000UL)
1672
/* Initialize the PBM's streaming buffer state and enable the
 * streaming cache.  Tomatillo has no streaming cache, so this is
 * a no-op there.
 */
1673static void schizo_pbm_strbuf_init(struct pci_pbm_info *pbm)
1674{
1675 unsigned long base = pbm->pbm_regs;
1676 u64 control;
1677
1678 if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO) {
1679 /* TOMATILLO lacks streaming cache. */
1680 return;
1681 }
1682
1683 /* SCHIZO has context flushing. */
1684 pbm->stc.strbuf_control = base + SCHIZO_STRBUF_CONTROL;
1685 pbm->stc.strbuf_pflush = base + SCHIZO_STRBUF_FLUSH;
1686 pbm->stc.strbuf_fsync = base + SCHIZO_STRBUF_FSYNC;
1687 pbm->stc.strbuf_ctxflush = base + SCHIZO_STRBUF_CTXFLUSH;
1688 pbm->stc.strbuf_ctxmatch_base = base + SCHIZO_STRBUF_CTXMATCH;
1689
 /* The flush-flag word must be 64-byte aligned; round up within
  * the embedded buffer and record its physical address for the
  * hardware to write to.
  */
1690 pbm->stc.strbuf_flushflag = (volatile unsigned long *)
1691 ((((unsigned long)&pbm->stc.__flushflag_buf[0])
1692 + 63UL)
1693 & ~63UL);
1694 pbm->stc.strbuf_flushflag_pa = (unsigned long)
1695 __pa(pbm->stc.strbuf_flushflag);
1696
1697 /* Turn off LRU locking and diag mode, enable the
1698 * streaming buffer and leave the rerun-disable
1699 * setting however OBP set it.
1700 */
1701 control = schizo_read(pbm->stc.strbuf_control);
1702 control &= ~(SCHIZO_STRBUF_CTRL_LPTR |
1703 SCHIZO_STRBUF_CTRL_LENAB |
1704 SCHIZO_STRBUF_CTRL_DENAB);
1705 control |= SCHIZO_STRBUF_CTRL_ENAB;
1706 schizo_write(pbm->stc.strbuf_control, control);
1707
1708 pbm->stc.strbuf_enabled = 1;
1709}
1710
1711#define SCHIZO_IOMMU_CONTROL (0x00200UL)
1712#define SCHIZO_IOMMU_TSBBASE (0x00208UL)
1713#define SCHIZO_IOMMU_FLUSH (0x00210UL)
1714#define SCHIZO_IOMMU_CTXFLUSH (0x00218UL)
1715
1716static void schizo_pbm_iommu_init(struct pci_pbm_info *pbm)
1717{
1718 struct pci_iommu *iommu = pbm->iommu;
1719 unsigned long tsbbase, i, tagbase, database, order;
1720 u32 vdma[2], dma_mask;
1721 u64 control;
1722 int err, tsbsize;
1723
1724 err = prom_getproperty(pbm->prom_node, "virtual-dma",
1725 (char *)&vdma[0], sizeof(vdma));
1726 if (err == 0 || err == -1) {
1727 /* No property, use default values. */
1728 vdma[0] = 0xc0000000;
1729 vdma[1] = 0x40000000;
1730 }
1731
1732 dma_mask = vdma[0];
1733 switch (vdma[1]) {
1734 case 0x20000000:
1735 dma_mask |= 0x1fffffff;
1736 tsbsize = 64;
1737 break;
1738
1739 case 0x40000000:
1740 dma_mask |= 0x3fffffff;
1741 tsbsize = 128;
1742 break;
1743
1744 case 0x80000000:
1745 dma_mask |= 0x7fffffff;
1746 tsbsize = 128;
1747 break;
1748
1749 default:
1750 prom_printf("SCHIZO: strange virtual-dma size.\n");
1751 prom_halt();
1752 };
1753
1754 /* Setup initial software IOMMU state. */
1755 spin_lock_init(&iommu->lock);
1756 iommu->iommu_cur_ctx = 0;
1757
1758 /* Register addresses, SCHIZO has iommu ctx flushing. */
1759 iommu->iommu_control = pbm->pbm_regs + SCHIZO_IOMMU_CONTROL;
1760 iommu->iommu_tsbbase = pbm->pbm_regs + SCHIZO_IOMMU_TSBBASE;
1761 iommu->iommu_flush = pbm->pbm_regs + SCHIZO_IOMMU_FLUSH;
1762 iommu->iommu_ctxflush = pbm->pbm_regs + SCHIZO_IOMMU_CTXFLUSH;
1763
1764 /* We use the main control/status register of SCHIZO as the write
1765 * completion register.
1766 */
1767 iommu->write_complete_reg = pbm->controller_regs + 0x10000UL;
1768
1769 /*
1770 * Invalidate TLB Entries.
1771 */
1772 control = schizo_read(iommu->iommu_control);
1773 control |= SCHIZO_IOMMU_CTRL_DENAB;
1774 schizo_write(iommu->iommu_control, control);
1775
1776 tagbase = SCHIZO_IOMMU_TAG, database = SCHIZO_IOMMU_DATA;
1777
1778 for(i = 0; i < 16; i++) {
1779 schizo_write(pbm->pbm_regs + tagbase + (i * 8UL), 0);
1780 schizo_write(pbm->pbm_regs + database + (i * 8UL), 0);
1781 }
1782
1783 /* Leave diag mode enabled for full-flushing done
1784 * in pci_iommu.c
1785 */
1786
1787 iommu->dummy_page = __get_free_pages(GFP_KERNEL, 0);
1788 if (!iommu->dummy_page) {
1789 prom_printf("PSYCHO_IOMMU: Error, gfp(dummy_page) failed.\n");
1790 prom_halt();
1791 }
1792 memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
1793 iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);
1794
1795 /* Using assumed page size 8K with 128K entries we need 1MB iommu page
1796 * table (128K ioptes * 8 bytes per iopte). This is
1797 * page order 7 on UltraSparc.
1798 */
1799 order = get_order(tsbsize * 8 * 1024);
1800 tsbbase = __get_free_pages(GFP_KERNEL, order);
1801 if (!tsbbase) {
1802 prom_printf("%s: Error, gfp(tsb) failed.\n", pbm->name);
1803 prom_halt();
1804 }
1805
1806 iommu->page_table = (iopte_t *)tsbbase;
1807 iommu->page_table_map_base = vdma[0];
1808 iommu->dma_addr_mask = dma_mask;
1809 pci_iommu_table_init(iommu, PAGE_SIZE << order);
1810
1811 switch (tsbsize) {
1812 case 64:
1813 iommu->page_table_sz_bits = 16;
1814 break;
1815
1816 case 128:
1817 iommu->page_table_sz_bits = 17;
1818 break;
1819
1820 default:
1821 prom_printf("iommu_init: Illegal TSB size %d\n", tsbsize);
1822 prom_halt();
1823 break;
1824 };
1825
1826 /* We start with no consistent mappings. */
1827 iommu->lowest_consistent_map =
1828 1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS);
1829
1830 for (i = 0; i < PBM_NCLUSTERS; i++) {
1831 iommu->alloc_info[i].flush = 0;
1832 iommu->alloc_info[i].next = 0;
1833 }
1834
1835 schizo_write(iommu->iommu_tsbbase, __pa(tsbbase));
1836
1837 control = schizo_read(iommu->iommu_control);
1838 control &= ~(SCHIZO_IOMMU_CTRL_TSBSZ | SCHIZO_IOMMU_CTRL_TBWSZ);
1839 switch (tsbsize) {
1840 case 64:
1841 control |= SCHIZO_IOMMU_TSBSZ_64K;
1842 break;
1843 case 128:
1844 control |= SCHIZO_IOMMU_TSBSZ_128K;
1845 break;
1846 };
1847
1848 control |= SCHIZO_IOMMU_CTRL_ENAB;
1849 schizo_write(iommu->iommu_control, control);
1850}
1851
1852#define SCHIZO_PCI_IRQ_RETRY (0x1a00UL)
1853#define SCHIZO_IRQ_RETRY_INF 0xffUL
1854
1855#define SCHIZO_PCI_DIAG (0x2020UL)
1856#define SCHIZO_PCIDIAG_D_BADECC (1UL << 10UL) /* Disable BAD ECC errors (Schizo) */
1857#define SCHIZO_PCIDIAG_D_BYPASS (1UL << 9UL) /* Disable MMU bypass mode (Schizo/Tomatillo) */
1858#define SCHIZO_PCIDIAG_D_TTO (1UL << 8UL) /* Disable TTO errors (Schizo/Tomatillo) */
1859#define SCHIZO_PCIDIAG_D_RTRYARB (1UL << 7UL) /* Disable retry arbitration (Schizo) */
1860#define SCHIZO_PCIDIAG_D_RETRY (1UL << 6UL) /* Disable retry limit (Schizo/Tomatillo) */
1861#define SCHIZO_PCIDIAG_D_INTSYNC (1UL << 5UL) /* Disable interrupt/DMA synch (Schizo/Tomatillo) */
1862#define SCHIZO_PCIDIAG_I_DMA_PARITY (1UL << 3UL) /* Invert DMA parity (Schizo/Tomatillo) */
1863#define SCHIZO_PCIDIAG_I_PIOD_PARITY (1UL << 2UL) /* Invert PIO data parity (Schizo/Tomatillo) */
1864#define SCHIZO_PCIDIAG_I_PIOA_PARITY (1UL << 1UL) /* Invert PIO address parity (Schizo/Tomatillo) */
1865
1866#define TOMATILLO_PCI_IOC_CSR (0x2248UL)
1867#define TOMATILLO_IOC_PART_WPENAB 0x0000000000080000UL
1868#define TOMATILLO_IOC_RDMULT_PENAB 0x0000000000040000UL
1869#define TOMATILLO_IOC_RDONE_PENAB 0x0000000000020000UL
1870#define TOMATILLO_IOC_RDLINE_PENAB 0x0000000000010000UL
1871#define TOMATILLO_IOC_RDMULT_PLEN 0x000000000000c000UL
1872#define TOMATILLO_IOC_RDMULT_PLEN_SHIFT 14UL
1873#define TOMATILLO_IOC_RDONE_PLEN 0x0000000000003000UL
1874#define TOMATILLO_IOC_RDONE_PLEN_SHIFT 12UL
1875#define TOMATILLO_IOC_RDLINE_PLEN 0x0000000000000c00UL
1876#define TOMATILLO_IOC_RDLINE_PLEN_SHIFT 10UL
1877#define TOMATILLO_IOC_PREF_OFF 0x00000000000003f8UL
1878#define TOMATILLO_IOC_PREF_OFF_SHIFT 3UL
1879#define TOMATILLO_IOC_RDMULT_CPENAB 0x0000000000000004UL
1880#define TOMATILLO_IOC_RDONE_CPENAB 0x0000000000000002UL
1881#define TOMATILLO_IOC_RDLINE_CPENAB 0x0000000000000001UL
1882
1883#define TOMATILLO_PCI_IOC_TDIAG (0x2250UL)
1884#define TOMATILLO_PCI_IOC_DDIAG (0x2290UL)
1885
/* Chip-level PBM setup done before bus scanning: IRQ retry limit,
 * arbiter enables, PCI timeouts, bus parking, prefetch enables and
 * various diagnostic-register workarounds, with several cases
 * keyed on chip type/version.
 */
1886static void __init schizo_pbm_hw_init(struct pci_pbm_info *pbm)
1887{
1888 u64 tmp;
1889
1890 /* Set IRQ retry to infinity. */
1891 schizo_write(pbm->pbm_regs + SCHIZO_PCI_IRQ_RETRY,
1892 SCHIZO_IRQ_RETRY_INF);
1893
1894 /* Enable arbiter for all PCI slots. Also, disable PCI interval
1895 * timer so that DTO (Discard TimeOuts) are not reported because
1896 * some Schizo revisions report them erroneously.
1897 */
1898 tmp = schizo_read(pbm->pbm_regs + SCHIZO_PCI_CTRL);
 /* NOTE(review): Schizo+ 5.1 only gets the low four arbiter
  * enable bits; presumably the upper slots misbehave on that
  * revision -- no documentation visible here to confirm.
  */
1899 if (pbm->chip_type == PBM_CHIP_TYPE_SCHIZO_PLUS &&
1900 pbm->chip_version == 0x5 &&
1901 pbm->chip_revision == 0x1)
1902 tmp |= 0x0f;
1903 else
1904 tmp |= 0xff;
1905
1906 tmp &= ~SCHIZO_PCICTRL_PTO;
1907 if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO &&
1908 pbm->chip_version >= 0x2)
1909 tmp |= 0x3UL << SCHIZO_PCICTRL_PTO_SHIFT;
1910 else
1911 tmp |= 0x1UL << SCHIZO_PCICTRL_PTO_SHIFT;
1912
1913 if (!prom_getbool(pbm->prom_node, "no-bus-parking"))
1914 tmp |= SCHIZO_PCICTRL_PARK;
1915
 /* NOTE(review): bit 61 is set only on early (<= rev 1)
  * Tomatillo and cleared otherwise; its meaning is not named
  * in this file -- confirm against chip documentation.
  */
1916 if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO &&
1917 pbm->chip_version <= 0x1)
1918 tmp |= (1UL << 61);
1919 else
1920 tmp &= ~(1UL << 61);
1921
1922 if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO)
1923 tmp |= (SCHIZO_PCICTRL_MRM_PREF |
1924 SCHIZO_PCICTRL_RDO_PREF |
1925 SCHIZO_PCICTRL_RDL_PREF);
1926
1927 schizo_write(pbm->pbm_regs + SCHIZO_PCI_CTRL, tmp);
1928
 /* Re-enable retry arbitration, retry limit and interrupt/DMA
  * synchronization in the diagnostic register.
  */
1929 tmp = schizo_read(pbm->pbm_regs + SCHIZO_PCI_DIAG);
1930 tmp &= ~(SCHIZO_PCIDIAG_D_RTRYARB |
1931 SCHIZO_PCIDIAG_D_RETRY |
1932 SCHIZO_PCIDIAG_D_INTSYNC);
1933 schizo_write(pbm->pbm_regs + SCHIZO_PCI_DIAG, tmp);
1934
1935 if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO) {
1936 /* Clear prefetch lengths to workaround a bug in
1937 * Jalapeno...
1938 */
1939 tmp = (TOMATILLO_IOC_PART_WPENAB |
1940 (1 << TOMATILLO_IOC_PREF_OFF_SHIFT) |
1941 TOMATILLO_IOC_RDMULT_CPENAB |
1942 TOMATILLO_IOC_RDONE_CPENAB |
1943 TOMATILLO_IOC_RDLINE_CPENAB);
1944
1945 schizo_write(pbm->pbm_regs + TOMATILLO_PCI_IOC_CSR,
1946 tmp);
1947 }
1948}
1949
/* Initialize one PBM from its OBP node: read required properties
 * ("reg", "ranges", "interrupt-map"/"-mask", "ino-bitmap",
 * "bus-range"), pick the PBM A/B slot from the register address,
 * program the chip, set up address spaces and finally the IOMMU
 * and streaming buffer.  Missing required properties are fatal.
 */
1950static void __init schizo_pbm_init(struct pci_controller_info *p,
1951 int prom_node, u32 portid,
1952 int chip_type)
1953{
1954 struct linux_prom64_registers pr_regs[4];
1955 unsigned int busrange[2];
1956 struct pci_pbm_info *pbm;
1957 const char *chipset_name;
1958 u32 ino_bitmap[2];
1959 int is_pbm_a;
1960 int err;
1961
1962 switch (chip_type) {
1963 case PBM_CHIP_TYPE_TOMATILLO:
1964 chipset_name = "TOMATILLO";
1965 break;
1966
1967 case PBM_CHIP_TYPE_SCHIZO_PLUS:
1968 chipset_name = "SCHIZO+";
1969 break;
1970
1971 case PBM_CHIP_TYPE_SCHIZO:
1972 default:
1973 chipset_name = "SCHIZO";
1974 break;
1975 };
1976
1977 /* For SCHIZO, three OBP regs:
1978 * 1) PBM controller regs
1979 * 2) Schizo front-end controller regs (same for both PBMs)
1980 * 3) PBM PCI config space
1981 *
1982 * For TOMATILLO, four OBP regs:
1983 * 1) PBM controller regs
1984 * 2) Tomatillo front-end controller regs
1985 * 3) PBM PCI config space
1986 * 4) Ichip regs
1987 */
1988 err = prom_getproperty(prom_node, "reg",
1989 (char *)&pr_regs[0],
1990 sizeof(pr_regs));
1991 if (err == 0 || err == -1) {
1992 prom_printf("%s: Fatal error, no reg property.\n",
1993 chipset_name);
1994 prom_halt();
1995 }
1996
 /* Which PBM this is can be read off the controller register
  * address (PBM A lives at the 0x00600000 offset).
  */
1997 is_pbm_a = ((pr_regs[0].phys_addr & 0x00700000) == 0x00600000);
1998
1999 if (is_pbm_a)
2000 pbm = &p->pbm_A;
2001 else
2002 pbm = &p->pbm_B;
2003
2004 pbm->portid = portid;
2005 pbm->parent = p;
2006 pbm->prom_node = prom_node;
2007 pbm->pci_first_slot = 1;
2008
2009 pbm->chip_type = chip_type;
2010 pbm->chip_version =
2011 prom_getintdefault(prom_node, "version#", 0);
2012 pbm->chip_revision =
2013 prom_getintdefault(prom_node, "module-revision#", 0);
2014
 /* Front-end registers sit 0x10000 below the second reg entry. */
2015 pbm->pbm_regs = pr_regs[0].phys_addr;
2016 pbm->controller_regs = pr_regs[1].phys_addr - 0x10000UL;
2017
2018 sprintf(pbm->name,
2019 (chip_type == PBM_CHIP_TYPE_TOMATILLO ?
2020 "TOMATILLO%d PBM%c" :
2021 "SCHIZO%d PBM%c"),
2022 p->index,
2023 (pbm == &p->pbm_A ? 'A' : 'B'));
2024
2025 printk("%s: ver[%x:%x], portid %x, "
2026 "cregs[%lx] pregs[%lx]\n",
2027 pbm->name,
2028 pbm->chip_version, pbm->chip_revision,
2029 pbm->portid,
2030 pbm->controller_regs,
2031 pbm->pbm_regs);
2032
2033 schizo_pbm_hw_init(pbm);
2034
2035 prom_getstring(prom_node, "name",
2036 pbm->prom_name,
2037 sizeof(pbm->prom_name));
2038
2039 err = prom_getproperty(prom_node, "ranges",
2040 (char *) pbm->pbm_ranges,
2041 sizeof(pbm->pbm_ranges));
2042 if (err == 0 || err == -1) {
2043 prom_printf("%s: Fatal error, no ranges property.\n",
2044 pbm->name);
2045 prom_halt();
2046 }
2047
 /* prom_getproperty returns the byte length read. */
2048 pbm->num_pbm_ranges =
2049 (err / sizeof(struct linux_prom_pci_ranges));
2050
2051 schizo_determine_mem_io_space(pbm);
2052 pbm_register_toplevel_resources(p, pbm);
2053
 /* interrupt-map is optional; absence just means no map entries. */
2054 err = prom_getproperty(prom_node, "interrupt-map",
2055 (char *)pbm->pbm_intmap,
2056 sizeof(pbm->pbm_intmap));
2057 if (err != -1) {
2058 pbm->num_pbm_intmap = (err / sizeof(struct linux_prom_pci_intmap));
2059 err = prom_getproperty(prom_node, "interrupt-map-mask",
2060 (char *)&pbm->pbm_intmask,
2061 sizeof(pbm->pbm_intmask));
2062 if (err == -1) {
2063 prom_printf("%s: Fatal error, no "
2064 "interrupt-map-mask.\n", pbm->name);
2065 prom_halt();
2066 }
2067 } else {
2068 pbm->num_pbm_intmap = 0;
2069 memset(&pbm->pbm_intmask, 0, sizeof(pbm->pbm_intmask));
2070 }
2071
2072 err = prom_getproperty(prom_node, "ino-bitmap",
2073 (char *) &ino_bitmap[0],
2074 sizeof(ino_bitmap));
2075 if (err == 0 || err == -1) {
2076 prom_printf("%s: Fatal error, no ino-bitmap.\n", pbm->name);
2077 prom_halt();
2078 }
2079 pbm->ino_bitmap = (((u64)ino_bitmap[1] << 32UL) |
2080 ((u64)ino_bitmap[0] << 0UL));
2081
2082 err = prom_getproperty(prom_node, "bus-range",
2083 (char *)&busrange[0],
2084 sizeof(busrange));
2085 if (err == 0 || err == -1) {
2086 prom_printf("%s: Fatal error, no bus-range.\n", pbm->name);
2087 prom_halt();
2088 }
2089 pbm->pci_first_busno = busrange[0];
2090 pbm->pci_last_busno = busrange[1];
2091
2092 schizo_pbm_iommu_init(pbm);
2093 schizo_pbm_strbuf_init(pbm);
2094}
2095
2096static inline int portid_compare(u32 x, u32 y, int chip_type)
2097{
2098 if (chip_type == PBM_CHIP_TYPE_TOMATILLO) {
2099 if (x == (y ^ 1))
2100 return 1;
2101 return 0;
2102 }
2103 return (x == y);
2104}
2105
2106static void __init __schizo_init(int node, char *model_name, int chip_type)
2107{
2108 struct pci_controller_info *p;
2109 struct pci_iommu *iommu;
2110 int is_pbm_a;
2111 u32 portid;
2112
2113 portid = prom_getintdefault(node, "portid", 0xff);
2114
2115 for(p = pci_controller_root; p; p = p->next) {
2116 struct pci_pbm_info *pbm;
2117
2118 if (p->pbm_A.prom_node && p->pbm_B.prom_node)
2119 continue;
2120
2121 pbm = (p->pbm_A.prom_node ?
2122 &p->pbm_A :
2123 &p->pbm_B);
2124
2125 if (portid_compare(pbm->portid, portid, chip_type)) {
2126 is_pbm_a = (p->pbm_A.prom_node == 0);
2127 schizo_pbm_init(p, node, portid, chip_type);
2128 return;
2129 }
2130 }
2131
2132 p = kmalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
2133 if (!p) {
2134 prom_printf("SCHIZO: Fatal memory allocation error.\n");
2135 prom_halt();
2136 }
2137 memset(p, 0, sizeof(*p));
2138
2139 iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
2140 if (!iommu) {
2141 prom_printf("SCHIZO: Fatal memory allocation error.\n");
2142 prom_halt();
2143 }
2144 memset(iommu, 0, sizeof(*iommu));
2145 p->pbm_A.iommu = iommu;
2146
2147 iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
2148 if (!iommu) {
2149 prom_printf("SCHIZO: Fatal memory allocation error.\n");
2150 prom_halt();
2151 }
2152 memset(iommu, 0, sizeof(*iommu));
2153 p->pbm_B.iommu = iommu;
2154
2155 p->next = pci_controller_root;
2156 pci_controller_root = p;
2157
2158 p->index = pci_num_controllers++;
2159 p->pbms_same_domain = 0;
2160 p->scan_bus = (chip_type == PBM_CHIP_TYPE_TOMATILLO ?
2161 tomatillo_scan_bus :
2162 schizo_scan_bus);
2163 p->irq_build = schizo_irq_build;
2164 p->base_address_update = schizo_base_address_update;
2165 p->resource_adjust = schizo_resource_adjust;
2166 p->pci_ops = &schizo_ops;
2167
2168 /* Like PSYCHO we have a 2GB aligned area for memory space. */
2169 pci_memspace_mask = 0x7fffffffUL;
2170
2171 schizo_pbm_init(p, node, portid, chip_type);
2172}
2173
/* Probe entry point for a SCHIZO OBP node. */
2174void __init schizo_init(int node, char *model_name)
2175{
2176 __schizo_init(node, model_name, PBM_CHIP_TYPE_SCHIZO);
2177}
2178
/* Probe entry point for a SCHIZO+ OBP node. */
2179void __init schizo_plus_init(int node, char *model_name)
2180{
2181 __schizo_init(node, model_name, PBM_CHIP_TYPE_SCHIZO_PLUS);
2182}
2183
/* Probe entry point for a TOMATILLO OBP node. */
2184void __init tomatillo_init(int node, char *model_name)
2185{
2186 __schizo_init(node, model_name, PBM_CHIP_TYPE_TOMATILLO);
2187}
diff --git a/arch/sparc64/kernel/power.c b/arch/sparc64/kernel/power.c
new file mode 100644
index 000000000000..52f14e399b1c
--- /dev/null
+++ b/arch/sparc64/kernel/power.c
@@ -0,0 +1,150 @@
1/* $Id: power.c,v 1.10 2001/12/11 01:57:16 davem Exp $
2 * power.c: Power management driver.
3 *
4 * Copyright (C) 1999 David S. Miller (davem@redhat.com)
5 */
6
7#include <linux/config.h>
8#include <linux/kernel.h>
9#include <linux/module.h>
10#include <linux/init.h>
11#include <linux/sched.h>
12#include <linux/signal.h>
13#include <linux/delay.h>
14#include <linux/interrupt.h>
15
16#include <asm/system.h>
17#include <asm/ebus.h>
18#include <asm/auxio.h>
19
20#define __KERNEL_SYSCALLS__
21#include <linux/unistd.h>
22
23/*
24 * sysctl - toggle power-off restriction for serial console
25 * systems in machine_power_off()
26 */
27int scons_pwroff = 1;
28
29#ifdef CONFIG_PCI
30static void __iomem *power_reg;
31
32static DECLARE_WAIT_QUEUE_HEAD(powerd_wait);
33static int button_pressed;
34
/* Power-button interrupt handler: latch the press (once) and wake powerd.
 * button_pressed is reset by powerd after it has acted on the event.
 */
35static irqreturn_t power_handler(int irq, void *dev_id, struct pt_regs *regs)
36{
37	if (button_pressed == 0) {
38		button_pressed = 1;
39		wake_up(&powerd_wait);
40	}
41
42	/* FIXME: Check registers for status... */
43	return IRQ_HANDLED;
44}
45#endif /* CONFIG_PCI */
46
47extern void machine_halt(void);
48extern void machine_alt_power_off(void);
49static void (*poweroff_method)(void) = machine_alt_power_off;
50
/* Power the machine down.  Skipped on serial-console systems unless the
 * scons_pwroff sysctl allows it; prefers the AUXIO power register when one
 * was mapped, then the PROM-based poweroff_method, and finally falls back
 * to machine_halt() if nothing managed to cut power.
 */
51void machine_power_off(void)
52{
53	if (!serial_console || scons_pwroff) {
54#ifdef CONFIG_PCI
55		if (power_reg) {
56			/* Both register bits seem to have the
57			 * same effect, so until I figure out
58			 * what the difference is...
59			 */
60			writel(AUXIO_PCIO_CPWR_OFF | AUXIO_PCIO_SPWR_OFF, power_reg);
61		} else
62#endif /* CONFIG_PCI */
63		if (poweroff_method != NULL) {
64			poweroff_method();
65			/* not reached */
66		}
67	}
68	machine_halt();
69}
70
71EXPORT_SYMBOL(machine_power_off);
72
73#ifdef CONFIG_PCI
/* Kernel thread that sleeps until the power button is pressed, then runs
 * /sbin/shutdown -h now in userspace.  If the execve fails it re-queues
 * itself and waits for the next press instead of exiting.
 */
74static int powerd(void *__unused)
75{
76	static char *envp[] = { "HOME=/", "TERM=linux", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };
77	char *argv[] = { "/sbin/shutdown", "-h", "now", NULL };
78	DECLARE_WAITQUEUE(wait, current);
79
80	daemonize("powerd");
81
82	add_wait_queue(&powerd_wait, &wait);
83again:
84	for (;;) {
		/* Interruptible sleep until power_handler() sets button_pressed. */
85		set_task_state(current, TASK_INTERRUPTIBLE);
86		if (button_pressed)
87			break;
88		flush_signals(current);
89		schedule();
90	}
91	__set_current_state(TASK_RUNNING);
92	remove_wait_queue(&powerd_wait, &wait);
93
94	/* Ok, down we go... */
95	button_pressed = 0;
96	if (execve("/sbin/shutdown", argv, envp) < 0) {
97		printk("powerd: shutdown execution failed\n");
98		add_wait_queue(&powerd_wait, &wait);
99		goto again;
100	}
101	return 0;
102}
103
/* True iff this EBUS power device both has a wired IRQ and advertises a
 * "button" property in its PROM node.
 */
104static int __init has_button_interrupt(struct linux_ebus_device *edev)
105{
106	if (edev->irqs[0] == PCI_IRQ_NONE)
107		return 0;
108	if (!prom_node_has_property(edev->prom_node, "button"))
109		return 0;
110
111	return 1;
112}
113
/* Locate the EBUS "power" device, map its control register, and (when a
 * button interrupt exists) start the powerd thread plus its IRQ handler.
 * Guarded so a second call is a no-op.
 */
114void __init power_init(void)
115{
116	struct linux_ebus *ebus;
117	struct linux_ebus_device *edev;
118	static int invoked;
119
120	if (invoked)
121		return;
122	invoked = 1;
123
124	for_each_ebus(ebus) {
125		for_each_ebusdev(edev, ebus) {
126			if (!strcmp(edev->prom_name, "power"))
127				goto found;
128		}
129	}
130	return;
131
132found:
	/* NOTE(review): ioremap result is not checked here — TODO confirm
	 * failure cannot happen this early or add a check upstream.
	 */
133	power_reg = ioremap(edev->resource[0].start, 0x4);
134	printk("power: Control reg at %p ... ", power_reg);
135	poweroff_method = machine_halt;	/* able to use the standard halt */
136	if (has_button_interrupt(edev)) {
137		if (kernel_thread(powerd, NULL, CLONE_FS) < 0) {
138			printk("Failed to start power daemon.\n");
139			return;
140		}
141		printk("powerd running.\n");
142
143		if (request_irq(edev->irqs[0],
144				power_handler, SA_SHIRQ, "power", NULL) < 0)
145			printk("power: Error, cannot register IRQ handler.\n");
146	} else {
147		printk("not using powerd.\n");
148	}
149}
150#endif /* CONFIG_PCI */
150#endif /* CONFIG_PCI */
diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c
new file mode 100644
index 000000000000..26d3ec41da1c
--- /dev/null
+++ b/arch/sparc64/kernel/process.c
@@ -0,0 +1,869 @@
1/* $Id: process.c,v 1.131 2002/02/09 19:49:30 davem Exp $
2 * arch/sparc64/kernel/process.c
3 *
4 * Copyright (C) 1995, 1996 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
6 * Copyright (C) 1997, 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
7 */
8
9/*
10 * This file handles the architecture-dependent parts of process handling..
11 */
12
13#include <stdarg.h>
14
15#include <linux/config.h>
16#include <linux/errno.h>
17#include <linux/module.h>
18#include <linux/sched.h>
19#include <linux/kernel.h>
20#include <linux/kallsyms.h>
21#include <linux/mm.h>
22#include <linux/smp.h>
23#include <linux/smp_lock.h>
24#include <linux/stddef.h>
25#include <linux/ptrace.h>
26#include <linux/slab.h>
27#include <linux/user.h>
28#include <linux/a.out.h>
29#include <linux/config.h>
30#include <linux/reboot.h>
31#include <linux/delay.h>
32#include <linux/compat.h>
33#include <linux/init.h>
34
35#include <asm/oplib.h>
36#include <asm/uaccess.h>
37#include <asm/system.h>
38#include <asm/page.h>
39#include <asm/pgalloc.h>
40#include <asm/pgtable.h>
41#include <asm/processor.h>
42#include <asm/pstate.h>
43#include <asm/elf.h>
44#include <asm/fpumacro.h>
45#include <asm/head.h>
46#include <asm/cpudata.h>
47#include <asm/unistd.h>
48
49/* #define VERBOSE_SHOWREGS */
50
51/*
52 * Nothing special yet...
53 */
/* Default idle callback: intentionally empty on sparc64. */
54void default_idle(void)
55{
56}
57
58#ifndef CONFIG_SMP
59
60/*
61 * the idle loop on a Sparc... ;)
62 */
/* Uniprocessor idle loop: busy-wait (barrier only) until a reschedule is
 * needed, then schedule and trim the page-table cache.  Only PID 0 (the
 * swapper) is allowed in here.
 */
63void cpu_idle(void)
64{
65	if (current->pid != 0)
66		return;
67
68	/* endless idle loop with no priority at all */
69	for (;;) {
70		/* If current->work.need_resched is zero we should really
71		 * setup for a system wakup event and execute a shutdown
72		 * instruction.
73		 *
74		 * But this requires writing back the contents of the
75		 * L2 cache etc. so implement this later. -DaveM
76		 */
77		while (!need_resched())
78			barrier();
79
80		schedule();
81		check_pgt_cache();
82	}
83	return;
84}
85
86#else
87
88/*
89 * the idle loop on a UltraMultiPenguin...
90 */
91#define idle_me_harder() (cpu_data(smp_processor_id()).idle_volume += 1)
92#define unidle_me() (cpu_data(smp_processor_id()).idle_volume = 0)
/* SMP idle loop: advertise polling via TIF_POLLING_NRFLAG, bump the
 * per-cpu idle_volume each pass (idle_me_harder) and reset it on exit
 * to scheduling (unidle_me), so IRQ-balancing code on other cpus can
 * see how idle we are.
 */
93void cpu_idle(void)
94{
95	set_thread_flag(TIF_POLLING_NRFLAG);
96	while(1) {
97		if (need_resched()) {
98			unidle_me();
99			clear_thread_flag(TIF_POLLING_NRFLAG);
100			schedule();
101			set_thread_flag(TIF_POLLING_NRFLAG);
102			check_pgt_cache();
103		}
104		idle_me_harder();
105
106		/* The store ordering is so that IRQ handlers on
107		 * other cpus see our increasing idleness for the buddy
108		 * redistribution algorithm.  -DaveM
109		 */
110		membar("#StoreStore | #StoreLoad");
111	}
112}
113
114#endif
115
116extern char reboot_command [];
117
118extern void (*prom_palette)(int);
119extern void (*prom_keyboard)(void);
120
/* Halt into the PROM, restoring the PROM palette/keyboard hooks first
 * when present.  prom_halt() does not return; the panic is a backstop.
 */
121void machine_halt(void)
122{
123	if (!serial_console && prom_palette)
124		prom_palette (1);
125	if (prom_keyboard)
126		prom_keyboard();
127	prom_halt();
128	panic("Halt failed!");
129}
130
131EXPORT_SYMBOL(machine_halt);
132
/* Power off via the PROM (prom_halt_power_off) after restoring the PROM
 * palette/keyboard hooks; used when no AUXIO power register is available.
 */
133void machine_alt_power_off(void)
134{
135	if (!serial_console && prom_palette)
136		prom_palette(1);
137	if (prom_keyboard)
138		prom_keyboard();
139	prom_halt_power_off();
140	panic("Power-off failed!");
141}
142
/* Reboot via the PROM.  Preference order for the boot string: the cmd
 * argument, then the global reboot_command (trimmed at its first
 * newline), then an empty string.  prom_reboot() does not return.
 */
143void machine_restart(char * cmd)
144{
145	char *p;
146
147	p = strchr (reboot_command, '\n');
148	if (p) *p = 0;
149	if (!serial_console && prom_palette)
150		prom_palette (1);
151	if (prom_keyboard)
152		prom_keyboard();
153	if (cmd)
154		prom_reboot(cmd);
155	if (*reboot_command)
156		prom_reboot(reboot_command);
157	prom_reboot("");
158	panic("Reboot failed!");
159}
160
161EXPORT_SYMBOL(machine_restart);
162
/* Dump the locals/ins of a 32-bit task's current register window.  The
 * window is flushed to the user stack first, then copied in from the
 * (compat) stack pointer in %o6/u_regs[14] under a temporary USER_DS.
 */
163static void show_regwindow32(struct pt_regs *regs)
164{
165	struct reg_window32 __user *rw;
166	struct reg_window32 r_w;
167	mm_segment_t old_fs;
168
169	__asm__ __volatile__ ("flushw");
170	rw = compat_ptr((unsigned)regs->u_regs[14]);
171	old_fs = get_fs();
172	set_fs (USER_DS);
173	if (copy_from_user (&r_w, rw, sizeof(r_w))) {
174		set_fs (old_fs);
175		return;
176	}
177
178	set_fs (old_fs);
179	printk("l0: %08x l1: %08x l2: %08x l3: %08x "
180	       "l4: %08x l5: %08x l6: %08x l7: %08x\n",
181	       r_w.locals[0], r_w.locals[1], r_w.locals[2], r_w.locals[3],
182	       r_w.locals[4], r_w.locals[5], r_w.locals[6], r_w.locals[7]);
183	printk("i0: %08x i1: %08x i2: %08x i3: %08x "
184	       "i4: %08x i5: %08x i6: %08x i7: %08x\n",
185	       r_w.ins[0], r_w.ins[1], r_w.ins[2], r_w.ins[3],
186	       r_w.ins[4], r_w.ins[5], r_w.ins[6], r_w.ins[7]);
187}
188
/* Dump the current register window.  64-bit user frames live at
 * %sp + STACK_BIAS and must be copied in from userspace; privileged
 * (kernel) frames are read directly; 32-bit tasks are delegated to
 * show_regwindow32().  For kernel frames the %i7 return address is
 * also symbolized.
 */
189static void show_regwindow(struct pt_regs *regs)
190{
191	struct reg_window __user *rw;
192	struct reg_window *rwk;
193	struct reg_window r_w;
194	mm_segment_t old_fs;
195
196	if ((regs->tstate & TSTATE_PRIV) || !(test_thread_flag(TIF_32BIT))) {
197		__asm__ __volatile__ ("flushw");
198		rw = (struct reg_window __user *)
199			(regs->u_regs[14] + STACK_BIAS);
200		rwk = (struct reg_window *)
201			(regs->u_regs[14] + STACK_BIAS);
202		if (!(regs->tstate & TSTATE_PRIV)) {
			/* User frame: fetch a private copy under USER_DS. */
203			old_fs = get_fs();
204			set_fs (USER_DS);
205			if (copy_from_user (&r_w, rw, sizeof(r_w))) {
206				set_fs (old_fs);
207				return;
208			}
209			rwk = &r_w;
210			set_fs (old_fs);
211		}
212	} else {
213		show_regwindow32(regs);
214		return;
215	}
216	printk("l0: %016lx l1: %016lx l2: %016lx l3: %016lx\n",
217	       rwk->locals[0], rwk->locals[1], rwk->locals[2], rwk->locals[3]);
218	printk("l4: %016lx l5: %016lx l6: %016lx l7: %016lx\n",
219	       rwk->locals[4], rwk->locals[5], rwk->locals[6], rwk->locals[7]);
220	printk("i0: %016lx i1: %016lx i2: %016lx i3: %016lx\n",
221	       rwk->ins[0], rwk->ins[1], rwk->ins[2], rwk->ins[3]);
222	printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
223	       rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
224	if (regs->tstate & TSTATE_PRIV)
225		print_symbol("I7: <%s>\n", rwk->ins[7]);
226}
227
/* Dump a 64-bit stack frame: window registers, fp/ret_pc, the six
 * extended args, then every word between this frame and its caller's
 * frame pointer.  Assumes sf->fp > sf (do/while underflows otherwise).
 */
228void show_stackframe(struct sparc_stackf *sf)
229{
230	unsigned long size;
231	unsigned long *stk;
232	int i;
233
234	printk("l0: %016lx l1: %016lx l2: %016lx l3: %016lx\n"
235	       "l4: %016lx l5: %016lx l6: %016lx l7: %016lx\n",
236	       sf->locals[0], sf->locals[1], sf->locals[2], sf->locals[3],
237	       sf->locals[4], sf->locals[5], sf->locals[6], sf->locals[7]);
238	printk("i0: %016lx i1: %016lx i2: %016lx i3: %016lx\n"
239	       "i4: %016lx i5: %016lx fp: %016lx ret_pc: %016lx\n",
240	       sf->ins[0], sf->ins[1], sf->ins[2], sf->ins[3],
241	       sf->ins[4], sf->ins[5], (unsigned long)sf->fp, sf->callers_pc);
242	printk("sp: %016lx x0: %016lx x1: %016lx x2: %016lx\n"
243	       "x3: %016lx x4: %016lx x5: %016lx xx: %016lx\n",
244	       (unsigned long)sf->structptr, sf->xargs[0], sf->xargs[1],
245	       sf->xargs[2], sf->xargs[3], sf->xargs[4], sf->xargs[5],
246	       sf->xxargs[0]);
247	size = ((unsigned long)sf->fp) - ((unsigned long)sf);
248	size -= STACKFRAME_SZ;
249	stk = (unsigned long *)((unsigned long)sf + STACKFRAME_SZ);
250	i = 0;
251	do {
252		printk("s%d: %016lx\n", i++, *stk++);
253	} while ((size -= sizeof(unsigned long)));
254}
255
/* 32-bit (compat) counterpart of show_stackframe(): same layout dump
 * using 32-bit registers and STACKFRAME32_SZ.
 */
256void show_stackframe32(struct sparc_stackf32 *sf)
257{
258	unsigned long size;
259	unsigned *stk;
260	int i;
261
262	printk("l0: %08x l1: %08x l2: %08x l3: %08x\n",
263	       sf->locals[0], sf->locals[1], sf->locals[2], sf->locals[3]);
264	printk("l4: %08x l5: %08x l6: %08x l7: %08x\n",
265	       sf->locals[4], sf->locals[5], sf->locals[6], sf->locals[7]);
266	printk("i0: %08x i1: %08x i2: %08x i3: %08x\n",
267	       sf->ins[0], sf->ins[1], sf->ins[2], sf->ins[3]);
268	printk("i4: %08x i5: %08x fp: %08x ret_pc: %08x\n",
269	       sf->ins[4], sf->ins[5], sf->fp, sf->callers_pc);
270	printk("sp: %08x x0: %08x x1: %08x x2: %08x\n"
271	       "x3: %08x x4: %08x x5: %08x xx: %08x\n",
272	       sf->structptr, sf->xargs[0], sf->xargs[1],
273	       sf->xargs[2], sf->xargs[3], sf->xargs[4], sf->xargs[5],
274	       sf->xxargs[0]);
275	size = ((unsigned long)sf->fp) - ((unsigned long)sf);
276	size -= STACKFRAME32_SZ;
277	stk = (unsigned *)((unsigned long)sf + STACKFRAME32_SZ);
278	i = 0;
279	do {
280		printk("s%d: %08x\n", i++, *stk++);
281	} while ((size -= sizeof(unsigned)));
282}
283
284#ifdef CONFIG_SMP
285static DEFINE_SPINLOCK(regdump_lock);
286#endif
287
/* Print TSTATE/TPC/TNPC/Y, the global and output registers, and the
 * current register window.  On SMP, interrupts are disabled via %pstate
 * and regdump_lock is taken so concurrent dumps (and xcall IPIs) cannot
 * interleave or livelock.
 */
288void __show_regs(struct pt_regs * regs)
289{
290#ifdef CONFIG_SMP
291	unsigned long flags;
292
293	/* Protect against xcall ipis which might lead to livelock on the lock */
294	__asm__ __volatile__("rdpr      %%pstate, %0\n\t"
295			     "wrpr      %0, %1, %%pstate"
296			     : "=r" (flags)
297			     : "i" (PSTATE_IE));
298	spin_lock(&regdump_lock);
299#endif
300	printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x    %s\n", regs->tstate,
301	       regs->tpc, regs->tnpc, regs->y, print_tainted());
302	print_symbol("TPC: <%s>\n", regs->tpc);
303	printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
304	       regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
305	       regs->u_regs[3]);
306	printk("g4: %016lx g5: %016lx g6: %016lx g7: %016lx\n",
307	       regs->u_regs[4], regs->u_regs[5], regs->u_regs[6],
308	       regs->u_regs[7]);
309	printk("o0: %016lx o1: %016lx o2: %016lx o3: %016lx\n",
310	       regs->u_regs[8], regs->u_regs[9], regs->u_regs[10],
311	       regs->u_regs[11]);
312	printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
313	       regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
314	       regs->u_regs[15]);
315	print_symbol("RPC: <%s>\n", regs->u_regs[15]);
316	show_regwindow(regs);
317#ifdef CONFIG_SMP
318	spin_unlock(&regdump_lock);
	/* Restore the saved %pstate (re-enables interrupts if they were on). */
319	__asm__ __volatile__("wrpr      %0, 0, %%pstate"
320			     : : "r" (flags));
321#endif
322}
323
324#ifdef VERBOSE_SHOWREGS
/* Debug aid (VERBOSE_SHOWREGS only): print the 9 instruction words
 * around a user pc, bracketing the faulting word with '<' '>'.  Bails
 * out on a misaligned pc; get_user errors are ignored.
 */
325static void idump_from_user (unsigned int *pc)
326{
327	int i;
328	int code;
329
330	if((((unsigned long) pc) & 3))
331		return;
332
333	pc -= 3;
334	for(i = -3; i < 6; i++) {
335		get_user(code, pc);
336		printk("%c%08x%c",i?' ':'<',code,i?' ':'>');
337		pc++;
338	}
339	printk("\n");
340}
341#endif
342
/* Public register dump: __show_regs() plus, on SMP, a cross-cpu report.
 * With VERBOSE_SHOWREGS it additionally walks back to a parent trap
 * frame on the kernel stack (tpc within the etrap region) and dumps it.
 */
343void show_regs(struct pt_regs *regs)
344{
345#ifdef VERBOSE_SHOWREGS
346	extern long etrap, etraptl1;
347#endif
348	__show_regs(regs);
349#ifdef CONFIG_SMP
350	{
351		extern void smp_report_regs(void);
352
353		smp_report_regs();
354	}
355#endif
356
357#ifdef VERBOSE_SHOWREGS
358	if (regs->tpc >= &etrap && regs->tpc < &etraptl1 &&
359	    regs->u_regs[14] >= (long)current - PAGE_SIZE &&
360	    regs->u_regs[14] < (long)current + 6 * PAGE_SIZE) {
361		printk ("*********parent**********\n");
362		__show_regs((struct pt_regs *)(regs->u_regs[14] + PTREGS_OFF));
363		idump_from_user(((struct pt_regs *)(regs->u_regs[14] + PTREGS_OFF))->tpc);
364		printk ("*********endpar**********\n");
365	}
366#endif
367}
368
/* Dump a 32-bit (compat) pt_regs: PSR/PC/NPC/Y plus globals and outs. */
369void show_regs32(struct pt_regs32 *regs)
370{
371	printk("PSR: %08x PC: %08x NPC: %08x Y: %08x    %s\n", regs->psr,
372	       regs->pc, regs->npc, regs->y, print_tainted());
373	printk("g0: %08x g1: %08x g2: %08x g3: %08x ",
374	       regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
375	       regs->u_regs[3]);
376	printk("g4: %08x g5: %08x g6: %08x g7: %08x\n",
377	       regs->u_regs[4], regs->u_regs[5], regs->u_regs[6],
378	       regs->u_regs[7]);
379	printk("o0: %08x o1: %08x o2: %08x o3: %08x ",
380	       regs->u_regs[8], regs->u_regs[9], regs->u_regs[10],
381	       regs->u_regs[11]);
382	printk("o4: %08x o5: %08x sp: %08x ret_pc: %08x\n",
383	       regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
384	       regs->u_regs[15]);
385}
386
/* Best-effort saved PC of a sleeping task: follow ksp (+STACK_BIAS) to
 * the saved frame pointer, then read the return address slot from the
 * caller's window.  Returns 0xdeadbeef if the stack looks bogus
 * (null/misaligned pointers).
 */
387unsigned long thread_saved_pc(struct task_struct *tsk)
388{
389	struct thread_info *ti = tsk->thread_info;
390	unsigned long ret = 0xdeadbeefUL;
391	
392	if (ti && ti->ksp) {
393		unsigned long *sp;
394		sp = (unsigned long *)(ti->ksp + STACK_BIAS);
395		if (((unsigned long)sp & (sizeof(long) - 1)) == 0UL &&
396		    sp[14]) {
397			unsigned long *fp;
398			fp = (unsigned long *)(sp[14] + STACK_BIAS);
399			if (((unsigned long)fp & (sizeof(long) - 1)) == 0UL)
400				ret = fp[15];
401		}
402	}
403	return ret;
404}
405
406/* Free current thread data structures etc.. */
407void exit_thread(void)
408{
409 struct thread_info *t = current_thread_info();
410
411 if (t->utraps) {
412 if (t->utraps[0] < 2)
413 kfree (t->utraps);
414 else
415 t->utraps[0]--;
416 }
417
418 if (test_and_clear_thread_flag(TIF_PERFCTR)) {
419 t->user_cntd0 = t->user_cntd1 = NULL;
420 t->pcr_reg = 0;
421 write_pcr(0);
422 }
423}
424
/* Reset thread state across exec: commit a pending 32/64-bit ABI switch,
 * reload the MMU's pgd cache (allocating the level-2 table a compat task
 * needs), drop saved windows, perf counters, FPU state, and the
 * new-signals flag, and force the address space limit back to USER_DS.
 */
425void flush_thread(void)
426{
427	struct thread_info *t = current_thread_info();
428
429	if (t->flags & _TIF_ABI_PENDING)
430		t->flags ^= (_TIF_ABI_PENDING | _TIF_32BIT);
431
432	if (t->task->mm) {
433		unsigned long pgd_cache = 0UL;
434		if (test_thread_flag(TIF_32BIT)) {
435			struct mm_struct *mm = t->task->mm;
436			pgd_t *pgd0 = &mm->pgd[0];
437			pud_t *pud0 = pud_offset(pgd0, 0);
438
			/* 32-bit tasks need pgd[0] populated so the pgd
			 * cache below has something to point at.
			 */
439			if (pud_none(*pud0)) {
440				pmd_t *page = pmd_alloc_one(mm, 0);
441				pud_set(pud0, page);
442			}
443			pgd_cache = get_pgd_cache(pgd0);
444		}
		/* Store the new pgd cache value into the DMMU TSB register. */
445		__asm__ __volatile__("stxa %0, [%1] %2\n\t"
446				     "membar #Sync"
447				     : /* no outputs */
448				     : "r" (pgd_cache),
449				       "r" (TSB_REG),
450				       "i" (ASI_DMMU));
451	}
452	set_thread_wsaved(0);
453
454	/* Turn off performance counters if on. */
455	if (test_and_clear_thread_flag(TIF_PERFCTR)) {
456		t->user_cntd0 = t->user_cntd1 = NULL;
457		t->pcr_reg = 0;
458		write_pcr(0);
459	}
460
461	/* Clear FPU register state. */
462	t->fpsaved[0] = 0;
463
464	if (get_thread_current_ds() != ASI_AIUS)
465		set_fs(USER_DS);
466
467	/* Init new signal delivery disposition. */
468	clear_thread_flag(TIF_NEWSIGNALS);
469}
470
471/* It's a bit more tricky when 64-bit tasks are involved... */
472static unsigned long clone_stackframe(unsigned long csp, unsigned long psp)
473{
474 unsigned long fp, distance, rval;
475
476 if (!(test_thread_flag(TIF_32BIT))) {
477 csp += STACK_BIAS;
478 psp += STACK_BIAS;
479 __get_user(fp, &(((struct reg_window __user *)psp)->ins[6]));
480 fp += STACK_BIAS;
481 } else
482 __get_user(fp, &(((struct reg_window32 __user *)psp)->ins[6]));
483
484 /* Now 8-byte align the stack as this is mandatory in the
485 * Sparc ABI due to how register windows work. This hides
486 * the restriction from thread libraries etc. -DaveM
487 */
488 csp &= ~7UL;
489
490 distance = fp - psp;
491 rval = (csp - distance);
492 if (copy_in_user((void __user *) rval, (void __user *) psp, distance))
493 rval = 0;
494 else if (test_thread_flag(TIF_32BIT)) {
495 if (put_user(((u32)csp),
496 &(((struct reg_window32 __user *)rval)->ins[6])))
497 rval = 0;
498 } else {
499 if (put_user(((u64)csp - STACK_BIAS),
500 &(((struct reg_window __user *)rval)->ins[6])))
501 rval = 0;
502 else
503 rval = rval - STACK_BIAS;
504 }
505
506 return rval;
507}
508
509/* Standard stuff. */
510static inline void shift_window_buffer(int first_win, int last_win,
511 struct thread_info *t)
512{
513 int i;
514
515 for (i = first_win; i < last_win; i++) {
516 t->rwbuf_stkptrs[i] = t->rwbuf_stkptrs[i+1];
517 memcpy(&t->reg_window[i], &t->reg_window[i+1],
518 sizeof(struct reg_window));
519 }
520}
521
/* Flush user windows and try to write each kernel-buffered window back
 * to its user stack slot; successfully written windows are removed from
 * the buffer, windows whose stack page faults are kept for later.
 * Window size/bias depend on the task's 32/64-bit ABI.
 */
522void synchronize_user_stack(void)
523{
524	struct thread_info *t = current_thread_info();
525	unsigned long window;
526
527	flush_user_windows();
528	if ((window = get_thread_wsaved()) != 0) {
529		int winsize = sizeof(struct reg_window);
530		int bias = 0;
531
532		if (test_thread_flag(TIF_32BIT))
533			winsize = sizeof(struct reg_window32);
534		else
535			bias = STACK_BIAS;
536
537		window -= 1;
538		do {
539			unsigned long sp = (t->rwbuf_stkptrs[window] + bias);
540			struct reg_window *rwin = &t->reg_window[window];
541
542			if (!copy_to_user((char __user *)sp, rwin, winsize)) {
543				shift_window_buffer(window, get_thread_wsaved() - 1, t);
544				set_thread_wsaved(get_thread_wsaved() - 1);
545			}
546		} while (window--);
547	}
548}
549
/* Like synchronize_user_stack() but mandatory: every buffered window
 * must reach the user stack.  On the first failed copy the remaining
 * count is recorded and the task is killed with SIGILL.
 */
550void fault_in_user_windows(void)
551{
552	struct thread_info *t = current_thread_info();
553	unsigned long window;
554	int winsize = sizeof(struct reg_window);
555	int bias = 0;
556
557	if (test_thread_flag(TIF_32BIT))
558		winsize = sizeof(struct reg_window32);
559	else
560		bias = STACK_BIAS;
561
562	flush_user_windows();
563	window = get_thread_wsaved();
564
565	if (window != 0) {
566		window -= 1;
567		do {
568			unsigned long sp = (t->rwbuf_stkptrs[window] + bias);
569			struct reg_window *rwin = &t->reg_window[window];
570
571			if (copy_to_user((char __user *)sp, rwin, winsize))
572				goto barf;
573		} while (window--);
574	}
575	set_thread_wsaved(0);
576	return;
577
578barf:
579	set_thread_wsaved(window + 1);
580	do_exit(SIGILL);
581}
582
/* Arch wrapper for do_fork(): pull the parent/child TID pointers out of
 * the trap-frame registers %i2/%i4, using compat_ptr() narrowing for
 * 32-bit tasks, then hand off to the generic fork path.
 */
583asmlinkage long sparc_do_fork(unsigned long clone_flags,
584			      unsigned long stack_start,
585			      struct pt_regs *regs,
586			      unsigned long stack_size)
587{
588	int __user *parent_tid_ptr, *child_tid_ptr;
589
590#ifdef CONFIG_COMPAT
591	if (test_thread_flag(TIF_32BIT)) {
592		parent_tid_ptr = compat_ptr(regs->u_regs[UREG_I2]);
593		child_tid_ptr = compat_ptr(regs->u_regs[UREG_I4]);
594	} else
595#endif
596	{
597		parent_tid_ptr = (int __user *) regs->u_regs[UREG_I2];
598		child_tid_ptr = (int __user *) regs->u_regs[UREG_I4];
599	}
600
601	return do_fork(clone_flags, stack_start,
602		       regs, stack_size,
603		       parent_tid_ptr, child_tid_ptr);
604}
605
606/* Copy a Sparc thread. The fork() return value conventions
607 * under SunOS are nothing short of bletcherous:
608 * Parent --> %o0 == childs pid, %o1 == 0
609 * Child --> %o0 == parents pid, %o1 == 1
610 */
/* Set up the new child's kernel stack and thread_info for fork/clone:
 * copy the parent's trap frame to the top of the child's stack, fix up
 * CWP/address-space bits in thread flags, and arrange the SunOS-style
 * return values (%o0 = pid, %o1 = 1 in the child / 0 in the parent).
 * Kernel-thread children get a copy of the parent's kernel stack frame
 * instead of a user stack; user children may get a cloned stack frame
 * when clone() supplied a new sp.  Returns 0 or -EFAULT.
 */
611int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
612		unsigned long unused,
613		struct task_struct *p, struct pt_regs *regs)
614{
615	struct thread_info *t = p->thread_info;
616	char *child_trap_frame;
617
618#ifdef CONFIG_DEBUG_SPINLOCK
619	p->thread.smp_lock_count = 0;
620	p->thread.smp_lock_pc = 0;
621#endif
622
623	/* Calculate offset to stack_frame & pt_regs */
624	child_trap_frame = ((char *)t) + (THREAD_SIZE - (TRACEREG_SZ+STACKFRAME_SZ));
625	memcpy(child_trap_frame, (((struct sparc_stackf *)regs)-1), (TRACEREG_SZ+STACKFRAME_SZ));
626
	/* Replace CWP/current_ds flag fields: mark as fresh child and
	 * record the window pointer the child will run in (cwp + 1).
	 */
627	t->flags = (t->flags & ~((0xffUL << TI_FLAG_CWP_SHIFT) | (0xffUL << TI_FLAG_CURRENT_DS_SHIFT))) |
628		_TIF_NEWCHILD |
629		(((regs->tstate + 1) & TSTATE_CWP) << TI_FLAG_CWP_SHIFT);
630	t->ksp = ((unsigned long) child_trap_frame) - STACK_BIAS;
631	t->kregs = (struct pt_regs *)(child_trap_frame+sizeof(struct sparc_stackf));
632	t->fpsaved[0] = 0;
633
634	if (regs->tstate & TSTATE_PRIV) {
635		/* Special case, if we are spawning a kernel thread from
636		 * a userspace task (via KMOD, NFS, or similar) we must
637		 * disable performance counters in the child because the
638		 * address space and protection realm are changing.
639		 */
640		if (t->flags & _TIF_PERFCTR) {
641			t->user_cntd0 = t->user_cntd1 = NULL;
642			t->pcr_reg = 0;
643			t->flags &= ~_TIF_PERFCTR;
644		}
645		t->kregs->u_regs[UREG_FP] = t->ksp;
646		t->flags |= ((long)ASI_P << TI_FLAG_CURRENT_DS_SHIFT);
647		flush_register_windows();
648		memcpy((void *)(t->ksp + STACK_BIAS),
649		       (void *)(regs->u_regs[UREG_FP] + STACK_BIAS),
650		       sizeof(struct sparc_stackf));
651		t->kregs->u_regs[UREG_G6] = (unsigned long) t;
652		t->kregs->u_regs[UREG_G4] = (unsigned long) t->task;
653	} else {
654		if (t->flags & _TIF_32BIT) {
655			sp &= 0x00000000ffffffffUL;
656			regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL;
657		}
658		t->kregs->u_regs[UREG_FP] = sp;
659		t->flags |= ((long)ASI_AIUS << TI_FLAG_CURRENT_DS_SHIFT);
660		if (sp != regs->u_regs[UREG_FP]) {
661			unsigned long csp;
662
663			csp = clone_stackframe(sp, regs->u_regs[UREG_FP]);
664			if (!csp)
665				return -EFAULT;
666			t->kregs->u_regs[UREG_FP] = csp;
667		}
668		if (t->utraps)
669			t->utraps[0]++;
670	}
671
672	/* Set the return value for the child. */
673	t->kregs->u_regs[UREG_I0] = current->pid;
674	t->kregs->u_regs[UREG_I1] = 1;
675
676	/* Set the second return value for the parent. */
677	regs->u_regs[UREG_I1] = 0;
678
679	if (clone_flags & CLONE_SETTLS)
680		t->kregs->u_regs[UREG_G7] = regs->u_regs[UREG_I3];
681
682	return 0;
683}
684
685/*
686 * This is the mechanism for creating a new kernel thread.
687 *
688 * NOTE! Only a kernel-only process(ie the swapper or direct descendants
689 * who haven't done an "execve()") should use this: it will work within
690 * a system call from a "real" process, but the process memory space will
691 * not be free'd until both the parent and the child have exited.
692 */
/* Create a kernel thread by issuing the clone trap directly from inline
 * asm; the child jumps to fn(arg) and then exits via the exit trap.
 * Returns the clone() result in the parent (child pid or -errno).
 */
693pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
694{
695	long retval;
696
697	/* If the parent runs before fn(arg) is called by the child,
698	 * the input registers of this function can be clobbered.
699	 * So we stash 'fn' and 'arg' into global registers which
700	 * will not be modified by the parent.
701	 */
702	__asm__ __volatile__("mov %4, %%g2\n\t"    /* Save FN into global */
703			     "mov %5, %%g3\n\t"    /* Save ARG into global */
704			     "mov %1, %%g1\n\t"    /* Clone syscall nr. */
705			     "mov %2, %%o0\n\t"    /* Clone flags. */
706			     "mov 0, %%o1\n\t"     /* usp arg == 0 */
707			     "t 0x6d\n\t"          /* Linux/Sparc clone(). */
708			     "brz,a,pn %%o1, 1f\n\t" /* Parent, just return. */
709			     " mov %%o0, %0\n\t"
710			     "jmpl %%g2, %%o7\n\t" /* Call the function. */
711			     " mov %%g3, %%o0\n\t" /* Set arg in delay. */
712			     "mov %3, %%g1\n\t"
713			     "t 0x6d\n\t"          /* Linux/Sparc exit(). */
714			     /* Notreached by child. */
715			     "1:" :
716			     "=r" (retval) :
717			     "i" (__NR_clone), "r" (flags | CLONE_VM | CLONE_UNTRACED),
718			     "i" (__NR_exit),  "r" (fn), "r" (arg) :
719			     "g1", "g2", "g3", "o0", "o1", "memory", "cc");
720	return retval;
721}
722
723/*
724 * fill in the user structure for a core dump..
725 */
726void dump_thread(struct pt_regs * regs, struct user * dump)
727{
728	/* Only should be used for SunOS and ancient a.out
729	 * SparcLinux binaries...  Not worth implementing.
730	 */
	/* Deliberately a stub: just zero the user dump structure. */
731	memset(dump, 0, sizeof(struct user));
732}
733
/* 32-bit (compat) ELF FPU register-set layout used by dump_fpu() when
 * dumping a TIF_32BIT task's core file.
 */
734typedef struct {
735	union {
736		unsigned int pr_regs[32];
737		unsigned long pr_dregs[16];
738	} pr_fr;
739	unsigned int __unused;
740	unsigned int pr_fsr;
741	unsigned char pr_qcnt;
742	unsigned char pr_q_entrysize;
743	unsigned char pr_en;
744	unsigned int pr_q[64];
745} elf_fpregset_t32;
746
747/*
748 * fill in the fpu structure for a core dump.
749 */
/* Fill in the FPU register set for a core dump from the thread's saved
 * FPU state.  The fpsaved[0] FPRS bits (DL/DU/FEF) say which halves of
 * the register file and which status registers are valid; anything not
 * saved is zeroed.  32-bit tasks are dumped in elf_fpregset_t32 layout.
 * Always returns 1 (FPU state present).
 */
750int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs)
751{
752	unsigned long *kfpregs = current_thread_info()->fpregs;
753	unsigned long fprs = current_thread_info()->fpsaved[0];
754
755	if (test_thread_flag(TIF_32BIT)) {
756		elf_fpregset_t32 *fpregs32 = (elf_fpregset_t32 *)fpregs;
757
758		if (fprs & FPRS_DL)
759			memcpy(&fpregs32->pr_fr.pr_regs[0], kfpregs,
760			       sizeof(unsigned int) * 32);
761		else
762			memset(&fpregs32->pr_fr.pr_regs[0], 0,
763			       sizeof(unsigned int) * 32);
764		fpregs32->pr_qcnt = 0;
765		fpregs32->pr_q_entrysize = 8;
766		memset(&fpregs32->pr_q[0], 0,
767		       (sizeof(unsigned int) * 64));
768		if (fprs & FPRS_FEF) {
769			fpregs32->pr_fsr = (unsigned int) current_thread_info()->xfsr[0];
770			fpregs32->pr_en = 1;
771		} else {
772			fpregs32->pr_fsr = 0;
773			fpregs32->pr_en = 0;
774		}
775	} else {
776		if(fprs & FPRS_DL)
777			memcpy(&fpregs->pr_regs[0], kfpregs,
778			       sizeof(unsigned int) * 32);
779		else
780			memset(&fpregs->pr_regs[0], 0,
781			       sizeof(unsigned int) * 32);
782		if(fprs & FPRS_DU)
783			memcpy(&fpregs->pr_regs[16], kfpregs+16,
784			       sizeof(unsigned int) * 32);
785		else
786			memset(&fpregs->pr_regs[16], 0,
787			       sizeof(unsigned int) * 32);
788		if(fprs & FPRS_FEF) {
789			fpregs->pr_fsr = current_thread_info()->xfsr[0];
790			fpregs->pr_gsr = current_thread_info()->gsr[0];
791		} else {
792			fpregs->pr_fsr = fpregs->pr_gsr = 0;
793		}
794		fpregs->pr_fprs = fprs;
795	}
796	return 1;
797}
798
799/*
800 * sparc_execve() executes a new program after the asm stub has set
801 * things up for us. This should basically do what I want it to.
802 */
/* Arch execve(): fetch path/argv/envp from the trap frame (shifted by
 * one register for the indirect-syscall convention signalled by %g1==0),
 * run do_execve(), and on success reset FPU state, clear TSTATE_PEF and
 * the PT_DTRACE single-step flag for the fresh image.
 */
803asmlinkage int sparc_execve(struct pt_regs *regs)
804{
805	int error, base = 0;
806	char *filename;
807
808	/* User register window flush is done by entry.S */
809
810	/* Check for indirect call. */
811	if (regs->u_regs[UREG_G1] == 0)
812		base = 1;
813
814	filename = getname((char __user *)regs->u_regs[base + UREG_I0]);
815	error = PTR_ERR(filename);
816	if (IS_ERR(filename))
817		goto out;
818	error = do_execve(filename,
819			  (char __user * __user *)
820			  regs->u_regs[base + UREG_I1],
821			  (char __user * __user *)
822			  regs->u_regs[base + UREG_I2], regs);
823	putname(filename);
824	if (!error) {
825		fprs_write(0);
826		current_thread_info()->xfsr[0] = 0;
827		current_thread_info()->fpsaved[0] = 0;
828		regs->tstate &= ~TSTATE_PEF;
829		task_lock(current);
830		current->ptrace &= ~PT_DTRACE;
831		task_unlock(current);
832	}
833out:
834	return error;
835}
836
/* Walk up to 16 stack frames of a non-running task and return the first
 * return address outside the scheduler (the "wait channel").  Frame
 * pointers are sanity-checked against the task's kernel-stack extent;
 * returns 0 for the current/running task or on a bogus stack.
 */
837unsigned long get_wchan(struct task_struct *task)
838{
839	unsigned long pc, fp, bias = 0;
840	unsigned long thread_info_base;
841	struct reg_window *rw;
842        unsigned long ret = 0;
843	int count = 0; 
844
845	if (!task || task == current ||
846            task->state == TASK_RUNNING)
847		goto out;
848
849	thread_info_base = (unsigned long) task->thread_info;
850	bias = STACK_BIAS;
851	fp = task->thread_info->ksp + bias;
852
853	do {
854		/* Bogus frame pointer? */
855		if (fp < (thread_info_base + sizeof(struct thread_info)) ||
856		    fp >= (thread_info_base + THREAD_SIZE))
857			break;
858		rw = (struct reg_window *) fp;
859		pc = rw->ins[7];
860		if (!in_sched_functions(pc)) {
861			ret = pc;
862			goto out;
863		}
864		fp = rw->ins[6] + bias;
865	} while (++count < 16);
866
867out:
868	return ret;
869}
diff --git a/arch/sparc64/kernel/ptrace.c b/arch/sparc64/kernel/ptrace.c
new file mode 100644
index 000000000000..1722dc51b0d8
--- /dev/null
+++ b/arch/sparc64/kernel/ptrace.c
@@ -0,0 +1,646 @@
1/* ptrace.c: Sparc process tracing support.
2 *
3 * Copyright (C) 1996 David S. Miller (davem@caipfs.rutgers.edu)
4 * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
5 *
6 * Based upon code written by Ross Biro, Linus Torvalds, Bob Manson,
7 * and David Mosberger.
8 *
9 * Added Linux support -miguel (weird, eh?, the original code was meant
10 * to emulate SunOS).
11 */
12
13#include <linux/kernel.h>
14#include <linux/sched.h>
15#include <linux/mm.h>
16#include <linux/errno.h>
17#include <linux/ptrace.h>
18#include <linux/user.h>
19#include <linux/smp.h>
20#include <linux/smp_lock.h>
21#include <linux/security.h>
22
23#include <asm/asi.h>
24#include <asm/pgtable.h>
25#include <asm/system.h>
26#include <asm/uaccess.h>
27#include <asm/psrcompat.h>
28#include <asm/visasm.h>
29#include <asm/spitfire.h>
30
31/* Returning from ptrace is a bit tricky because the syscall return
32 * low level code assumes any value returned which is negative and
33 * is a valid errno will mean setting the condition codes to indicate
34 * an error return. This doesn't work, so we have this hook.
35 */
/* Report a ptrace failure to the tracing process: put the (positive)
 * errno in %o0, set the carry bits so user glibc sees an error, and
 * advance past the trap instruction.  The tpc/tnpc updates must stay
 * in this order.
 */
static inline void pt_error_return(struct pt_regs *regs, unsigned long error)
{
	regs->u_regs[UREG_I0] = error;
	regs->tstate |= (TSTATE_ICARRY | TSTATE_XCARRY);
	regs->tpc = regs->tnpc;
	regs->tnpc += 4;
}
43
/* Report ptrace success: value in %o0, carry bits cleared (no error),
 * and skip the trap instruction.
 */
static inline void pt_succ_return(struct pt_regs *regs, unsigned long value)
{
	regs->u_regs[UREG_I0] = value;
	regs->tstate &= ~(TSTATE_ICARRY | TSTATE_XCARRY);
	regs->tpc = regs->tnpc;
	regs->tnpc += 4;
}
51
/* Linux-style success return: store the peeked value at the tracer's
 * user address (32- or 64-bit wide depending on the tracer's ABI),
 * then return 0 in %o0 with carry clear.  Falls back to an EFAULT
 * error return if the store faults.
 */
static inline void
pt_succ_return_linux(struct pt_regs *regs, unsigned long value, void __user *addr)
{
	if (test_thread_flag(TIF_32BIT)) {
		if (put_user(value, (unsigned int __user *) addr)) {
			pt_error_return(regs, EFAULT);
			return;
		}
	} else {
		if (put_user(value, (long __user *) addr)) {
			pt_error_return(regs, EFAULT);
			return;
		}
	}
	regs->u_regs[UREG_I0] = 0;
	regs->tstate &= ~(TSTATE_ICARRY | TSTATE_XCARRY);
	regs->tpc = regs->tnpc;
	regs->tnpc += 4;
}
71
72static void
73pt_os_succ_return (struct pt_regs *regs, unsigned long val, void __user *addr)
74{
75 if (current->personality == PER_SUNOS)
76 pt_succ_return (regs, val);
77 else
78 pt_succ_return_linux (regs, val, addr);
79}
80
/* #define ALLOW_INIT_TRACING */
/* #define DEBUG_PTRACE */

#ifdef DEBUG_PTRACE
/* Human-readable names for ptrace request codes 0-24; used only by
 * the DEBUG_PTRACE printk in do_ptrace().
 */
char *pt_rq [] = {
	/* 0  */ "TRACEME", "PEEKTEXT", "PEEKDATA", "PEEKUSR",
	/* 4  */ "POKETEXT", "POKEDATA", "POKEUSR", "CONT",
	/* 8  */ "KILL", "SINGLESTEP", "SUNATTACH", "SUNDETACH",
	/* 12 */ "GETREGS", "SETREGS", "GETFPREGS", "SETFPREGS",
	/* 16 */ "READDATA", "WRITEDATA", "READTEXT", "WRITETEXT",
	/* 20 */ "GETFPAREGS", "SETFPAREGS", "unknown", "unknown",
	/* 24 */ "SYSCALL", ""
};
#endif
95
/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure single step bits etc are not set.
 */
void ptrace_disable(struct task_struct *child)
{
	/* nothing to do: sparc64 keeps no per-task single-step
	 * hardware state that needs clearing on detach.
	 */
}
105
/* Main sparc64 ptrace entry point.  Arguments arrive in the trap-frame
 * input registers rather than as C parameters; all results go back to
 * the tracer through pt_{error,succ,os_succ}_return(), which also step
 * past the trapping instruction.  Handles both the SunOS-compat and
 * Linux request numbering, plus the 64-bit-tracer request variants.
 */
asmlinkage void do_ptrace(struct pt_regs *regs)
{
	int request = regs->u_regs[UREG_I0];
	pid_t pid = regs->u_regs[UREG_I1];
	unsigned long addr = regs->u_regs[UREG_I2];
	unsigned long data = regs->u_regs[UREG_I3];
	unsigned long addr2 = regs->u_regs[UREG_I4];
	struct task_struct *child;
	int ret;

	/* A 32-bit tracer only filled the low halves; strip any stale
	 * high bits left in the 64-bit registers.
	 */
	if (test_thread_flag(TIF_32BIT)) {
		addr &= 0xffffffffUL;
		data &= 0xffffffffUL;
		addr2 &= 0xffffffffUL;
	}
	lock_kernel();
#ifdef DEBUG_PTRACE
	{
		char *s;

		if ((request >= 0) && (request <= 24))
			s = pt_rq [request];
		else
			s = "unknown";

		/* 0x91d02001 is "ta 1", the debugger breakpoint insn */
		if (request == PTRACE_POKEDATA && data == 0x91d02001){
			printk ("do_ptrace: breakpoint pid=%d, addr=%016lx addr2=%016lx\n",
				pid, addr, addr2);
		} else
			printk("do_ptrace: rq=%s(%d) pid=%d addr=%016lx data=%016lx addr2=%016lx\n",
			       s, request, pid, addr, data, addr2);
	}
#endif
	if (request == PTRACE_TRACEME) {
		int ret;

		/* are we already being traced? */
		if (current->ptrace & PT_PTRACED) {
			pt_error_return(regs, EPERM);
			goto out;
		}
		ret = security_ptrace(current->parent, current);
		if (ret) {
			pt_error_return(regs, -ret);
			goto out;
		}

		/* set the ptrace bit in the process flags. */
		current->ptrace |= PT_PTRACED;
		pt_succ_return(regs, 0);
		goto out;
	}
#ifndef ALLOW_INIT_TRACING
	if (pid == 1) {
		/* Can't dork with init. */
		pt_error_return(regs, EPERM);
		goto out;
	}
#endif
	read_lock(&tasklist_lock);
	child = find_task_by_pid(pid);
	if (child)
		get_task_struct(child);	/* pin child across the unlock */
	read_unlock(&tasklist_lock);

	if (!child) {
		pt_error_return(regs, ESRCH);
		goto out;
	}

	/* Attach uses the personality-appropriate request number. */
	if ((current->personality == PER_SUNOS && request == PTRACE_SUNATTACH)
	    || (current->personality != PER_SUNOS && request == PTRACE_ATTACH)) {
		if (ptrace_attach(child)) {
			pt_error_return(regs, EPERM);
			goto out_tsk;
		}
		pt_succ_return(regs, 0);
		goto out_tsk;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL);
	if (ret < 0) {
		pt_error_return(regs, -ret);
		goto out_tsk;
	}

	/* 64-bit tracers use the *64 request codes and pass addr/addr2
	 * in %g2/%g3; remap them onto the base request codes, which are
	 * exactly 30 below their 64-bit counterparts.
	 */
	if (!(test_thread_flag(TIF_32BIT))	&&
	    ((request == PTRACE_READDATA64)		||
	     (request == PTRACE_WRITEDATA64)	||
	     (request == PTRACE_READTEXT64)		||
	     (request == PTRACE_WRITETEXT64)	||
	     (request == PTRACE_PEEKTEXT64)		||
	     (request == PTRACE_POKETEXT64)		||
	     (request == PTRACE_PEEKDATA64)		||
	     (request == PTRACE_POKEDATA64))) {
		addr = regs->u_regs[UREG_G2];
		addr2 = regs->u_regs[UREG_G3];
		request -= 30; /* wheee... */
	}

	switch(request) {
	case PTRACE_PEEKTEXT: /* read word at location addr. */
	case PTRACE_PEEKDATA: {
		unsigned long tmp64;
		unsigned int tmp32;
		int res, copied;

		/* Read a 32- or 64-bit word from the child depending on
		 * the tracer's ABI, then hand it back via the
		 * personality-appropriate return path.
		 */
		res = -EIO;
		if (test_thread_flag(TIF_32BIT)) {
			copied = access_process_vm(child, addr,
						   &tmp32, sizeof(tmp32), 0);
			tmp64 = (unsigned long) tmp32;
			if (copied == sizeof(tmp32))
				res = 0;
		} else {
			copied = access_process_vm(child, addr,
						   &tmp64, sizeof(tmp64), 0);
			if (copied == sizeof(tmp64))
				res = 0;
		}
		if (res < 0)
			pt_error_return(regs, -res);
		else
			pt_os_succ_return(regs, tmp64, (void __user *) data);
		goto flush_and_out;
	}

	case PTRACE_POKETEXT: /* write the word at location addr. */
	case PTRACE_POKEDATA: {
		unsigned long tmp64;
		unsigned int tmp32;
		int copied, res = -EIO;

		if (test_thread_flag(TIF_32BIT)) {
			tmp32 = data;
			copied = access_process_vm(child, addr,
						   &tmp32, sizeof(tmp32), 1);
			if (copied == sizeof(tmp32))
				res = 0;
		} else {
			tmp64 = data;
			copied = access_process_vm(child, addr,
						   &tmp64, sizeof(tmp64), 1);
			if (copied == sizeof(tmp64))
				res = 0;
		}
		if (res < 0)
			pt_error_return(regs, -res);
		else
			pt_succ_return(regs, res);
		goto flush_and_out;
	}

	case PTRACE_GETREGS: {
		/* Export the child's registers in 32-bit pt_regs32
		 * layout; tstate is folded down to a v8 psr.
		 */
		struct pt_regs32 __user *pregs =
			(struct pt_regs32 __user *) addr;
		struct pt_regs *cregs = child->thread_info->kregs;
		int rval;

		if (__put_user(tstate_to_psr(cregs->tstate), (&pregs->psr)) ||
		    __put_user(cregs->tpc, (&pregs->pc)) ||
		    __put_user(cregs->tnpc, (&pregs->npc)) ||
		    __put_user(cregs->y, (&pregs->y))) {
			pt_error_return(regs, EFAULT);
			goto out_tsk;
		}
		/* u_regs[0] is %g0 (always zero) and is not exported. */
		for (rval = 1; rval < 16; rval++)
			if (__put_user(cregs->u_regs[rval], (&pregs->u_regs[rval - 1]))) {
				pt_error_return(regs, EFAULT);
				goto out_tsk;
			}
		pt_succ_return(regs, 0);
#ifdef DEBUG_PTRACE
		printk ("PC=%lx nPC=%lx o7=%lx\n", cregs->tpc, cregs->tnpc, cregs->u_regs [15]);
#endif
		goto out_tsk;
	}

	case PTRACE_GETREGS64: {
		struct pt_regs __user *pregs = (struct pt_regs __user *) addr;
		struct pt_regs *cregs = child->thread_info->kregs;
		unsigned long tpc = cregs->tpc;
		int rval;

		/* A 32-bit child's PC is only meaningful in the low word. */
		if ((child->thread_info->flags & _TIF_32BIT) != 0)
			tpc &= 0xffffffff;
		if (__put_user(cregs->tstate, (&pregs->tstate)) ||
		    __put_user(tpc, (&pregs->tpc)) ||
		    __put_user(cregs->tnpc, (&pregs->tnpc)) ||
		    __put_user(cregs->y, (&pregs->y))) {
			pt_error_return(regs, EFAULT);
			goto out_tsk;
		}
		for (rval = 1; rval < 16; rval++)
			if (__put_user(cregs->u_regs[rval], (&pregs->u_regs[rval - 1]))) {
				pt_error_return(regs, EFAULT);
				goto out_tsk;
			}
		pt_succ_return(regs, 0);
#ifdef DEBUG_PTRACE
		printk ("PC=%lx nPC=%lx o7=%lx\n", cregs->tpc, cregs->tnpc, cregs->u_regs [15]);
#endif
		goto out_tsk;
	}

	case PTRACE_SETREGS: {
		struct pt_regs32 __user *pregs =
			(struct pt_regs32 __user *) addr;
		struct pt_regs *cregs = child->thread_info->kregs;
		unsigned int psr, pc, npc, y;
		int i;

		/* Must be careful, tracing process can only set certain
		 * bits in the psr.
		 */
		if (__get_user(psr, (&pregs->psr)) ||
		    __get_user(pc, (&pregs->pc)) ||
		    __get_user(npc, (&pregs->npc)) ||
		    __get_user(y, (&pregs->y))) {
			pt_error_return(regs, EFAULT);
			goto out_tsk;
		}
		cregs->tstate &= ~(TSTATE_ICC);
		cregs->tstate |= psr_to_tstate_icc(psr);
		/* Only accept properly aligned PC/nPC values. */
		if (!((pc | npc) & 3)) {
			cregs->tpc = pc;
			cregs->tnpc = npc;
		}
		cregs->y = y;
		for (i = 1; i < 16; i++) {
			if (__get_user(cregs->u_regs[i], (&pregs->u_regs[i-1]))) {
				pt_error_return(regs, EFAULT);
				goto out_tsk;
			}
		}
		pt_succ_return(regs, 0);
		goto out_tsk;
	}

	case PTRACE_SETREGS64: {
		struct pt_regs __user *pregs = (struct pt_regs __user *) addr;
		struct pt_regs *cregs = child->thread_info->kregs;
		unsigned long tstate, tpc, tnpc, y;
		int i;

		/* Must be careful, tracing process can only set certain
		 * bits in the psr.
		 */
		if (__get_user(tstate, (&pregs->tstate)) ||
		    __get_user(tpc, (&pregs->tpc)) ||
		    __get_user(tnpc, (&pregs->tnpc)) ||
		    __get_user(y, (&pregs->y))) {
			pt_error_return(regs, EFAULT);
			goto out_tsk;
		}
		if ((child->thread_info->flags & _TIF_32BIT) != 0) {
			tpc &= 0xffffffff;
			tnpc &= 0xffffffff;
		}
		/* Only the condition-code fields are writable. */
		tstate &= (TSTATE_ICC | TSTATE_XCC);
		cregs->tstate &= ~(TSTATE_ICC | TSTATE_XCC);
		cregs->tstate |= tstate;
		if (!((tpc | tnpc) & 3)) {
			cregs->tpc = tpc;
			cregs->tnpc = tnpc;
		}
		cregs->y = y;
		for (i = 1; i < 16; i++) {
			if (__get_user(cregs->u_regs[i], (&pregs->u_regs[i-1]))) {
				pt_error_return(regs, EFAULT);
				goto out_tsk;
			}
		}
		pt_succ_return(regs, 0);
		goto out_tsk;
	}

	case PTRACE_GETFPREGS: {
		/* 32-bit FPU dump: 32 single-precision regs plus a
		 * SunOS-style (always empty) FP queue.
		 */
		struct fps {
			unsigned int regs[32];
			unsigned int fsr;
			unsigned int flags;
			unsigned int extra;
			unsigned int fpqd;
			struct fq {
				unsigned int insnaddr;
				unsigned int insn;
			} fpq[16];
		};
		struct fps __user *fps = (struct fps __user *) addr;
		unsigned long *fpregs = child->thread_info->fpregs;

		if (copy_to_user(&fps->regs[0], fpregs,
				 (32 * sizeof(unsigned int))) ||
		    __put_user(child->thread_info->xfsr[0], (&fps->fsr)) ||
		    __put_user(0, (&fps->fpqd)) ||
		    __put_user(0, (&fps->flags)) ||
		    __put_user(0, (&fps->extra)) ||
		    clear_user(&fps->fpq[0], 32 * sizeof(unsigned int))) {
			pt_error_return(regs, EFAULT);
			goto out_tsk;
		}
		pt_succ_return(regs, 0);
		goto out_tsk;
	}

	case PTRACE_GETFPREGS64: {
		struct fps {
			unsigned int regs[64];
			unsigned long fsr;
		};
		struct fps __user *fps = (struct fps __user *) addr;
		unsigned long *fpregs = child->thread_info->fpregs;

		if (copy_to_user(&fps->regs[0], fpregs,
				 (64 * sizeof(unsigned int))) ||
		    __put_user(child->thread_info->xfsr[0], (&fps->fsr))) {
			pt_error_return(regs, EFAULT);
			goto out_tsk;
		}
		pt_succ_return(regs, 0);
		goto out_tsk;
	}

	case PTRACE_SETFPREGS: {
		struct fps {
			unsigned int regs[32];
			unsigned int fsr;
			unsigned int flags;
			unsigned int extra;
			unsigned int fpqd;
			struct fq {
				unsigned int insnaddr;
				unsigned int insn;
			} fpq[16];
		};
		struct fps __user *fps = (struct fps __user *) addr;
		unsigned long *fpregs = child->thread_info->fpregs;
		unsigned fsr;

		if (copy_from_user(fpregs, &fps->regs[0],
				   (32 * sizeof(unsigned int))) ||
		    __get_user(fsr, (&fps->fsr))) {
			pt_error_return(regs, EFAULT);
			goto out_tsk;
		}
		/* Only the low 32 bits of the child's %fsr are replaced. */
		child->thread_info->xfsr[0] &= 0xffffffff00000000UL;
		child->thread_info->xfsr[0] |= fsr;
		if (!(child->thread_info->fpsaved[0] & FPRS_FEF))
			child->thread_info->gsr[0] = 0;
		/* Mark the lower half of the FP register file valid. */
		child->thread_info->fpsaved[0] |= (FPRS_FEF | FPRS_DL);
		pt_succ_return(regs, 0);
		goto out_tsk;
	}

	case PTRACE_SETFPREGS64: {
		struct fps {
			unsigned int regs[64];
			unsigned long fsr;
		};
		struct fps __user *fps = (struct fps __user *) addr;
		unsigned long *fpregs = child->thread_info->fpregs;

		if (copy_from_user(fpregs, &fps->regs[0],
				   (64 * sizeof(unsigned int))) ||
		    __get_user(child->thread_info->xfsr[0], (&fps->fsr))) {
			pt_error_return(regs, EFAULT);
			goto out_tsk;
		}
		if (!(child->thread_info->fpsaved[0] & FPRS_FEF))
			child->thread_info->gsr[0] = 0;
		/* Both halves of the FP register file are now valid. */
		child->thread_info->fpsaved[0] |= (FPRS_FEF | FPRS_DL | FPRS_DU);
		pt_succ_return(regs, 0);
		goto out_tsk;
	}

	case PTRACE_READTEXT:
	case PTRACE_READDATA: {
		int res = ptrace_readdata(child, addr,
					  (char __user *)addr2, data);
		if (res == data) {
			pt_succ_return(regs, 0);
			goto flush_and_out;
		}
		/* Partial transfers are reported as -EIO. */
		if (res >= 0)
			res = -EIO;
		pt_error_return(regs, -res);
		goto flush_and_out;
	}

	case PTRACE_WRITETEXT:
	case PTRACE_WRITEDATA: {
		int res = ptrace_writedata(child, (char __user *) addr2,
					   addr, data);
		if (res == data) {
			pt_succ_return(regs, 0);
			goto flush_and_out;
		}
		if (res >= 0)
			res = -EIO;
		pt_error_return(regs, -res);
		goto flush_and_out;
	}
	case PTRACE_SYSCALL: /* continue and stop at (return from) syscall */
		addr = 1;
		/* fall through into PTRACE_CONT with addr==1 (no PC change) */

	case PTRACE_CONT: { /* restart after signal. */
		if (data > _NSIG) {
			pt_error_return(regs, EIO);
			goto out_tsk;
		}
		/* addr == 1 means "continue from where you are"; any
		 * other value sets a new, aligned restart PC.
		 */
		if (addr != 1) {
			unsigned long pc_mask = ~0UL;

			if ((child->thread_info->flags & _TIF_32BIT) != 0)
				pc_mask = 0xffffffff;

			if (addr & 3) {
				pt_error_return(regs, EINVAL);
				goto out_tsk;
			}
#ifdef DEBUG_PTRACE
			printk ("Original: %016lx %016lx\n",
				child->thread_info->kregs->tpc,
				child->thread_info->kregs->tnpc);
			printk ("Continuing with %016lx %016lx\n", addr, addr+4);
#endif
			child->thread_info->kregs->tpc = (addr & pc_mask);
			child->thread_info->kregs->tnpc = ((addr + 4) & pc_mask);
		}

		if (request == PTRACE_SYSCALL) {
			set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
		} else {
			clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
		}

		child->exit_code = data;
#ifdef DEBUG_PTRACE
		printk("CONT: %s [%d]: set exit_code = %x %lx %lx\n", child->comm,
			child->pid, child->exit_code,
			child->thread_info->kregs->tpc,
			child->thread_info->kregs->tnpc);
		       
#endif
		wake_up_process(child);
		pt_succ_return(regs, 0);
		goto out_tsk;
	}

/*
 * make the child exit.  Best I can do is send it a sigkill. 
 * perhaps it should be put in the status that it wants to 
 * exit.
 */
	case PTRACE_KILL: {
		if (child->exit_state == EXIT_ZOMBIE) {	/* already dead */
			pt_succ_return(regs, 0);
			goto out_tsk;
		}
		child->exit_code = SIGKILL;
		wake_up_process(child);
		pt_succ_return(regs, 0);
		goto out_tsk;
	}

	case PTRACE_SUNDETACH: { /* detach a process that was attached. */
		int error = ptrace_detach(child, data);
		if (error) {
			pt_error_return(regs, EIO);
			goto out_tsk;
		}
		pt_succ_return(regs, 0);
		goto out_tsk;
	}

	/* PTRACE_DUMPCORE unsupported... */

	default: {
		/* Everything else goes to the generic implementation. */
		int err = ptrace_request(child, request, addr, data);
		if (err)
			pt_error_return(regs, -err);
		else
			pt_succ_return(regs, 0);
		goto out_tsk;
	}
	}
flush_and_out:
	/* Requests that touched the child's memory must flush our local
	 * caches: D-cache always, and on pre-Cheetah also the I-cache
	 * when text may have been modified.
	 */
	{
		unsigned long va;

		if (tlb_type == cheetah || tlb_type == cheetah_plus) {
			for (va = 0; va < (1 << 16); va += (1 << 5))
				spitfire_put_dcache_tag(va, 0x0);
			/* No need to mess with I-cache on Cheetah. */
		} else {
			for (va =  0; va < L1DCACHE_SIZE; va += 32)
				spitfire_put_dcache_tag(va, 0x0);
			if (request == PTRACE_PEEKTEXT ||
			    request == PTRACE_POKETEXT ||
			    request == PTRACE_READTEXT ||
			    request == PTRACE_WRITETEXT) {
				for (va =  0; va < (PAGE_SIZE << 1); va += 32)
					spitfire_put_icache_tag(va, 0x0);
				__asm__ __volatile__("flush %g6");
			}
		}
	}
out_tsk:
	if (child)
		put_task_struct(child);	/* drop the ref taken above */
out:
	unlock_kernel();
}
620
/* Called from the syscall entry/exit path when TIF_SYSCALL_TRACE may
 * be set: stop the current task in the tracer via SIGTRAP (with bit
 * 0x80 set when the tracer requested PTRACE_O_TRACESYSGOOD), then
 * deliver any signal the tracer asked us to continue with.
 */
asmlinkage void syscall_trace(void)
{
#ifdef DEBUG_PTRACE
	printk("%s [%d]: syscall_trace\n", current->comm, current->pid);
#endif
	if (!test_thread_flag(TIF_SYSCALL_TRACE))
		return;
	if (!(current->ptrace & PT_PTRACED))
		return;
	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
				 ? 0x80 : 0));

	/*
	 * this isn't the same as continuing with a signal, but it will do
	 * for normal use.  strace only continues with a signal if the
	 * stopping signal is not SIGTRAP.  -brl
	 */
#ifdef DEBUG_PTRACE
	printk("%s [%d]: syscall_trace exit= %x\n", current->comm,
		current->pid, current->exit_code);
#endif
	if (current->exit_code) {
		send_sig (current->exit_code, current, 1);
		current->exit_code = 0;
	}
}
diff --git a/arch/sparc64/kernel/rtrap.S b/arch/sparc64/kernel/rtrap.S
new file mode 100644
index 000000000000..0696ed4b9d64
--- /dev/null
+++ b/arch/sparc64/kernel/rtrap.S
@@ -0,0 +1,362 @@
1/* $Id: rtrap.S,v 1.61 2002/02/09 19:49:31 davem Exp $
2 * rtrap.S: Preparing for return from trap on Sparc V9.
3 *
4 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
5 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
6 */
7
8#include <linux/config.h>
9
10#include <asm/asi.h>
11#include <asm/pstate.h>
12#include <asm/ptrace.h>
13#include <asm/spitfire.h>
14#include <asm/head.h>
15#include <asm/visasm.h>
16#include <asm/processor.h>
17
18#define RTRAP_PSTATE (PSTATE_RMO|PSTATE_PEF|PSTATE_PRIV|PSTATE_IE)
19#define RTRAP_PSTATE_IRQOFF (PSTATE_RMO|PSTATE_PEF|PSTATE_PRIV)
20#define RTRAP_PSTATE_AG_IRQOFF (PSTATE_RMO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG)
21
22 /* Register %l6 keeps track of whether we are returning
23 * from a system call or not. It is cleared if we call
24 * do_notify_resume, and it must not be otherwise modified
25 * until we fully commit to returning to userspace.
26 */
27
	.text
	.align		32

/* Slow-path helpers for the return-to-user work loop below.  Each one
 * re-enables interrupts (RTRAP_PSTATE) around the C call, disables
 * them again (RTRAP_PSTATE_IRQOFF), and branches back into the main
 * rtrap check sequence.  Note the SPARC convention of filling branch
 * and call delay slots with useful instructions.
 */
__handle_softirq:
	call		do_softirq
	 nop
	ba,a,pt		%xcc, __handle_softirq_continue
	 nop
__handle_preemption:
	call		schedule
	 wrpr		%g0, RTRAP_PSTATE, %pstate
	ba,pt		%xcc, __handle_preemption_continue
	 wrpr		%g0, RTRAP_PSTATE_IRQOFF, %pstate

__handle_user_windows:
	call		fault_in_user_windows
	 wrpr		%g0, RTRAP_PSTATE, %pstate
	wrpr		%g0, RTRAP_PSTATE_IRQOFF, %pstate
	/* Redo sched+sig checks */
	ldx		[%g6 + TI_FLAGS], %l0
	andcc		%l0, _TIF_NEED_RESCHED, %g0

	be,pt		%xcc, 1f
	 nop
	call		schedule
	 wrpr		%g0, RTRAP_PSTATE, %pstate
	wrpr		%g0, RTRAP_PSTATE_IRQOFF, %pstate
	ldx		[%g6 + TI_FLAGS], %l0

1:	andcc		%l0, (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING), %g0
	be,pt		%xcc, __handle_user_windows_continue
	 nop
	clr		%o0
	mov		%l5, %o2
	mov		%l6, %o3
	add		%sp, PTREGS_OFF, %o1
	mov		%l0, %o4

	call		do_notify_resume
	 wrpr		%g0, RTRAP_PSTATE, %pstate
	wrpr		%g0, RTRAP_PSTATE_IRQOFF, %pstate
	clr		%l6
	/* Signal delivery can modify pt_regs tstate, so we must
	 * reload it.
	 */
	ldx		[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
	sethi		%hi(0xf << 20), %l4
	and		%l1, %l4, %l4
	ba,pt		%xcc, __handle_user_windows_continue

	 andn		%l1, %l4, %l1
__handle_perfctrs:
	call		update_perfctrs
	 wrpr		%g0, RTRAP_PSTATE, %pstate
	wrpr		%g0, RTRAP_PSTATE_IRQOFF, %pstate
	ldub		[%g6 + TI_WSAVED], %o2
	brz,pt		%o2, 1f
	 nop
	/* Redo userwin+sched+sig checks */
	call		fault_in_user_windows

	 wrpr		%g0, RTRAP_PSTATE, %pstate
	wrpr		%g0, RTRAP_PSTATE_IRQOFF, %pstate
	ldx		[%g6 + TI_FLAGS], %l0
	andcc		%l0, _TIF_NEED_RESCHED, %g0
	be,pt		%xcc, 1f

	 nop
	call		schedule
	 wrpr		%g0, RTRAP_PSTATE, %pstate
	wrpr		%g0, RTRAP_PSTATE_IRQOFF, %pstate
	ldx		[%g6 + TI_FLAGS], %l0
1:	andcc		%l0, (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING), %g0

	be,pt		%xcc, __handle_perfctrs_continue
	 sethi		%hi(TSTATE_PEF), %o0
	clr		%o0
	mov		%l5, %o2
	mov		%l6, %o3
	add		%sp, PTREGS_OFF, %o1
	mov		%l0, %o4
	call		do_notify_resume

	 wrpr		%g0, RTRAP_PSTATE, %pstate
	wrpr		%g0, RTRAP_PSTATE_IRQOFF, %pstate
	clr		%l6
	/* Signal delivery can modify pt_regs tstate, so we must
	 * reload it.
	 */
	ldx		[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
	sethi		%hi(0xf << 20), %l4
	and		%l1, %l4, %l4
	andn		%l1, %l4, %l1
	ba,pt		%xcc, __handle_perfctrs_continue

	 sethi		%hi(TSTATE_PEF), %o0
__handle_userfpu:
	/* If the FPU was not actually used (FPRS_FEF clear), drop
	 * TSTATE_PEF from the tstate image we will restore.
	 */
	rd		%fprs, %l5
	andcc		%l5, FPRS_FEF, %g0
	sethi		%hi(TSTATE_PEF), %o0
	be,a,pn		%icc, __handle_userfpu_continue
	 andn		%l1, %o0, %l1
	ba,a,pt		%xcc, __handle_userfpu_continue

__handle_signal:
	clr		%o0
	mov		%l5, %o2
	mov		%l6, %o3
	add		%sp, PTREGS_OFF, %o1
	mov		%l0, %o4
	call		do_notify_resume
	 wrpr		%g0, RTRAP_PSTATE, %pstate
	wrpr		%g0, RTRAP_PSTATE_IRQOFF, %pstate
	clr		%l6

	/* Signal delivery can modify pt_regs tstate, so we must
	 * reload it.
	 */
	ldx		[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
	sethi		%hi(0xf << 20), %l4
	and		%l1, %l4, %l4
	ba,pt		%xcc, __handle_signal_continue
	 andn		%l1, %l4, %l1
150
	.align		64
	.globl		rtrap_irq, rtrap_clr_l6, rtrap, irqsz_patchme, rtrap_xcall
/* Main return-from-trap path.  %l1 holds the pt_regs tstate image,
 * %l3 is nonzero when returning to kernel mode, %l4 holds the PIL
 * field extracted from tstate, %l6 flags a syscall return.
 */
rtrap_irq:
rtrap_clr_l6:	clr			%l6
rtrap:
	/* Pending softirqs are run first; irqsz_patchme's shift count
	 * is patched at boot to index this CPU's irq_stat slot.
	 */
	ldub			[%g6 + TI_CPU], %l0
	sethi			%hi(irq_stat), %l2	! &softirq_active
	or			%l2, %lo(irq_stat), %l2	! &softirq_active
irqsz_patchme:	sllx		%l0, 0, %l0
	lduw			[%l2 + %l0], %l1	! softirq_pending
	cmp			%l1, 0

	/* mm/ultra.S:xcall_report_regs KNOWS about this load. */
	bne,pn			%icc, __handle_softirq
	 ldx			[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
__handle_softirq_continue:
rtrap_xcall:
	sethi			%hi(0xf << 20), %l4
	andcc			%l1, TSTATE_PRIV, %l3
	and			%l1, %l4, %l4
	bne,pn			%icc, to_kernel
	 andn			%l1, %l4, %l1

	/* We must hold IRQs off and atomically test schedule+signal
	 * state, then hold them off all the way back to userspace.
	 * If we are returning to kernel, none of this matters.
	 *
	 * If we do not do this, there is a window where we would do
	 * the tests, later the signal/resched event arrives but we do
	 * not process it since we are still in kernel mode.  It would
	 * take until the next local IRQ before the signal/resched
	 * event would be handled.
	 *
	 * This also means that if we have to deal with performance
	 * counters or user windows, we have to redo all of these
	 * sched+signal checks with IRQs disabled.
	 */
to_user:	wrpr		%g0, RTRAP_PSTATE_IRQOFF, %pstate
	wrpr			0, %pil
__handle_preemption_continue:
	ldx			[%g6 + TI_FLAGS], %l0
	sethi			%hi(_TIF_USER_WORK_MASK), %o0
	or			%o0, %lo(_TIF_USER_WORK_MASK), %o0
	andcc			%l0, %o0, %g0
	sethi			%hi(TSTATE_PEF), %o0
	be,pt			%xcc, user_nowork
	 andcc			%l1, %o0, %g0
	andcc			%l0, _TIF_NEED_RESCHED, %g0
	bne,pn			%xcc, __handle_preemption
	 andcc			%l0, (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING), %g0
	bne,pn			%xcc, __handle_signal
__handle_signal_continue:
	ldub			[%g6 + TI_WSAVED], %o2
	brnz,pn			%o2, __handle_user_windows
	 nop
__handle_user_windows_continue:
	ldx			[%g6 + TI_FLAGS], %l5
	andcc			%l5, _TIF_PERFCTR, %g0
	sethi			%hi(TSTATE_PEF), %o0
	bne,pn			%xcc, __handle_perfctrs
__handle_perfctrs_continue:
	 andcc			%l1, %o0, %g0

	/* This fpdepth clear is necessary for non-syscall rtraps only */
user_nowork:
	bne,pn			%xcc, __handle_userfpu
	 stb			%g0, [%g6 + TI_FPDEPTH]
__handle_userfpu_continue:

/* Common tail: reload all globals and input registers from the trap
 * frame, restore trap state (tstate/tpc/tnpc/pil), and leave via
 * retry.  Both user and kernel returns funnel through here.
 */
rt_continue:	ldx		[%sp + PTREGS_OFF + PT_V9_G1], %g1
	ldx			[%sp + PTREGS_OFF + PT_V9_G2], %g2

	ldx			[%sp + PTREGS_OFF + PT_V9_G3], %g3
	ldx			[%sp + PTREGS_OFF + PT_V9_G4], %g4
	ldx			[%sp + PTREGS_OFF + PT_V9_G5], %g5
	mov			TSB_REG, %g6
	brnz,a,pn		%l3, 1f
	 ldxa			[%g6] ASI_IMMU, %g5
1:	ldx			[%sp + PTREGS_OFF + PT_V9_G6], %g6
	ldx			[%sp + PTREGS_OFF + PT_V9_G7], %g7
	wrpr			%g0, RTRAP_PSTATE_AG_IRQOFF, %pstate
	ldx			[%sp + PTREGS_OFF + PT_V9_I0], %i0
	ldx			[%sp + PTREGS_OFF + PT_V9_I1], %i1

	ldx			[%sp + PTREGS_OFF + PT_V9_I2], %i2
	ldx			[%sp + PTREGS_OFF + PT_V9_I3], %i3
	ldx			[%sp + PTREGS_OFF + PT_V9_I4], %i4
	ldx			[%sp + PTREGS_OFF + PT_V9_I5], %i5
	ldx			[%sp + PTREGS_OFF + PT_V9_I6], %i6
	ldx			[%sp + PTREGS_OFF + PT_V9_I7], %i7
	ldx			[%sp + PTREGS_OFF + PT_V9_TPC], %l2
	ldx			[%sp + PTREGS_OFF + PT_V9_TNPC], %o2

	ld			[%sp + PTREGS_OFF + PT_V9_Y], %o3
	wr			%o3, %g0, %y
	srl			%l4, 20, %l4
	wrpr			%l4, 0x0, %pil
	wrpr			%g0, 0x1, %tl
	wrpr			%l1, %g0, %tstate
	wrpr			%l2, %g0, %tpc
	wrpr			%o2, %g0, %tnpc

	brnz,pn			%l3, kern_rtt
	 mov			PRIMARY_CONTEXT, %l7
	ldxa			[%l7 + %l7] ASI_DMMU, %l0
cplus_rtrap_insn_1:
	/* The sethi below is patched (see cheetah_plus_patch_rtrap) to
	 * load the cheetah-plus nucleus context bits.
	 */
	sethi			%hi(0), %l1
	sllx			%l1, 32, %l1
	or			%l0, %l1, %l0
	stxa			%l0, [%l7] ASI_DMMU
	flush			%g6
	rdpr			%wstate, %l1
	rdpr			%otherwin, %l2
	srl			%l1, 3, %l1

	wrpr			%l2, %g0, %canrestore
	wrpr			%l1, %g0, %wstate
	wrpr			%g0, %g0, %otherwin
	restore
	rdpr			%canrestore, %g1
	wrpr			%g1, 0x0, %cleanwin
	retry
	nop

kern_rtt:	restore
	retry
/* Return-to-kernel path: optionally preempt (CONFIG_PREEMPT), then
 * restore any FPU state that a nested trap saved on the per-thread
 * FP-save stack (TI_FPDEPTH) before falling into rt_continue.
 */
to_kernel:
#ifdef CONFIG_PREEMPT
	ldsw			[%g6 + TI_PRE_COUNT], %l5
	brnz			%l5, kern_fpucheck
	 ldx			[%g6 + TI_FLAGS], %l5
	andcc			%l5, _TIF_NEED_RESCHED, %g0
	be,pt			%xcc, kern_fpucheck
	 srl			%l4, 20, %l5
	cmp			%l5, 0
	bne,pn			%xcc, kern_fpucheck
	 sethi			%hi(PREEMPT_ACTIVE), %l6
	stw			%l6, [%g6 + TI_PRE_COUNT]
	call			schedule
	 nop
	ba,pt			%xcc, rtrap
	 stw			%g0, [%g6 + TI_PRE_COUNT]
#endif
kern_fpucheck:	ldub		[%g6 + TI_FPDEPTH], %l5
	brz,pt			%l5, rt_continue
	 srl			%l5, 1, %o0
	add			%g6, TI_FPSAVED, %l6
	ldub			[%l6 + %o0], %l2
	sub			%l5, 2, %l5

	/* %l2 holds the saved FPRS bits for this depth; restore only
	 * the halves (DL = %f0-%f31, DU = %f32-%f63) that were saved.
	 */
	add			%g6, TI_GSR, %o1
	andcc			%l2, (FPRS_FEF|FPRS_DU), %g0
	be,pt			%icc, 2f
	 and			%l2, FPRS_DL, %l6
	andcc			%l2, FPRS_FEF, %g0
	be,pn			%icc, 5f
	 sll			%o0, 3, %o5
	rd			%fprs, %g1

	wr			%g1, FPRS_FEF, %fprs
	ldx			[%o1 + %o5], %g1
	add			%g6, TI_XFSR, %o1
	membar			#StoreLoad | #LoadLoad
	sll			%o0, 8, %o2
	add			%g6, TI_FPREGS, %o3
	brz,pn			%l6, 1f
	 add			%g6, TI_FPREGS+0x40, %o4

	ldda			[%o3 + %o2] ASI_BLK_P, %f0
	ldda			[%o4 + %o2] ASI_BLK_P, %f16
1:	andcc			%l2, FPRS_DU, %g0
	be,pn			%icc, 1f
	 wr			%g1, 0, %gsr
	add			%o2, 0x80, %o2
	ldda			[%o3 + %o2] ASI_BLK_P, %f32
	ldda			[%o4 + %o2] ASI_BLK_P, %f48

1:	membar			#Sync
	ldx			[%o1 + %o5], %fsr
2:	stb			%l5, [%g6 + TI_FPDEPTH]
	ba,pt			%xcc, rt_continue
	 nop
5:	wr			%g0, FPRS_FEF, %fprs
	membar			#StoreLoad | #LoadLoad
	sll			%o0, 8, %o2

	add			%g6, TI_FPREGS+0x80, %o3
	add			%g6, TI_FPREGS+0xc0, %o4
	ldda			[%o3 + %o2] ASI_BLK_P, %f32
	ldda			[%o4 + %o2] ASI_BLK_P, %f48
	membar			#Sync
	wr			%g0, FPRS_DU, %fprs
	ba,pt			%xcc, rt_continue
	 stb			%l5, [%g6 + TI_FPDEPTH]
345
/* Replacement instruction for cplus_rtrap_insn_1 above: loads the
 * cheetah-plus nucleus context value instead of zero.
 */
cplus_rinsn_1:
	sethi			%uhi(CTX_CHEETAH_PLUS_NUC), %l1

	.globl			cheetah_plus_patch_rtrap
cheetah_plus_patch_rtrap:
	/* We configure the dTLB512_0 for 4MB pages and the
	 * dTLB512_1 for 8K pages when in context zero.
	 *
	 * Boot-time self-patching: copy the cplus_rinsn_1 opcode over
	 * the cplus_rtrap_insn_1 slot and flush the I-cache line.
	 */
	sethi			%hi(cplus_rinsn_1), %o0
	sethi			%hi(cplus_rtrap_insn_1), %o2
	lduw			[%o0 + %lo(cplus_rinsn_1)], %o1
	or			%o2, %lo(cplus_rtrap_insn_1), %o2
	stw			%o1, [%o2]
	flush			%o2

	retl
	 nop
diff --git a/arch/sparc64/kernel/sbus.c b/arch/sparc64/kernel/sbus.c
new file mode 100644
index 000000000000..14d9c3a21b9a
--- /dev/null
+++ b/arch/sparc64/kernel/sbus.c
@@ -0,0 +1,1243 @@
1/* $Id: sbus.c,v 1.19 2002/01/23 11:27:32 davem Exp $
2 * sbus.c: UltraSparc SBUS controller support.
3 *
4 * Copyright (C) 1999 David S. Miller (davem@redhat.com)
5 */
6
7#include <linux/kernel.h>
8#include <linux/types.h>
9#include <linux/mm.h>
10#include <linux/spinlock.h>
11#include <linux/slab.h>
12#include <linux/init.h>
13#include <linux/interrupt.h>
14
15#include <asm/page.h>
16#include <asm/sbus.h>
17#include <asm/io.h>
18#include <asm/upa.h>
19#include <asm/cache.h>
20#include <asm/dma.h>
21#include <asm/irq.h>
22#include <asm/starfire.h>
23
24#include "iommu_common.h"
25
26/* These should be allocated on an SMP_CACHE_BYTES
27 * aligned boundary for optimal performance.
28 *
29 * On SYSIO, using an 8K page size we have 1GB of SBUS
30 * DMA space mapped. We divide this space into equally
31 * sized clusters. We allocate a DMA mapping from the
32 * cluster that matches the order of the allocation, or
33 * if the order is greater than the number of clusters,
34 * we try to allocate from the last cluster.
35 */
36
37#define NCLUSTERS 8UL
38#define ONE_GIG (1UL * 1024UL * 1024UL * 1024UL)
39#define CLUSTER_SIZE (ONE_GIG / NCLUSTERS)
40#define CLUSTER_MASK (CLUSTER_SIZE - 1)
41#define CLUSTER_NPAGES (CLUSTER_SIZE >> IO_PAGE_SHIFT)
42#define MAP_BASE ((u32)0xc0000000)
43
44struct sbus_iommu {
45/*0x00*/spinlock_t		lock;
46
47/*0x08*/iopte_t			*page_table;
48/*0x10*/unsigned long		strbuf_regs;
49/*0x18*/unsigned long		iommu_regs;
50/*0x20*/unsigned long		sbus_control_reg;
51
	/* Completion flag for streaming-buffer flushes; hardware writes
	 * here (see strbuf_flush) so it must be volatile.
	 */
52/*0x28*/volatile unsigned long	strbuf_flushflag;
53
54	/* If NCLUSTERS is ever decreased to 4 or lower,
55	 * you must increase the size of the type of
56	 * these counters.  You have been duly warned. -DaveM
57	 */
58/*0x30*/struct {
59		u16	next;
60		u16	flush;
61	} alloc_info[NCLUSTERS];
62
63	/* The lowest used consistent mapping entry.  Since
64	 * we allocate consistent maps out of cluster 0 this
65	 * is relative to the beginning of cluster 0.
66	 */
67/*0x50*/u32		lowest_consistent_map;
68};
69
70/* Offsets from iommu_regs */
71#define SYSIO_IOMMUREG_BASE 0x2400UL
72#define IOMMU_CONTROL (0x2400UL - 0x2400UL) /* IOMMU control register */
73#define IOMMU_TSBBASE (0x2408UL - 0x2400UL) /* TSB base address register */
74#define IOMMU_FLUSH (0x2410UL - 0x2400UL) /* IOMMU flush register */
75#define IOMMU_VADIAG (0x4400UL - 0x2400UL) /* SBUS virtual address diagnostic */
76#define IOMMU_TAGCMP (0x4408UL - 0x2400UL) /* TLB tag compare diagnostics */
77#define IOMMU_LRUDIAG (0x4500UL - 0x2400UL) /* IOMMU LRU queue diagnostics */
78#define IOMMU_TAGDIAG (0x4580UL - 0x2400UL) /* TLB tag diagnostics */
79#define IOMMU_DRAMDIAG (0x4600UL - 0x2400UL) /* TLB data RAM diagnostics */
80
81#define IOMMU_DRAM_VALID (1UL << 30UL)
82
/* Invalidate the entire IOMMU TLB by zeroing all 16 tag diagnostic
 * entries, then read the SBUS control register (presumably to flush
 * the posted writes to the hardware -- TODO confirm against SYSIO docs).
 * Afterwards every cluster's flush point is caught up to its allocation
 * point, since nothing stale can remain in the TLB.
 * Caller must hold iommu->lock.
 */
83static void __iommu_flushall(struct sbus_iommu *iommu)
84{
85	unsigned long tag = iommu->iommu_regs + IOMMU_TAGDIAG;
86	int entry;
87
88	for (entry = 0; entry < 16; entry++) {
89		upa_writeq(0, tag);
90		tag += 8UL;
91	}
92	upa_readq(iommu->sbus_control_reg);
93
94	for (entry = 0; entry < NCLUSTERS; entry++) {
95		iommu->alloc_info[entry].flush =
96			iommu->alloc_info[entry].next;
97	}
98}
99
/* Flush @npages individual IOMMU TLB entries starting at DVMA address
 * @base (one write to IOMMU_FLUSH per IO page, highest page first),
 * then read back the control register to push the writes out.
 */
100static void iommu_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages)
101{
102	while (npages--)
103		upa_writeq(base + (npages << IO_PAGE_SHIFT),
104			   iommu->iommu_regs + IOMMU_FLUSH);
105	upa_readq(iommu->sbus_control_reg);
106}
107
108/* Offsets from strbuf_regs */
109#define SYSIO_STRBUFREG_BASE 0x2800UL
110#define STRBUF_CONTROL (0x2800UL - 0x2800UL) /* Control */
111#define STRBUF_PFLUSH (0x2808UL - 0x2800UL) /* Page flush/invalidate */
112#define STRBUF_FSYNC (0x2810UL - 0x2800UL) /* Flush synchronization */
113#define STRBUF_DRAMDIAG (0x5000UL - 0x2800UL) /* data RAM diagnostic */
114#define STRBUF_ERRDIAG (0x5400UL - 0x2800UL) /* error status diagnostics */
115#define STRBUF_PTAGDIAG (0x5800UL - 0x2800UL) /* Page tag diagnostics */
116#define STRBUF_LTAGDIAG (0x5900UL - 0x2800UL) /* Line tag diagnostics */
117
118#define STRBUF_TAG_VALID 0x02UL
119
/* Flush the streaming buffer for @npages IO pages starting at @base:
 * one PFLUSH write per page, then arm an FSYNC pointing at our
 * flushflag word.  The hardware sets the flag (via DMA to its physical
 * address) when the flush completes; we spin on it with a #LoadLoad
 * membar until it becomes non-zero.  Caller must hold iommu->lock.
 */
120static void strbuf_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages)
121{
122	iommu->strbuf_flushflag = 0UL;
123	while (npages--)
124		upa_writeq(base + (npages << IO_PAGE_SHIFT),
125			   iommu->strbuf_regs + STRBUF_PFLUSH);
126
127	/* Whoopee cushion! */
128	upa_writeq(__pa(&iommu->strbuf_flushflag),
129		   iommu->strbuf_regs + STRBUF_FSYNC);
130	upa_readq(iommu->sbus_control_reg);
131	while (iommu->strbuf_flushflag == 0UL)
132		membar("#LoadLoad");
133}
134
/* Allocate @npages contiguous IOPTEs for a streaming mapping.
 * The cluster index cnum is the order of the allocation (entries in
 * cluster c are allocated in runs of 2^c pages); allocations larger
 * than the biggest cluster order take nent runs from the last cluster.
 * Cluster 0 is shared with consistent mappings, so its scan is bounded
 * by lowest_consistent_map.  Returns the first IOPTE of the run, or
 * NULL if a full rotation found no free run.  Caller holds iommu->lock.
 */
135static iopte_t *alloc_streaming_cluster(struct sbus_iommu *iommu, unsigned long npages)
136{
137	iopte_t *iopte, *limit, *first, *cluster;
138	unsigned long cnum, ent, nent, flush_point, found;
139
140	cnum = 0;
141	nent = 1;
142	while ((1UL << cnum) < npages)
143		cnum++;
144	if(cnum >= NCLUSTERS) {
145		nent = 1UL << (cnum - NCLUSTERS);
146		cnum = NCLUSTERS - 1;
147	}
148	iopte  = iommu->page_table + (cnum * CLUSTER_NPAGES);
149
150	if (cnum == 0)
151		limit = (iommu->page_table +
152			 iommu->lowest_consistent_map);
153	else
154		limit = (iopte + CLUSTER_NPAGES);
155
	/* Resume scanning where the previous allocation left off. */
156	iopte += ((ent = iommu->alloc_info[cnum].next) << cnum);
157	flush_point = iommu->alloc_info[cnum].flush;
158
159	first = iopte;
160	cluster = NULL;
161	found = 0;
162	for (;;) {
163		if (iopte_val(*iopte) == 0UL) {
164			found++;
165			if (!cluster)
166				cluster = iopte;
167		} else {
168			/* Used cluster in the way */
169			cluster = NULL;
170			found = 0;
171		}
172
173		if (found == nent)
174			break;
175
176		iopte += (1 << cnum);
177		ent++;
178		if (iopte >= limit) {
179			iopte = (iommu->page_table + (cnum * CLUSTER_NPAGES));
180			ent = 0;
181
182			/* Multiple cluster allocations must not wrap */
183			cluster = NULL;
184			found = 0;
185		}
		/* Reaching the flush point means we are about to reuse
		 * entries whose old translations may still sit in the
		 * IOMMU TLB -- flush everything first.
		 */
186		if (ent == flush_point)
187			__iommu_flushall(iommu);
		/* Wrapped all the way around: cluster is exhausted. */
188		if (iopte == first)
189			goto bad;
190	}
191
192	/* ent/iopte points to the last cluster entry we're going to use,
193	 * so save our place for the next allocation.
194	 */
195	if ((iopte + (1 << cnum)) >= limit)
196		ent = 0;
197	else
198		ent = ent + 1;
199	iommu->alloc_info[cnum].next = ent;
200	if (ent == flush_point)
201		__iommu_flushall(iommu);
202
203	/* I've got your streaming cluster right here buddy boy... */
204	return cluster;
205
206bad:
207	printk(KERN_EMERG "sbus: alloc_streaming_cluster of npages(%ld) failed!\n",
208	       npages);
209	return NULL;
210}
211
/* Free a streaming mapping previously handed out by
 * alloc_streaming_cluster().  Recomputes the cluster order from
 * @npages exactly as the allocator did, zeroes the first IOPTE of each
 * 2^cnum-sized run, and pulls the cluster's flush point back if the
 * freed slot would otherwise be reused without an IOMMU flush.
 * Caller holds iommu->lock.
 */
212static void free_streaming_cluster(struct sbus_iommu *iommu, u32 base, unsigned long npages)
213{
214	unsigned long cnum, ent, nent;
215	iopte_t *iopte;
216
217	cnum = 0;
218	nent = 1;
219	while ((1UL << cnum) < npages)
220		cnum++;
221	if(cnum >= NCLUSTERS) {
222		nent = 1UL << (cnum - NCLUSTERS);
223		cnum = NCLUSTERS - 1;
224	}
225	ent = (base & CLUSTER_MASK) >> (IO_PAGE_SHIFT + cnum);
226	iopte = iommu->page_table + ((base - MAP_BASE) >> IO_PAGE_SHIFT);
227	do {
228		iopte_val(*iopte) = 0UL;
229		iopte += 1 << cnum;
230	} while(--nent);
231
232	/* If the global flush might not have caught this entry,
233	 * adjust the flush point such that we will flush before
234	 * ever trying to reuse it.
235	 */
236#define between(X,Y,Z)	(((Z) - (Y)) >= ((X) - (Y)))
237	if (between(ent, iommu->alloc_info[cnum].next, iommu->alloc_info[cnum].flush))
238		iommu->alloc_info[cnum].flush = ent;
239#undef between
240}
241
242/* We allocate consistent mappings from the end of cluster zero. */
/* Scan cluster 0 backwards from its top for a run of @npages invalid
 * IOPTEs.  On success, update lowest_consistent_map (which bounds the
 * streaming allocator's use of cluster 0) and return the first IOPTE
 * of the run; return NULL if no run exists.  Caller holds iommu->lock.
 */
243static iopte_t *alloc_consistent_cluster(struct sbus_iommu *iommu, unsigned long npages)
244{
245	iopte_t *iopte;
246
247	iopte = iommu->page_table + (1 * CLUSTER_NPAGES);
248	while (iopte > iommu->page_table) {
249		iopte--;
250		if (!(iopte_val(*iopte) & IOPTE_VALID)) {
251			unsigned long tmp = npages;
252
			/* Check that the remaining npages-1 entries below
			 * this one are free as well.
			 */
253			while (--tmp) {
254				iopte--;
255				if (iopte_val(*iopte) & IOPTE_VALID)
256					break;
257			}
258			if (tmp == 0) {
259				u32 entry = (iopte - iommu->page_table);
260
261				if (entry < iommu->lowest_consistent_map)
262					iommu->lowest_consistent_map = entry;
263				return iopte;
264			}
265		}
266	}
267	return NULL;
268}
269
/* Release @npages consistent IOPTEs starting at DVMA @base.  If the
 * freed run began at lowest_consistent_map, walk upward past any
 * already-free entries to recompute the new low-water mark before
 * clearing the run.  Caller holds iommu->lock.
 */
270static void free_consistent_cluster(struct sbus_iommu *iommu, u32 base, unsigned long npages)
271{
272	iopte_t *iopte = iommu->page_table + ((base - MAP_BASE) >> IO_PAGE_SHIFT);
273
274	if ((iopte - iommu->page_table) == iommu->lowest_consistent_map) {
275		iopte_t *walk = iopte + npages;
276		iopte_t *limit;
277
278		limit = iommu->page_table + CLUSTER_NPAGES;
279		while (walk < limit) {
280			if (iopte_val(*walk) != 0UL)
281				break;
282			walk++;
283		}
284		iommu->lowest_consistent_map =
285			(walk - iommu->page_table);
286	}
287
288	while (npages--)
289		*iopte++ = __iopte(0UL);
290}
291
/* Allocate a coherent DMA buffer of @size bytes for @sdev.  Returns the
 * zeroed kernel virtual address and stores the device-visible DVMA
 * address in *@dvma_addr; returns NULL on bad arguments, allocations
 * of 2^10 pages or more, or IOMMU/page exhaustion.  Pair with
 * sbus_free_consistent().
 */
292void *sbus_alloc_consistent(struct sbus_dev *sdev, size_t size, dma_addr_t *dvma_addr)
293{
294	unsigned long order, first_page, flags;
295	struct sbus_iommu *iommu;
296	iopte_t *iopte;
297	void *ret;
298	int npages;
299
300	if (size <= 0 || sdev == NULL || dvma_addr == NULL)
301		return NULL;
302
303	size = IO_PAGE_ALIGN(size);
304	order = get_order(size);
305	if (order >= 10)
306		return NULL;
307	first_page = __get_free_pages(GFP_KERNEL, order);
308	if (first_page == 0UL)
309		return NULL;
310	memset((char *)first_page, 0, PAGE_SIZE << order);
311
312	iommu = sdev->bus->iommu;
313
314	spin_lock_irqsave(&iommu->lock, flags);
315	iopte = alloc_consistent_cluster(iommu, size >> IO_PAGE_SHIFT);
316	if (iopte == NULL) {
317		spin_unlock_irqrestore(&iommu->lock, flags);
318		free_pages(first_page, order);
319		return NULL;
320	}
321
322	/* Ok, we're committed at this point. */
323	*dvma_addr = MAP_BASE +	((iopte - iommu->page_table) << IO_PAGE_SHIFT);
324	ret = (void *) first_page;
325	npages = size >> IO_PAGE_SHIFT;
	/* Fill in one cacheable, writable IOPTE per IO page. */
326	while (npages--) {
327		*iopte++ = __iopte(IOPTE_VALID | IOPTE_CACHE | IOPTE_WRITE |
328				   (__pa(first_page) & IOPTE_PAGE));
329		first_page += IO_PAGE_SIZE;
330	}
331	iommu_flush(iommu, *dvma_addr, size >> IO_PAGE_SHIFT);
332	spin_unlock_irqrestore(&iommu->lock, flags);
333
334	return ret;
335}
336
/* Free a buffer obtained from sbus_alloc_consistent(): tear down the
 * IOPTEs, flush the IOMMU TLB for the range, then return the pages.
 * @size must match the size passed at allocation time (it determines
 * both the page count and the buddy order).
 */
337void sbus_free_consistent(struct sbus_dev *sdev, size_t size, void *cpu, dma_addr_t dvma)
338{
339	unsigned long order, npages;
340	struct sbus_iommu *iommu;
341
342	if (size <= 0 || sdev == NULL || cpu == NULL)
343		return;
344
345	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
346	iommu = sdev->bus->iommu;
347
348	spin_lock_irq(&iommu->lock);
349	free_consistent_cluster(iommu, dvma, npages);
350	iommu_flush(iommu, dvma, npages);
351	spin_unlock_irq(&iommu->lock);
352
	/* Mirrors the order >= 10 rejection in sbus_alloc_consistent(). */
353	order = get_order(size);
354	if (order < 10)
355		free_pages((unsigned long)cpu, order);
356}
357
/* Map a single kernel buffer for streaming DMA.  Rounds the region out
 * to IO page boundaries, allocates a streaming IOPTE run, and returns
 * the DVMA address with the original intra-page offset folded back in.
 * Mappings are write-enabled unless @dir is SBUS_DMA_TODEVICE.
 * BUGs on @dir == SBUS_DMA_NONE or on allocation failure.
 */
358dma_addr_t sbus_map_single(struct sbus_dev *sdev, void *ptr, size_t size, int dir)
359{
360	struct sbus_iommu *iommu = sdev->bus->iommu;
361	unsigned long npages, pbase, flags;
362	iopte_t *iopte;
363	u32 dma_base, offset;
364	unsigned long iopte_bits;
365
366	if (dir == SBUS_DMA_NONE)
367		BUG();
368
369	pbase = (unsigned long) ptr;
370	offset = (u32) (pbase & ~IO_PAGE_MASK);
371	size = (IO_PAGE_ALIGN(pbase + size) - (pbase & IO_PAGE_MASK));
372	pbase = (unsigned long) __pa(pbase & IO_PAGE_MASK);
373
374	spin_lock_irqsave(&iommu->lock, flags);
375	npages = size >> IO_PAGE_SHIFT;
376	iopte = alloc_streaming_cluster(iommu, npages);
377	if (iopte == NULL)
378		goto bad;
379	dma_base = MAP_BASE + ((iopte - iommu->page_table) << IO_PAGE_SHIFT);
380	npages = size >> IO_PAGE_SHIFT;
381	iopte_bits = IOPTE_VALID | IOPTE_STBUF | IOPTE_CACHE;
382	if (dir != SBUS_DMA_TODEVICE)
383		iopte_bits |= IOPTE_WRITE;
384	while (npages--) {
385		*iopte++ = __iopte(iopte_bits | (pbase & IOPTE_PAGE));
386		pbase += IO_PAGE_SIZE;
387	}
388	npages = size >> IO_PAGE_SHIFT;
389	spin_unlock_irqrestore(&iommu->lock, flags);
390
391	return (dma_base | offset);
392
393bad:
394	spin_unlock_irqrestore(&iommu->lock, flags);
395	BUG();
396	return 0;
397}
398
/* Undo sbus_map_single(): free the streaming IOPTE run covering the
 * mapping and flush the streaming buffer so any buffered device data
 * reaches memory before the range is reused.
 */
399void sbus_unmap_single(struct sbus_dev *sdev, dma_addr_t dma_addr, size_t size, int direction)
400{
401	struct sbus_iommu *iommu = sdev->bus->iommu;
402	u32 dma_base = dma_addr & IO_PAGE_MASK;
403	unsigned long flags;
404
405	size = (IO_PAGE_ALIGN(dma_addr + size) - dma_base);
406
407	spin_lock_irqsave(&iommu->lock, flags);
408	free_streaming_cluster(iommu, dma_base, size >> IO_PAGE_SHIFT);
409	strbuf_flush(iommu, dma_base, size >> IO_PAGE_SHIFT);
410	spin_unlock_irqrestore(&iommu->lock, flags);
411}
412
/* Physical address of a scatterlist entry's data. */
413#define SG_ENT_PHYS_ADDRESS(SG)	\
414	(__pa(page_address((SG)->page)) + (SG)->offset)

/* Populate IOPTEs for a coalesced scatterlist.  @sg holds @nelems
 * input entries; prepare_sg() (in iommu_common.c, outside this view)
 * has already collapsed them into @nused DMA segments whose
 * dma_address currently holds only the intra-page offset.  For each
 * DMA segment this walks the underlying input entries and writes one
 * IOPTE per IO page spanned.  NOTE(review): intricate pointer/length
 * arithmetic -- keep in sync with prepare_sg()/verify_sglist().
 */
416static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg, int nused, int nelems, unsigned long iopte_bits)
417{
418	struct scatterlist *dma_sg = sg;
419	struct scatterlist *sg_end = sg + nelems;
420	int i;
421
422	for (i = 0; i < nused; i++) {
423		unsigned long pteval = ~0UL;
424		u32 dma_npages;
425
		/* Number of IO pages this DMA segment covers. */
426		dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
427			      dma_sg->dma_length +
428			      ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
429		do {
430			unsigned long offset;
431			signed int len;
432
433			/* If we are here, we know we have at least one
434			 * more page to map.  So walk forward until we
435			 * hit a page crossing, and begin creating new
436			 * mappings from that spot.
437			 */
438			for (;;) {
439				unsigned long tmp;
440
441				tmp = (unsigned long) SG_ENT_PHYS_ADDRESS(sg);
442				len = sg->length;
443				if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
444					pteval = tmp & IO_PAGE_MASK;
445					offset = tmp & (IO_PAGE_SIZE - 1UL);
446					break;
447				}
448				if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
449					pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
450					offset = 0UL;
451					len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
452					break;
453				}
454				sg++;
455			}
456
457			pteval = ((pteval & IOPTE_PAGE) | iopte_bits);
458			while (len > 0) {
459				*iopte++ = __iopte(pteval);
460				pteval += IO_PAGE_SIZE;
461				len -= (IO_PAGE_SIZE - offset);
462				offset = 0;
463				dma_npages--;
464			}
465
466			pteval = (pteval & IOPTE_PAGE) + len;
467			sg++;
468
469			/* Skip over any tail mappings we've fully mapped,
470			 * adjusting pteval along the way.  Stop when we
471			 * detect a page crossing event.
472			 */
473			while (sg < sg_end &&
474			       (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
475			       (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
476			       ((pteval ^
477				 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
478				pteval += sg->length;
479				sg++;
480			}
481			if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
482				pteval = ~0UL;
483		} while (dma_npages != 0);
484		dma_sg++;
485	}
486}
487
/* Map a scatterlist for streaming DMA.  Single-entry lists take the
 * sbus_map_single() fast path.  Otherwise prepare_sg() coalesces the
 * entries (leaving per-segment offsets in dma_address), a streaming
 * IOPTE run is allocated, segment dma_addresses are rebased onto the
 * DVMA range, and fill_sg() writes the page tables.  Returns the
 * number of DMA segments actually used.  BUGs on SBUS_DMA_NONE or on
 * allocation failure.
 */
488int sbus_map_sg(struct sbus_dev *sdev, struct scatterlist *sg, int nents, int dir)
489{
490	struct sbus_iommu *iommu = sdev->bus->iommu;
491	unsigned long flags, npages;
492	iopte_t *iopte;
493	u32 dma_base;
494	struct scatterlist *sgtmp;
495	int used;
496	unsigned long iopte_bits;
497
498	if (dir == SBUS_DMA_NONE)
499		BUG();
500
501	/* Fast path single entry scatterlists. */
502	if (nents == 1) {
503		sg->dma_address =
504			sbus_map_single(sdev,
505					(page_address(sg->page) + sg->offset),
506					sg->length, dir);
507		sg->dma_length = sg->length;
508		return 1;
509	}
510
511	npages = prepare_sg(sg, nents);
512
513	spin_lock_irqsave(&iommu->lock, flags);
514	iopte = alloc_streaming_cluster(iommu, npages);
515	if (iopte == NULL)
516		goto bad;
517	dma_base = MAP_BASE + ((iopte - iommu->page_table) << IO_PAGE_SHIFT);
518
519	/* Normalize DVMA addresses. */
520	sgtmp = sg;
521	used = nents;
522
523	while (used && sgtmp->dma_length) {
524		sgtmp->dma_address += dma_base;
525		sgtmp++;
526		used--;
527	}
528	used = nents - used;
529
530	iopte_bits = IOPTE_VALID | IOPTE_STBUF | IOPTE_CACHE;
531	if (dir != SBUS_DMA_TODEVICE)
532		iopte_bits |= IOPTE_WRITE;
533
534	fill_sg(iopte, sg, used, nents, iopte_bits);
535#ifdef VERIFY_SG
536	verify_sglist(sg, nents, iopte, npages);
537#endif
538	spin_unlock_irqrestore(&iommu->lock, flags);
539
540	return used;
541
542bad:
543	spin_unlock_irqrestore(&iommu->lock, flags);
544	BUG();
545	return 0;
546}
547
/* Undo sbus_map_sg().  The used DMA segments occupy a contiguous DVMA
 * range starting at sg[0]'s page; its extent is found by walking to the
 * first segment with dma_length == 0 (the coalescing terminator).
 */
548void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sg, int nents, int direction)
549{
550	unsigned long size, flags;
551	struct sbus_iommu *iommu;
552	u32 dvma_base;
553	int i;
554
555	/* Fast path single entry scatterlists. */
556	if (nents == 1) {
557		sbus_unmap_single(sdev, sg->dma_address, sg->dma_length, direction);
558		return;
559	}
560
561	dvma_base = sg[0].dma_address & IO_PAGE_MASK;
562	for (i = 0; i < nents; i++) {
563		if (sg[i].dma_length == 0)
564			break;
565	}
	/* i now indexes the last used segment. */
566	i--;
567	size = IO_PAGE_ALIGN(sg[i].dma_address + sg[i].dma_length) - dvma_base;
568
569	iommu = sdev->bus->iommu;
570	spin_lock_irqsave(&iommu->lock, flags);
571	free_streaming_cluster(iommu, dvma_base, size >> IO_PAGE_SHIFT);
572	strbuf_flush(iommu, dvma_base, size >> IO_PAGE_SHIFT);
573	spin_unlock_irqrestore(&iommu->lock, flags);
574}
575
/* Make device-written data visible to the CPU for a single streaming
 * mapping: flush the streaming buffer over the page-rounded range.
 */
576void sbus_dma_sync_single_for_cpu(struct sbus_dev *sdev, dma_addr_t base, size_t size, int direction)
577{
578	struct sbus_iommu *iommu = sdev->bus->iommu;
579	unsigned long flags;
580
581	size = (IO_PAGE_ALIGN(base + size) - (base & IO_PAGE_MASK));
582
583	spin_lock_irqsave(&iommu->lock, flags);
584	strbuf_flush(iommu, base & IO_PAGE_MASK, size >> IO_PAGE_SHIFT);
585	spin_unlock_irqrestore(&iommu->lock, flags);
586}
587
/* Intentionally a no-op: no work is needed before handing a streaming
 * mapping back to the device on this hardware.
 */
588void sbus_dma_sync_single_for_device(struct sbus_dev *sdev, dma_addr_t base, size_t size, int direction)
589{
590}
591
/* Scatterlist analogue of sbus_dma_sync_single_for_cpu(): compute the
 * contiguous DVMA extent of the used segments (terminated by the first
 * dma_length == 0 entry) and flush the streaming buffer over it.
 */
592void sbus_dma_sync_sg_for_cpu(struct sbus_dev *sdev, struct scatterlist *sg, int nents, int direction)
593{
594	struct sbus_iommu *iommu = sdev->bus->iommu;
595	unsigned long flags, size;
596	u32 base;
597	int i;
598
599	base = sg[0].dma_address & IO_PAGE_MASK;
600	for (i = 0; i < nents; i++) {
601		if (sg[i].dma_length == 0)
602			break;
603	}
604	i--;
605	size = IO_PAGE_ALIGN(sg[i].dma_address + sg[i].dma_length) - base;
606
607	spin_lock_irqsave(&iommu->lock, flags);
608	strbuf_flush(iommu, base, size >> IO_PAGE_SHIFT);
609	spin_unlock_irqrestore(&iommu->lock, flags);
610}
611
/* Intentionally a no-op, matching sbus_dma_sync_single_for_device(). */
612void sbus_dma_sync_sg_for_device(struct sbus_dev *sdev, struct scatterlist *sg, int nents, int direction)
613{
614}
615
616/* Enable 64-bit DVMA mode for the given device. */
/* Locate the per-slot SBUS config register (slots 0-3 external,
 * 13-15 onboard; anything else is silently ignored), then set the
 * extended-transfer-mode bit (bit 14) plus burst-size enable bits
 * derived from @bursts.  Does nothing if the mode is already enabled.
 */
617void sbus_set_sbus64(struct sbus_dev *sdev, int bursts)
618{
619	struct sbus_iommu *iommu = sdev->bus->iommu;
620	int slot = sdev->slot;
621	unsigned long cfg_reg;
622	u64 val;
623
624	cfg_reg = iommu->sbus_control_reg;
625	switch (slot) {
626	case 0:
627		cfg_reg += 0x20UL;
628		break;
629	case 1:
630		cfg_reg += 0x28UL;
631		break;
632	case 2:
633		cfg_reg += 0x30UL;
634		break;
635	case 3:
636		cfg_reg += 0x38UL;
637		break;
638	case 13:
639		cfg_reg += 0x40UL;
640		break;
641	case 14:
642		cfg_reg += 0x48UL;
643		break;
644	case 15:
645		cfg_reg += 0x50UL;
646		break;
647
648	default:
649		return;
650	};
651
652	val = upa_readq(cfg_reg);
653	if (val & (1UL << 14UL)) {
654		/* Extended transfer mode already enabled. */
655		return;
656	}
657
658	val |= (1UL << 14UL);
659
660	if (bursts & DMA_BURST8)
661		val |= (1UL << 1UL);
662	if (bursts & DMA_BURST16)
663		val |= (1UL << 2UL);
664	if (bursts & DMA_BURST32)
665		val |= (1UL << 3UL);
666	if (bursts & DMA_BURST64)
667		val |= (1UL << 4UL);
668	upa_writeq(val, cfg_reg);
669}
670
671/* SBUS SYSIO INO number to Sparc PIL level. */
/* Indexed by INO; a 0 entry marks an invalid ("bogon") source, which
 * sbus_build_irq() panics on.  Kept in lock-step with
 * sysio_irq_offsets[] below.
 */
672static unsigned char sysio_ino_to_pil[] = {
673	0, 4, 4, 7, 5, 7, 8, 9,		/* SBUS slot 0 */
674	0, 4, 4, 7, 5, 7, 8, 9,		/* SBUS slot 1 */
675	0, 4, 4, 7, 5, 7, 8, 9,		/* SBUS slot 2 */
676	0, 4, 4, 7, 5, 7, 8, 9,		/* SBUS slot 3 */
677	4, /* Onboard SCSI */
678	5, /* Onboard Ethernet */
679/*XXX*/	8, /* Onboard BPP */
680	0, /* Bogon */
681	13, /* Audio */
682/*XXX*/15, /* PowerFail */
683	0, /* Bogon */
684	0, /* Bogon */
685	12, /* Zilog Serial Channels (incl. Keyboard/Mouse lines) */
686	11, /* Floppy */
687	0, /* Spare Hardware (bogon for now) */
688	0, /* Keyboard (bogon for now) */
689	0, /* Mouse (bogon for now) */
690	0, /* Serial (bogon for now) */
691	0, 0, /* Bogon, Bogon */
692	10, /* Timer 0 */
693	11, /* Timer 1 */
694	0, 0, /* Bogon, Bogon */
695	15, /* Uncorrectable SBUS Error */
696	15, /* Correctable SBUS Error */
697	15, /* SBUS Error */
698/*XXX*/	0, /* Power Management (bogon for now) */
699};
700
701/* INO number to IMAP register offset for SYSIO external IRQ's.
702 * This should conform to both Sunfire/Wildfire server and Fusion
703 * desktop designs.
704 */
705#define SYSIO_IMAP_SLOT0 0x2c04UL
706#define SYSIO_IMAP_SLOT1 0x2c0cUL
707#define SYSIO_IMAP_SLOT2 0x2c14UL
708#define SYSIO_IMAP_SLOT3 0x2c1cUL
709#define SYSIO_IMAP_SCSI 0x3004UL
710#define SYSIO_IMAP_ETH 0x300cUL
711#define SYSIO_IMAP_BPP 0x3014UL
712#define SYSIO_IMAP_AUDIO 0x301cUL
713#define SYSIO_IMAP_PFAIL 0x3024UL
714#define SYSIO_IMAP_KMS 0x302cUL
715#define SYSIO_IMAP_FLPY 0x3034UL
716#define SYSIO_IMAP_SHW 0x303cUL
717#define SYSIO_IMAP_KBD 0x3044UL
718#define SYSIO_IMAP_MS 0x304cUL
719#define SYSIO_IMAP_SER 0x3054UL
720#define SYSIO_IMAP_TIM0 0x3064UL
721#define SYSIO_IMAP_TIM1 0x306cUL
722#define SYSIO_IMAP_UE 0x3074UL
723#define SYSIO_IMAP_CE 0x307cUL
724#define SYSIO_IMAP_SBERR 0x3084UL
725#define SYSIO_IMAP_PMGMT 0x308cUL
726#define SYSIO_IMAP_GFX 0x3094UL
727#define SYSIO_IMAP_EUPA 0x309cUL
728
/* Sentinel for INOs with no IMAP register; checked as (unsigned long)-1
 * in sbus_build_irq().
 */
729#define bogon     ((unsigned long) -1)
/* Indexed by INO: offset of the interrupt mapping (IMAP) register from
 * the SYSIO register base.  Parallels sysio_ino_to_pil[] above.
 */
730static unsigned long sysio_irq_offsets[] = {
731	/* SBUS Slot 0 --> 3, level 1 --> 7 */
732	SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0,
733	SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0,
734	SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1,
735	SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1,
736	SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2,
737	SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2,
738	SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3,
739	SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3,
740
741	/* Onboard devices (not relevant/used on SunFire). */
742	SYSIO_IMAP_SCSI,
743	SYSIO_IMAP_ETH,
744	SYSIO_IMAP_BPP,
745	bogon,
746	SYSIO_IMAP_AUDIO,
747	SYSIO_IMAP_PFAIL,
748	bogon,
749	bogon,
750	SYSIO_IMAP_KMS,
751	SYSIO_IMAP_FLPY,
752	SYSIO_IMAP_SHW,
753	SYSIO_IMAP_KBD,
754	SYSIO_IMAP_MS,
755	SYSIO_IMAP_SER,
756	bogon,
757	bogon,
758	SYSIO_IMAP_TIM0,
759	SYSIO_IMAP_TIM1,
760	bogon,
761	bogon,
762	SYSIO_IMAP_UE,
763	SYSIO_IMAP_CE,
764	SYSIO_IMAP_SBERR,
765	SYSIO_IMAP_PMGMT,
766};
767
768#undef bogon
769
770#define NUM_SYSIO_OFFSETS (sizeof(sysio_irq_offsets) / sizeof(sysio_irq_offsets[0]))
771
772/* Convert Interrupt Mapping register pointer to associated
773 * Interrupt Clear register pointer, SYSIO specific version.
774 */
775#define SYSIO_ICLR_UNUSED0 0x3400UL
776#define SYSIO_ICLR_SLOT0 0x340cUL
777#define SYSIO_ICLR_SLOT1 0x344cUL
778#define SYSIO_ICLR_SLOT2 0x348cUL
779#define SYSIO_ICLR_SLOT3 0x34ccUL
/* Convert an IMAP register address to its matching ICLR register
 * address by applying the constant ICLR-IMAP register-block offset.
 */
780static unsigned long sysio_imap_to_iclr(unsigned long imap)
781{
782	unsigned long diff = SYSIO_ICLR_UNUSED0 - SYSIO_IMAP_SLOT0;
783	return imap + diff;
784}
785
/* Translate a SYSIO interrupt number (@ino) on the bus identified by
 * @buscookie into a Linux IRQ via build_irq().  Looks up the PIL and
 * IMAP offset in the tables above (panicking/halting on invalid INOs),
 * then derives the matching ICLR register; external SBUS slot
 * interrupts (ino < 0x20) need per-slot, per-level ICLR selection.
 */
786unsigned int sbus_build_irq(void *buscookie, unsigned int ino)
787{
788	struct sbus_bus *sbus = (struct sbus_bus *)buscookie;
789	struct sbus_iommu *iommu = sbus->iommu;
790	unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
791	unsigned long imap, iclr;
792	int pil, sbus_level = 0;
793
794	pil = sysio_ino_to_pil[ino];
795	if (!pil) {
796		printk("sbus_irq_build: Bad SYSIO INO[%x]\n", ino);
797		panic("Bad SYSIO IRQ translations...");
798	}
799
800	if (PIL_RESERVED(pil))
801		BUG();
802
803	imap = sysio_irq_offsets[ino];
804	if (imap == ((unsigned long)-1)) {
805		prom_printf("get_irq_translations: Bad SYSIO INO[%x] cpu[%d]\n",
806			    ino, pil);
807		prom_halt();
808	}
809	imap += reg_base;
810
811	/* SYSIO inconsistency.  For external SLOTS, we have to select
812	 * the right ICLR register based upon the lower SBUS irq level
813	 * bits.
814	 */
815	if (ino >= 0x20) {
816		iclr = sysio_imap_to_iclr(imap);
817	} else {
		/* ino encodes slot in bits 4:3 and level-1 in bits 2:0. */
818		int sbus_slot = (ino & 0x18)>>3;
819
820		sbus_level = ino & 0x7;
821
822		switch(sbus_slot) {
823		case 0:
824			iclr = reg_base + SYSIO_ICLR_SLOT0;
825			break;
826		case 1:
827			iclr = reg_base + SYSIO_ICLR_SLOT1;
828			break;
829		case 2:
830			iclr = reg_base + SYSIO_ICLR_SLOT2;
831			break;
832		default:
833		case 3:
834			iclr = reg_base + SYSIO_ICLR_SLOT3;
835			break;
836		};
837
		/* 8 bytes per level register within the slot's ICLR bank. */
838		iclr += ((unsigned long)sbus_level - 1UL) * 8UL;
839	}
840	return build_irq(pil, sbus_level, iclr, imap);
841}
842
843/* Error interrupt handling. */
844#define SYSIO_UE_AFSR 0x0030UL
845#define SYSIO_UE_AFAR 0x0038UL
846#define SYSIO_UEAFSR_PPIO 0x8000000000000000UL /* Primary PIO cause */
847#define SYSIO_UEAFSR_PDRD 0x4000000000000000UL /* Primary DVMA read cause */
848#define SYSIO_UEAFSR_PDWR 0x2000000000000000UL /* Primary DVMA write cause */
849#define SYSIO_UEAFSR_SPIO 0x1000000000000000UL /* Secondary PIO is cause */
850#define SYSIO_UEAFSR_SDRD 0x0800000000000000UL /* Secondary DVMA read cause */
851#define SYSIO_UEAFSR_SDWR 0x0400000000000000UL /* Secondary DVMA write cause*/
852#define SYSIO_UEAFSR_RESV1 0x03ff000000000000UL /* Reserved */
853#define SYSIO_UEAFSR_DOFF 0x0000e00000000000UL /* Doubleword Offset */
854#define SYSIO_UEAFSR_SIZE 0x00001c0000000000UL /* Bad transfer size 2^SIZE */
855#define SYSIO_UEAFSR_MID 0x000003e000000000UL /* UPA MID causing the fault */
856#define SYSIO_UEAFSR_RESV2 0x0000001fffffffffUL /* Reserved */
/* Interrupt handler for SYSIO uncorrectable ECC errors: latch the
 * AFSR/AFAR pair, write the primary/secondary cause bits back to clear
 * them, and log a full decode of the error.  Always IRQ_HANDLED.
 */
857static irqreturn_t sysio_ue_handler(int irq, void *dev_id, struct pt_regs *regs)
858{
859	struct sbus_bus *sbus = dev_id;
860	struct sbus_iommu *iommu = sbus->iommu;
861	unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
862	unsigned long afsr_reg, afar_reg;
863	unsigned long afsr, afar, error_bits;
864	int reported;
865
866	afsr_reg = reg_base + SYSIO_UE_AFSR;
867	afar_reg = reg_base + SYSIO_UE_AFAR;
868
869	/* Latch error status. */
870	afsr = upa_readq(afsr_reg);
871	afar = upa_readq(afar_reg);
872
873	/* Clear primary/secondary error status bits. */
874	error_bits = afsr &
875		(SYSIO_UEAFSR_PPIO | SYSIO_UEAFSR_PDRD | SYSIO_UEAFSR_PDWR |
876		 SYSIO_UEAFSR_SPIO | SYSIO_UEAFSR_SDRD | SYSIO_UEAFSR_SDWR);
877	upa_writeq(error_bits, afsr_reg);
878
879	/* Log the error. */
880	printk("SYSIO[%x]: Uncorrectable ECC Error, primary error type[%s]\n",
881	       sbus->portid,
882	       (((error_bits & SYSIO_UEAFSR_PPIO) ?
883		 "PIO" :
884		 ((error_bits & SYSIO_UEAFSR_PDRD) ?
885		  "DVMA Read" :
886		  ((error_bits & SYSIO_UEAFSR_PDWR) ?
887		   "DVMA Write" : "???")))));
888	printk("SYSIO[%x]: DOFF[%lx] SIZE[%lx] MID[%lx]\n",
889	       sbus->portid,
890	       (afsr & SYSIO_UEAFSR_DOFF) >> 45UL,
891	       (afsr & SYSIO_UEAFSR_SIZE) >> 42UL,
892	       (afsr & SYSIO_UEAFSR_MID) >> 37UL);
893	printk("SYSIO[%x]: AFAR[%016lx]\n", sbus->portid, afar);
894	printk("SYSIO[%x]: Secondary UE errors [", sbus->portid);
895	reported = 0;
896	if (afsr & SYSIO_UEAFSR_SPIO) {
897		reported++;
898		printk("(PIO)");
899	}
900	if (afsr & SYSIO_UEAFSR_SDRD) {
901		reported++;
902		printk("(DVMA Read)");
903	}
904	if (afsr & SYSIO_UEAFSR_SDWR) {
905		reported++;
906		printk("(DVMA Write)");
907	}
908	if (!reported)
909		printk("(none)");
910	printk("]\n");
911
912	return IRQ_HANDLED;
913}
914
915#define SYSIO_CE_AFSR 0x0040UL
916#define SYSIO_CE_AFAR 0x0048UL
917#define SYSIO_CEAFSR_PPIO 0x8000000000000000UL /* Primary PIO cause */
918#define SYSIO_CEAFSR_PDRD 0x4000000000000000UL /* Primary DVMA read cause */
919#define SYSIO_CEAFSR_PDWR 0x2000000000000000UL /* Primary DVMA write cause */
920#define SYSIO_CEAFSR_SPIO 0x1000000000000000UL /* Secondary PIO cause */
921#define SYSIO_CEAFSR_SDRD 0x0800000000000000UL /* Secondary DVMA read cause */
922#define SYSIO_CEAFSR_SDWR 0x0400000000000000UL /* Secondary DVMA write cause*/
923#define SYSIO_CEAFSR_RESV1 0x0300000000000000UL /* Reserved */
924#define SYSIO_CEAFSR_ESYND 0x00ff000000000000UL /* Syndrome Bits */
925#define SYSIO_CEAFSR_DOFF 0x0000e00000000000UL /* Double Offset */
926#define SYSIO_CEAFSR_SIZE 0x00001c0000000000UL /* Bad transfer size 2^SIZE */
927#define SYSIO_CEAFSR_MID 0x000003e000000000UL /* UPA MID causing the fault */
928#define SYSIO_CEAFSR_RESV2 0x0000001fffffffffUL /* Reserved */
/* Interrupt handler for SYSIO correctable ECC errors.  Same structure
 * as sysio_ue_handler(): latch AFSR/AFAR, clear the cause bits by
 * writing them back, log a decode (including the ECC syndrome).
 */
929static irqreturn_t sysio_ce_handler(int irq, void *dev_id, struct pt_regs *regs)
930{
931	struct sbus_bus *sbus = dev_id;
932	struct sbus_iommu *iommu = sbus->iommu;
933	unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
934	unsigned long afsr_reg, afar_reg;
935	unsigned long afsr, afar, error_bits;
936	int reported;
937
938	afsr_reg = reg_base + SYSIO_CE_AFSR;
939	afar_reg = reg_base + SYSIO_CE_AFAR;
940
941	/* Latch error status. */
942	afsr = upa_readq(afsr_reg);
943	afar = upa_readq(afar_reg);
944
945	/* Clear primary/secondary error status bits. */
946	error_bits = afsr &
947		(SYSIO_CEAFSR_PPIO | SYSIO_CEAFSR_PDRD | SYSIO_CEAFSR_PDWR |
948		 SYSIO_CEAFSR_SPIO | SYSIO_CEAFSR_SDRD | SYSIO_CEAFSR_SDWR);
949	upa_writeq(error_bits, afsr_reg);
950
951	printk("SYSIO[%x]: Correctable ECC Error, primary error type[%s]\n",
952	       sbus->portid,
953	       (((error_bits & SYSIO_CEAFSR_PPIO) ?
954		 "PIO" :
955		 ((error_bits & SYSIO_CEAFSR_PDRD) ?
956		  "DVMA Read" :
957		  ((error_bits & SYSIO_CEAFSR_PDWR) ?
958		   "DVMA Write" : "???")))));
959
960	/* XXX Use syndrome and afar to print out module string just like
961	 * XXX UDB CE trap handler does... -DaveM
962	 */
963	printk("SYSIO[%x]: DOFF[%lx] ECC Syndrome[%lx] Size[%lx] MID[%lx]\n",
964	       sbus->portid,
965	       (afsr & SYSIO_CEAFSR_DOFF) >> 45UL,
966	       (afsr & SYSIO_CEAFSR_ESYND) >> 48UL,
967	       (afsr & SYSIO_CEAFSR_SIZE) >> 42UL,
968	       (afsr & SYSIO_CEAFSR_MID) >> 37UL);
969	printk("SYSIO[%x]: AFAR[%016lx]\n", sbus->portid, afar);
970
971	printk("SYSIO[%x]: Secondary CE errors [", sbus->portid);
972	reported = 0;
973	if (afsr & SYSIO_CEAFSR_SPIO) {
974		reported++;
975		printk("(PIO)");
976	}
977	if (afsr & SYSIO_CEAFSR_SDRD) {
978		reported++;
979		printk("(DVMA Read)");
980	}
981	if (afsr & SYSIO_CEAFSR_SDWR) {
982		reported++;
983		printk("(DVMA Write)");
984	}
985	if (!reported)
986		printk("(none)");
987	printk("]\n");
988
989	return IRQ_HANDLED;
990}
991
992#define SYSIO_SBUS_AFSR 0x2010UL
993#define SYSIO_SBUS_AFAR 0x2018UL
994#define SYSIO_SBAFSR_PLE 0x8000000000000000UL /* Primary Late PIO Error */
995#define SYSIO_SBAFSR_PTO 0x4000000000000000UL /* Primary SBUS Timeout */
996#define SYSIO_SBAFSR_PBERR 0x2000000000000000UL /* Primary SBUS Error ACK */
997#define SYSIO_SBAFSR_SLE 0x1000000000000000UL /* Secondary Late PIO Error */
998#define SYSIO_SBAFSR_STO 0x0800000000000000UL /* Secondary SBUS Timeout */
999#define SYSIO_SBAFSR_SBERR 0x0400000000000000UL /* Secondary SBUS Error ACK */
1000#define SYSIO_SBAFSR_RESV1 0x03ff000000000000UL /* Reserved */
1001#define SYSIO_SBAFSR_RD 0x0000800000000000UL /* Primary was late PIO read */
1002#define SYSIO_SBAFSR_RESV2 0x0000600000000000UL /* Reserved */
1003#define SYSIO_SBAFSR_SIZE 0x00001c0000000000UL /* Size of transfer */
1004#define SYSIO_SBAFSR_MID 0x000003e000000000UL /* MID causing the error */
1005#define SYSIO_SBAFSR_RESV3 0x0000001fffffffffUL /* Reserved */
/* Interrupt handler for general SBUS errors (late PIO error, timeout,
 * error ack).  Latch AFSR/AFAR, write the cause bits back to clear
 * them, and log a decode.  Always IRQ_HANDLED.
 */
1006static irqreturn_t sysio_sbus_error_handler(int irq, void *dev_id, struct pt_regs *regs)
1007{
1008	struct sbus_bus *sbus = dev_id;
1009	struct sbus_iommu *iommu = sbus->iommu;
1010	unsigned long afsr_reg, afar_reg, reg_base;
1011	unsigned long afsr, afar, error_bits;
1012	int reported;
1013
1014	reg_base = iommu->sbus_control_reg - 0x2000UL;
1015	afsr_reg = reg_base + SYSIO_SBUS_AFSR;
1016	afar_reg = reg_base + SYSIO_SBUS_AFAR;
1017
1018	afsr = upa_readq(afsr_reg);
1019	afar = upa_readq(afar_reg);
1020
1021	/* Clear primary/secondary error status bits. */
1022	error_bits = afsr &
1023		(SYSIO_SBAFSR_PLE | SYSIO_SBAFSR_PTO | SYSIO_SBAFSR_PBERR |
1024		 SYSIO_SBAFSR_SLE | SYSIO_SBAFSR_STO | SYSIO_SBAFSR_SBERR);
1025	upa_writeq(error_bits, afsr_reg);
1026
1027	/* Log the error. */
1028	printk("SYSIO[%x]: SBUS Error, primary error type[%s] read(%d)\n",
1029	       sbus->portid,
1030	       (((error_bits & SYSIO_SBAFSR_PLE) ?
1031		 "Late PIO Error" :
1032		 ((error_bits & SYSIO_SBAFSR_PTO) ?
1033		  "Time Out" :
1034		  ((error_bits & SYSIO_SBAFSR_PBERR) ?
1035		   "Error Ack" : "???")))),
1036	       (afsr & SYSIO_SBAFSR_RD) ? 1 : 0);
1037	printk("SYSIO[%x]: size[%lx] MID[%lx]\n",
1038	       sbus->portid,
1039	       (afsr & SYSIO_SBAFSR_SIZE) >> 42UL,
1040	       (afsr & SYSIO_SBAFSR_MID) >> 37UL);
1041	printk("SYSIO[%x]: AFAR[%016lx]\n", sbus->portid, afar);
1042	printk("SYSIO[%x]: Secondary SBUS errors [", sbus->portid);
1043	reported = 0;
1044	if (afsr & SYSIO_SBAFSR_SLE) {
1045		reported++;
1046		printk("(Late PIO Error)");
1047	}
1048	if (afsr & SYSIO_SBAFSR_STO) {
1049		reported++;
1050		printk("(Time Out)");
1051	}
1052	if (afsr & SYSIO_SBAFSR_SBERR) {
1053		reported++;
1054		printk("(Error Ack)");
1055	}
1056	if (!reported)
1057		printk("(none)");
1058	printk("]\n");
1059
1060	/* XXX check iommu/strbuf for further error status XXX */
1061
1062	return IRQ_HANDLED;
1063}
1064
1065#define ECC_CONTROL 0x0020UL
1066#define SYSIO_ECNTRL_ECCEN 0x8000000000000000UL /* Enable ECC Checking */
1067#define SYSIO_ECNTRL_UEEN 0x4000000000000000UL /* Enable UE Interrupts */
1068#define SYSIO_ECNTRL_CEEN 0x2000000000000000UL /* Enable CE Interrupts */
1069
1070#define SYSIO_UE_INO 0x34
1071#define SYSIO_CE_INO 0x35
1072#define SYSIO_SBUSERR_INO 0x36
1073
1074static void __init sysio_register_error_handlers(struct sbus_bus *sbus)
1075{
1076 struct sbus_iommu *iommu = sbus->iommu;
1077 unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
1078 unsigned int irq;
1079 u64 control;
1080
1081 irq = sbus_build_irq(sbus, SYSIO_UE_INO);
1082 if (request_irq(irq, sysio_ue_handler,
1083 SA_SHIRQ, "SYSIO UE", sbus) < 0) {
1084 prom_printf("SYSIO[%x]: Cannot register UE interrupt.\n",
1085 sbus->portid);
1086 prom_halt();
1087 }
1088
1089 irq = sbus_build_irq(sbus, SYSIO_CE_INO);
1090 if (request_irq(irq, sysio_ce_handler,
1091 SA_SHIRQ, "SYSIO CE", sbus) < 0) {
1092 prom_printf("SYSIO[%x]: Cannot register CE interrupt.\n",
1093 sbus->portid);
1094 prom_halt();
1095 }
1096
1097 irq = sbus_build_irq(sbus, SYSIO_SBUSERR_INO);
1098 if (request_irq(irq, sysio_sbus_error_handler,
1099 SA_SHIRQ, "SYSIO SBUS Error", sbus) < 0) {
1100 prom_printf("SYSIO[%x]: Cannot register SBUS Error interrupt.\n",
1101 sbus->portid);
1102 prom_halt();
1103 }
1104
1105 /* Now turn the error interrupts on and also enable ECC checking. */
1106 upa_writeq((SYSIO_ECNTRL_ECCEN |
1107 SYSIO_ECNTRL_UEEN |
1108 SYSIO_ECNTRL_CEEN),
1109 reg_base + ECC_CONTROL);
1110
1111 control = upa_readq(iommu->sbus_control_reg);
1112 control |= 0x100UL; /* SBUS Error Interrupt Enable */
1113 upa_writeq(control, iommu->sbus_control_reg);
1114}
1115
1116/* Boot time initialization. */
1117void __init sbus_iommu_init(int prom_node, struct sbus_bus *sbus)
1118{
1119 struct linux_prom64_registers rprop;
1120 struct sbus_iommu *iommu;
1121 unsigned long regs, tsb_base;
1122 u64 control;
1123 int err, i;
1124
1125 sbus->portid = prom_getintdefault(sbus->prom_node,
1126 "upa-portid", -1);
1127
1128 err = prom_getproperty(prom_node, "reg",
1129 (char *)&rprop, sizeof(rprop));
1130 if (err < 0) {
1131 prom_printf("sbus_iommu_init: Cannot map SYSIO control registers.\n");
1132 prom_halt();
1133 }
1134 regs = rprop.phys_addr;
1135
1136 iommu = kmalloc(sizeof(*iommu) + SMP_CACHE_BYTES, GFP_ATOMIC);
1137 if (iommu == NULL) {
1138 prom_printf("sbus_iommu_init: Fatal error, kmalloc(iommu) failed\n");
1139 prom_halt();
1140 }
1141
1142 /* Align on E$ line boundary. */
1143 iommu = (struct sbus_iommu *)
1144 (((unsigned long)iommu + (SMP_CACHE_BYTES - 1UL)) &
1145 ~(SMP_CACHE_BYTES - 1UL));
1146
1147 memset(iommu, 0, sizeof(*iommu));
1148
1149 /* We start with no consistent mappings. */
1150 iommu->lowest_consistent_map = CLUSTER_NPAGES;
1151
1152 for (i = 0; i < NCLUSTERS; i++) {
1153 iommu->alloc_info[i].flush = 0;
1154 iommu->alloc_info[i].next = 0;
1155 }
1156
1157 /* Setup spinlock. */
1158 spin_lock_init(&iommu->lock);
1159
1160 /* Init register offsets. */
1161 iommu->iommu_regs = regs + SYSIO_IOMMUREG_BASE;
1162 iommu->strbuf_regs = regs + SYSIO_STRBUFREG_BASE;
1163
1164 /* The SYSIO SBUS control register is used for dummy reads
1165 * in order to ensure write completion.
1166 */
1167 iommu->sbus_control_reg = regs + 0x2000UL;
1168
1169 /* Link into SYSIO software state. */
1170 sbus->iommu = iommu;
1171
1172 printk("SYSIO: UPA portID %x, at %016lx\n",
1173 sbus->portid, regs);
1174
1175 /* Setup for TSB_SIZE=7, TBW_SIZE=0, MMU_DE=1, MMU_EN=1 */
1176 control = upa_readq(iommu->iommu_regs + IOMMU_CONTROL);
1177 control = ((7UL << 16UL) |
1178 (0UL << 2UL) |
1179 (1UL << 1UL) |
1180 (1UL << 0UL));
1181
1182 /* Using the above configuration we need 1MB iommu page
1183 * table (128K ioptes * 8 bytes per iopte). This is
1184 * page order 7 on UltraSparc.
1185 */
1186 tsb_base = __get_free_pages(GFP_ATOMIC, get_order(IO_TSB_SIZE));
1187 if (tsb_base == 0UL) {
1188 prom_printf("sbus_iommu_init: Fatal error, cannot alloc TSB table.\n");
1189 prom_halt();
1190 }
1191
1192 iommu->page_table = (iopte_t *) tsb_base;
1193 memset(iommu->page_table, 0, IO_TSB_SIZE);
1194
1195 upa_writeq(control, iommu->iommu_regs + IOMMU_CONTROL);
1196
1197 /* Clean out any cruft in the IOMMU using
1198 * diagnostic accesses.
1199 */
1200 for (i = 0; i < 16; i++) {
1201 unsigned long dram = iommu->iommu_regs + IOMMU_DRAMDIAG;
1202 unsigned long tag = iommu->iommu_regs + IOMMU_TAGDIAG;
1203
1204 dram += (unsigned long)i * 8UL;
1205 tag += (unsigned long)i * 8UL;
1206 upa_writeq(0, dram);
1207 upa_writeq(0, tag);
1208 }
1209 upa_readq(iommu->sbus_control_reg);
1210
1211 /* Give the TSB to SYSIO. */
1212 upa_writeq(__pa(tsb_base), iommu->iommu_regs + IOMMU_TSBBASE);
1213
1214 /* Setup streaming buffer, DE=1 SB_EN=1 */
1215 control = (1UL << 1UL) | (1UL << 0UL);
1216 upa_writeq(control, iommu->strbuf_regs + STRBUF_CONTROL);
1217
1218 /* Clear out the tags using diagnostics. */
1219 for (i = 0; i < 16; i++) {
1220 unsigned long ptag, ltag;
1221
1222 ptag = iommu->strbuf_regs + STRBUF_PTAGDIAG;
1223 ltag = iommu->strbuf_regs + STRBUF_LTAGDIAG;
1224 ptag += (unsigned long)i * 8UL;
1225 ltag += (unsigned long)i * 8UL;
1226
1227 upa_writeq(0UL, ptag);
1228 upa_writeq(0UL, ltag);
1229 }
1230
1231 /* Enable DVMA arbitration for all devices/slots. */
1232 control = upa_readq(iommu->sbus_control_reg);
1233 control |= 0x3fUL;
1234 upa_writeq(control, iommu->sbus_control_reg);
1235
1236 /* Now some Xfire specific grot... */
1237 if (this_is_starfire)
1238 sbus->starfire_cookie = starfire_hookup(sbus->portid);
1239 else
1240 sbus->starfire_cookie = NULL;
1241
1242 sysio_register_error_handlers(sbus);
1243}
diff --git a/arch/sparc64/kernel/semaphore.c b/arch/sparc64/kernel/semaphore.c
new file mode 100644
index 000000000000..63496c43fe17
--- /dev/null
+++ b/arch/sparc64/kernel/semaphore.c
@@ -0,0 +1,251 @@
1/* $Id: semaphore.c,v 1.9 2001/11/18 00:12:56 davem Exp $
2 * semaphore.c: Sparc64 semaphore implementation.
3 *
4 * This is basically the PPC semaphore scheme ported to use
5 * the sparc64 atomic instructions, so see the PPC code for
6 * credits.
7 */
8
9#include <linux/sched.h>
10#include <linux/errno.h>
11#include <linux/init.h>
12
13/*
14 * Atomically update sem->count.
15 * This does the equivalent of the following:
16 *
17 * old_count = sem->count;
18 * tmp = MAX(old_count, 0) + incr;
19 * sem->count = tmp;
20 * return old_count;
21 */
22static __inline__ int __sem_update_count(struct semaphore *sem, int incr)
23{
24 int old_count, tmp;
25
26 __asm__ __volatile__("\n"
27" ! __sem_update_count old_count(%0) tmp(%1) incr(%4) &sem->count(%3)\n"
28"1: ldsw [%3], %0\n"
29" mov %0, %1\n"
30" cmp %0, 0\n"
31" movl %%icc, 0, %1\n"
32" add %1, %4, %1\n"
33" cas [%3], %0, %1\n"
34" cmp %0, %1\n"
35" bne,pn %%icc, 1b\n"
36" membar #StoreLoad | #StoreStore\n"
37 : "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
38 : "r" (&sem->count), "r" (incr), "m" (sem->count)
39 : "cc");
40
41 return old_count;
42}
43
44static void __up(struct semaphore *sem)
45{
46 __sem_update_count(sem, 1);
47 wake_up(&sem->wait);
48}
49
50void up(struct semaphore *sem)
51{
52 /* This atomically does:
53 * old_val = sem->count;
54 * new_val = sem->count + 1;
55 * sem->count = new_val;
56 * if (old_val < 0)
57 * __up(sem);
58 *
59 * The (old_val < 0) test is equivalent to
60 * the more straightforward (new_val <= 0),
61 * but it is easier to test the former because
62 * of how the CAS instruction works.
63 */
64
65 __asm__ __volatile__("\n"
66" ! up sem(%0)\n"
67" membar #StoreLoad | #LoadLoad\n"
68"1: lduw [%0], %%g1\n"
69" add %%g1, 1, %%g7\n"
70" cas [%0], %%g1, %%g7\n"
71" cmp %%g1, %%g7\n"
72" bne,pn %%icc, 1b\n"
73" addcc %%g7, 1, %%g0\n"
74" ble,pn %%icc, 3f\n"
75" membar #StoreLoad | #StoreStore\n"
76"2:\n"
77" .subsection 2\n"
78"3: mov %0, %%g1\n"
79" save %%sp, -160, %%sp\n"
80" call %1\n"
81" mov %%g1, %%o0\n"
82" ba,pt %%xcc, 2b\n"
83" restore\n"
84" .previous\n"
85 : : "r" (sem), "i" (__up)
86 : "g1", "g2", "g3", "g7", "memory", "cc");
87}
88
89static void __sched __down(struct semaphore * sem)
90{
91 struct task_struct *tsk = current;
92 DECLARE_WAITQUEUE(wait, tsk);
93
94 tsk->state = TASK_UNINTERRUPTIBLE;
95 add_wait_queue_exclusive(&sem->wait, &wait);
96
97 while (__sem_update_count(sem, -1) <= 0) {
98 schedule();
99 tsk->state = TASK_UNINTERRUPTIBLE;
100 }
101 remove_wait_queue(&sem->wait, &wait);
102 tsk->state = TASK_RUNNING;
103
104 wake_up(&sem->wait);
105}
106
107void __sched down(struct semaphore *sem)
108{
109 might_sleep();
110 /* This atomically does:
111 * old_val = sem->count;
112 * new_val = sem->count - 1;
113 * sem->count = new_val;
114 * if (old_val < 1)
115 * __down(sem);
116 *
117 * The (old_val < 1) test is equivalent to
118 * the more straightforward (new_val < 0),
119 * but it is easier to test the former because
120 * of how the CAS instruction works.
121 */
122
123 __asm__ __volatile__("\n"
124" ! down sem(%0)\n"
125"1: lduw [%0], %%g1\n"
126" sub %%g1, 1, %%g7\n"
127" cas [%0], %%g1, %%g7\n"
128" cmp %%g1, %%g7\n"
129" bne,pn %%icc, 1b\n"
130" cmp %%g7, 1\n"
131" bl,pn %%icc, 3f\n"
132" membar #StoreLoad | #StoreStore\n"
133"2:\n"
134" .subsection 2\n"
135"3: mov %0, %%g1\n"
136" save %%sp, -160, %%sp\n"
137" call %1\n"
138" mov %%g1, %%o0\n"
139" ba,pt %%xcc, 2b\n"
140" restore\n"
141" .previous\n"
142 : : "r" (sem), "i" (__down)
143 : "g1", "g2", "g3", "g7", "memory", "cc");
144}
145
146int down_trylock(struct semaphore *sem)
147{
148 int ret;
149
150 /* This atomically does:
151 * old_val = sem->count;
152 * new_val = sem->count - 1;
153 * if (old_val < 1) {
154 * ret = 1;
155 * } else {
156 * sem->count = new_val;
157 * ret = 0;
158 * }
159 *
160 * The (old_val < 1) test is equivalent to
161 * the more straightforward (new_val < 0),
162 * but it is easier to test the former because
163 * of how the CAS instruction works.
164 */
165
166 __asm__ __volatile__("\n"
167" ! down_trylock sem(%1) ret(%0)\n"
168"1: lduw [%1], %%g1\n"
169" sub %%g1, 1, %%g7\n"
170" cmp %%g1, 1\n"
171" bl,pn %%icc, 2f\n"
172" mov 1, %0\n"
173" cas [%1], %%g1, %%g7\n"
174" cmp %%g1, %%g7\n"
175" bne,pn %%icc, 1b\n"
176" mov 0, %0\n"
177" membar #StoreLoad | #StoreStore\n"
178"2:\n"
179 : "=&r" (ret)
180 : "r" (sem)
181 : "g1", "g7", "memory", "cc");
182
183 return ret;
184}
185
186static int __sched __down_interruptible(struct semaphore * sem)
187{
188 int retval = 0;
189 struct task_struct *tsk = current;
190 DECLARE_WAITQUEUE(wait, tsk);
191
192 tsk->state = TASK_INTERRUPTIBLE;
193 add_wait_queue_exclusive(&sem->wait, &wait);
194
195 while (__sem_update_count(sem, -1) <= 0) {
196 if (signal_pending(current)) {
197 __sem_update_count(sem, 0);
198 retval = -EINTR;
199 break;
200 }
201 schedule();
202 tsk->state = TASK_INTERRUPTIBLE;
203 }
204 tsk->state = TASK_RUNNING;
205 remove_wait_queue(&sem->wait, &wait);
206 wake_up(&sem->wait);
207 return retval;
208}
209
210int __sched down_interruptible(struct semaphore *sem)
211{
212 int ret = 0;
213
214 might_sleep();
215 /* This atomically does:
216 * old_val = sem->count;
217 * new_val = sem->count - 1;
218 * sem->count = new_val;
219 * if (old_val < 1)
220 * ret = __down_interruptible(sem);
221 *
222 * The (old_val < 1) test is equivalent to
223 * the more straightforward (new_val < 0),
224 * but it is easier to test the former because
225 * of how the CAS instruction works.
226 */
227
228 __asm__ __volatile__("\n"
229" ! down_interruptible sem(%2) ret(%0)\n"
230"1: lduw [%2], %%g1\n"
231" sub %%g1, 1, %%g7\n"
232" cas [%2], %%g1, %%g7\n"
233" cmp %%g1, %%g7\n"
234" bne,pn %%icc, 1b\n"
235" cmp %%g7, 1\n"
236" bl,pn %%icc, 3f\n"
237" membar #StoreLoad | #StoreStore\n"
238"2:\n"
239" .subsection 2\n"
240"3: mov %2, %%g1\n"
241" save %%sp, -160, %%sp\n"
242" call %3\n"
243" mov %%g1, %%o0\n"
244" ba,pt %%xcc, 2b\n"
245" restore\n"
246" .previous\n"
247 : "=r" (ret)
248 : "0" (ret), "r" (sem), "i" (__down_interruptible)
249 : "g1", "g2", "g3", "g7", "memory", "cc");
250 return ret;
251}
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
new file mode 100644
index 000000000000..12c3d84b7460
--- /dev/null
+++ b/arch/sparc64/kernel/setup.c
@@ -0,0 +1,731 @@
1/* $Id: setup.c,v 1.72 2002/02/09 19:49:30 davem Exp $
2 * linux/arch/sparc64/kernel/setup.c
3 *
4 * Copyright (C) 1995,1996 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
6 */
7
8#include <linux/errno.h>
9#include <linux/sched.h>
10#include <linux/kernel.h>
11#include <linux/mm.h>
12#include <linux/stddef.h>
13#include <linux/unistd.h>
14#include <linux/ptrace.h>
15#include <linux/slab.h>
16#include <asm/smp.h>
17#include <linux/user.h>
18#include <linux/a.out.h>
19#include <linux/tty.h>
20#include <linux/delay.h>
21#include <linux/config.h>
22#include <linux/fs.h>
23#include <linux/seq_file.h>
24#include <linux/syscalls.h>
25#include <linux/kdev_t.h>
26#include <linux/major.h>
27#include <linux/string.h>
28#include <linux/init.h>
29#include <linux/inet.h>
30#include <linux/console.h>
31#include <linux/root_dev.h>
32#include <linux/interrupt.h>
33#include <linux/cpu.h>
34#include <linux/initrd.h>
35
36#include <asm/segment.h>
37#include <asm/system.h>
38#include <asm/io.h>
39#include <asm/processor.h>
40#include <asm/oplib.h>
41#include <asm/page.h>
42#include <asm/pgtable.h>
43#include <asm/idprom.h>
44#include <asm/head.h>
45#include <asm/starfire.h>
46#include <asm/mmu_context.h>
47#include <asm/timer.h>
48#include <asm/sections.h>
49#include <asm/setup.h>
50#include <asm/mmu.h>
51
52#ifdef CONFIG_IP_PNP
53#include <net/ipconfig.h>
54#endif
55
56struct screen_info screen_info = {
57 0, 0, /* orig-x, orig-y */
58 0, /* unused */
59 0, /* orig-video-page */
60 0, /* orig-video-mode */
61 128, /* orig-video-cols */
62 0, 0, 0, /* unused, ega_bx, unused */
63 54, /* orig-video-lines */
64 0, /* orig-video-isVGA */
65 16 /* orig-video-points */
66};
67
68/* Typing sync at the prom prompt calls the function pointed to by
69 * the sync callback which I set to the following function.
70 * This should sync all filesystems and return, for now it just
71 * prints out pretty messages and returns.
72 */
73
74void (*prom_palette)(int);
75void (*prom_keyboard)(void);
76
77static void
78prom_console_write(struct console *con, const char *s, unsigned n)
79{
80 prom_write(s, n);
81}
82
83static struct console prom_console = {
84 .name = "prom",
85 .write = prom_console_write,
86 .flags = CON_CONSDEV | CON_ENABLED,
87 .index = -1,
88};
89
90#define PROM_TRUE -1
91#define PROM_FALSE 0
92
93/* Pretty sick eh? */
94int prom_callback(long *args)
95{
96 struct console *cons, *saved_console = NULL;
97 unsigned long flags;
98 char *cmd;
99 extern spinlock_t prom_entry_lock;
100
101 if (!args)
102 return -1;
103 if (!(cmd = (char *)args[0]))
104 return -1;
105
106 /*
107 * The callback can be invoked on the cpu that first dropped
108 * into prom_cmdline after taking the serial interrupt, or on
109 * a slave processor that was smp_captured() if the
110 * administrator has done a switch-cpu inside obp. In either
111 * case, the cpu is marked as in-interrupt. Drop IRQ locks.
112 */
113 irq_exit();
114
115 /* XXX Revisit the locking here someday. This is a debugging
116 * XXX feature so it isnt all that critical. -DaveM
117 */
118 local_irq_save(flags);
119
120 spin_unlock(&prom_entry_lock);
121 cons = console_drivers;
122 while (cons) {
123 unregister_console(cons);
124 cons->flags &= ~(CON_PRINTBUFFER);
125 cons->next = saved_console;
126 saved_console = cons;
127 cons = console_drivers;
128 }
129 register_console(&prom_console);
130 if (!strcmp(cmd, "sync")) {
131 prom_printf("PROM `%s' command...\n", cmd);
132 show_free_areas();
133 if (current->pid != 0) {
134 local_irq_enable();
135 sys_sync();
136 local_irq_disable();
137 }
138 args[2] = 0;
139 args[args[1] + 3] = -1;
140 prom_printf("Returning to PROM\n");
141 } else if (!strcmp(cmd, "va>tte-data")) {
142 unsigned long ctx, va;
143 unsigned long tte = 0;
144 long res = PROM_FALSE;
145
146 ctx = args[3];
147 va = args[4];
148 if (ctx) {
149 /*
150 * Find process owning ctx, lookup mapping.
151 */
152 struct task_struct *p;
153 struct mm_struct *mm = NULL;
154 pgd_t *pgdp;
155 pud_t *pudp;
156 pmd_t *pmdp;
157 pte_t *ptep;
158
159 for_each_process(p) {
160 mm = p->mm;
161 if (CTX_NRBITS(mm->context) == ctx)
162 break;
163 }
164 if (!mm ||
165 CTX_NRBITS(mm->context) != ctx)
166 goto done;
167
168 pgdp = pgd_offset(mm, va);
169 if (pgd_none(*pgdp))
170 goto done;
171 pudp = pud_offset(pgdp, va);
172 if (pud_none(*pudp))
173 goto done;
174 pmdp = pmd_offset(pudp, va);
175 if (pmd_none(*pmdp))
176 goto done;
177
178 /* Preemption implicitly disabled by virtue of
179 * being called from inside OBP.
180 */
181 ptep = pte_offset_map(pmdp, va);
182 if (pte_present(*ptep)) {
183 tte = pte_val(*ptep);
184 res = PROM_TRUE;
185 }
186 pte_unmap(ptep);
187 goto done;
188 }
189
190 if ((va >= KERNBASE) && (va < (KERNBASE + (4 * 1024 * 1024)))) {
191 unsigned long kernel_pctx = 0;
192
193 if (tlb_type == cheetah_plus)
194 kernel_pctx |= (CTX_CHEETAH_PLUS_NUC |
195 CTX_CHEETAH_PLUS_CTX0);
196
197 /* Spitfire Errata #32 workaround */
198 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
199 "flush %%g6"
200 : /* No outputs */
201 : "r" (kernel_pctx),
202 "r" (PRIMARY_CONTEXT),
203 "i" (ASI_DMMU));
204
205 /*
206 * Locked down tlb entry.
207 */
208
209 if (tlb_type == spitfire)
210 tte = spitfire_get_dtlb_data(SPITFIRE_HIGHEST_LOCKED_TLBENT);
211 else if (tlb_type == cheetah || tlb_type == cheetah_plus)
212 tte = cheetah_get_ldtlb_data(CHEETAH_HIGHEST_LOCKED_TLBENT);
213
214 res = PROM_TRUE;
215 goto done;
216 }
217
218 if (va < PGDIR_SIZE) {
219 /*
220 * vmalloc or prom_inherited mapping.
221 */
222 pgd_t *pgdp;
223 pud_t *pudp;
224 pmd_t *pmdp;
225 pte_t *ptep;
226 int error;
227
228 if ((va >= LOW_OBP_ADDRESS) && (va < HI_OBP_ADDRESS)) {
229 tte = prom_virt_to_phys(va, &error);
230 if (!error)
231 res = PROM_TRUE;
232 goto done;
233 }
234 pgdp = pgd_offset_k(va);
235 if (pgd_none(*pgdp))
236 goto done;
237 pudp = pud_offset(pgdp, va);
238 if (pud_none(*pudp))
239 goto done;
240 pmdp = pmd_offset(pudp, va);
241 if (pmd_none(*pmdp))
242 goto done;
243
244 /* Preemption implicitly disabled by virtue of
245 * being called from inside OBP.
246 */
247 ptep = pte_offset_kernel(pmdp, va);
248 if (pte_present(*ptep)) {
249 tte = pte_val(*ptep);
250 res = PROM_TRUE;
251 }
252 goto done;
253 }
254
255 if (va < PAGE_OFFSET) {
256 /*
257 * No mappings here.
258 */
259 goto done;
260 }
261
262 if (va & (1UL << 40)) {
263 /*
264 * I/O page.
265 */
266
267 tte = (__pa(va) & _PAGE_PADDR) |
268 _PAGE_VALID | _PAGE_SZ4MB |
269 _PAGE_E | _PAGE_P | _PAGE_W;
270 res = PROM_TRUE;
271 goto done;
272 }
273
274 /*
275 * Normal page.
276 */
277 tte = (__pa(va) & _PAGE_PADDR) |
278 _PAGE_VALID | _PAGE_SZ4MB |
279 _PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W;
280 res = PROM_TRUE;
281
282 done:
283 if (res == PROM_TRUE) {
284 args[2] = 3;
285 args[args[1] + 3] = 0;
286 args[args[1] + 4] = res;
287 args[args[1] + 5] = tte;
288 } else {
289 args[2] = 2;
290 args[args[1] + 3] = 0;
291 args[args[1] + 4] = res;
292 }
293 } else if (!strcmp(cmd, ".soft1")) {
294 unsigned long tte;
295
296 tte = args[3];
297 prom_printf("%lx:\"%s%s%s%s%s\" ",
298 (tte & _PAGE_SOFT) >> 7,
299 tte & _PAGE_MODIFIED ? "M" : "-",
300 tte & _PAGE_ACCESSED ? "A" : "-",
301 tte & _PAGE_READ ? "W" : "-",
302 tte & _PAGE_WRITE ? "R" : "-",
303 tte & _PAGE_PRESENT ? "P" : "-");
304
305 args[2] = 2;
306 args[args[1] + 3] = 0;
307 args[args[1] + 4] = PROM_TRUE;
308 } else if (!strcmp(cmd, ".soft2")) {
309 unsigned long tte;
310
311 tte = args[3];
312 prom_printf("%lx ", (tte & 0x07FC000000000000UL) >> 50);
313
314 args[2] = 2;
315 args[args[1] + 3] = 0;
316 args[args[1] + 4] = PROM_TRUE;
317 } else {
318 prom_printf("unknown PROM `%s' command...\n", cmd);
319 }
320 unregister_console(&prom_console);
321 while (saved_console) {
322 cons = saved_console;
323 saved_console = cons->next;
324 register_console(cons);
325 }
326 spin_lock(&prom_entry_lock);
327 local_irq_restore(flags);
328
329 /*
330 * Restore in-interrupt status for a resume from obp.
331 */
332 irq_enter();
333 return 0;
334}
335
336unsigned int boot_flags = 0;
337#define BOOTME_DEBUG 0x1
338#define BOOTME_SINGLE 0x2
339
340/* Exported for mm/init.c:paging_init. */
341unsigned long cmdline_memory_size = 0;
342
343static struct console prom_debug_console = {
344 .name = "debug",
345 .write = prom_console_write,
346 .flags = CON_PRINTBUFFER,
347 .index = -1,
348};
349
350/* XXX Implement this at some point... */
351void kernel_enter_debugger(void)
352{
353}
354
355int obp_system_intr(void)
356{
357 if (boot_flags & BOOTME_DEBUG) {
358 printk("OBP: system interrupted\n");
359 prom_halt();
360 return 1;
361 }
362 return 0;
363}
364
365/*
366 * Process kernel command line switches that are specific to the
367 * SPARC or that require special low-level processing.
368 */
369static void __init process_switch(char c)
370{
371 switch (c) {
372 case 'd':
373 boot_flags |= BOOTME_DEBUG;
374 break;
375 case 's':
376 boot_flags |= BOOTME_SINGLE;
377 break;
378 case 'h':
379 prom_printf("boot_flags_init: Halt!\n");
380 prom_halt();
381 break;
382 case 'p':
383 /* Use PROM debug console. */
384 register_console(&prom_debug_console);
385 break;
386 default:
387 printk("Unknown boot switch (-%c)\n", c);
388 break;
389 }
390}
391
392static void __init process_console(char *commands)
393{
394 serial_console = 0;
395 commands += 8;
396 /* Linux-style serial */
397 if (!strncmp(commands, "ttyS", 4))
398 serial_console = simple_strtoul(commands + 4, NULL, 10) + 1;
399 else if (!strncmp(commands, "tty", 3)) {
400 char c = *(commands + 3);
401 /* Solaris-style serial */
402 if (c == 'a' || c == 'b') {
403 serial_console = c - 'a' + 1;
404 prom_printf ("Using /dev/tty%c as console.\n", c);
405 }
406 /* else Linux-style fbcon, not serial */
407 }
408#if defined(CONFIG_PROM_CONSOLE)
409 if (!strncmp(commands, "prom", 4)) {
410 char *p;
411
412 for (p = commands - 8; *p && *p != ' '; p++)
413 *p = ' ';
414 conswitchp = &prom_con;
415 }
416#endif
417}
418
419static void __init boot_flags_init(char *commands)
420{
421 while (*commands) {
422 /* Move to the start of the next "argument". */
423 while (*commands && *commands == ' ')
424 commands++;
425
426 /* Process any command switches, otherwise skip it. */
427 if (*commands == '\0')
428 break;
429 if (*commands == '-') {
430 commands++;
431 while (*commands && *commands != ' ')
432 process_switch(*commands++);
433 continue;
434 }
435 if (!strncmp(commands, "console=", 8)) {
436 process_console(commands);
437 } else if (!strncmp(commands, "mem=", 4)) {
438 /*
439 * "mem=XXX[kKmM]" overrides the PROM-reported
440 * memory size.
441 */
442 cmdline_memory_size = simple_strtoul(commands + 4,
443 &commands, 0);
444 if (*commands == 'K' || *commands == 'k') {
445 cmdline_memory_size <<= 10;
446 commands++;
447 } else if (*commands=='M' || *commands=='m') {
448 cmdline_memory_size <<= 20;
449 commands++;
450 }
451 }
452 while (*commands && *commands != ' ')
453 commands++;
454 }
455}
456
457extern int prom_probe_memory(void);
458extern unsigned long start, end;
459extern void panic_setup(char *, int *);
460
461extern unsigned short root_flags;
462extern unsigned short root_dev;
463extern unsigned short ram_flags;
464#define RAMDISK_IMAGE_START_MASK 0x07FF
465#define RAMDISK_PROMPT_FLAG 0x8000
466#define RAMDISK_LOAD_FLAG 0x4000
467
468extern int root_mountflags;
469
470char reboot_command[COMMAND_LINE_SIZE];
471
472static struct pt_regs fake_swapper_regs = { { 0, }, 0, 0, 0, 0 };
473
474void register_prom_callbacks(void)
475{
476 prom_setcallback(prom_callback);
477 prom_feval(": linux-va>tte-data 2 \" va>tte-data\" $callback drop ; "
478 "' linux-va>tte-data to va>tte-data");
479 prom_feval(": linux-.soft1 1 \" .soft1\" $callback 2drop ; "
480 "' linux-.soft1 to .soft1");
481 prom_feval(": linux-.soft2 1 \" .soft2\" $callback 2drop ; "
482 "' linux-.soft2 to .soft2");
483}
484
485extern void paging_init(void);
486
487void __init setup_arch(char **cmdline_p)
488{
489 unsigned long highest_paddr;
490 int i;
491
492 /* Initialize PROM console and command line. */
493 *cmdline_p = prom_getbootargs();
494 strcpy(saved_command_line, *cmdline_p);
495
496 printk("ARCH: SUN4U\n");
497
498#ifdef CONFIG_DUMMY_CONSOLE
499 conswitchp = &dummy_con;
500#elif defined(CONFIG_PROM_CONSOLE)
501 conswitchp = &prom_con;
502#endif
503
504#ifdef CONFIG_SMP
505 i = (unsigned long)&irq_stat[1] - (unsigned long)&irq_stat[0];
506 if ((i == SMP_CACHE_BYTES) || (i == (2 * SMP_CACHE_BYTES))) {
507 extern unsigned int irqsz_patchme[1];
508 irqsz_patchme[0] |= ((i == SMP_CACHE_BYTES) ? SMP_CACHE_BYTES_SHIFT : \
509 SMP_CACHE_BYTES_SHIFT + 1);
510 flushi((long)&irqsz_patchme[0]);
511 } else {
512 prom_printf("Unexpected size of irq_stat[] elements\n");
513 prom_halt();
514 }
515#endif
516 /* Work out if we are starfire early on */
517 check_if_starfire();
518
519 boot_flags_init(*cmdline_p);
520
521 idprom_init();
522 (void) prom_probe_memory();
523
524 /* In paging_init() we tip off this value to see if we need
525 * to change init_mm.pgd to point to the real alias mapping.
526 */
527 phys_base = 0xffffffffffffffffUL;
528 highest_paddr = 0UL;
529 for (i = 0; sp_banks[i].num_bytes != 0; i++) {
530 unsigned long top;
531
532 if (sp_banks[i].base_addr < phys_base)
533 phys_base = sp_banks[i].base_addr;
534 top = sp_banks[i].base_addr +
535 sp_banks[i].num_bytes;
536 if (highest_paddr < top)
537 highest_paddr = top;
538 }
539 pfn_base = phys_base >> PAGE_SHIFT;
540
541 switch (tlb_type) {
542 default:
543 case spitfire:
544 kern_base = spitfire_get_itlb_data(sparc64_highest_locked_tlbent());
545 kern_base &= _PAGE_PADDR_SF;
546 break;
547
548 case cheetah:
549 case cheetah_plus:
550 kern_base = cheetah_get_litlb_data(sparc64_highest_locked_tlbent());
551 kern_base &= _PAGE_PADDR;
552 break;
553 };
554
555 kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
556
557 if (!root_flags)
558 root_mountflags &= ~MS_RDONLY;
559 ROOT_DEV = old_decode_dev(root_dev);
560#ifdef CONFIG_BLK_DEV_INITRD
561 rd_image_start = ram_flags & RAMDISK_IMAGE_START_MASK;
562 rd_prompt = ((ram_flags & RAMDISK_PROMPT_FLAG) != 0);
563 rd_doload = ((ram_flags & RAMDISK_LOAD_FLAG) != 0);
564#endif
565
566 init_task.thread_info->kregs = &fake_swapper_regs;
567
568#ifdef CONFIG_IP_PNP
569 if (!ic_set_manually) {
570 int chosen = prom_finddevice ("/chosen");
571 u32 cl, sv, gw;
572
573 cl = prom_getintdefault (chosen, "client-ip", 0);
574 sv = prom_getintdefault (chosen, "server-ip", 0);
575 gw = prom_getintdefault (chosen, "gateway-ip", 0);
576 if (cl && sv) {
577 ic_myaddr = cl;
578 ic_servaddr = sv;
579 if (gw)
580 ic_gateway = gw;
581#if defined(CONFIG_IP_PNP_BOOTP) || defined(CONFIG_IP_PNP_RARP)
582 ic_proto_enabled = 0;
583#endif
584 }
585 }
586#endif
587
588 paging_init();
589}
590
591static int __init set_preferred_console(void)
592{
593 int idev, odev;
594
595 /* The user has requested a console so this is already set up. */
596 if (serial_console >= 0)
597 return -EBUSY;
598
599 idev = prom_query_input_device();
600 odev = prom_query_output_device();
601 if (idev == PROMDEV_IKBD && odev == PROMDEV_OSCREEN) {
602 serial_console = 0;
603 } else if (idev == PROMDEV_ITTYA && odev == PROMDEV_OTTYA) {
604 serial_console = 1;
605 } else if (idev == PROMDEV_ITTYB && odev == PROMDEV_OTTYB) {
606 serial_console = 2;
607 } else {
608 prom_printf("Inconsistent console: "
609 "input %d, output %d\n",
610 idev, odev);
611 prom_halt();
612 }
613
614 if (serial_console)
615 return add_preferred_console("ttyS", serial_console - 1, NULL);
616
617 return -ENODEV;
618}
619console_initcall(set_preferred_console);
620
621/* BUFFER is PAGE_SIZE bytes long. */
622
623extern char *sparc_cpu_type;
624extern char *sparc_fpu_type;
625
626extern void smp_info(struct seq_file *);
627extern void smp_bogo(struct seq_file *);
628extern void mmu_info(struct seq_file *);
629
630static int show_cpuinfo(struct seq_file *m, void *__unused)
631{
632 seq_printf(m,
633 "cpu\t\t: %s\n"
634 "fpu\t\t: %s\n"
635 "promlib\t\t: Version 3 Revision %d\n"
636 "prom\t\t: %d.%d.%d\n"
637 "type\t\t: sun4u\n"
638 "ncpus probed\t: %ld\n"
639 "ncpus active\t: %ld\n"
640#ifndef CONFIG_SMP
641 "Cpu0Bogo\t: %lu.%02lu\n"
642 "Cpu0ClkTck\t: %016lx\n"
643#endif
644 ,
645 sparc_cpu_type,
646 sparc_fpu_type,
647 prom_rev,
648 prom_prev >> 16,
649 (prom_prev >> 8) & 0xff,
650 prom_prev & 0xff,
651 (long)num_possible_cpus(),
652 (long)num_online_cpus()
653#ifndef CONFIG_SMP
654 , cpu_data(0).udelay_val/(500000/HZ),
655 (cpu_data(0).udelay_val/(5000/HZ)) % 100,
656 cpu_data(0).clock_tick
657#endif
658 );
659#ifdef CONFIG_SMP
660 smp_bogo(m);
661#endif
662 mmu_info(m);
663#ifdef CONFIG_SMP
664 smp_info(m);
665#endif
666 return 0;
667}
668
669static void *c_start(struct seq_file *m, loff_t *pos)
670{
671 /* The pointer we are returning is arbitrary,
672 * it just has to be non-NULL and not IS_ERR
673 * in the success case.
674 */
675 return *pos == 0 ? &c_start : NULL;
676}
677
678static void *c_next(struct seq_file *m, void *v, loff_t *pos)
679{
680 ++*pos;
681 return c_start(m, pos);
682}
683
684static void c_stop(struct seq_file *m, void *v)
685{
686}
687
688struct seq_operations cpuinfo_op = {
689 .start =c_start,
690 .next = c_next,
691 .stop = c_stop,
692 .show = show_cpuinfo,
693};
694
695extern int stop_a_enabled;
696
697void sun_do_break(void)
698{
699 if (!stop_a_enabled)
700 return;
701
702 prom_printf("\n");
703 flush_user_windows();
704
705 prom_cmdline();
706}
707
708int serial_console = -1;
709int stop_a_enabled = 1;
710
711static int __init topology_init(void)
712{
713 int i, err;
714
715 err = -ENOMEM;
716 for (i = 0; i < NR_CPUS; i++) {
717 if (cpu_possible(i)) {
718 struct cpu *p = kmalloc(sizeof(*p), GFP_KERNEL);
719
720 if (p) {
721 memset(p, 0, sizeof(*p));
722 register_cpu(p, i, NULL);
723 err = 0;
724 }
725 }
726 }
727
728 return err;
729}
730
731subsys_initcall(topology_init);
diff --git a/arch/sparc64/kernel/signal.c b/arch/sparc64/kernel/signal.c
new file mode 100644
index 000000000000..b27934671c35
--- /dev/null
+++ b/arch/sparc64/kernel/signal.c
@@ -0,0 +1,688 @@
1/* $Id: signal.c,v 1.60 2002/02/09 19:49:31 davem Exp $
2 * arch/sparc64/kernel/signal.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
6 * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
7 * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
8 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
9 */
10
11#include <linux/config.h>
12#ifdef CONFIG_SPARC32_COMPAT
13#include <linux/compat.h> /* for compat_old_sigset_t */
14#endif
15#include <linux/sched.h>
16#include <linux/kernel.h>
17#include <linux/signal.h>
18#include <linux/errno.h>
19#include <linux/wait.h>
20#include <linux/ptrace.h>
21#include <linux/unistd.h>
22#include <linux/mm.h>
23#include <linux/tty.h>
24#include <linux/smp_lock.h>
25#include <linux/binfmts.h>
26#include <linux/bitops.h>
27
28#include <asm/uaccess.h>
29#include <asm/ptrace.h>
30#include <asm/svr4.h>
31#include <asm/pgtable.h>
32#include <asm/fpumacro.h>
33#include <asm/uctx.h>
34#include <asm/siginfo.h>
35#include <asm/visasm.h>
36
37#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
38
39static int do_signal(sigset_t *oldset, struct pt_regs * regs,
40 unsigned long orig_o0, int ret_from_syscall);
41
42/* {set, get}context() needed for 64-bit SparcLinux userland. */
/* Restore the register state described by the ucontext whose address
 * arrives in %o0.  Implements setcontext() for 64-bit userland.  A
 * non-zero %o1 additionally requests restoring the signal mask.  Any
 * fault, misalignment, or leftover kernel-buffered register windows
 * delivers SIGSEGV instead of returning an error.
 */
asmlinkage void sparc64_set_context(struct pt_regs *regs)
{
	struct ucontext __user *ucp = (struct ucontext __user *)
		regs->u_regs[UREG_I0];
	mc_gregset_t __user *grp;
	unsigned long pc, npc, tstate;
	unsigned long fp, i7;
	unsigned char fenab;
	int err;

	flush_user_windows();
	/* Refuse if register windows are still buffered in the kernel,
	 * the pointer is not long-aligned, or the region is unreadable.
	 */
	if (get_thread_wsaved() ||
	    (((unsigned long)ucp) & (sizeof(unsigned long)-1)) ||
	    (!__access_ok(ucp, sizeof(*ucp))))
		goto do_sigsegv;
	grp = &ucp->uc_mcontext.mc_gregs;
	/* PC/NPC must be instruction-aligned (low two bits clear). */
	err = __get_user(pc, &((*grp)[MC_PC]));
	err |= __get_user(npc, &((*grp)[MC_NPC]));
	if (err || ((pc | npc) & 3))
		goto do_sigsegv;
	/* Non-zero %o1 means the caller also wants its sigmask restored. */
	if (regs->u_regs[UREG_I1]) {
		sigset_t set;

		if (_NSIG_WORDS == 1) {
			if (__get_user(set.sig[0], &ucp->uc_sigmask.sig[0]))
				goto do_sigsegv;
		} else {
			if (__copy_from_user(&set, &ucp->uc_sigmask, sizeof(sigset_t)))
				goto do_sigsegv;
		}
		/* SIGKILL/SIGSTOP can never be blocked. */
		sigdelsetmask(&set, ~_BLOCKABLE);
		spin_lock_irq(&current->sighand->siglock);
		current->blocked = set;
		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
	}
	if (test_thread_flag(TIF_32BIT)) {
		pc &= 0xffffffff;
		npc &= 0xffffffff;
	}
	regs->tpc = pc;
	regs->tnpc = npc;
	err |= __get_user(regs->y, &((*grp)[MC_Y]));
	err |= __get_user(tstate, &((*grp)[MC_TSTATE]));
	/* Userland may only influence %asi and the condition codes in
	 * %tstate; all privileged bits are preserved.
	 */
	regs->tstate &= ~(TSTATE_ASI | TSTATE_ICC | TSTATE_XCC);
	regs->tstate |= (tstate & (TSTATE_ASI | TSTATE_ICC | TSTATE_XCC));
	err |= __get_user(regs->u_regs[UREG_G1], (&(*grp)[MC_G1]));
	err |= __get_user(regs->u_regs[UREG_G2], (&(*grp)[MC_G2]));
	err |= __get_user(regs->u_regs[UREG_G3], (&(*grp)[MC_G3]));
	err |= __get_user(regs->u_regs[UREG_G4], (&(*grp)[MC_G4]));
	err |= __get_user(regs->u_regs[UREG_G5], (&(*grp)[MC_G5]));
	err |= __get_user(regs->u_regs[UREG_G6], (&(*grp)[MC_G6]));
	err |= __get_user(regs->u_regs[UREG_G7], (&(*grp)[MC_G7]));
	err |= __get_user(regs->u_regs[UREG_I0], (&(*grp)[MC_O0]));
	err |= __get_user(regs->u_regs[UREG_I1], (&(*grp)[MC_O1]));
	err |= __get_user(regs->u_regs[UREG_I2], (&(*grp)[MC_O2]));
	err |= __get_user(regs->u_regs[UREG_I3], (&(*grp)[MC_O3]));
	err |= __get_user(regs->u_regs[UREG_I4], (&(*grp)[MC_O4]));
	err |= __get_user(regs->u_regs[UREG_I5], (&(*grp)[MC_O5]));
	err |= __get_user(regs->u_regs[UREG_I6], (&(*grp)[MC_O6]));
	err |= __get_user(regs->u_regs[UREG_I7], (&(*grp)[MC_O7]));

	/* Frame pointer and return address are written into the user's
	 * register window save area on its stack (biased address).
	 */
	err |= __get_user(fp, &(ucp->uc_mcontext.mc_fp));
	err |= __get_user(i7, &(ucp->uc_mcontext.mc_i7));
	err |= __put_user(fp,
	      (&(((struct reg_window __user *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[6])));
	err |= __put_user(i7,
	      (&(((struct reg_window __user *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[7])));

	/* Optional FPU state restore, gated on the context's enable flag. */
	err |= __get_user(fenab, &(ucp->uc_mcontext.mc_fpregs.mcfpu_enab));
	if (fenab) {
		unsigned long *fpregs = current_thread_info()->fpregs;
		unsigned long fprs;

		fprs_write(0);
		err |= __get_user(fprs, &(ucp->uc_mcontext.mc_fpregs.mcfpu_fprs));
		if (fprs & FPRS_DL)
			err |= copy_from_user(fpregs,
					      &(ucp->uc_mcontext.mc_fpregs.mcfpu_fregs),
					      (sizeof(unsigned int) * 32));
		if (fprs & FPRS_DU)
			err |= copy_from_user(fpregs+16,
			 ((unsigned long __user *)&(ucp->uc_mcontext.mc_fpregs.mcfpu_fregs))+16,
			 (sizeof(unsigned int) * 32));
		err |= __get_user(current_thread_info()->xfsr[0],
				  &(ucp->uc_mcontext.mc_fpregs.mcfpu_fsr));
		err |= __get_user(current_thread_info()->gsr[0],
				  &(ucp->uc_mcontext.mc_fpregs.mcfpu_gsr));
		/* Force an FPU-disabled trap so the new state gets loaded
		 * lazily on next FPU use.
		 */
		regs->tstate &= ~TSTATE_PEF;
	}
	if (err)
		goto do_sigsegv;

	return;
do_sigsegv:
	force_sig(SIGSEGV, current);
}
140
/* Capture the current register state into the user-supplied ucontext
 * at %o0.  Implements getcontext() for 64-bit userland.  The trap
 * instruction is skipped first so a later setcontext() resumes after
 * the call.  Faults deliver SIGSEGV.
 */
asmlinkage void sparc64_get_context(struct pt_regs *regs)
{
	struct ucontext __user *ucp = (struct ucontext __user *)
		regs->u_regs[UREG_I0];
	mc_gregset_t __user *grp;
	mcontext_t __user *mcp;
	unsigned long fp, i7;
	unsigned char fenab;
	int err;

	synchronize_user_stack();
	if (get_thread_wsaved() || clear_user(ucp, sizeof(*ucp)))
		goto do_sigsegv;

#if 1
	fenab = 0; /* IMO get_context is like any other system call, thus modifies FPU state -jj */
#else
	fenab = (current_thread_info()->fpsaved[0] & FPRS_FEF);
#endif

	mcp = &ucp->uc_mcontext;
	grp = &mcp->mc_gregs;

	/* Skip over the trap instruction, first. */
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc = (regs->tnpc & 0xffffffff);
		regs->tnpc = (regs->tnpc + 4) & 0xffffffff;
	} else {
		regs->tpc = regs->tnpc;
		regs->tnpc += 4;
	}
	err = 0;
	/* Current blocked signal mask goes into the context. */
	if (_NSIG_WORDS == 1)
		err |= __put_user(current->blocked.sig[0],
				  (unsigned long __user *)&ucp->uc_sigmask);
	else
		err |= __copy_to_user(&ucp->uc_sigmask, &current->blocked,
				      sizeof(sigset_t));

	err |= __put_user(regs->tstate, &((*grp)[MC_TSTATE]));
	err |= __put_user(regs->tpc, &((*grp)[MC_PC]));
	err |= __put_user(regs->tnpc, &((*grp)[MC_NPC]));
	err |= __put_user(regs->y, &((*grp)[MC_Y]));
	err |= __put_user(regs->u_regs[UREG_G1], &((*grp)[MC_G1]));
	err |= __put_user(regs->u_regs[UREG_G2], &((*grp)[MC_G2]));
	err |= __put_user(regs->u_regs[UREG_G3], &((*grp)[MC_G3]));
	err |= __put_user(regs->u_regs[UREG_G4], &((*grp)[MC_G4]));
	err |= __put_user(regs->u_regs[UREG_G5], &((*grp)[MC_G5]));
	err |= __put_user(regs->u_regs[UREG_G6], &((*grp)[MC_G6]));
	err |= __put_user(regs->u_regs[UREG_G7], &((*grp)[MC_G7]));
	err |= __put_user(regs->u_regs[UREG_I0], &((*grp)[MC_O0]));
	err |= __put_user(regs->u_regs[UREG_I1], &((*grp)[MC_O1]));
	err |= __put_user(regs->u_regs[UREG_I2], &((*grp)[MC_O2]));
	err |= __put_user(regs->u_regs[UREG_I3], &((*grp)[MC_O3]));
	err |= __put_user(regs->u_regs[UREG_I4], &((*grp)[MC_O4]));
	err |= __put_user(regs->u_regs[UREG_I5], &((*grp)[MC_O5]));
	err |= __put_user(regs->u_regs[UREG_I6], &((*grp)[MC_O6]));
	err |= __put_user(regs->u_regs[UREG_I7], &((*grp)[MC_O7]));

	/* Frame pointer and return address are fetched from the caller's
	 * register window save area on the user stack (biased address).
	 */
	err |= __get_user(fp,
		 (&(((struct reg_window __user *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[6])));
	err |= __get_user(i7,
		 (&(((struct reg_window __user *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[7])));
	err |= __put_user(fp, &(mcp->mc_fp));
	err |= __put_user(i7, &(mcp->mc_i7));

	err |= __put_user(fenab, &(mcp->mc_fpregs.mcfpu_enab));
	/* Dead code while fenab is hard-wired to 0 above; kept for the
	 * alternate #else policy.
	 */
	if (fenab) {
		unsigned long *fpregs = current_thread_info()->fpregs;
		unsigned long fprs;

		fprs = current_thread_info()->fpsaved[0];
		if (fprs & FPRS_DL)
			err |= copy_to_user(&(mcp->mc_fpregs.mcfpu_fregs), fpregs,
					    (sizeof(unsigned int) * 32));
		if (fprs & FPRS_DU)
			err |= copy_to_user(
                          ((unsigned long __user *)&(mcp->mc_fpregs.mcfpu_fregs))+16, fpregs+16,
			  (sizeof(unsigned int) * 32));
		err |= __put_user(current_thread_info()->xfsr[0], &(mcp->mc_fpregs.mcfpu_fsr));
		err |= __put_user(current_thread_info()->gsr[0], &(mcp->mc_fpregs.mcfpu_gsr));
		err |= __put_user(fprs, &(mcp->mc_fpregs.mcfpu_fprs));
	}
	if (err)
		goto do_sigsegv;

	return;
do_sigsegv:
	force_sig(SIGSEGV, current);
}
231
/* 64-bit rt signal frame laid out on the user stack by setup_rt_frame().
 * fpu_save points at fpu_state inside this very frame when FPU state was
 * saved, and is NULL otherwise (see setup_rt_frame()/do_rt_sigreturn()).
 */
struct rt_signal_frame {
	struct sparc_stackf	ss;	/* register window / stack frame copy */
	siginfo_t		info;
	struct pt_regs		regs;
	__siginfo_fpu_t __user	*fpu_save;
	stack_t			stack;	/* sigaltstack state at signal time */
	sigset_t		mask;	/* blocked mask to restore on return */
	__siginfo_fpu_t		fpu_state;
};

/* Align macros */
/* Frame size rounded up to an 8-byte boundary. */
#define RT_ALIGNEDSZ  (((sizeof(struct rt_signal_frame) + 7) & (~7)))
244
245/*
246 * atomically swap in the new signal mask, and wait for a signal.
247 * This is really tricky on the Sparc, watch out...
248 */
/* Common body for sigpause()/sigsuspend(): install 'set' as the blocked
 * mask (saving the old one), advance past the trap instruction, then
 * sleep until a signal is delivered.  The syscall's visible result is
 * always -EINTR, arranged via the carry bits and %o0 before do_signal()
 * builds the handler frame.  32-bit tasks are delegated to the compat
 * implementation.
 */
asmlinkage void _sigpause_common(old_sigset_t set, struct pt_regs *regs)
{
	sigset_t saveset;

#ifdef CONFIG_SPARC32_COMPAT
	if (test_thread_flag(TIF_32BIT)) {
		extern asmlinkage void _sigpause32_common(compat_old_sigset_t,
							  struct pt_regs *);
		_sigpause32_common(set, regs);
		return;
	}
#endif
	set &= _BLOCKABLE;
	spin_lock_irq(&current->sighand->siglock);
	saveset = current->blocked;
	siginitset(&current->blocked, set);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	/* Step PC past the trap so sigreturn resumes after the syscall. */
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc = (regs->tnpc & 0xffffffff);
		regs->tnpc = (regs->tnpc + 4) & 0xffffffff;
	} else {
		regs->tpc = regs->tnpc;
		regs->tnpc += 4;
	}

	/* Condition codes and return value where set here for sigpause,
	 * and so got used by setup_frame, which again causes sigreturn()
	 * to return -EINTR.
	 */
	while (1) {
		current->state = TASK_INTERRUPTIBLE;
		schedule();
		/*
		 * Return -EINTR and set condition code here,
		 * so the interrupted system call actually returns
		 * these.
		 */
		regs->tstate |= (TSTATE_ICARRY|TSTATE_XCARRY);
		regs->u_regs[UREG_I0] = EINTR;
		if (do_signal(&saveset, regs, 0, 0))
			return;
	}
}
294
/* sys_sigpause(): the new mask arrives as the explicit first argument. */
asmlinkage void do_sigpause(unsigned int set, struct pt_regs *regs)
{
	_sigpause_common(set, regs);
}
299
/* sys_sigsuspend(): the new mask is taken from register %o0. */
asmlinkage void do_sigsuspend(struct pt_regs *regs)
{
	_sigpause_common(regs->u_regs[UREG_I0], regs);
}
304
/* sys_rt_sigsuspend(): like _sigpause_common() but takes a full
 * sigset_t from userspace.  Errors (bad size, fault) are reported by
 * setting the carry bits plus %o0 rather than via a return value,
 * since this is invoked from the syscall trap path.
 */
asmlinkage void do_rt_sigsuspend(sigset_t __user *uset, size_t sigsetsize, struct pt_regs *regs)
{
	sigset_t oldset, set;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t)) {
		regs->tstate |= (TSTATE_ICARRY|TSTATE_XCARRY);
		regs->u_regs[UREG_I0] = EINVAL;
		return;
	}
	if (copy_from_user(&set, uset, sizeof(set))) {
		regs->tstate |= (TSTATE_ICARRY|TSTATE_XCARRY);
		regs->u_regs[UREG_I0] = EFAULT;
		return;
	}

	/* SIGKILL/SIGSTOP can never be blocked. */
	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	oldset = current->blocked;
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	/* Step PC past the trap so sigreturn resumes after the syscall. */
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc = (regs->tnpc & 0xffffffff);
		regs->tnpc = (regs->tnpc + 4) & 0xffffffff;
	} else {
		regs->tpc = regs->tnpc;
		regs->tnpc += 4;
	}

	/* Condition codes and return value where set here for sigpause,
	 * and so got used by setup_frame, which again causes sigreturn()
	 * to return -EINTR.
	 */
	while (1) {
		current->state = TASK_INTERRUPTIBLE;
		schedule();
		/*
		 * Return -EINTR and set condition code here,
		 * so the interrupted system call actually returns
		 * these.
		 */
		regs->tstate |= (TSTATE_ICARRY|TSTATE_XCARRY);
		regs->u_regs[UREG_I0] = EINTR;
		if (do_signal(&oldset, regs, 0, 0))
			return;
	}
}
354
/* Reload FPU registers, FSR and GSR from a signal frame's saved FPU
 * area.  Clears TSTATE_PEF so the restored state is picked up via a
 * lazy FPU-disabled trap on next use.  Returns non-zero on any fault.
 */
static inline int
restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
{
	unsigned long *fpregs = current_thread_info()->fpregs;
	unsigned long fprs;
	int err;

	err = __get_user(fprs, &fpu->si_fprs);
	fprs_write(0);
	regs->tstate &= ~TSTATE_PEF;
	/* Lower and upper register halves restored independently,
	 * according to the saved FPRS dirty bits.
	 */
	if (fprs & FPRS_DL)
		err |= copy_from_user(fpregs, &fpu->si_float_regs[0],
		       		(sizeof(unsigned int) * 32));
	if (fprs & FPRS_DU)
		err |= copy_from_user(fpregs+16, &fpu->si_float_regs[32],
		       		(sizeof(unsigned int) * 32));
	err |= __get_user(current_thread_info()->xfsr[0], &fpu->si_fsr);
	err |= __get_user(current_thread_info()->gsr[0], &fpu->si_gsr);
	current_thread_info()->fpsaved[0] |= fprs;
	return err;
}
376
/* sys_rt_sigreturn(): unwind the rt_signal_frame that setup_rt_frame()
 * placed at the current frame pointer, restoring registers, FPU state,
 * the alternate signal stack, and the blocked mask.  Any inconsistency
 * delivers SIGSEGV.
 */
void do_rt_sigreturn(struct pt_regs *regs)
{
	struct rt_signal_frame __user *sf;
	unsigned long tpc, tnpc, tstate;
	__siginfo_fpu_t __user *fpu_save;
	mm_segment_t old_fs;
	sigset_t set;
	stack_t st;
	int err;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	synchronize_user_stack ();
	sf = (struct rt_signal_frame __user *)
		(regs->u_regs [UREG_FP] + STACK_BIAS);

	/* 1. Make sure we are not getting garbage from the user */
	if (((unsigned long) sf) & 3)
		goto segv;

	err = get_user(tpc, &sf->regs.tpc);
	err |= __get_user(tnpc, &sf->regs.tnpc);
	if (test_thread_flag(TIF_32BIT)) {
		tpc &= 0xffffffff;
		tnpc &= 0xffffffff;
	}
	/* PC/NPC must be instruction-aligned. */
	err |= ((tpc | tnpc) & 3);

	/* 2. Restore the state */
	err |= __get_user(regs->y, &sf->regs.y);
	err |= __get_user(tstate, &sf->regs.tstate);
	err |= copy_from_user(regs->u_regs, sf->regs.u_regs, sizeof(regs->u_regs));

	/* User can only change condition codes and %asi in %tstate. */
	regs->tstate &= ~(TSTATE_ASI | TSTATE_ICC | TSTATE_XCC);
	regs->tstate |= (tstate & (TSTATE_ASI | TSTATE_ICC | TSTATE_XCC));

	/* NOTE(review): fpu_save is only tested for non-NULL; the state
	 * is always restored from the in-frame fpu_state area.
	 */
	err |= __get_user(fpu_save, &sf->fpu_save);
	if (fpu_save)
		err |= restore_fpu_state(regs, &sf->fpu_state);

	err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t));
	err |= __copy_from_user(&st, &sf->stack, sizeof(stack_t));

	if (err)
		goto segv;

	regs->tpc = tpc;
	regs->tnpc = tnpc;

	/* It is more difficult to avoid calling this function than to
	   call it and ignore errors.  KERNEL_DS is needed because the
	   stack_t copy now lives in kernel memory. */
	old_fs = get_fs();
	set_fs(KERNEL_DS);
	do_sigaltstack((const stack_t __user *) &st, NULL, (unsigned long)sf);
	set_fs(old_fs);

	/* SIGKILL/SIGSTOP can never be blocked. */
	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return;
segv:
	force_sig(SIGSEGV, current);
}
444
445/* Checks if the fp is valid */
446static int invalid_frame_pointer(void __user *fp, int fplen)
447{
448 if (((unsigned long) fp) & 7)
449 return 1;
450 return 0;
451}
452
/* Copy the task's saved FPU registers, FSR, GSR and FPRS into the
 * signal frame's FPU area.  The register image lives directly after
 * the pt_regs on the kernel stack (hence the regs+1 cast).  Returns
 * non-zero on any fault.
 */
static inline int
save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
{
	unsigned long *fpregs = (unsigned long *)(regs+1);
	unsigned long fprs;
	int err = 0;

	fprs = current_thread_info()->fpsaved[0];
	/* Only the halves marked dirty in FPRS need saving. */
	if (fprs & FPRS_DL)
		err |= copy_to_user(&fpu->si_float_regs[0], fpregs,
				    (sizeof(unsigned int) * 32));
	if (fprs & FPRS_DU)
		err |= copy_to_user(&fpu->si_float_regs[32], fpregs+16,
				    (sizeof(unsigned int) * 32));
	err |= __put_user(current_thread_info()->xfsr[0], &fpu->si_fsr);
	err |= __put_user(current_thread_info()->gsr[0], &fpu->si_gsr);
	err |= __put_user(fprs, &fpu->si_fprs);

	return err;
}
473
/* Pick the user stack address for a new signal frame: normally just
 * below the interrupted frame, or on the alternate signal stack for
 * SA_ONSTACK handlers not already running there.
 */
static inline void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, unsigned long framesize)
{
	unsigned long sp;

	sp = regs->u_regs[UREG_FP] + STACK_BIAS;

	/* This is the X/Open sanctioned signal stack switching. */
	if (ka->sa.sa_flags & SA_ONSTACK) {
		/* NOTE(review): the second condition tests alignment of
		 * the alt-stack top (sp+size) and refuses to switch when
		 * it is misaligned — confirm this is the intended guard.
		 */
		if (!on_sig_stack(sp) &&
		    !((current->sas_ss_sp + current->sas_ss_size) & 7))
			sp = current->sas_ss_sp + current->sas_ss_size;
	}
	return (void __user *)(sp - framesize);
}
488
/* Build an rt_signal_frame on the user stack and redirect the task to
 * the signal handler.  The trailing fpu_state area is dropped from the
 * frame when the task has no live FPU state.  An unusable stack raises
 * SIGILL (fatal); a faulting write raises SIGSEGV.
 */
static inline void
setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
	       int signo, sigset_t *oldset, siginfo_t *info)
{
	struct rt_signal_frame __user *sf;
	int sigframe_size, err;

	/* 1. Make sure everything is clean */
	synchronize_user_stack();
	save_and_clear_fpu();

	sigframe_size = RT_ALIGNEDSZ;
	if (!(current_thread_info()->fpsaved[0] & FPRS_FEF))
		sigframe_size -= sizeof(__siginfo_fpu_t);

	sf = (struct rt_signal_frame __user *)
		get_sigframe(ka, regs, sigframe_size);

	if (invalid_frame_pointer (sf, sigframe_size))
		goto sigill;

	/* Windows still buffered in the kernel would be lost. */
	if (get_thread_wsaved() != 0)
		goto sigill;

	/* 2. Save the current process state */
	err = copy_to_user(&sf->regs, regs, sizeof (*regs));

	if (current_thread_info()->fpsaved[0] & FPRS_FEF) {
		err |= save_fpu_state(regs, &sf->fpu_state);
		err |= __put_user((u64)&sf->fpu_state, &sf->fpu_save);
	} else {
		err |= __put_user(0, &sf->fpu_save);
	}

	/* Setup sigaltstack */
	err |= __put_user(current->sas_ss_sp, &sf->stack.ss_sp);
	err |= __put_user(sas_ss_flags(regs->u_regs[UREG_FP]), &sf->stack.ss_flags);
	err |= __put_user(current->sas_ss_size, &sf->stack.ss_size);

	err |= copy_to_user(&sf->mask, oldset, sizeof(sigset_t));

	/* Duplicate the caller's register window into the new frame so
	 * the handler has a valid stack frame to run on.
	 */
	err |= copy_in_user((u64 __user *)sf,
			    (u64 __user *)(regs->u_regs[UREG_FP]+STACK_BIAS),
			    sizeof(struct reg_window));

	if (info)
		err |= copy_siginfo_to_user(&sf->info, info);
	else {
		err |= __put_user(signo, &sf->info.si_signo);
		err |= __put_user(SI_NOINFO, &sf->info.si_code);
	}
	if (err)
		goto sigsegv;

	/* 3. signal handler back-trampoline and parameters */
	regs->u_regs[UREG_FP] = ((unsigned long) sf) - STACK_BIAS;
	regs->u_regs[UREG_I0] = signo;
	regs->u_regs[UREG_I1] = (unsigned long) &sf->info;

	/* The sigcontext is passed in this way because of how it
	 * is defined in GLIBC's /usr/include/bits/sigcontext.h
	 * for sparc64.  It includes the 128 bytes of siginfo_t.
	 */
	regs->u_regs[UREG_I2] = (unsigned long) &sf->info;

	/* 5. signal handler */
	regs->tpc = (unsigned long) ka->sa.sa_handler;
	regs->tnpc = (regs->tpc + 4);
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	/* 4. return to kernel instructions — userland must supply the
	 * sigreturn trampoline via ka_restorer.
	 */
	regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
	return;

sigill:
	do_exit(SIGILL);
sigsegv:
	force_sigsegv(signo, current);
}
570
/* Deliver one signal: build the handler frame, then (unless SA_NOMASK)
 * block the handler's sa_mask plus the signal itself for the duration
 * of the handler.
 */
static inline void handle_signal(unsigned long signr, struct k_sigaction *ka,
				 siginfo_t *info,
				 sigset_t *oldset, struct pt_regs *regs)
{
	setup_rt_frame(ka, regs, signr, oldset,
		       (ka->sa.sa_flags & SA_SIGINFO) ? info : NULL);
	if (!(ka->sa.sa_flags & SA_NOMASK)) {
		spin_lock_irq(&current->sighand->siglock);
		sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
		sigaddset(&current->blocked,signr);
		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
	}
}
585
/* Decide how an interrupted system call interacts with the signal
 * being delivered, based on the error code left in %o0: either fail
 * it with EINTR, or rewind the PC by one instruction (and restore the
 * original %o0) so the trap is re-executed after the handler returns.
 */
static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
				   struct sigaction *sa)
{
	switch (regs->u_regs[UREG_I0]) {
	case ERESTART_RESTARTBLOCK:
	case ERESTARTNOHAND:
	no_system_call_restart:
		/* Fail with -EINTR: carry bits signal the error. */
		regs->u_regs[UREG_I0] = EINTR;
		regs->tstate |= (TSTATE_ICARRY|TSTATE_XCARRY);
		break;
	case ERESTARTSYS:
		/* Restartable only if the handler asked for SA_RESTART. */
		if (!(sa->sa_flags & SA_RESTART))
			goto no_system_call_restart;
		/* fallthrough */
	case ERESTARTNOINTR:
		regs->u_regs[UREG_I0] = orig_i0;
		regs->tpc -= 4;
		regs->tnpc -= 4;
	}
}
606
607/* Note that 'init' is a special process: it doesn't get signals it doesn't
608 * want to handle. Thus you cannot kill init even with a SIGKILL even by
609 * mistake.
610 */
/* Check for and deliver one pending signal on the way back to user
 * mode.  Returns 1 if a handler frame was set up, 0 otherwise.  When
 * no handler runs, interrupted syscalls are re-armed here so they are
 * replayed transparently.  32-bit tasks delegate to do_signal32().
 */
static int do_signal(sigset_t *oldset, struct pt_regs * regs,
		     unsigned long orig_i0, int restart_syscall)
{
	siginfo_t info;
	struct signal_deliver_cookie cookie;
	struct k_sigaction ka;
	int signr;

	/* The cookie lets ptrace_signal_deliver() perform the restart
	 * bookkeeping when a tracer intervenes.
	 */
	cookie.restart_syscall = restart_syscall;
	cookie.orig_i0 = orig_i0;

	if (!oldset)
		oldset = &current->blocked;

#ifdef CONFIG_SPARC32_COMPAT
	if (test_thread_flag(TIF_32BIT)) {
		extern int do_signal32(sigset_t *, struct pt_regs *,
				       unsigned long, int);
		return do_signal32(oldset, regs, orig_i0,
				   cookie.restart_syscall);
	}
#endif

	signr = get_signal_to_deliver(&info, &ka, regs, &cookie);
	if (signr > 0) {
		if (cookie.restart_syscall)
			syscall_restart(orig_i0, regs, &ka.sa);
		handle_signal(signr, &ka, &info, oldset, regs);
		return 1;
	}
	/* No handler ran: transparently replay restartable syscalls. */
	if (cookie.restart_syscall &&
	    (regs->u_regs[UREG_I0] == ERESTARTNOHAND ||
	     regs->u_regs[UREG_I0] == ERESTARTSYS ||
	     regs->u_regs[UREG_I0] == ERESTARTNOINTR)) {
		/* replay the system call when we are done */
		regs->u_regs[UREG_I0] = cookie.orig_i0;
		regs->tpc -= 4;
		regs->tnpc -= 4;
	}
	/* ERESTART_RESTARTBLOCK re-enters via sys_restart_syscall. */
	if (cookie.restart_syscall &&
	    regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) {
		regs->u_regs[UREG_G1] = __NR_restart_syscall;
		regs->tpc -= 4;
		regs->tnpc -= 4;
	}
	return 0;
}
658
659void do_notify_resume(sigset_t *oldset, struct pt_regs *regs,
660 unsigned long orig_i0, int restart_syscall,
661 unsigned long thread_info_flags)
662{
663 if (thread_info_flags & _TIF_SIGPENDING)
664 do_signal(oldset, regs, orig_i0, restart_syscall);
665}
666
/* Called from the ptrace signal path with the cookie do_signal() filled
 * in: performs the same syscall-restart rewinding as do_signal(), then
 * clears restart_syscall so the work is not repeated.
 */
void ptrace_signal_deliver(struct pt_regs *regs, void *cookie)
{
	struct signal_deliver_cookie *cp = cookie;

	if (cp->restart_syscall &&
	    (regs->u_regs[UREG_I0] == ERESTARTNOHAND ||
	     regs->u_regs[UREG_I0] == ERESTARTSYS ||
	     regs->u_regs[UREG_I0] == ERESTARTNOINTR)) {
		/* replay the system call when we are done */
		regs->u_regs[UREG_I0] = cp->orig_i0;
		regs->tpc -= 4;
		regs->tnpc -= 4;
		cp->restart_syscall = 0;
	}
	/* ERESTART_RESTARTBLOCK re-enters via sys_restart_syscall. */
	if (cp->restart_syscall &&
	    regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) {
		regs->u_regs[UREG_G1] = __NR_restart_syscall;
		regs->tpc -= 4;
		regs->tnpc -= 4;
		cp->restart_syscall = 0;
	}
}
diff --git a/arch/sparc64/kernel/signal32.c b/arch/sparc64/kernel/signal32.c
new file mode 100644
index 000000000000..859255cf6762
--- /dev/null
+++ b/arch/sparc64/kernel/signal32.c
@@ -0,0 +1,1469 @@
1/* $Id: signal32.c,v 1.74 2002/02/09 19:49:30 davem Exp $
2 * arch/sparc64/kernel/signal32.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
6 * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
7 * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
8 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
9 */
10
11#include <linux/sched.h>
12#include <linux/kernel.h>
13#include <linux/signal.h>
14#include <linux/errno.h>
15#include <linux/wait.h>
16#include <linux/ptrace.h>
17#include <linux/unistd.h>
18#include <linux/mm.h>
19#include <linux/tty.h>
20#include <linux/smp_lock.h>
21#include <linux/binfmts.h>
22#include <linux/compat.h>
23#include <linux/bitops.h>
24
25#include <asm/uaccess.h>
26#include <asm/ptrace.h>
27#include <asm/svr4.h>
28#include <asm/pgtable.h>
29#include <asm/psrcompat.h>
30#include <asm/fpumacro.h>
31#include <asm/visasm.h>
32
33#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
34
35int do_signal32(sigset_t *oldset, struct pt_regs *regs,
36 unsigned long orig_o0, int ret_from_syscall);
37
38/* Signal frames: the original one (compatible with SunOS):
39 *
40 * Set up a signal frame... Make the stack look the way SunOS
41 * expects it to look which is basically:
42 *
43 * ---------------------------------- <-- %sp at signal time
44 * Struct sigcontext
45 * Signal address
46 * Ptr to sigcontext area above
47 * Signal code
48 * The signal number itself
49 * One register window
50 * ---------------------------------- <-- New %sp
51 */
/* SunOS-compatible 32-bit signal frame; layout documented above. */
struct signal_sframe32 {
	struct reg_window32 sig_window;
	int sig_num;
	int sig_code;
	/* struct sigcontext32 * */ u32 sig_scptr;
	int sig_address;
	struct sigcontext32 sig_context;
	unsigned int extramask[_COMPAT_NSIG_WORDS - 1];
};
61
62/* This magic should be in g_upper[0] for all upper parts
63 * to be valid.
64 */
#define SIGINFO_EXTRA_V8PLUS_MAGIC	0x130e269
/* Upper 32 bits of the 64-bit %g and %o registers plus %asi, appended
 * to 32-bit frames for v8plus tasks; valid only when g_upper[0] holds
 * the magic above.
 */
typedef struct {
	unsigned int g_upper[8];
	unsigned int o_upper[8];
	unsigned int asi;
} siginfo_extra_v8plus_t;
71
72/*
73 * And the new one, intended to be used for Linux applications only
74 * (we have enough in there to work with clone).
75 * All the interesting bits are in the info field.
76 */
/* Linux-native 32-bit (non-rt) signal frame. */
struct new_signal_frame32 {
	struct sparc_stackf32	ss;
	__siginfo32_t		info;
	/* __siginfo_fpu32_t * */ u32 fpu_save;
	unsigned int		insns[2];	/* sigreturn trampoline */
	unsigned int		extramask[_COMPAT_NSIG_WORDS - 1];
	unsigned int		extra_size; /* Should be sizeof(siginfo_extra_v8plus_t) */
	/* Only valid if (info.si_regs.psr & (PSR_VERS|PSR_IMPL)) == PSR_V8PLUS */
	siginfo_extra_v8plus_t	v8plus;
	__siginfo_fpu_t	fpu_state;
};
88
/* 32-bit layout of siginfo_t as seen by compat userland; mirrors the
 * generic siginfo_t but with 32-bit pids, clocks, sigvals and addresses.
 */
typedef struct compat_siginfo{
	int si_signo;
	int si_errno;
	int si_code;

	union {
		int _pad[SI_PAD_SIZE32];

		/* kill() */
		struct {
			compat_pid_t _pid;		/* sender's pid */
			unsigned int _uid;		/* sender's uid */
		} _kill;

		/* POSIX.1b timers */
		struct {
			timer_t _tid;			/* timer id */
			int _overrun;			/* overrun count */
			compat_sigval_t _sigval;		/* same as below */
			int _sys_private;	/* not to be passed to user */
		} _timer;

		/* POSIX.1b signals */
		struct {
			compat_pid_t _pid;		/* sender's pid */
			unsigned int _uid;		/* sender's uid */
			compat_sigval_t _sigval;
		} _rt;

		/* SIGCHLD */
		struct {
			compat_pid_t _pid;		/* which child */
			unsigned int _uid;		/* sender's uid */
			int _status;			/* exit code */
			compat_clock_t _utime;
			compat_clock_t _stime;
		} _sigchld;

		/* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGEMT */
		struct {
			u32 _addr; /* faulting insn/memory ref. */
			int _trapno;
		} _sigfault;

		/* SIGPOLL */
		struct {
			int _band;	/* POLL_IN, POLL_OUT, POLL_MSG */
			int _fd;
		} _sigpoll;
	} _sifields;
}compat_siginfo_t;
140
/* 32-bit rt signal frame. */
struct rt_signal_frame32 {
	struct sparc_stackf32	ss;
	compat_siginfo_t	info;
	struct pt_regs32	regs;
	compat_sigset_t		mask;
	/* __siginfo_fpu32_t * */ u32 fpu_save;
	unsigned int		insns[2];	/* sigreturn trampoline */
	stack_t32		stack;
	unsigned int		extra_size; /* Should be sizeof(siginfo_extra_v8plus_t) */
	/* Only valid if (regs.psr & (PSR_VERS|PSR_IMPL)) == PSR_V8PLUS */
	siginfo_extra_v8plus_t	v8plus;
	__siginfo_fpu_t	fpu_state;
};

/* Align macros */
/* Frame sizes rounded up to an 8-byte boundary. */
#define SF_ALIGNEDSZ  (((sizeof(struct signal_sframe32) + 7) & (~7)))
#define NF_ALIGNEDSZ  (((sizeof(struct new_signal_frame32) + 7) & (~7)))
#define RT_ALIGNEDSZ  (((sizeof(struct rt_signal_frame32) + 7) & (~7)))
159
/* Convert a native 64-bit siginfo_t to the 32-bit compat layout in
 * userspace.  Only the generic triple and the union member selected by
 * si_code are copied, never uninitialized padding.  Returns non-zero
 * on fault.
 */
int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
{
	int err;

	if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t)))
		return -EFAULT;

	/* If you change siginfo_t structure, please be sure
	   this code is fixed accordingly.
	   It should never copy any pad contained in the structure
	   to avoid security leaks, but must copy the generic
	   3 ints plus the relevant union member.
	   This routine must convert siginfo from 64bit to 32bit as well
	   at the same time.  */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	if (from->si_code < 0)
		err |= __copy_to_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE);
	else {
		switch (from->si_code >> 16) {
		case __SI_TIMER >> 16:
			err |= __put_user(from->si_tid, &to->si_tid);
			err |= __put_user(from->si_overrun, &to->si_overrun);
			err |= __put_user(from->si_int, &to->si_int);
			break;
		case __SI_CHLD >> 16:
			err |= __put_user(from->si_utime, &to->si_utime);
			err |= __put_user(from->si_stime, &to->si_stime);
			err |= __put_user(from->si_status, &to->si_status);
		/* Deliberate fallthrough: CHLD also carries pid/uid,
		 * copied by the default (kill/other) case below.
		 */
		default:
			err |= __put_user(from->si_pid, &to->si_pid);
			err |= __put_user(from->si_uid, &to->si_uid);
			break;
		case __SI_FAULT >> 16:
		case __SI_POLL >> 16:
			err |= __put_user(from->si_trapno, &to->si_trapno);
			err |= __put_user((unsigned long)from->si_addr, &to->si_addr);
			break;
		case __SI_RT >> 16: /* This is not generated by the kernel as of now.  */
		case __SI_MESGQ >> 16:
			err |= __put_user(from->si_pid, &to->si_pid);
			err |= __put_user(from->si_uid, &to->si_uid);
			err |= __put_user(from->si_int, &to->si_int);
			break;
		}
	}
	return err;
}
209
210/* CAUTION: This is just a very minimalist implementation for the
211 * sake of compat_sys_rt_sigqueueinfo()
212 */
213int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
214{
215 if (!access_ok(VERIFY_WRITE, from, sizeof(compat_siginfo_t)))
216 return -EFAULT;
217
218 if (copy_from_user(to, from, 3*sizeof(int)) ||
219 copy_from_user(to->_sifields._pad, from->_sifields._pad,
220 SI_PAD_SIZE))
221 return -EFAULT;
222
223 return 0;
224}
225
226/*
227 * atomically swap in the new signal mask, and wait for a signal.
228 * This is really tricky on the Sparc, watch out...
229 */
/* Compat counterpart of _sigpause_common(): install the 32-bit mask,
 * step past the trap instruction, and sleep until a signal is
 * delivered; the syscall's visible result is always -EINTR.
 */
asmlinkage void _sigpause32_common(compat_old_sigset_t set, struct pt_regs *regs)
{
	sigset_t saveset;

	set &= _BLOCKABLE;
	spin_lock_irq(&current->sighand->siglock);
	saveset = current->blocked;
	siginitset(&current->blocked, set);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	/* Step PC past the trap so execution resumes after the syscall. */
	regs->tpc = regs->tnpc;
	regs->tnpc += 4;
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}

	/* Condition codes and return value where set here for sigpause,
	 * and so got used by setup_frame, which again causes sigreturn()
	 * to return -EINTR.
	 */
	while (1) {
		current->state = TASK_INTERRUPTIBLE;
		schedule();
		/*
		 * Return -EINTR and set condition code here,
		 * so the interrupted system call actually returns
		 * these.
		 */
		regs->tstate |= TSTATE_ICARRY;
		regs->u_regs[UREG_I0] = EINTR;
		if (do_signal32(&saveset, regs, 0, 0))
			return;
	}
}
266
/* compat_sys_rt_sigsuspend(): read a compat_sigset_t from userspace,
 * widen it word-pairwise into a native sigset_t, install it, and sleep
 * until a signal arrives.  Errors are reported via the carry bit and
 * %o0, as this runs on the syscall trap path.
 */
asmlinkage void do_rt_sigsuspend32(u32 uset, size_t sigsetsize, struct pt_regs *regs)
{
	sigset_t oldset, set;
	compat_sigset_t set32;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (((compat_size_t)sigsetsize) != sizeof(sigset_t)) {
		regs->tstate |= TSTATE_ICARRY;
		regs->u_regs[UREG_I0] = EINVAL;
		return;
	}
	if (copy_from_user(&set32, compat_ptr(uset), sizeof(set32))) {
		regs->tstate |= TSTATE_ICARRY;
		regs->u_regs[UREG_I0] = EFAULT;
		return;
	}
	/* Fuse pairs of 32-bit words into 64-bit sigset words; the
	 * fallthrough between cases is intentional.
	 */
	switch (_NSIG_WORDS) {
	case 4: set.sig[3] = set32.sig[6] + (((long)set32.sig[7]) << 32);
	case 3: set.sig[2] = set32.sig[4] + (((long)set32.sig[5]) << 32);
	case 2: set.sig[1] = set32.sig[2] + (((long)set32.sig[3]) << 32);
	case 1: set.sig[0] = set32.sig[0] + (((long)set32.sig[1]) << 32);
	}
	/* SIGKILL/SIGSTOP can never be blocked. */
	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	oldset = current->blocked;
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	/* Step PC past the trap so execution resumes after the syscall. */
	regs->tpc = regs->tnpc;
	regs->tnpc += 4;
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}

	/* Condition codes and return value where set here for sigpause,
	 * and so got used by setup_frame, which again causes sigreturn()
	 * to return -EINTR.
	 */
	while (1) {
		current->state = TASK_INTERRUPTIBLE;
		schedule();
		/*
		 * Return -EINTR and set condition code here,
		 * so the interrupted system call actually returns
		 * these.
		 */
		regs->tstate |= TSTATE_ICARRY;
		regs->u_regs[UREG_I0] = EINTR;
		if (do_signal32(&oldset, regs, 0, 0))
			return;
	}
}
321
/* Reload FPU state saved in a 32-bit signal frame into the thread's
 * software FPU save area.  The live FPU is disabled (%fprs cleared,
 * TSTATE_PEF dropped) so the next FPU use reloads from the thread
 * area.  Returns non-zero if any user access faulted.
 */
static int restore_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
{
	unsigned long *fpregs = current_thread_info()->fpregs;
	unsigned long fprs;
	int err;

	err = __get_user(fprs, &fpu->si_fprs);
	fprs_write(0);
	regs->tstate &= ~TSTATE_PEF;
	/* Restore the lower/upper halves of the register file only if
	 * the saved %fprs marks them live.
	 */
	if (fprs & FPRS_DL)
		err |= copy_from_user(fpregs, &fpu->si_float_regs[0], (sizeof(unsigned int) * 32));
	if (fprs & FPRS_DU)
		err |= copy_from_user(fpregs+16, &fpu->si_float_regs[32], (sizeof(unsigned int) * 32));
	err |= __get_user(current_thread_info()->xfsr[0], &fpu->si_fsr);
	err |= __get_user(current_thread_info()->gsr[0], &fpu->si_gsr);
	current_thread_info()->fpsaved[0] |= fprs;
	return err;
}
340
341void do_new_sigreturn32(struct pt_regs *regs)
342{
343 struct new_signal_frame32 __user *sf;
344 unsigned int psr;
345 unsigned pc, npc, fpu_save;
346 sigset_t set;
347 unsigned seta[_COMPAT_NSIG_WORDS];
348 int err, i;
349
350 regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL;
351 sf = (struct new_signal_frame32 __user *) regs->u_regs[UREG_FP];
352
353 /* 1. Make sure we are not getting garbage from the user */
354 if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
355 (((unsigned long) sf) & 3))
356 goto segv;
357
358 get_user(pc, &sf->info.si_regs.pc);
359 __get_user(npc, &sf->info.si_regs.npc);
360
361 if ((pc | npc) & 3)
362 goto segv;
363
364 if (test_thread_flag(TIF_32BIT)) {
365 pc &= 0xffffffff;
366 npc &= 0xffffffff;
367 }
368 regs->tpc = pc;
369 regs->tnpc = npc;
370
371 /* 2. Restore the state */
372 err = __get_user(regs->y, &sf->info.si_regs.y);
373 err |= __get_user(psr, &sf->info.si_regs.psr);
374
375 for (i = UREG_G1; i <= UREG_I7; i++)
376 err |= __get_user(regs->u_regs[i], &sf->info.si_regs.u_regs[i]);
377 if ((psr & (PSR_VERS|PSR_IMPL)) == PSR_V8PLUS) {
378 err |= __get_user(i, &sf->v8plus.g_upper[0]);
379 if (i == SIGINFO_EXTRA_V8PLUS_MAGIC) {
380 unsigned long asi;
381
382 for (i = UREG_G1; i <= UREG_I7; i++)
383 err |= __get_user(((u32 *)regs->u_regs)[2*i], &sf->v8plus.g_upper[i]);
384 err |= __get_user(asi, &sf->v8plus.asi);
385 regs->tstate &= ~TSTATE_ASI;
386 regs->tstate |= ((asi & 0xffUL) << 24UL);
387 }
388 }
389
390 /* User can only change condition codes in %tstate. */
391 regs->tstate &= ~(TSTATE_ICC|TSTATE_XCC);
392 regs->tstate |= psr_to_tstate_icc(psr);
393
394 err |= __get_user(fpu_save, &sf->fpu_save);
395 if (fpu_save)
396 err |= restore_fpu_state32(regs, &sf->fpu_state);
397 err |= __get_user(seta[0], &sf->info.si_mask);
398 err |= copy_from_user(seta+1, &sf->extramask,
399 (_COMPAT_NSIG_WORDS - 1) * sizeof(unsigned int));
400 if (err)
401 goto segv;
402 switch (_NSIG_WORDS) {
403 case 4: set.sig[3] = seta[6] + (((long)seta[7]) << 32);
404 case 3: set.sig[2] = seta[4] + (((long)seta[5]) << 32);
405 case 2: set.sig[1] = seta[2] + (((long)seta[3]) << 32);
406 case 1: set.sig[0] = seta[0] + (((long)seta[1]) << 32);
407 }
408 sigdelsetmask(&set, ~_BLOCKABLE);
409 spin_lock_irq(&current->sighand->siglock);
410 current->blocked = set;
411 recalc_sigpending();
412 spin_unlock_irq(&current->sighand->siglock);
413 return;
414
415segv:
416 force_sig(SIGSEGV, current);
417}
418
/* Handle sigreturn from an old-style 32-bit signal frame: the
 * sigcontext pointer arrives in %o0 rather than via the frame
 * pointer.  Tasks with TIF_NEWSIGNALS set are redirected to the
 * new-style frame handler.  Bad frames get SIGSEGV.
 */
asmlinkage void do_sigreturn32(struct pt_regs *regs)
{
	struct sigcontext32 __user *scptr;
	unsigned int pc, npc, psr;
	sigset_t set;
	unsigned int seta[_COMPAT_NSIG_WORDS];
	int err;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	synchronize_user_stack();
	if (test_thread_flag(TIF_NEWSIGNALS)) {
		do_new_sigreturn32(regs);
		return;
	}

	/* The sigcontext address is a 32-bit user pointer passed in %o0. */
	scptr = (struct sigcontext32 __user *)
		(regs->u_regs[UREG_I0] & 0x00000000ffffffffUL);
	/* Check sanity of the user arg. */
	if (!access_ok(VERIFY_READ, scptr, sizeof(struct sigcontext32)) ||
	    (((unsigned long) scptr) & 3))
		goto segv;

	err = __get_user(pc, &scptr->sigc_pc);
	err |= __get_user(npc, &scptr->sigc_npc);

	if ((pc | npc) & 3)
		goto segv; /* Nice try. */

	err |= __get_user(seta[0], &scptr->sigc_mask);
	/* Note that scptr + 1 points to extramask */
	err |= copy_from_user(seta+1, scptr + 1,
			      (_COMPAT_NSIG_WORDS - 1) * sizeof(unsigned int));
	if (err)
		goto segv;
	/* Rebuild the 64-bit sigset from 32-bit words; intentional
	 * fallthrough.
	 */
	switch (_NSIG_WORDS) {
	case 4: set.sig[3] = seta[6] + (((long)seta[7]) << 32);
	case 3: set.sig[2] = seta[4] + (((long)seta[5]) << 32);
	case 2: set.sig[1] = seta[2] + (((long)seta[3]) << 32);
	case 1: set.sig[0] = seta[0] + (((long)seta[1]) << 32);
	}
	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	if (test_thread_flag(TIF_32BIT)) {
		pc &= 0xffffffff;
		npc &= 0xffffffff;
	}
	regs->tpc = pc;
	regs->tnpc = npc;
	err = __get_user(regs->u_regs[UREG_FP], &scptr->sigc_sp);
	err |= __get_user(regs->u_regs[UREG_I0], &scptr->sigc_o0);
	err |= __get_user(regs->u_regs[UREG_G1], &scptr->sigc_g1);

	/* User can only change condition codes in %tstate. */
	err |= __get_user(psr, &scptr->sigc_psr);
	if (err)
		goto segv;
	regs->tstate &= ~(TSTATE_ICC|TSTATE_XCC);
	regs->tstate |= psr_to_tstate_icc(psr);
	return;

segv:
	force_sig(SIGSEGV, current);
}
488
489asmlinkage void do_rt_sigreturn32(struct pt_regs *regs)
490{
491 struct rt_signal_frame32 __user *sf;
492 unsigned int psr, pc, npc, fpu_save, u_ss_sp;
493 mm_segment_t old_fs;
494 sigset_t set;
495 compat_sigset_t seta;
496 stack_t st;
497 int err, i;
498
499 /* Always make any pending restarted system calls return -EINTR */
500 current_thread_info()->restart_block.fn = do_no_restart_syscall;
501
502 synchronize_user_stack();
503 regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL;
504 sf = (struct rt_signal_frame32 __user *) regs->u_regs[UREG_FP];
505
506 /* 1. Make sure we are not getting garbage from the user */
507 if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
508 (((unsigned long) sf) & 3))
509 goto segv;
510
511 get_user(pc, &sf->regs.pc);
512 __get_user(npc, &sf->regs.npc);
513
514 if ((pc | npc) & 3)
515 goto segv;
516
517 if (test_thread_flag(TIF_32BIT)) {
518 pc &= 0xffffffff;
519 npc &= 0xffffffff;
520 }
521 regs->tpc = pc;
522 regs->tnpc = npc;
523
524 /* 2. Restore the state */
525 err = __get_user(regs->y, &sf->regs.y);
526 err |= __get_user(psr, &sf->regs.psr);
527
528 for (i = UREG_G1; i <= UREG_I7; i++)
529 err |= __get_user(regs->u_regs[i], &sf->regs.u_regs[i]);
530 if ((psr & (PSR_VERS|PSR_IMPL)) == PSR_V8PLUS) {
531 err |= __get_user(i, &sf->v8plus.g_upper[0]);
532 if (i == SIGINFO_EXTRA_V8PLUS_MAGIC) {
533 unsigned long asi;
534
535 for (i = UREG_G1; i <= UREG_I7; i++)
536 err |= __get_user(((u32 *)regs->u_regs)[2*i], &sf->v8plus.g_upper[i]);
537 err |= __get_user(asi, &sf->v8plus.asi);
538 regs->tstate &= ~TSTATE_ASI;
539 regs->tstate |= ((asi & 0xffUL) << 24UL);
540 }
541 }
542
543 /* User can only change condition codes in %tstate. */
544 regs->tstate &= ~(TSTATE_ICC|TSTATE_XCC);
545 regs->tstate |= psr_to_tstate_icc(psr);
546
547 err |= __get_user(fpu_save, &sf->fpu_save);
548 if (fpu_save)
549 err |= restore_fpu_state32(regs, &sf->fpu_state);
550 err |= copy_from_user(&seta, &sf->mask, sizeof(compat_sigset_t));
551 err |= __get_user(u_ss_sp, &sf->stack.ss_sp);
552 st.ss_sp = compat_ptr(u_ss_sp);
553 err |= __get_user(st.ss_flags, &sf->stack.ss_flags);
554 err |= __get_user(st.ss_size, &sf->stack.ss_size);
555 if (err)
556 goto segv;
557
558 /* It is more difficult to avoid calling this function than to
559 call it and ignore errors. */
560 old_fs = get_fs();
561 set_fs(KERNEL_DS);
562 do_sigaltstack((stack_t __user *) &st, NULL, (unsigned long)sf);
563 set_fs(old_fs);
564
565 switch (_NSIG_WORDS) {
566 case 4: set.sig[3] = seta.sig[6] + (((long)seta.sig[7]) << 32);
567 case 3: set.sig[2] = seta.sig[4] + (((long)seta.sig[5]) << 32);
568 case 2: set.sig[1] = seta.sig[2] + (((long)seta.sig[3]) << 32);
569 case 1: set.sig[0] = seta.sig[0] + (((long)seta.sig[1]) << 32);
570 }
571 sigdelsetmask(&set, ~_BLOCKABLE);
572 spin_lock_irq(&current->sighand->siglock);
573 current->blocked = set;
574 recalc_sigpending();
575 spin_unlock_irq(&current->sighand->siglock);
576 return;
577segv:
578 force_sig(SIGSEGV, current);
579}
580
581/* Checks if the fp is valid */
582static int invalid_frame_pointer(void __user *fp, int fplen)
583{
584 if ((((unsigned long) fp) & 7) || ((unsigned long)fp) > 0x100000000ULL - fplen)
585 return 1;
586 return 0;
587}
588
589static void __user *get_sigframe(struct sigaction *sa, struct pt_regs *regs, unsigned long framesize)
590{
591 unsigned long sp;
592
593 regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL;
594 sp = regs->u_regs[UREG_FP];
595
596 /* This is the X/Open sanctioned signal stack switching. */
597 if (sa->sa_flags & SA_ONSTACK) {
598 if (!on_sig_stack(sp) && !((current->sas_ss_sp + current->sas_ss_size) & 7))
599 sp = current->sas_ss_sp + current->sas_ss_size;
600 }
601 return (void __user *)(sp - framesize);
602}
603
/* Build an old-style (SunOS-compatible) 32-bit signal frame on the
 * user stack and redirect the task to @sa->sa_handler.  The signal
 * number/address/code are translated into the legacy SUBSIG_* scheme.
 * An invalid frame pointer kills the task outright (do_exit); a fault
 * while writing the frame raises SIGSEGV instead.
 */
static void
setup_frame32(struct sigaction *sa, struct pt_regs *regs, int signr, sigset_t *oldset, siginfo_t *info)
{
	struct signal_sframe32 __user *sframep;
	struct sigcontext32 __user *sc;
	unsigned int seta[_COMPAT_NSIG_WORDS];
	int err = 0;
	void __user *sig_address;
	int sig_code;
	unsigned long pc = regs->tpc;
	unsigned long npc = regs->tnpc;
	unsigned int psr;

	if (test_thread_flag(TIF_32BIT)) {
		pc &= 0xffffffff;
		npc &= 0xffffffff;
	}

	synchronize_user_stack();
	save_and_clear_fpu();

	sframep = (struct signal_sframe32 __user *)
		get_sigframe(sa, regs, SF_ALIGNEDSZ);
	if (invalid_frame_pointer(sframep, sizeof(*sframep))){
		/* Don't change signal code and address, so that
		 * post mortem debuggers can have a look.
		 */
		do_exit(SIGILL);
	}

	sc = &sframep->sig_context;

	/* We've already made sure frame pointer isn't in kernel space... */
	err = __put_user((sas_ss_flags(regs->u_regs[UREG_FP]) == SS_ONSTACK),
			 &sc->sigc_onstack);

	/* Split the 64-bit sigset words into 32-bit halves; intentional
	 * fallthrough.
	 */
	switch (_NSIG_WORDS) {
	case 4: seta[7] = (oldset->sig[3] >> 32);
		seta[6] = oldset->sig[3];
	case 3: seta[5] = (oldset->sig[2] >> 32);
		seta[4] = oldset->sig[2];
	case 2: seta[3] = (oldset->sig[1] >> 32);
		seta[2] = oldset->sig[1];
	case 1: seta[1] = (oldset->sig[0] >> 32);
		seta[0] = oldset->sig[0];
	}
	err |= __put_user(seta[0], &sc->sigc_mask);
	err |= __copy_to_user(sframep->extramask, seta + 1,
			      (_COMPAT_NSIG_WORDS - 1) * sizeof(unsigned int));
	err |= __put_user(regs->u_regs[UREG_FP], &sc->sigc_sp);
	err |= __put_user(pc, &sc->sigc_pc);
	err |= __put_user(npc, &sc->sigc_npc);
	/* The saved %psr is reconstructed from %tstate, with PSR_EF
	 * reflecting whether the FPU was in use.
	 */
	psr = tstate_to_psr(regs->tstate);
	if (current_thread_info()->fpsaved[0] & FPRS_FEF)
		psr |= PSR_EF;
	err |= __put_user(psr, &sc->sigc_psr);
	err |= __put_user(regs->u_regs[UREG_G1], &sc->sigc_g1);
	err |= __put_user(regs->u_regs[UREG_I0], &sc->sigc_o0);
	err |= __put_user(get_thread_wsaved(), &sc->sigc_oswins);

	/* Copy the current register window from the old stack location
	 * to the new frame (user-to-user copy).
	 */
	err |= copy_in_user((u32 __user *)sframep,
			    (u32 __user *)(regs->u_regs[UREG_FP]),
			    sizeof(struct reg_window32));

	set_thread_wsaved(0); /* So process is allowed to execute. */
	err |= __put_user(signr, &sframep->sig_num);
	sig_address = NULL;
	sig_code = 0;
	/* Translate kernel-generated siginfo into the legacy SunOS
	 * SUBSIG_* code for handlers expecting the old ABI.
	 */
	if (SI_FROMKERNEL (info) && (info->si_code & __SI_MASK) == __SI_FAULT) {
		sig_address = info->si_addr;
		switch (signr) {
		case SIGSEGV:
			switch (info->si_code) {
			case SEGV_MAPERR: sig_code = SUBSIG_NOMAPPING; break;
			default: sig_code = SUBSIG_PROTECTION; break;
			}
			break;
		case SIGILL:
			switch (info->si_code) {
			case ILL_ILLOPC: sig_code = SUBSIG_ILLINST; break;
			case ILL_PRVOPC: sig_code = SUBSIG_PRIVINST; break;
			case ILL_ILLTRP: sig_code = SUBSIG_BADTRAP(info->si_trapno); break;
			default: sig_code = SUBSIG_STACK; break;
			}
			break;
		case SIGFPE:
			switch (info->si_code) {
			case FPE_INTDIV: sig_code = SUBSIG_IDIVZERO; break;
			case FPE_INTOVF: sig_code = SUBSIG_FPINTOVFL; break;
			case FPE_FLTDIV: sig_code = SUBSIG_FPDIVZERO; break;
			case FPE_FLTOVF: sig_code = SUBSIG_FPOVFLOW; break;
			case FPE_FLTUND: sig_code = SUBSIG_FPUNFLOW; break;
			case FPE_FLTRES: sig_code = SUBSIG_FPINEXACT; break;
			case FPE_FLTINV: sig_code = SUBSIG_FPOPERROR; break;
			default: sig_code = SUBSIG_FPERROR; break;
			}
			break;
		case SIGBUS:
			switch (info->si_code) {
			case BUS_ADRALN: sig_code = SUBSIG_ALIGNMENT; break;
			case BUS_ADRERR: sig_code = SUBSIG_MISCERROR; break;
			default: sig_code = SUBSIG_BUSTIMEOUT; break;
			}
			break;
		case SIGEMT:
			switch (info->si_code) {
			case EMT_TAGOVF: sig_code = SUBSIG_TAG; break;
			}
			break;
		case SIGSYS:
			if (info->si_code == (__SI_FAULT|0x100)) {
				/* See sys_sunos32.c */
				sig_code = info->si_trapno;
				break;
			}
			/* fallthrough */
		default:
			sig_address = NULL;
		}
	}
	err |= __put_user(ptr_to_compat(sig_address), &sframep->sig_address);
	err |= __put_user(sig_code, &sframep->sig_code);
	err |= __put_user(ptr_to_compat(sc), &sframep->sig_scptr);
	if (err)
		goto sigsegv;

	/* Point the task at the handler; the new frame becomes its stack. */
	regs->u_regs[UREG_FP] = (unsigned long) sframep;
	regs->tpc = (unsigned long) sa->sa_handler;
	regs->tnpc = (regs->tpc + 4);
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	return;

sigsegv:
	force_sigsegv(signr, current);
}
741
742
/* Write the thread's saved FPU state into the __siginfo_fpu_t area of
 * a 32-bit signal frame.  Only the halves of the register file marked
 * live in the saved %fprs are copied.  Returns non-zero if any user
 * access faulted.
 */
static int save_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
{
	unsigned long *fpregs = current_thread_info()->fpregs;
	unsigned long fprs;
	int err = 0;

	fprs = current_thread_info()->fpsaved[0];
	if (fprs & FPRS_DL)
		err |= copy_to_user(&fpu->si_float_regs[0], fpregs,
				    (sizeof(unsigned int) * 32));
	if (fprs & FPRS_DU)
		err |= copy_to_user(&fpu->si_float_regs[32], fpregs+16,
				    (sizeof(unsigned int) * 32));
	err |= __put_user(current_thread_info()->xfsr[0], &fpu->si_fsr);
	err |= __put_user(current_thread_info()->gsr[0], &fpu->si_gsr);
	err |= __put_user(fprs, &fpu->si_fprs);

	return err;
}
762
/* Build a "new-style" 32-bit signal frame on the user stack and
 * redirect the task to the handler.  Saves the trap registers, v8plus
 * upper-half state, optional FPU state, the full signal mask, and —
 * when no ka_restorer trampoline was registered — writes a two
 * instruction sigreturn trampoline into the frame and flushes it from
 * the I-cache by hand.
 */
static void new_setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
			      int signo, sigset_t *oldset)
{
	struct new_signal_frame32 __user *sf;
	int sigframe_size;
	u32 psr;
	int i, err;
	unsigned int seta[_COMPAT_NSIG_WORDS];

	/* 1. Make sure everything is clean */
	synchronize_user_stack();
	save_and_clear_fpu();

	/* The FPU area at the end of the frame is omitted when the FPU
	 * was not in use.
	 */
	sigframe_size = NF_ALIGNEDSZ;
	if (!(current_thread_info()->fpsaved[0] & FPRS_FEF))
		sigframe_size -= sizeof(__siginfo_fpu_t);

	sf = (struct new_signal_frame32 __user *)
		get_sigframe(&ka->sa, regs, sigframe_size);

	if (invalid_frame_pointer(sf, sigframe_size))
		goto sigill;

	if (get_thread_wsaved() != 0)
		goto sigill;

	/* 2. Save the current process state */
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	err  = put_user(regs->tpc, &sf->info.si_regs.pc);
	err |= __put_user(regs->tnpc, &sf->info.si_regs.npc);
	err |= __put_user(regs->y, &sf->info.si_regs.y);
	psr = tstate_to_psr(regs->tstate);
	if (current_thread_info()->fpsaved[0] & FPRS_FEF)
		psr |= PSR_EF;
	err |= __put_user(psr, &sf->info.si_regs.psr);
	for (i = 0; i < 16; i++)
		err |= __put_user(regs->u_regs[i], &sf->info.si_regs.u_regs[i]);
	/* v8plus extra area: magic tag, upper 32 bits of the registers,
	 * and the user %asi.
	 */
	err |= __put_user(sizeof(siginfo_extra_v8plus_t), &sf->extra_size);
	err |= __put_user(SIGINFO_EXTRA_V8PLUS_MAGIC, &sf->v8plus.g_upper[0]);
	for (i = 1; i < 16; i++)
		err |= __put_user(((u32 *)regs->u_regs)[2*i],
				  &sf->v8plus.g_upper[i]);
	err |= __put_user((regs->tstate & TSTATE_ASI) >> 24UL,
			  &sf->v8plus.asi);

	if (psr & PSR_EF) {
		err |= save_fpu_state32(regs, &sf->fpu_state);
		err |= __put_user((u64)&sf->fpu_state, &sf->fpu_save);
	} else {
		err |= __put_user(0, &sf->fpu_save);
	}

	/* Split the 64-bit sigset words into 32-bit halves; intentional
	 * fallthrough.
	 */
	switch (_NSIG_WORDS) {
	case 4: seta[7] = (oldset->sig[3] >> 32);
		seta[6] = oldset->sig[3];
	case 3: seta[5] = (oldset->sig[2] >> 32);
		seta[4] = oldset->sig[2];
	case 2: seta[3] = (oldset->sig[1] >> 32);
		seta[2] = oldset->sig[1];
	case 1: seta[1] = (oldset->sig[0] >> 32);
		seta[0] = oldset->sig[0];
	}
	err |= __put_user(seta[0], &sf->info.si_mask);
	err |= __copy_to_user(sf->extramask, seta + 1,
			      (_COMPAT_NSIG_WORDS - 1) * sizeof(unsigned int));

	/* Copy the current register window onto the new frame
	 * (user-to-user copy).
	 */
	err |= copy_in_user((u32 __user *)sf,
			    (u32 __user *)(regs->u_regs[UREG_FP]),
			    sizeof(struct reg_window32));

	if (err)
		goto sigsegv;

	/* 3. signal handler back-trampoline and parameters */
	regs->u_regs[UREG_FP] = (unsigned long) sf;
	regs->u_regs[UREG_I0] = signo;
	regs->u_regs[UREG_I1] = (unsigned long) &sf->info;
	regs->u_regs[UREG_I2] = (unsigned long) &sf->info;

	/* 4. signal handler */
	regs->tpc = (unsigned long) ka->sa.sa_handler;
	regs->tnpc = (regs->tpc + 4);
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}

	/* 5. return to kernel instructions */
	if (ka->ka_restorer) {
		regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
	} else {
		/* Flush instruction space. */
		unsigned long address = ((unsigned long)&(sf->insns[0]));
		pgd_t *pgdp = pgd_offset(current->mm, address);
		pud_t *pudp = pud_offset(pgdp, address);
		pmd_t *pmdp = pmd_offset(pudp, address);
		pte_t *ptep;

		/* %i7 points two instructions before the trampoline so
		 * the handler's "ret; restore" lands on insns[0].
		 */
		regs->u_regs[UREG_I7] = (unsigned long) (&(sf->insns[0]) - 2);

		err  = __put_user(0x821020d8, &sf->insns[0]); /*mov __NR_sigreturn, %g1*/
		err |= __put_user(0x91d02010, &sf->insns[1]); /*t 0x10*/
		if (err)
			goto sigsegv;

		/* Flush the just-written trampoline from the D-cache to
		 * the I-cache; the pte walk finds the backing page for
		 * the flush instruction.
		 */
		preempt_disable();
		ptep = pte_offset_map(pmdp, address);
		if (pte_present(*ptep)) {
			unsigned long page = (unsigned long)
				page_address(pte_page(*ptep));

			__asm__ __volatile__(
			"	membar	#StoreStore\n"
			"	flush	%0 + %1"
			: : "r" (page), "r" (address & (PAGE_SIZE - 1))
			: "memory");
		}
		pte_unmap(ptep);
		preempt_enable();
	}
	return;

sigill:
	do_exit(SIGILL);
sigsegv:
	force_sigsegv(signo, current);
}
893
894/* Setup a Solaris stack frame */
895static void
896setup_svr4_frame32(struct sigaction *sa, unsigned long pc, unsigned long npc,
897 struct pt_regs *regs, int signr, sigset_t *oldset)
898{
899 svr4_signal_frame_t __user *sfp;
900 svr4_gregset_t __user *gr;
901 svr4_siginfo_t __user *si;
902 svr4_mcontext_t __user *mc;
903 svr4_gwindows_t __user *gw;
904 svr4_ucontext_t __user *uc;
905 svr4_sigset_t setv;
906 unsigned int psr;
907 int i, err;
908
909 synchronize_user_stack();
910 save_and_clear_fpu();
911
912 regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL;
913 sfp = (svr4_signal_frame_t __user *)
914 get_sigframe(sa, regs,
915 sizeof(struct reg_window32) + SVR4_SF_ALIGNED);
916
917 if (invalid_frame_pointer(sfp, sizeof(*sfp)))
918 do_exit(SIGILL);
919
920 /* Start with a clean frame pointer and fill it */
921 err = clear_user(sfp, sizeof(*sfp));
922
923 /* Setup convenience variables */
924 si = &sfp->si;
925 uc = &sfp->uc;
926 gw = &sfp->gw;
927 mc = &uc->mcontext;
928 gr = &mc->greg;
929
930 /* FIXME: where am I supposed to put this?
931 * sc->sigc_onstack = old_status;
932 * anyways, it does not look like it is used for anything at all.
933 */
934 setv.sigbits[0] = oldset->sig[0];
935 setv.sigbits[1] = (oldset->sig[0] >> 32);
936 if (_NSIG_WORDS >= 2) {
937 setv.sigbits[2] = oldset->sig[1];
938 setv.sigbits[3] = (oldset->sig[1] >> 32);
939 err |= __copy_to_user(&uc->sigmask, &setv, sizeof(svr4_sigset_t));
940 } else
941 err |= __copy_to_user(&uc->sigmask, &setv,
942 2 * sizeof(unsigned int));
943
944 /* Store registers */
945 if (test_thread_flag(TIF_32BIT)) {
946 regs->tpc &= 0xffffffff;
947 regs->tnpc &= 0xffffffff;
948 }
949 err |= __put_user(regs->tpc, &((*gr)[SVR4_PC]));
950 err |= __put_user(regs->tnpc, &((*gr)[SVR4_NPC]));
951 psr = tstate_to_psr(regs->tstate);
952 if (current_thread_info()->fpsaved[0] & FPRS_FEF)
953 psr |= PSR_EF;
954 err |= __put_user(psr, &((*gr)[SVR4_PSR]));
955 err |= __put_user(regs->y, &((*gr)[SVR4_Y]));
956
957 /* Copy g[1..7] and o[0..7] registers */
958 for (i = 0; i < 7; i++)
959 err |= __put_user(regs->u_regs[UREG_G1+i], (&(*gr)[SVR4_G1])+i);
960 for (i = 0; i < 8; i++)
961 err |= __put_user(regs->u_regs[UREG_I0+i], (&(*gr)[SVR4_O0])+i);
962
963 /* Setup sigaltstack */
964 err |= __put_user(current->sas_ss_sp, &uc->stack.sp);
965 err |= __put_user(sas_ss_flags(regs->u_regs[UREG_FP]), &uc->stack.flags);
966 err |= __put_user(current->sas_ss_size, &uc->stack.size);
967
968 /* Save the currently window file: */
969
970 /* 1. Link sfp->uc->gwins to our windows */
971 err |= __put_user(ptr_to_compat(gw), &mc->gwin);
972
973 /* 2. Number of windows to restore at setcontext (): */
974 err |= __put_user(get_thread_wsaved(), &gw->count);
975
976 /* 3. We just pay attention to the gw->count field on setcontext */
977 set_thread_wsaved(0); /* So process is allowed to execute. */
978
979 /* Setup the signal information. Solaris expects a bunch of
980 * information to be passed to the signal handler, we don't provide
981 * that much currently, should use siginfo.
982 */
983 err |= __put_user(signr, &si->siginfo.signo);
984 err |= __put_user(SVR4_SINOINFO, &si->siginfo.code);
985 if (err)
986 goto sigsegv;
987
988 regs->u_regs[UREG_FP] = (unsigned long) sfp;
989 regs->tpc = (unsigned long) sa->sa_handler;
990 regs->tnpc = (regs->tpc + 4);
991 if (test_thread_flag(TIF_32BIT)) {
992 regs->tpc &= 0xffffffff;
993 regs->tnpc &= 0xffffffff;
994 }
995
996 /* Arguments passed to signal handler */
997 if (regs->u_regs[14]){
998 struct reg_window32 __user *rw = (struct reg_window32 __user *)
999 (regs->u_regs[14] & 0x00000000ffffffffUL);
1000
1001 err |= __put_user(signr, &rw->ins[0]);
1002 err |= __put_user((u64)si, &rw->ins[1]);
1003 err |= __put_user((u64)uc, &rw->ins[2]);
1004 err |= __put_user((u64)sfp, &rw->ins[6]); /* frame pointer */
1005 if (err)
1006 goto sigsegv;
1007
1008 regs->u_regs[UREG_I0] = signr;
1009 regs->u_regs[UREG_I1] = (u32)(u64) si;
1010 regs->u_regs[UREG_I2] = (u32)(u64) uc;
1011 }
1012 return;
1013
1014sigsegv:
1015 force_sigsegv(signr, current);
1016}
1017
/* SVR4 getcontext for a 32-bit task: fill the user-supplied ucontext
 * with the current signal mask, registers and sigaltstack settings.
 * Returns 0 on success, -EFAULT if any user access faulted.
 */
asmlinkage int
svr4_getcontext(svr4_ucontext_t __user *uc, struct pt_regs *regs)
{
	svr4_gregset_t __user *gr;
	svr4_mcontext_t __user *mc;
	svr4_sigset_t setv;
	int i, err;
	u32 psr;

	synchronize_user_stack();
	save_and_clear_fpu();

	/* Pending user register windows must have been flushed already. */
	if (get_thread_wsaved())
		do_exit(SIGSEGV);

	err = clear_user(uc, sizeof(*uc));

	/* Setup convenience variables */
	mc = &uc->mcontext;
	gr = &mc->greg;

	/* Translate the 64-bit blocked mask into SVR4 32-bit sigbits. */
	setv.sigbits[0] = current->blocked.sig[0];
	setv.sigbits[1] = (current->blocked.sig[0] >> 32);
	if (_NSIG_WORDS >= 2) {
		setv.sigbits[2] = current->blocked.sig[1];
		setv.sigbits[3] = (current->blocked.sig[1] >> 32);
		err |= __copy_to_user(&uc->sigmask, &setv, sizeof(svr4_sigset_t));
	} else
		err |= __copy_to_user(&uc->sigmask, &setv, 2 * sizeof(unsigned));

	/* Store registers */
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	err |= __put_user(regs->tpc, &uc->mcontext.greg[SVR4_PC]);
	err |= __put_user(regs->tnpc, &uc->mcontext.greg[SVR4_NPC]);

	psr = tstate_to_psr(regs->tstate) & ~PSR_EF;
	if (current_thread_info()->fpsaved[0] & FPRS_FEF)
		psr |= PSR_EF;
	err |= __put_user(psr, &uc->mcontext.greg[SVR4_PSR]);

	err |= __put_user(regs->y, &uc->mcontext.greg[SVR4_Y]);

	/* Copy g[1..7] and o[0..7] registers */
	for (i = 0; i < 7; i++)
		err |= __put_user(regs->u_regs[UREG_G1+i], (&(*gr)[SVR4_G1])+i);
	for (i = 0; i < 8; i++)
		err |= __put_user(regs->u_regs[UREG_I0+i], (&(*gr)[SVR4_O0])+i);

	/* Setup sigaltstack */
	err |= __put_user(current->sas_ss_sp, &uc->stack.sp);
	err |= __put_user(sas_ss_flags(regs->u_regs[UREG_FP]), &uc->stack.flags);
	err |= __put_user(current->sas_ss_size, &uc->stack.size);

	/* The register file is not saved
	 * we have already stuffed all of it with sync_user_stack
	 */
	return (err ? -EFAULT : 0);
}
1079
1080
1081/* Set the context for a svr4 application, this is Solaris way to sigreturn */
/* Set the context for a svr4 application, this is Solaris way to sigreturn.
 * Restores signal mask, sigaltstack, PC/nPC, %y, condition codes and
 * the g/o registers from the user-supplied ucontext.  Returns -EINTR
 * on success (the interrupted call is reported as interrupted) or
 * -EFAULT on any fault or sanity failure.
 */
asmlinkage int svr4_setcontext(svr4_ucontext_t __user *c, struct pt_regs *regs)
{
	svr4_gregset_t __user *gr;
	mm_segment_t old_fs;
	u32 pc, npc, psr, u_ss_sp;
	sigset_t set;
	svr4_sigset_t setv;
	int i, err;
	stack_t st;

	/* Fixme: restore windows, or is this already taken care of in
	 * svr4_setup_frame when sync_user_windows is done?
	 */
	flush_user_windows();

	if (get_thread_wsaved())
		goto sigsegv;

	if (((unsigned long) c) & 3){
		printk("Unaligned structure passed\n");
		goto sigsegv;
	}

	if (!__access_ok(c, sizeof(*c))) {
		/* Miguel, add nice debugging msg _here_. ;-) */
		goto sigsegv;
	}

	/* Check for valid PC and nPC */
	gr = &c->mcontext.greg;
	err = __get_user(pc, &((*gr)[SVR4_PC]));
	err |= __get_user(npc, &((*gr)[SVR4_NPC]));
	if ((pc | npc) & 3)
		goto sigsegv;

	/* Retrieve information from passed ucontext */
	/* note that nPC is ored a 1, this is used to inform entry.S */
	/* that we don't want it to mess with our PC and nPC */

	err |= copy_from_user(&setv, &c->sigmask, sizeof(svr4_sigset_t));
	set.sig[0] = setv.sigbits[0] | (((long)setv.sigbits[1]) << 32);
	if (_NSIG_WORDS >= 2)
		set.sig[1] = setv.sigbits[2] | (((long)setv.sigbits[3]) << 32);

	err |= __get_user(u_ss_sp, &c->stack.sp);
	st.ss_sp = compat_ptr(u_ss_sp);
	err |= __get_user(st.ss_flags, &c->stack.flags);
	err |= __get_user(st.ss_size, &c->stack.size);
	if (err)
		goto sigsegv;

	/* It is more difficult to avoid calling this function than to
	   call it and ignore errors. */
	old_fs = get_fs();
	set_fs(KERNEL_DS);
	do_sigaltstack((stack_t __user *) &st, NULL, regs->u_regs[UREG_I6]);
	set_fs(old_fs);

	sigdelsetmask(&set, ~_BLOCKABLE);	/* keep SIGKILL/SIGSTOP deliverable */
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	regs->tpc = pc;
	regs->tnpc = npc | 1;	/* low bit flags entry.S to leave PC/nPC alone */
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	err |= __get_user(regs->y, &((*gr)[SVR4_Y]));
	err |= __get_user(psr, &((*gr)[SVR4_PSR]));
	/* User can only change condition codes, not privileged state. */
	regs->tstate &= ~(TSTATE_ICC|TSTATE_XCC);
	regs->tstate |= psr_to_tstate_icc(psr);

	/* Restore g[1..7] and o[0..7] registers */
	for (i = 0; i < 7; i++)
		err |= __get_user(regs->u_regs[UREG_G1+i], (&(*gr)[SVR4_G1])+i);
	for (i = 0; i < 8; i++)
		err |= __get_user(regs->u_regs[UREG_I0+i], (&(*gr)[SVR4_O0])+i);
	if (err)
		goto sigsegv;

	return -EINTR;
sigsegv:
	return -EFAULT;
}
1168
/* Build a 32-bit rt signal frame (full siginfo + ucontext-style state)
 * on the user stack and redirect the task to the handler.  Mirrors
 * new_setup_frame32 but additionally stores siginfo, sigaltstack
 * state and the complete compat sigset, and uses the rt_sigreturn
 * trampoline when no ka_restorer is registered.
 */
static void setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs,
			     unsigned long signr, sigset_t *oldset,
			     siginfo_t *info)
{
	struct rt_signal_frame32 __user *sf;
	int sigframe_size;
	u32 psr;
	int i, err;
	compat_sigset_t seta;

	/* 1. Make sure everything is clean */
	synchronize_user_stack();
	save_and_clear_fpu();

	/* The FPU area at the end of the frame is omitted when the FPU
	 * was not in use.
	 */
	sigframe_size = RT_ALIGNEDSZ;
	if (!(current_thread_info()->fpsaved[0] & FPRS_FEF))
		sigframe_size -= sizeof(__siginfo_fpu_t);

	sf = (struct rt_signal_frame32 __user *)
		get_sigframe(&ka->sa, regs, sigframe_size);

	if (invalid_frame_pointer(sf, sigframe_size))
		goto sigill;

	if (get_thread_wsaved() != 0)
		goto sigill;

	/* 2. Save the current process state */
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	err  = put_user(regs->tpc, &sf->regs.pc);
	err |= __put_user(regs->tnpc, &sf->regs.npc);
	err |= __put_user(regs->y, &sf->regs.y);
	psr = tstate_to_psr(regs->tstate);
	if (current_thread_info()->fpsaved[0] & FPRS_FEF)
		psr |= PSR_EF;
	err |= __put_user(psr, &sf->regs.psr);
	for (i = 0; i < 16; i++)
		err |= __put_user(regs->u_regs[i], &sf->regs.u_regs[i]);
	/* v8plus extra area: magic tag, upper 32 bits of the registers,
	 * and the user %asi.
	 */
	err |= __put_user(sizeof(siginfo_extra_v8plus_t), &sf->extra_size);
	err |= __put_user(SIGINFO_EXTRA_V8PLUS_MAGIC, &sf->v8plus.g_upper[0]);
	for (i = 1; i < 16; i++)
		err |= __put_user(((u32 *)regs->u_regs)[2*i],
				  &sf->v8plus.g_upper[i]);
	err |= __put_user((regs->tstate & TSTATE_ASI) >> 24UL,
			  &sf->v8plus.asi);

	if (psr & PSR_EF) {
		err |= save_fpu_state32(regs, &sf->fpu_state);
		err |= __put_user((u64)&sf->fpu_state, &sf->fpu_save);
	} else {
		err |= __put_user(0, &sf->fpu_save);
	}

	/* Update the siginfo structure. */
	err |= copy_siginfo_to_user32(&sf->info, info);

	/* Setup sigaltstack */
	err |= __put_user(current->sas_ss_sp, &sf->stack.ss_sp);
	err |= __put_user(sas_ss_flags(regs->u_regs[UREG_FP]), &sf->stack.ss_flags);
	err |= __put_user(current->sas_ss_size, &sf->stack.ss_size);

	/* Split the 64-bit sigset words into 32-bit halves; intentional
	 * fallthrough.
	 */
	switch (_NSIG_WORDS) {
	case 4: seta.sig[7] = (oldset->sig[3] >> 32);
		seta.sig[6] = oldset->sig[3];
	case 3: seta.sig[5] = (oldset->sig[2] >> 32);
		seta.sig[4] = oldset->sig[2];
	case 2: seta.sig[3] = (oldset->sig[1] >> 32);
		seta.sig[2] = oldset->sig[1];
	case 1: seta.sig[1] = (oldset->sig[0] >> 32);
		seta.sig[0] = oldset->sig[0];
	}
	err |= __copy_to_user(&sf->mask, &seta, sizeof(compat_sigset_t));

	/* Copy the current register window onto the new frame
	 * (user-to-user copy).
	 */
	err |= copy_in_user((u32 __user *)sf,
			    (u32 __user *)(regs->u_regs[UREG_FP]),
			    sizeof(struct reg_window32));
	if (err)
		goto sigsegv;

	/* 3. signal handler back-trampoline and parameters */
	regs->u_regs[UREG_FP] = (unsigned long) sf;
	regs->u_regs[UREG_I0] = signr;
	regs->u_regs[UREG_I1] = (unsigned long) &sf->info;
	regs->u_regs[UREG_I2] = (unsigned long) &sf->regs;

	/* 4. signal handler */
	regs->tpc = (unsigned long) ka->sa.sa_handler;
	regs->tnpc = (regs->tpc + 4);
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}

	/* 5. return to kernel instructions */
	if (ka->ka_restorer)
		regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
	else {
		/* Flush instruction space. */
		unsigned long address = ((unsigned long)&(sf->insns[0]));
		pgd_t *pgdp = pgd_offset(current->mm, address);
		pud_t *pudp = pud_offset(pgdp, address);
		pmd_t *pmdp = pmd_offset(pudp, address);
		pte_t *ptep;

		/* %i7 points two instructions before the trampoline so
		 * the handler's "ret; restore" lands on insns[0].
		 */
		regs->u_regs[UREG_I7] = (unsigned long) (&(sf->insns[0]) - 2);

		/* mov __NR_rt_sigreturn, %g1 */
		err |= __put_user(0x82102065, &sf->insns[0]);

		/* t 0x10 */
		err |= __put_user(0x91d02010, &sf->insns[1]);
		if (err)
			goto sigsegv;

		/* Flush the just-written trampoline from the D-cache to
		 * the I-cache; the pte walk finds the backing page for
		 * the flush instruction.
		 */
		preempt_disable();
		ptep = pte_offset_map(pmdp, address);
		if (pte_present(*ptep)) {
			unsigned long page = (unsigned long)
				page_address(pte_page(*ptep));

			__asm__ __volatile__(
			"	membar	#StoreStore\n"
			"	flush	%0 + %1"
			: : "r" (page), "r" (address & (PAGE_SIZE - 1))
			: "memory");
		}
		pte_unmap(ptep);
		preempt_enable();
	}
	return;

sigill:
	do_exit(SIGILL);
sigsegv:
	force_sigsegv(signr, current);
}
1308
1309static inline void handle_signal32(unsigned long signr, struct k_sigaction *ka,
1310 siginfo_t *info,
1311 sigset_t *oldset, struct pt_regs *regs,
1312 int svr4_signal)
1313{
1314 if (svr4_signal)
1315 setup_svr4_frame32(&ka->sa, regs->tpc, regs->tnpc,
1316 regs, signr, oldset);
1317 else {
1318 if (ka->sa.sa_flags & SA_SIGINFO)
1319 setup_rt_frame32(ka, regs, signr, oldset, info);
1320 else if (test_thread_flag(TIF_NEWSIGNALS))
1321 new_setup_frame32(ka, regs, signr, oldset);
1322 else
1323 setup_frame32(&ka->sa, regs, signr, oldset, info);
1324 }
1325 if (!(ka->sa.sa_flags & SA_NOMASK)) {
1326 spin_lock_irq(&current->sighand->siglock);
1327 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
1328 sigaddset(&current->blocked,signr);
1329 recalc_sigpending();
1330 spin_unlock_irq(&current->sighand->siglock);
1331 }
1332}
1333
1334static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs,
1335 struct sigaction *sa)
1336{
1337 switch (regs->u_regs[UREG_I0]) {
1338 case ERESTART_RESTARTBLOCK:
1339 case ERESTARTNOHAND:
1340 no_system_call_restart:
1341 regs->u_regs[UREG_I0] = EINTR;
1342 regs->tstate |= TSTATE_ICARRY;
1343 break;
1344 case ERESTARTSYS:
1345 if (!(sa->sa_flags & SA_RESTART))
1346 goto no_system_call_restart;
1347 /* fallthrough */
1348 case ERESTARTNOINTR:
1349 regs->u_regs[UREG_I0] = orig_i0;
1350 regs->tpc -= 4;
1351 regs->tnpc -= 4;
1352 }
1353}
1354
1355/* Note that 'init' is a special process: it doesn't get signals it doesn't
1356 * want to handle. Thus you cannot kill init even with a SIGKILL even by
1357 * mistake.
1358 */
/* Deliver at most one pending signal to a 32-bit compat task.
 *
 * Returns 1 if a handler frame was set up (caller returns to user mode
 * through it), 0 otherwise — in which case any interrupted system call
 * is re-armed here for transparent restart.
 */
int do_signal32(sigset_t *oldset, struct pt_regs * regs,
		unsigned long orig_i0, int restart_syscall)
{
	siginfo_t info;
	struct signal_deliver_cookie cookie;
	struct k_sigaction ka;
	int signr;
	int svr4_signal = current->personality == PER_SVR4;

	cookie.restart_syscall = restart_syscall;
	cookie.orig_i0 = orig_i0;

	signr = get_signal_to_deliver(&info, &ka, regs, &cookie);
	if (signr > 0) {
		/* Fix up the interrupted syscall (EINTR vs. restart)
		 * before the handler frame is built.
		 */
		if (cookie.restart_syscall)
			syscall_restart32(orig_i0, regs, &ka.sa);
		handle_signal32(signr, &ka, &info, oldset,
				regs, svr4_signal);
		return 1;
	}
	/* No handler ran: restart interrupted syscalls transparently by
	 * restoring %i0 and replaying the trap instruction.
	 */
	if (cookie.restart_syscall &&
	    (regs->u_regs[UREG_I0] == ERESTARTNOHAND ||
	     regs->u_regs[UREG_I0] == ERESTARTSYS ||
	     regs->u_regs[UREG_I0] == ERESTARTNOINTR)) {
		/* replay the system call when we are done */
		regs->u_regs[UREG_I0] = cookie.orig_i0;
		regs->tpc -= 4;
		regs->tnpc -= 4;
	}
	/* ERESTART_RESTARTBLOCK restarts via sys_restart_syscall,
	 * whose number is loaded into %g1 before the replay.
	 */
	if (cookie.restart_syscall &&
	    regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) {
		regs->u_regs[UREG_G1] = __NR_restart_syscall;
		regs->tpc -= 4;
		regs->tnpc -= 4;
	}
	return 0;
}
1396
/* Old-style 32-bit sigstack(2) descriptor as seen by compat tasks. */
struct sigstack32 {
	u32 the_stack;		/* user pointer to the stack's high end */
	int cur_status;		/* non-zero if currently on the signal stack */
};
1401
/* Compat implementation of the legacy sigstack(2) syscall.
 * Optionally reports the current signal stack into *ossptr and/or
 * installs a new one from *ssptr.  sp is the task's current user
 * stack pointer, used for the on_sig_stack() checks.
 * Returns 0, -EFAULT on a bad user pointer, or -EPERM when trying to
 * change the stack while executing on it.
 */
asmlinkage int do_sys32_sigstack(u32 u_ssptr, u32 u_ossptr, unsigned long sp)
{
	struct sigstack32 __user *ssptr =
		(struct sigstack32 __user *)((unsigned long)(u_ssptr));
	struct sigstack32 __user *ossptr =
		(struct sigstack32 __user *)((unsigned long)(u_ossptr));
	int ret = -EFAULT;

	/* First see if old state is wanted. */
	if (ossptr) {
		/* put_user validates the pointer; the following
		 * __put_user may then skip the access check.
		 */
		if (put_user(current->sas_ss_sp + current->sas_ss_size,
			     &ossptr->the_stack) ||
		    __put_user(on_sig_stack(sp), &ossptr->cur_status))
			goto out;
	}

	/* Now see if we want to update the new state. */
	if (ssptr) {
		u32 ss_sp;

		if (get_user(ss_sp, &ssptr->the_stack))
			goto out;

		/* If the current stack was set with sigaltstack, don't
		 * swap stacks while we are on it.
		 */
		ret = -EPERM;
		if (current->sas_ss_sp && on_sig_stack(sp))
			goto out;

		/* Since we don't know the extent of the stack, and we don't
		 * track onstack-ness, but rather calculate it, we must
		 * presume a size.  Ho hum this interface is lossy.
		 */
		current->sas_ss_sp = (unsigned long)ss_sp - SIGSTKSZ;
		current->sas_ss_size = SIGSTKSZ;
	}

	ret = 0;
out:
	return ret;
}
1444
/* Compat wrapper for sigaltstack(2): widen the 32-bit stack_t32
 * structures into native stack_t, call do_sigaltstack() under
 * KERNEL_DS (so its user-access macros accept our kernel-space
 * temporaries), then narrow the old state back out to user space.
 */
asmlinkage long do_sys32_sigaltstack(u32 ussa, u32 uossa, unsigned long sp)
{
	stack_t uss, uoss;
	u32 u_ss_sp = 0;
	int ret;
	mm_segment_t old_fs;
	stack_t32 __user *uss32 = compat_ptr(ussa);
	stack_t32 __user *uoss32 = compat_ptr(uossa);

	if (ussa && (get_user(u_ss_sp, &uss32->ss_sp) ||
		     __get_user(uss.ss_flags, &uss32->ss_flags) ||
		     __get_user(uss.ss_size, &uss32->ss_size)))
		return -EFAULT;
	uss.ss_sp = compat_ptr(u_ss_sp);
	/* Temporarily lift the user-space limit for do_sigaltstack(). */
	old_fs = get_fs();
	set_fs(KERNEL_DS);
	ret = do_sigaltstack(ussa ? (stack_t __user *) &uss : NULL,
			     uossa ? (stack_t __user *) &uoss : NULL, sp);
	set_fs(old_fs);
	/* Copy the old state out only on success. */
	if (!ret && uossa && (put_user(ptr_to_compat(uoss.ss_sp), &uoss32->ss_sp) ||
		__put_user(uoss.ss_flags, &uoss32->ss_flags) ||
		__put_user(uoss.ss_size, &uoss32->ss_size)))
		return -EFAULT;
	return ret;
}
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
new file mode 100644
index 000000000000..6dff06a44e76
--- /dev/null
+++ b/arch/sparc64/kernel/smp.c
@@ -0,0 +1,1244 @@
1/* smp.c: Sparc64 SMP support.
2 *
3 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
4 */
5
6#include <linux/module.h>
7#include <linux/kernel.h>
8#include <linux/sched.h>
9#include <linux/mm.h>
10#include <linux/pagemap.h>
11#include <linux/threads.h>
12#include <linux/smp.h>
13#include <linux/smp_lock.h>
14#include <linux/interrupt.h>
15#include <linux/kernel_stat.h>
16#include <linux/delay.h>
17#include <linux/init.h>
18#include <linux/spinlock.h>
19#include <linux/fs.h>
20#include <linux/seq_file.h>
21#include <linux/cache.h>
22#include <linux/jiffies.h>
23#include <linux/profile.h>
24#include <linux/bootmem.h>
25
26#include <asm/head.h>
27#include <asm/ptrace.h>
28#include <asm/atomic.h>
29#include <asm/tlbflush.h>
30#include <asm/mmu_context.h>
31#include <asm/cpudata.h>
32
33#include <asm/irq.h>
34#include <asm/page.h>
35#include <asm/pgtable.h>
36#include <asm/oplib.h>
37#include <asm/uaccess.h>
38#include <asm/timer.h>
39#include <asm/starfire.h>
40#include <asm/tlb.h>
41
extern int linux_num_cpus;
extern void calibrate_delay(void);

/* Please don't make this stuff initdata!!!  --DaveM */
static unsigned char boot_cpu_id;	/* hard cpu id of the boot processor */

cpumask_t cpu_online_map = CPU_MASK_NONE;	/* cpus fully up and running */
cpumask_t phys_cpu_present_map = CPU_MASK_NONE;	/* cpus physically present */
static cpumask_t smp_commenced_mask;	/* cpus released to enter the idle loop */
static cpumask_t cpu_callout_map;	/* cpus we have attempted to boot */
52
53void smp_info(struct seq_file *m)
54{
55 int i;
56
57 seq_printf(m, "State:\n");
58 for (i = 0; i < NR_CPUS; i++) {
59 if (cpu_online(i))
60 seq_printf(m,
61 "CPU%d:\t\tonline\n", i);
62 }
63}
64
65void smp_bogo(struct seq_file *m)
66{
67 int i;
68
69 for (i = 0; i < NR_CPUS; i++)
70 if (cpu_online(i))
71 seq_printf(m,
72 "Cpu%dBogo\t: %lu.%02lu\n"
73 "Cpu%dClkTck\t: %016lx\n",
74 i, cpu_data(i).udelay_val / (500000/HZ),
75 (cpu_data(i).udelay_val / (5000/HZ)) % 100,
76 i, cpu_data(i).clock_tick);
77}
78
/* Record per-cpu data for cpu `id': delay-loop calibration, the
 * clock tick frequency read from the PROM node, and empty page-table
 * cache state.
 */
void __init smp_store_cpu_info(int id)
{
	int cpu_node;

	/* multiplier and counter set by
	   smp_setup_percpu_timer()  */
	cpu_data(id).udelay_val = loops_per_jiffy;

	cpu_find_by_mid(id, &cpu_node);
	cpu_data(id).clock_tick = prom_getintdefault(cpu_node,
						     "clock-frequency", 0);

	/* Start with empty per-cpu page table caches. */
	cpu_data(id).pgcache_size = 0;
	cpu_data(id).pte_cache[0] = NULL;
	cpu_data(id).pte_cache[1] = NULL;
	cpu_data(id).pgd_cache = NULL;
	cpu_data(id).idle_volume = 1;
}
97
static void smp_setup_percpu_timer(void);

/* Set by a secondary cpu in smp_callin() once it is alive; polled by
 * smp_boot_one_cpu() on the boot cpu, hence volatile.
 */
static volatile unsigned long callin_flag = 0;

extern void inherit_locked_prom_mappings(int save_p);
103
/* Load this cpu's per-cpu area offset into %g5 and into the IMMU
 * TSB register, so per-cpu variable accesses and the trap handlers
 * can find it.
 */
static inline void cpu_setup_percpu_base(unsigned long cpu_id)
{
	__asm__ __volatile__("mov	%0, %%g5\n\t"
			     "stxa	%0, [%1] %2\n\t"
			     "membar	#Sync"
			     : /* no outputs */
			     : "r" (__per_cpu_offset(cpu_id)),
			       "r" (TSB_REG), "i" (ASI_IMMU));
}
113
/* First C code run by a secondary cpu: set up MMU/per-cpu state,
 * calibrate, announce ourselves via callin_flag, then spin until the
 * boot cpu releases us through smp_commenced_mask and mark ourselves
 * online.
 */
void __init smp_callin(void)
{
	int cpuid = hard_smp_processor_id();

	inherit_locked_prom_mappings(0);

	__flush_tlb_all();

	cpu_setup_percpu_base(cpuid);

	smp_setup_percpu_timer();

	local_irq_enable();

	calibrate_delay();
	smp_store_cpu_info(cpuid);
	/* Tell smp_boot_one_cpu() we made it. */
	callin_flag = 1;
	__asm__ __volatile__("membar #Sync\n\t"
			     "flush  %%g6" : : : "memory");

	/* Clear this or we will die instantly when we
	 * schedule back to this idler...
	 */
	clear_thread_flag(TIF_NEWCHILD);

	/* Attach to the address space of init_task. */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	/* Wait for the master to let us into the idle loop. */
	while (!cpu_isset(cpuid, smp_commenced_mask))
		membar("#LoadLoad");

	cpu_set(cpuid, cpu_online_map);
}
148
/* Idle threads must never return from cpu_idle(); if one does we have
 * lost control of that cpu, so all we can do is panic.
 */
void cpu_panic(void)
{
	int cpu = smp_processor_id();

	printk("CPU[%d]: Returns from cpu_idle!\n", cpu);
	panic("SMP bolixed\n");
}
154
/* Number of %tick cycles per timer interrupt, set in smp_tick_init(). */
static unsigned long current_tick_offset;

/* This tick register synchronization scheme is taken entirely from
 * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
 *
 * The only change I've made is to rework it so that the master
 * initiates the synchronization instead of the slave.  -DaveM
 */

/* Indices into go[]: MASTER and SLAVE live in different cache lines
 * to avoid false sharing during the handshake.
 */
#define MASTER	0
#define SLAVE	(SMP_CACHE_BYTES/sizeof(unsigned long))

#define NUM_ROUNDS	64	/* magic value */
#define NUM_ITERS	5	/* likewise */

static DEFINE_SPINLOCK(itc_sync_lock);
static unsigned long go[SLAVE + 1];	/* master/slave handshake flags */

#define DEBUG_TICK_SYNC	0
174
/* Slave side of one measurement round: ping-pong with the master via
 * go[] NUM_ITERS times, keep the iteration with the smallest local
 * round-trip (t1 - t0), and estimate the tick delta as the distance
 * between our interval midpoint and the master's timestamp.
 * *rt gets the best round-trip time, *master the master's offset.
 */
static inline long get_delta (long *rt, long *master)
{
	unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
	unsigned long tcenter, t0, t1, tm;
	unsigned long i;

	for (i = 0; i < NUM_ITERS; i++) {
		t0 = tick_ops->get_tick();
		/* Ask the master for its tick value... */
		go[MASTER] = 1;
		membar("#StoreLoad");
		/* ...and wait for it to land in go[SLAVE]. */
		while (!(tm = go[SLAVE]))
			membar("#LoadLoad");
		go[SLAVE] = 0;
		membar("#StoreStore");
		t1 = tick_ops->get_tick();

		if (t1 - t0 < best_t1 - best_t0)
			best_t0 = t0, best_t1 = t1, best_tm = tm;
	}

	*rt = best_t1 - best_t0;
	*master = best_tm - best_t0;

	/* average best_t0 and best_t1 without overflow: */
	tcenter = (best_t0/2 + best_t1/2);
	if (best_t0 % 2 + best_t1 % 2 == 2)
		tcenter++;
	return tcenter - best_tm;
}
204
205void smp_synchronize_tick_client(void)
206{
207 long i, delta, adj, adjust_latency = 0, done = 0;
208 unsigned long flags, rt, master_time_stamp, bound;
209#if DEBUG_TICK_SYNC
210 struct {
211 long rt; /* roundtrip time */
212 long master; /* master's timestamp */
213 long diff; /* difference between midpoint and master's timestamp */
214 long lat; /* estimate of itc adjustment latency */
215 } t[NUM_ROUNDS];
216#endif
217
218 go[MASTER] = 1;
219
220 while (go[MASTER])
221 membar("#LoadLoad");
222
223 local_irq_save(flags);
224 {
225 for (i = 0; i < NUM_ROUNDS; i++) {
226 delta = get_delta(&rt, &master_time_stamp);
227 if (delta == 0) {
228 done = 1; /* let's lock on to this... */
229 bound = rt;
230 }
231
232 if (!done) {
233 if (i > 0) {
234 adjust_latency += -delta;
235 adj = -delta + adjust_latency/4;
236 } else
237 adj = -delta;
238
239 tick_ops->add_tick(adj, current_tick_offset);
240 }
241#if DEBUG_TICK_SYNC
242 t[i].rt = rt;
243 t[i].master = master_time_stamp;
244 t[i].diff = delta;
245 t[i].lat = adjust_latency/4;
246#endif
247 }
248 }
249 local_irq_restore(flags);
250
251#if DEBUG_TICK_SYNC
252 for (i = 0; i < NUM_ROUNDS; i++)
253 printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
254 t[i].rt, t[i].master, t[i].diff, t[i].lat);
255#endif
256
257 printk(KERN_INFO "CPU %d: synchronized TICK with master CPU (last diff %ld cycles,"
258 "maxerr %lu cycles)\n", smp_processor_id(), delta, rt);
259}
260
261static void smp_start_sync_tick_client(int cpu);
262
/* Master side of %tick synchronization for one slave cpu: cross-call
 * the slave into smp_synchronize_tick_client(), handshake readiness
 * via go[MASTER], then feed it NUM_ROUNDS*NUM_ITERS tick samples
 * through go[SLAVE].  itc_sync_lock serializes masters; interrupts are
 * off so the sample loop is not perturbed.
 */
static void smp_synchronize_one_tick(int cpu)
{
	unsigned long flags, i;

	go[MASTER] = 0;

	smp_start_sync_tick_client(cpu);

	/* wait for client to be ready */
	while (!go[MASTER])
		membar("#LoadLoad");

	/* now let the client proceed into his loop */
	go[MASTER] = 0;
	membar("#StoreLoad");

	spin_lock_irqsave(&itc_sync_lock, flags);
	{
		for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
			/* Wait for the slave's request, then answer it
			 * with our current tick value.
			 */
			while (!go[MASTER])
				membar("#LoadLoad");
			go[MASTER] = 0;
			membar("#StoreStore");
			go[SLAVE] = tick_ops->get_tick();
			membar("#StoreLoad");
		}
	}
	spin_unlock_irqrestore(&itc_sync_lock, flags);
}
292
/* Assembly entry point handed to the PROM as the new cpu's start
 * address (see smp_boot_one_cpu() below).
 */
extern unsigned long sparc64_cpu_startup;

/* The OBP cpu startup callback truncates the 3rd arg cookie to
 * 32-bits (I think) so to be safe we have it read the pointer
 * contained here so we work on >4GB machines. -DaveM
 */
static struct thread_info *cpu_new_thread = NULL;
300
/* Boot one secondary cpu: create its idle thread, start it through the
 * PROM at sparc64_cpu_startup, and poll callin_flag (set by
 * smp_callin()) for up to ~500ms.  Returns 0 on success, -ENODEV if
 * the cpu never checked in.
 *
 * NOTE(review): the fork_idle() result is used unchecked — presumably
 * it cannot fail this early in boot, but confirm before relying on it.
 */
static int __devinit smp_boot_one_cpu(unsigned int cpu)
{
	unsigned long entry =
		(unsigned long)(&sparc64_cpu_startup);
	unsigned long cookie =
		(unsigned long)(&cpu_new_thread);
	struct task_struct *p;
	int timeout, ret, cpu_node;

	p = fork_idle(cpu);
	callin_flag = 0;
	/* Publish the new thread_info for the startup trampoline. */
	cpu_new_thread = p->thread_info;
	cpu_set(cpu, cpu_callout_map);

	cpu_find_by_mid(cpu, &cpu_node);
	prom_startcpu(cpu_node, entry, cookie);

	/* Wait up to 5000000 * 100us for the cpu to check in. */
	for (timeout = 0; timeout < 5000000; timeout++) {
		if (callin_flag)
			break;
		udelay(100);
	}
	if (callin_flag) {
		ret = 0;
	} else {
		printk("Processor %d is stuck.\n", cpu);
		cpu_clear(cpu, cpu_callout_map);
		ret = -ENODEV;
	}
	cpu_new_thread = NULL;

	return ret;
}
334
/* Spitfire: deliver one 3-word cross-call vector (data0/1/2) to a
 * single target cpu through the UDB interrupt dispatch registers.
 * Runs with interrupts disabled around the dispatch; busy-waits on the
 * dispatch status and retries (after a small delay) while the target
 * NACKs, giving up only after `stuck' polls.
 */
static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
{
	u64 result, target;
	int stuck, tmp;

	if (this_is_starfire) {
		/* map to real upaid */
		cpu = (((cpu & 0x3c) << 1) |
			((cpu & 0x40) >> 4) |
			(cpu & 0x3));
	}

	/* Interrupt dispatch address for the target's UPA id. */
	target = (cpu << 14) | 0x70;
again:
	/* Ok, this is the real Spitfire Errata #54.
	 * One must read back from a UDB internal register
	 * after writes to the UDB interrupt dispatch, but
	 * before the membar Sync for that write.
	 * So we use the high UDB control register (ASI 0x7f,
	 * ADDR 0x20) for the dummy read. -DaveM
	 */
	tmp = 0x40;
	__asm__ __volatile__(
	"wrpr	%1, %2, %%pstate\n\t"
	"stxa	%4, [%0] %3\n\t"
	"stxa	%5, [%0+%8] %3\n\t"
	"add	%0, %8, %0\n\t"
	"stxa	%6, [%0+%8] %3\n\t"
	"membar	#Sync\n\t"
	"stxa	%%g0, [%7] %3\n\t"
	"membar	#Sync\n\t"
	"mov	0x20, %%g1\n\t"
	"ldxa	[%%g1] 0x7f, %%g0\n\t"
	"membar	#Sync"
	: "=r" (tmp)
	: "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W),
	  "r" (data0), "r" (data1), "r" (data2), "r" (target),
	  "r" (0x10), "0" (tmp)
        : "g1");

	/* NOTE: PSTATE_IE is still clear. */
	stuck = 100000;
	do {
		__asm__ __volatile__("ldxa [%%g0] %1, %0"
			: "=r" (result)
			: "i" (ASI_INTR_DISPATCH_STAT));
		if (result == 0) {
			/* Delivered: restore interrupt state and go. */
			__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
					     : : "r" (pstate));
			return;
		}
		stuck -= 1;
		if (stuck == 0)
			break;
	} while (result & 0x1);
	__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
			     : : "r" (pstate));
	if (stuck == 0) {
		printk("CPU[%d]: mondo stuckage result[%016lx]\n",
		       smp_processor_id(), result);
	} else {
		/* Target NACKed us: back off briefly and retry. */
		udelay(2);
		goto again;
	}
}
400
/* Spitfire can only dispatch to one cpu at a time, so deliver the
 * cross-call serially to every cpu in the mask.
 */
static __inline__ void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
{
	u64 pstate;
	int i;

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
	for_each_cpu_mask(i, mask)
		spitfire_xcall_helper(data0, data1, data2, pstate, i);
}
410
411/* Cheetah now allows to send the whole 64-bytes of data in the interrupt
412 * packet, but we have no use for that. However we do take advantage of
413 * the new pipelining feature (ie. dispatch to multiple cpus simultaneously).
414 */
/* Cheetah: write the 3-word vector once, dispatch it to every cpu in
 * the mask using the pipelined interrupt dispatch, then poll
 * ASI_INTR_DISPATCH_STAT and retry delivery to any cpu that NACKed.
 */
static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
{
	u64 pstate, ver;
	int nack_busy_id, is_jalapeno;

	if (cpus_empty(mask))
		return;

	/* Unfortunately, someone at Sun had the brilliant idea to make the
	 * busy/nack fields hard-coded by ITID number for this Ultra-III
	 * derivative processor.
	 */
	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
	is_jalapeno = ((ver >> 32) == 0x003e0016);

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));

retry:
	/* Interrupts stay off while the dispatch registers are live. */
	__asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
			     : : "r" (pstate), "i" (PSTATE_IE));

	/* Setup the dispatch data registers. */
	__asm__ __volatile__("stxa %0, [%3] %6\n\t"
			     "stxa %1, [%4] %6\n\t"
			     "stxa %2, [%5] %6\n\t"
			     "membar #Sync\n\t"
			     : /* no outputs */
			     : "r" (data0), "r" (data1), "r" (data2),
			       "r" (0x40), "r" (0x50), "r" (0x60),
			       "i" (ASI_INTR_W));

	nack_busy_id = 0;
	{
		int i;

		/* One dispatch per target; each gets its own busy/nack
		 * slot, except on Jalapeno where the slot is fixed by
		 * the target's ITID.
		 */
		for_each_cpu_mask(i, mask) {
			u64 target = (i << 14) | 0x70;

			if (!is_jalapeno)
				target |= (nack_busy_id << 24);
			__asm__ __volatile__(
				"stxa	%%g0, [%0] %1\n\t"
				"membar	#Sync\n\t"
				: /* no outputs */
				: "r" (target), "i" (ASI_INTR_W));
			nack_busy_id++;
		}
	}

	/* Now, poll for completion. */
	{
		u64 dispatch_stat;
		long stuck;

		stuck = 100000 * nack_busy_id;
		do {
			__asm__ __volatile__("ldxa [%%g0] %1, %0"
					     : "=r" (dispatch_stat)
					     : "i" (ASI_INTR_DISPATCH_STAT));
			if (dispatch_stat == 0UL) {
				/* All delivered: restore interrupts. */
				__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
						     : : "r" (pstate));
				return;
			}
			if (!--stuck)
				break;
		} while (dispatch_stat & 0x5555555555555555UL);

		__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
				     : : "r" (pstate));

		if ((dispatch_stat & ~(0x5555555555555555UL)) == 0) {
			/* Busy bits will not clear, continue instead
			 * of freezing up on this cpu.
			 */
			printk("CPU[%d]: mondo stuckage result[%016lx]\n",
			       smp_processor_id(), dispatch_stat);
		} else {
			int i, this_busy_nack = 0;

			/* Delay some random time with interrupts enabled
			 * to prevent deadlock.
			 */
			udelay(2 * nack_busy_id);

			/* Clear out the mask bits for cpus which did not
			 * NACK us.
			 */
			for_each_cpu_mask(i, mask) {
				u64 check_mask;

				if (is_jalapeno)
					check_mask = (0x2UL << (2*i));
				else
					check_mask = (0x2UL <<
						      this_busy_nack);
				if ((dispatch_stat & check_mask) == 0)
					cpu_clear(i, mask);
				this_busy_nack += 2;
			}

			goto retry;
		}
	}
}
520
521/* Send cross call to all processors mentioned in MASK
522 * except self.
523 */
/* Deliver the cross-call named by `func' (low 32 bits of its address,
 * with `ctx' in the high word) plus data1/data2 to every online cpu in
 * `mask' except ourselves, using whichever dispatch mechanism matches
 * the tlb_type.  get_cpu()/put_cpu() pin us for the duration.
 */
static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, cpumask_t mask)
{
	u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));
	int this_cpu = get_cpu();

	cpus_and(mask, mask, cpu_online_map);
	cpu_clear(this_cpu, mask);

	if (tlb_type == spitfire)
		spitfire_xcall_deliver(data0, data1, data2, mask);
	else
		cheetah_xcall_deliver(data0, data1, data2, mask);
	/* NOTE: Caller runs local copy on master. */

	put_cpu();
}
540
extern unsigned long xcall_sync_tick;

/* Cross-call one cpu into smp_synchronize_tick_client(). */
static void smp_start_sync_tick_client(int cpu)
{
	cpumask_t mask = cpumask_of_cpu(cpu);

	smp_cross_call_masked(&xcall_sync_tick,
			      0, 0, 0, mask);
}
550
/* Send cross call to all processors except self. */
#define smp_cross_call(func, ctx, data1, data2) \
	smp_cross_call_masked(func, ctx, data1, data2, cpu_online_map)

/* Shared descriptor for one smp_call_function() invocation; remote
 * cpus read it in smp_call_function_client() below.
 */
struct call_data_struct {
	void (*func) (void *info);	/* function to run remotely */
	void *info;			/* its argument */
	atomic_t finished;		/* count of cpus done/started */
	int wait;			/* wait for completion, not just receipt */
};

/* call_lock serializes smp_call_function() callers; call_data points
 * at the active caller's descriptor.
 */
static DEFINE_SPINLOCK(call_lock);
static struct call_data_struct *call_data;

extern unsigned long xcall_call_function;
566
567/*
568 * You must not call this function with disabled interrupts or from a
569 * hardware interrupt handler or from a bottom half handler.
570 */
/* Run `func(info)' on every other online cpu.  If `wait' is set,
 * remote cpus run func before bumping `finished'; either way we spin
 * (bounded by `timeout' polls) until all have acknowledged.
 * `nonatomic' is accepted for interface compatibility but unused.
 * Returns 0 — even on timeout, which is only reported via printk.
 */
int smp_call_function(void (*func)(void *info), void *info,
		      int nonatomic, int wait)
{
	struct call_data_struct data;
	int cpus = num_online_cpus() - 1;
	long timeout;

	if (!cpus)
		return 0;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	atomic_set(&data.finished, 0);
	data.wait = wait;

	spin_lock(&call_lock);

	call_data = &data;

	smp_cross_call(&xcall_call_function, 0, 0, 0);

	/* 
	 * Wait for other cpus to complete function or at
	 * least snap the call data.
	 */
	timeout = 1000000;
	while (atomic_read(&data.finished) != cpus) {
		if (--timeout <= 0)
			goto out_timeout;
		barrier();
		udelay(1);
	}

	spin_unlock(&call_lock);

	return 0;

out_timeout:
	spin_unlock(&call_lock);
	printk("XCALL: Remote cpus not responding, ncpus=%ld finished=%ld\n",
	       (long) num_online_cpus() - 1L,
	       (long) atomic_read(&data.finished));
	return 0;
}
618
/* Remote side of smp_call_function(): run the shared call_data.  The
 * order of func() vs. atomic_inc() implements the `wait' semantics —
 * incrementing first lets the initiator proceed as soon as we have
 * snapped the data.
 */
void smp_call_function_client(int irq, struct pt_regs *regs)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;

	clear_softint(1 << irq);
	if (call_data->wait) {
		/* let initiator proceed only after completion */
		func(info);
		atomic_inc(&call_data->finished);
	} else {
		/* let initiator proceed after getting data */
		atomic_inc(&call_data->finished);
		func(info);
	}
}
635
/* Assembly cross-call entry points (TLB/cache flush, register dump,
 * signal poke), delivered via smp_cross_call*() above.
 */
extern unsigned long xcall_flush_tlb_mm;
extern unsigned long xcall_flush_tlb_pending;
extern unsigned long xcall_flush_tlb_kernel_range;
extern unsigned long xcall_flush_tlb_all_spitfire;
extern unsigned long xcall_flush_tlb_all_cheetah;
extern unsigned long xcall_report_regs;
extern unsigned long xcall_receive_signal;

#ifdef DCACHE_ALIASING_POSSIBLE
extern unsigned long xcall_flush_dcache_page_cheetah;
#endif
extern unsigned long xcall_flush_dcache_page_spitfire;

#ifdef CONFIG_DEBUG_DCFLUSH
extern atomic_t dcpage_flushes;
extern atomic_t dcpage_flushes_xcall;
#endif
653
/* Flush one page from this cpu's D-cache (and, on Spitfire with a
 * mapped page, the I-cache too); when D-cache aliasing is impossible
 * only the Spitfire I-cache case needs work.
 */
static __inline__ void __local_flush_dcache_page(struct page *page)
{
#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}
666
/* Flush one page from the D-cache of the given cpu: locally when that
 * cpu is us, otherwise by cross-call (bit 32 of data0 tells the
 * Spitfire handler whether the page is mapped and needs the I-cache
 * flushed as well).
 */
void smp_flush_dcache_page_impl(struct page *page, int cpu)
{
	cpumask_t mask = cpumask_of_cpu(cpu);
	int this_cpu = get_cpu();

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif
	if (cpu == this_cpu) {
		__local_flush_dcache_page(page);
	} else if (cpu_online(cpu)) {
		void *pg_addr = page_address(page);
		u64 data0;

		if (tlb_type == spitfire) {
			data0 =
				((u64)&xcall_flush_dcache_page_spitfire);
			if (page_mapping(page) != NULL)
				data0 |= ((u64)1 << 32);
			spitfire_xcall_deliver(data0,
					       __pa(pg_addr),
					       (u64) pg_addr,
					       mask);
		} else {
#ifdef DCACHE_ALIASING_POSSIBLE
			data0 =
				((u64)&xcall_flush_dcache_page_cheetah);
			cheetah_xcall_deliver(data0,
					      __pa(pg_addr),
					      0, mask);
#endif
		}
#ifdef CONFIG_DEBUG_DCFLUSH
		atomic_inc(&dcpage_flushes_xcall);
#endif
	}

	put_cpu();
}
706
/* Flush one page from the D-cache of every online cpu: cross-call all
 * the others (if any), then always flush locally.
 */
void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
{
	void *pg_addr = page_address(page);
	cpumask_t mask = cpu_online_map;
	u64 data0;
	int this_cpu = get_cpu();

	cpu_clear(this_cpu, mask);

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif
	if (cpus_empty(mask))
		goto flush_self;
	if (tlb_type == spitfire) {
		data0 = ((u64)&xcall_flush_dcache_page_spitfire);
		/* Bit 32 tells the handler the page is mapped, so the
		 * I-cache needs flushing too.
		 */
		if (page_mapping(page) != NULL)
			data0 |= ((u64)1 << 32);
		spitfire_xcall_deliver(data0,
				       __pa(pg_addr),
				       (u64) pg_addr,
				       mask);
	} else {
#ifdef DCACHE_ALIASING_POSSIBLE
		data0 = ((u64)&xcall_flush_dcache_page_cheetah);
		cheetah_xcall_deliver(data0,
				      __pa(pg_addr),
				      0, mask);
#endif
	}
#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes_xcall);
#endif
 flush_self:
	__local_flush_dcache_page(page);

	put_cpu();
}
745
/* Poke one cpu with the xcall_receive_signal softint so it re-checks
 * for pending work on its way out of the trap.
 */
void smp_receive_signal(int cpu)
{
	cpumask_t mask = cpumask_of_cpu(cpu);

	if (cpu_online(cpu)) {
		u64 data0 = (((u64)&xcall_receive_signal) & 0xffffffff);

		if (tlb_type == spitfire)
			spitfire_xcall_deliver(data0, 0, 0, mask);
		else
			cheetah_xcall_deliver(data0, 0, 0, mask);
	}
}
759
/* Receiver for smp_receive_signal() pokes. */
void smp_receive_signal_client(int irq, struct pt_regs *regs)
{
	/* Just return, rtrap takes care of the rest. */
	clear_softint(1 << irq);
}
765
/* Make every other cpu dump its registers (debugging aid). */
void smp_report_regs(void)
{
	smp_cross_call(&xcall_report_regs, 0, 0, 0);
}
770
/* Flush the entire TLB on every cpu (remote via cross-call, then
 * locally), picking the handler that matches the tlb_type.
 */
void smp_flush_tlb_all(void)
{
	if (tlb_type == spitfire)
		smp_cross_call(&xcall_flush_tlb_all_spitfire, 0, 0, 0);
	else
		smp_cross_call(&xcall_flush_tlb_all_cheetah, 0, 0, 0);
	__flush_tlb_all();
}
779
780/* We know that the window frames of the user have been flushed
781 * to the stack before we get here because all callers of us
782 * are flush_tlb_*() routines, and these run after flush_cache_*()
783 * which performs the flushw.
784 *
785 * The SMP TLB coherency scheme we use works as follows:
786 *
787 * 1) mm->cpu_vm_mask is a bit mask of which cpus an address
788 * space has (potentially) executed on, this is the heuristic
789 * we use to avoid doing cross calls.
790 *
791 * Also, for flushing from kswapd and also for clones, we
792 * use cpu_vm_mask as the list of cpus to make run the TLB.
793 *
794 * 2) TLB context numbers are shared globally across all processors
795 * in the system, this allows us to play several games to avoid
796 * cross calls.
797 *
798 * One invariant is that when a cpu switches to a process, and
 * that process's tsk->active_mm->cpu_vm_mask does not have the
800 * current cpu's bit set, that tlb context is flushed locally.
801 *
802 * If the address space is non-shared (ie. mm->count == 1) we avoid
803 * cross calls when we want to flush the currently running process's
804 * tlb state. This is done by clearing all cpu bits except the current
805 * processor's in current->active_mm->cpu_vm_mask and performing the
806 * flush locally only. This will force any subsequent cpus which run
807 * this task to flush the context from the local tlb if the process
808 * migrates to another cpu (again).
809 *
810 * 3) For shared address spaces (threads) and swapping we bite the
811 * bullet for most cases and perform the cross call (but only to
812 * the cpus listed in cpu_vm_mask).
813 *
814 * The performance gain from "optimizing" away the cross call for threads is
815 * questionable (in theory the big win for threads is the massive sharing of
816 * address space state across processors).
817 */
/* Flush all TLB entries for `mm' on every cpu that may hold them,
 * per the coherency scheme described above.
 */
void smp_flush_tlb_mm(struct mm_struct *mm)
{
        /*
         * This code is called from two places, dup_mmap and exit_mmap. In the
         * former case, we really need a flush. In the later case, the callers
         * are single threaded exec_mmap (really need a flush), multithreaded
         * exec_mmap case (do not need to flush, since the caller gets a new
         * context via activate_mm), and all other callers of mmput() whence
         * the flush can be optimized since the associated threads are dead and
         * the mm is being torn down (__exit_mm and other mmput callers) or the
         * owning thread is dissociating itself from the mm. The
         * (atomic_read(&mm->mm_users) == 0) check ensures real work is done
         * for single thread exec and dup_mmap cases. An alternate check might
         * have been (current->mm != mm).
         *                                              Kanoj Sarcar
         */
        if (atomic_read(&mm->mm_users) == 0)
                return;

	{
		u32 ctx = CTX_HWBITS(mm->context);
		int cpu = get_cpu();

		/* Sole user: shrink cpu_vm_mask to just us and skip
		 * the cross-call entirely.
		 */
		if (atomic_read(&mm->mm_users) == 1) {
			mm->cpu_vm_mask = cpumask_of_cpu(cpu);
			goto local_flush_and_out;
		}

		smp_cross_call_masked(&xcall_flush_tlb_mm,
				      ctx, 0, 0,
				      mm->cpu_vm_mask);

	local_flush_and_out:
		__flush_tlb_mm(ctx, SECONDARY_CONTEXT);

		put_cpu();
	}
}
856
/* Flush `nr' pending virtual addresses (vaddrs[]) for `mm' from the
 * TLBs of every cpu in its cpu_vm_mask, then locally.  When we are the
 * sole user and the active mm, the mask is shrunk to just us and the
 * cross-call is skipped.
 */
void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
{
	u32 ctx = CTX_HWBITS(mm->context);
	int cpu = get_cpu();

	if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1) {
		mm->cpu_vm_mask = cpumask_of_cpu(cpu);
		goto local_flush_and_out;
	} else {
		/* This optimization is not valid.  Normally
		 * we will be holding the page_table_lock, but
		 * there is an exception which is copy_page_range()
		 * when forking.  The lock is held during the individual
		 * page table updates in the parent, but not at the
		 * top level, which is where we are invoked.
		 */
		if (0) {
			cpumask_t this_cpu_mask = cpumask_of_cpu(cpu);

			/* By virtue of running under the mm->page_table_lock,
			 * and mmu_context.h:switch_mm doing the same, the
			 * following operation is safe.
			 */
			if (cpus_equal(mm->cpu_vm_mask, this_cpu_mask))
				goto local_flush_and_out;
		}
	}

	smp_cross_call_masked(&xcall_flush_tlb_pending,
			      ctx, nr, (unsigned long) vaddrs,
			      mm->cpu_vm_mask);

local_flush_and_out:
	__flush_tlb_pending(ctx, nr, vaddrs);

	put_cpu();
}
894
895void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
896{
897 start &= PAGE_MASK;
898 end = PAGE_ALIGN(end);
899 if (start != end) {
900 smp_cross_call(&xcall_flush_tlb_kernel_range,
901 0, start, end);
902
903 __flush_tlb_kernel_range(start, end);
904 }
905}
906
/* CPU capture. */
/* #define CAPTURE_DEBUG */
extern unsigned long xcall_capture;

/* Nesting depth of smp_capture() callers on this system. */
static atomic_t smp_capture_depth = ATOMIC_INIT(0);
/* Count of cpus currently parked in the jail cell (plus the warden). */
static atomic_t smp_capture_registry = ATOMIC_INIT(0);
/* Non-zero while captured cpus must stay parked. */
static unsigned long penguins_are_doing_time;
914
/* Park every other cpu in smp_penguin_jailcell() and wait until all of
 * them have checked in.  Nestable: only the outermost caller (depth
 * transition 0 -> 1) does the actual capture.
 */
void smp_capture(void)
{
	int result = atomic_add_ret(1, &smp_capture_depth);

	if (result == 1) {
		int ncpus = num_online_cpus();

#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Sending penguins to jail...",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 1;
		membar("#StoreStore | #LoadStore");
		/* Count ourselves in the registry too. */
		atomic_inc(&smp_capture_registry);
		smp_cross_call(&xcall_capture, 0, 0, 0);
		while (atomic_read(&smp_capture_registry) != ncpus)
			membar("#LoadLoad");
#ifdef CAPTURE_DEBUG
		printk("done\n");
#endif
	}
}
937
/* Undo one level of smp_capture(); the outermost release (depth back
 * to 0) lets the parked cpus out of the jail cell.
 */
void smp_release(void)
{
	if (atomic_dec_and_test(&smp_capture_depth)) {
#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Giving pardon to "
		       "imprisoned penguins\n",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 0;
		membar("#StoreStore | #StoreLoad");
		atomic_dec(&smp_capture_registry);
	}
}
951
952/* Imprisoned penguins run with %pil == 15, but PSTATE_IE set, so they
953 * can service tlb flush xcalls...
954 */
extern void prom_world(int);
extern void save_alternate_globals(unsigned long *);
extern void restore_alternate_globals(unsigned long *);

/* Where captured cpus wait: flush register windows, switch to the PROM
 * world (saving the alternate globals first), register in the capture
 * registry, and spin until smp_release() clears
 * penguins_are_doing_time.
 */
void smp_penguin_jailcell(int irq, struct pt_regs *regs)
{
	unsigned long global_save[24];

	clear_softint(1 << irq);

	preempt_disable();

	__asm__ __volatile__("flushw");
	save_alternate_globals(global_save);
	prom_world(1);
	atomic_inc(&smp_capture_registry);
	membar("#StoreLoad | #StoreStore");
	while (penguins_are_doing_time)
		membar("#LoadLoad");
	restore_alternate_globals(global_save);
	atomic_dec(&smp_capture_registry);
	prom_world(0);

	preempt_enable();
}
979
extern unsigned long xcall_promstop;

/* Hand every other cpu back to the PROM (used when stopping the
 * machine).
 */
void smp_promstop_others(void)
{
	smp_cross_call(&xcall_promstop, 0, 0, 0);
}
986
/* Per-cpu profiling state kept in cpu_data. */
#define prof_multiplier(__cpu) cpu_data(__cpu).multiplier
#define prof_counter(__cpu) cpu_data(__cpu).counter

/* Per-cpu %tick compare interrupt: run profiling every tick, and every
 * prof_multiplier ticks do the real timer work (global timekeeping on
 * the boot cpu, process-time accounting everywhere).  The compare
 * register is re-armed with interrupts disabled, looping in case the
 * next deadline has already passed.
 */
void smp_percpu_timer_interrupt(struct pt_regs *regs)
{
	unsigned long compare, tick, pstate;
	int cpu = smp_processor_id();
	int user = user_mode(regs);

	/*
	 * Check for level 14 softint.
	 */
	{
		unsigned long tick_mask = tick_ops->softint_mask;

		/* Not our tick softint: it is a normal level-14
		 * device interrupt, hand it to the generic path.
		 */
		if (!(get_softint() & tick_mask)) {
			extern void handler_irq(int, struct pt_regs *);

			handler_irq(14, regs);
			return;
		}
		clear_softint(tick_mask);
	}

	do {
		profile_tick(CPU_PROFILING, regs);
		if (!--prof_counter(cpu)) {
			irq_enter();

			/* Only the boot cpu drives global jiffies. */
			if (cpu == boot_cpu_id) {
				kstat_this_cpu.irqs[0]++;
				timer_tick_interrupt(regs);
			}

			update_process_times(user);

			irq_exit();

			prof_counter(cpu) = prof_multiplier(cpu);
		}

		/* Guarantee that the following sequences execute
		 * uninterrupted.
		 */
		__asm__ __volatile__("rdpr %%pstate, %0\n\t"
				     "wrpr %0, %1, %%pstate"
				     : "=r" (pstate)
				     : "i" (PSTATE_IE));

		compare = tick_ops->add_compare(current_tick_offset);
		tick = tick_ops->get_tick();

		/* Restore PSTATE_IE. */
		__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
				     : /* no outputs */
				     : "r" (pstate));
	} while (time_after_eq(tick, compare));
}
1045
/* Boot-time init of this CPU's tick: profiling divider of 1 and the
 * first compare deadline, programmed with interrupts disabled so the
 * init_tick() register sequence is not interrupted.
 */
1046static void __init smp_setup_percpu_timer(void)
1047{
1048 int cpu = smp_processor_id();
1049 unsigned long pstate;
1050
1051 prof_counter(cpu) = prof_multiplier(cpu) = 1;
1052
1053 /* Guarantee that the following sequences execute
1054 * uninterrupted.
1055 */
1056 __asm__ __volatile__("rdpr %%pstate, %0\n\t"
1057 "wrpr %0, %1, %%pstate"
1058 : "=r" (pstate)
1059 : "i" (PSTATE_IE));
1060
1061 tick_ops->init_tick(current_tick_offset);
1062
1063 /* Restore PSTATE_IE. */
1064 __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
1065 : /* no outputs */
1066 : "r" (pstate));
1067}
1068
/* Record the boot CPU id and tick period, mark the boot CPU online,
 * and give it an initial profiling divider of 1.
 */
1069void __init smp_tick_init(void)
1070{
1071 boot_cpu_id = hard_smp_processor_id();
1072 current_tick_offset = timer_tick_offset;
1073
1074 cpu_set(boot_cpu_id, cpu_online_map);
1075 prof_counter(boot_cpu_id) = prof_multiplier(boot_cpu_id) = 1;
1076}
1077
1078/* /proc/profile writes can call this, don't __init it please. */
1079static DEFINE_SPINLOCK(prof_setup_lock);
1080
/* /proc/profile hook: raise the per-cpu tick rate by 'multiplier'.
 * Rejects 0 and any multiplier that would shrink the tick period
 * below 1000 cycles.  Returns 0 on success, -EINVAL on bad input.
 */
1081int setup_profiling_timer(unsigned int multiplier)
1082{
1083 unsigned long flags;
1084 int i;
1085
1086 if ((!multiplier) || (timer_tick_offset / multiplier) < 1000)
1087 return -EINVAL;
1088
/* prof_setup_lock serializes concurrent writers to the multipliers
 * and current_tick_offset.
 */
1089 spin_lock_irqsave(&prof_setup_lock, flags);
1090 for (i = 0; i < NR_CPUS; i++)
1091 prof_multiplier(i) = multiplier;
1092 current_tick_offset = (timer_tick_offset / multiplier);
1093 spin_unlock_irqrestore(&prof_setup_lock, flags);
1094
1095 return 0;
1096}
1097
/* Populate phys_cpu_present_map from the PROM CPU list, then trim it
 * (never removing the boot CPU) until at most max_cpus remain.
 * NOTE(review): the first loop filters on "mid < max_cpus", i.e. by
 * physical id rather than by count — the second loop is what actually
 * enforces the max_cpus limit.
 */
1098void __init smp_prepare_cpus(unsigned int max_cpus)
1099{
1100 int instance, mid;
1101
1102 instance = 0;
1103 while (!cpu_find_by_instance(instance, NULL, &mid)) {
1104 if (mid < max_cpus)
1105 cpu_set(mid, phys_cpu_present_map);
1106 instance++;
1107 }
1108
1109 if (num_possible_cpus() > max_cpus) {
1110 instance = 0;
1111 while (!cpu_find_by_instance(instance, NULL, &mid)) {
1112 if (mid != boot_cpu_id) {
1113 cpu_clear(mid, phys_cpu_present_map);
1114 if (num_possible_cpus() <= max_cpus)
1115 break;
1116 }
1117 instance++;
1118 }
1119 }
1120
1121 smp_store_cpu_info(boot_cpu_id);
1122}
1123
/* Sanity-check the boot CPU's hardware id against NR_CPUS (halting
 * into PROM on overflow), record it in thread_info, and mark the
 * boot CPU both online and physically present.
 */
1124void __devinit smp_prepare_boot_cpu(void)
1125{
1126 if (hard_smp_processor_id() >= NR_CPUS) {
1127 prom_printf("Serious problem, boot cpu id >= NR_CPUS\n");
1128 prom_halt();
1129 }
1130
1131 current_thread_info()->cpu = hard_smp_processor_id();
1132
1133 cpu_set(smp_processor_id(), cpu_online_map);
1134 cpu_set(smp_processor_id(), phys_cpu_present_map);
1135}
1136
/* Hotplug entry point: boot the secondary, let it past the
 * smp_commenced_mask gate, wait for it to appear online, then sync
 * its tick register with the boot CPU.  Returns 0 on success or the
 * error from smp_boot_one_cpu().
 */
1137int __devinit __cpu_up(unsigned int cpu)
1138{
1139 int ret = smp_boot_one_cpu(cpu);
1140
1141 if (!ret) {
1142 cpu_set(cpu, smp_commenced_mask);
/* Spin until the new CPU marks itself online. */
1143 while (!cpu_isset(cpu, cpu_online_map))
1144 mb();
/* NOTE(review): this branch is unreachable — the loop above only
 * exits once the CPU is in cpu_online_map, so the -ENODEV arm is
 * dead code; smp_synchronize_one_tick() always runs.
 */
1145 if (!cpu_isset(cpu, cpu_online_map)) {
1146 ret = -ENODEV;
1147 } else {
1148 smp_synchronize_one_tick(cpu);
1149 }
1150 }
1151 return ret;
1152}
1153
/* Final SMP bringup step: sum the online CPUs' udelay_val calibration
 * values and print the aggregate BogoMIPS banner.
 */
1154void __init smp_cpus_done(unsigned int max_cpus)
1155{
1156 unsigned long bogosum = 0;
1157 int i;
1158
1159 for (i = 0; i < NR_CPUS; i++) {
1160 if (cpu_online(i))
1161 bogosum += cpu_data(i).udelay_val;
1162 }
/* 500000/HZ and 5000/HZ convert udelay_val loops into X.YY BogoMIPS. */
1163 printk("Total of %ld processors activated "
1164 "(%lu.%02lu BogoMIPS).\n",
1165 (long) num_online_cpus(),
1166 bogosum/(500000/HZ),
1167 (bogosum/(5000/HZ))%100);
1168}
1169
1170/* This needn't do anything as we do not sleep the cpu
1171 * inside of the idler task, so an interrupt is not needed
1172 * to get a clean fast response.
1173 *
1174 * XXX Reverify this assumption... -DaveM
1175 *
1176 * Addendum: We do want it to do something for the signal
1177 * delivery case, we detect that by just seeing
1178 * if we are trying to send this to an idler or not.
1179 */
/* Per the comment block above: only poke the target CPU when it is
 * not idling (idle_volume == 0), since a busy CPU needs the signal
 * IPI to notice the reschedule, while the idle loop polls on its own.
 */
1180void smp_send_reschedule(int cpu)
1181{
1182 if (cpu_data(cpu).idle_volume == 0)
1183 smp_receive_signal(cpu);
1184}
1185
1186/* This is a nop because we capture all other cpus
1187 * anyways when making the PROM active.
1188 */
/* Intentionally empty — see comment above: CPUs are captured when the
 * PROM is made active, so no separate stop IPI is required.
 */
1189void smp_send_stop(void)
1190{
1191}
1192
1193unsigned long __per_cpu_base;
1194unsigned long __per_cpu_shift;
1195
1196EXPORT_SYMBOL(__per_cpu_base);
1197EXPORT_SYMBOL(__per_cpu_shift);
1198
/* Allocate and replicate the .data.percpu section for every possible
 * CPU.  The per-cpu stride is rounded up to a power of two (tracked
 * as __per_cpu_shift) so a CPU's area is base + (cpu << shift); the
 * base must satisfy the IMMU TSB register constraints checked below.
 */
1199void __init setup_per_cpu_areas(void)
1200{
1201 unsigned long goal, size, i;
1202 char *ptr;
1203 /* Created by linker magic */
1204 extern char __per_cpu_start[], __per_cpu_end[];
1205
1206 /* Copy section for each CPU (we discard the original) */
1207 goal = ALIGN(__per_cpu_end - __per_cpu_start, PAGE_SIZE);
1208
1209#ifdef CONFIG_MODULES
/* Modules may declare additional per-cpu data; reserve headroom. */
1210 if (goal < PERCPU_ENOUGH_ROOM)
1211 goal = PERCPU_ENOUGH_ROOM;
1212#endif
/* Round the per-cpu stride up to the next power of two. */
1213 __per_cpu_shift = 0;
1214 for (size = 1UL; size < goal; size <<= 1UL)
1215 __per_cpu_shift++;
1216
1217 /* Make sure the resulting __per_cpu_base value
1218 * will fit in the 43-bit sign extended IMMU
1219 * TSB register.
1220 */
1221 ptr = __alloc_bootmem(size * NR_CPUS, PAGE_SIZE,
1222 (unsigned long) __per_cpu_start);
1223
1224 __per_cpu_base = ptr - __per_cpu_start;
1225
/* Reject layouts with a sub-page stride, an unaligned base, or a base
 * that does not survive the sign-extension round-trip checked here.
 */
1226 if ((__per_cpu_shift < PAGE_SHIFT) ||
1227 (__per_cpu_base & ~PAGE_MASK) ||
1228 (__per_cpu_base != (((long) __per_cpu_base << 20) >> 20))) {
1229 prom_printf("PER_CPU: Invalid layout, "
1230 "ptr[%p] shift[%lx] base[%lx]\n",
1231 ptr, __per_cpu_shift, __per_cpu_base);
1232 prom_halt();
1233 }
1234
/* Stamp one copy of the original section into each CPU's slot. */
1235 for (i = 0; i < NR_CPUS; i++, ptr += size)
1236 memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
1237
1238 /* Finally, load in the boot cpu's base value.
1239 * We abuse the IMMU TSB register for trap handler
1240 * entry and exit loading of %g5. That is why it
1241 * has to be page aligned.
1242 */
1243 cpu_setup_percpu_base(hard_smp_processor_id());
1244}
diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c
new file mode 100644
index 000000000000..cad5a1122800
--- /dev/null
+++ b/arch/sparc64/kernel/sparc64_ksyms.c
@@ -0,0 +1,432 @@
1/* $Id: sparc64_ksyms.c,v 1.121 2002/02/09 19:49:31 davem Exp $
2 * arch/sparc64/kernel/sparc64_ksyms.c: Sparc64 specific ksyms support.
3 *
4 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
6 * Copyright (C) 1999 Jakub Jelinek (jj@ultra.linux.cz)
7 */
8
9/* Tell string.h we don't want memcpy etc. as cpp defines */
10#define EXPORT_SYMTAB_STROPS
11#define PROMLIB_INTERNAL
12
13#include <linux/config.h>
14#include <linux/module.h>
15#include <linux/types.h>
16#include <linux/string.h>
17#include <linux/sched.h>
18#include <linux/in6.h>
19#include <linux/pci.h>
20#include <linux/interrupt.h>
21#include <linux/fs_struct.h>
22#include <linux/fs.h>
23#include <linux/mm.h>
24#include <linux/socket.h>
25#include <linux/syscalls.h>
26#include <linux/percpu.h>
27#include <linux/init.h>
28#include <net/compat.h>
29
30#include <asm/oplib.h>
31#include <asm/delay.h>
32#include <asm/system.h>
33#include <asm/auxio.h>
34#include <asm/pgtable.h>
35#include <asm/io.h>
36#include <asm/irq.h>
37#include <asm/idprom.h>
38#include <asm/svr4.h>
39#include <asm/elf.h>
40#include <asm/head.h>
41#include <asm/smp.h>
42#include <asm/mostek.h>
43#include <asm/ptrace.h>
44#include <asm/user.h>
45#include <asm/uaccess.h>
46#include <asm/checksum.h>
47#include <asm/fpumacro.h>
48#include <asm/pgalloc.h>
49#include <asm/cacheflush.h>
50#ifdef CONFIG_SBUS
51#include <asm/sbus.h>
52#include <asm/dma.h>
53#endif
54#ifdef CONFIG_PCI
55#include <asm/ebus.h>
56#include <asm/isa.h>
57#endif
58#include <asm/a.out.h>
59#include <asm/ns87303.h>
60#include <asm/timer.h>
61#include <asm/cpudata.h>
62#include <asm/rwsem.h>
63
/* Local mirror of the classic poll descriptor (fd/events/revents).
 * NOTE(review): nothing in the visible portion of this file uses it;
 * presumably kept for an exported symbol's type — confirm before
 * removing.
 */
64struct poll {
65 int fd;
66 short events;
67 short revents;
68};
69
70extern void die_if_kernel(char *str, struct pt_regs *regs);
71extern pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
72void _sigpause_common (unsigned int set, struct pt_regs *);
73extern void *__bzero(void *, size_t);
74extern void *__memscan_zero(void *, size_t);
75extern void *__memscan_generic(void *, int, size_t);
76extern int __memcmp(const void *, const void *, __kernel_size_t);
77extern __kernel_size_t strlen(const char *);
78extern void linux_sparc_syscall(void);
79extern void rtrap(void);
80extern void show_regs(struct pt_regs *);
81extern void solaris_syscall(void);
82extern void syscall_trace(void);
83extern u32 sunos_sys_table[], sys_call_table32[];
84extern void tl0_solaris(void);
85extern void sys_sigsuspend(void);
86extern int svr4_getcontext(svr4_ucontext_t *uc, struct pt_regs *regs);
87extern int svr4_setcontext(svr4_ucontext_t *uc, struct pt_regs *regs);
88extern int compat_sys_ioctl(unsigned int fd, unsigned int cmd, u32 arg);
89extern int (*handle_mathemu)(struct pt_regs *, struct fpustate *);
90extern long sparc32_open(const char __user * filename, int flags, int mode);
91extern int io_remap_page_range(struct vm_area_struct *vma, unsigned long from,
92 unsigned long offset, unsigned long size, pgprot_t prot, int space);
93extern int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
94 unsigned long pfn, unsigned long size, pgprot_t prot);
95extern void (*prom_palette)(int);
96
97extern int __ashrdi3(int, int);
98
99extern void dump_thread(struct pt_regs *, struct user *);
100extern int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs);
101
102#if defined(CONFIG_SMP) && defined(CONFIG_DEBUG_SPINLOCK)
103extern void _do_spin_lock (spinlock_t *lock, char *str);
104extern void _do_spin_unlock (spinlock_t *lock);
105extern int _spin_trylock (spinlock_t *lock);
106extern void _do_read_lock(rwlock_t *rw, char *str);
107extern void _do_read_unlock(rwlock_t *rw, char *str);
108extern void _do_write_lock(rwlock_t *rw, char *str);
109extern void _do_write_unlock(rwlock_t *rw);
110extern int _do_write_trylock(rwlock_t *rw, char *str);
111#endif
112
113extern unsigned long phys_base;
114extern unsigned long pfn_base;
115
116extern unsigned int sys_call_table[];
117
118extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *);
119extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *,
120 unsigned long *);
121extern void xor_vis_4(unsigned long, unsigned long *, unsigned long *,
122 unsigned long *, unsigned long *);
123extern void xor_vis_5(unsigned long, unsigned long *, unsigned long *,
124 unsigned long *, unsigned long *, unsigned long *);
125
126/* Per-CPU information table */
127EXPORT_PER_CPU_SYMBOL(__cpu_data);
128
129/* used by various drivers */
130#ifdef CONFIG_SMP
131#ifndef CONFIG_DEBUG_SPINLOCK
132/* Out of line rw-locking implementation. */
133EXPORT_SYMBOL(__read_lock);
134EXPORT_SYMBOL(__read_unlock);
135EXPORT_SYMBOL(__write_lock);
136EXPORT_SYMBOL(__write_unlock);
137EXPORT_SYMBOL(__write_trylock);
138/* Out of line spin-locking implementation. */
139EXPORT_SYMBOL(_raw_spin_lock);
140EXPORT_SYMBOL(_raw_spin_lock_flags);
141#endif
142
143/* Hard IRQ locking */
144EXPORT_SYMBOL(synchronize_irq);
145
146#if defined(CONFIG_MCOUNT)
147extern void _mcount(void);
148EXPORT_SYMBOL(_mcount);
149#endif
150
151/* CPU online map and active count. */
152EXPORT_SYMBOL(cpu_online_map);
153EXPORT_SYMBOL(phys_cpu_present_map);
154
155/* Spinlock debugging library, optional. */
156#ifdef CONFIG_DEBUG_SPINLOCK
157EXPORT_SYMBOL(_do_spin_lock);
158EXPORT_SYMBOL(_do_spin_unlock);
159EXPORT_SYMBOL(_spin_trylock);
160EXPORT_SYMBOL(_do_read_lock);
161EXPORT_SYMBOL(_do_read_unlock);
162EXPORT_SYMBOL(_do_write_lock);
163EXPORT_SYMBOL(_do_write_unlock);
164EXPORT_SYMBOL(_do_write_trylock);
165#endif
166
167EXPORT_SYMBOL(smp_call_function);
168#endif /* CONFIG_SMP */
169
170EXPORT_SYMBOL(sparc64_get_clock_tick);
171
172/* semaphores */
173EXPORT_SYMBOL(down);
174EXPORT_SYMBOL(down_trylock);
175EXPORT_SYMBOL(down_interruptible);
176EXPORT_SYMBOL(up);
177
178/* RW semaphores */
179EXPORT_SYMBOL(__down_read);
180EXPORT_SYMBOL(__down_read_trylock);
181EXPORT_SYMBOL(__down_write);
182EXPORT_SYMBOL(__down_write_trylock);
183EXPORT_SYMBOL(__up_read);
184EXPORT_SYMBOL(__up_write);
185EXPORT_SYMBOL(__downgrade_write);
186
187/* Atomic counter implementation. */
188EXPORT_SYMBOL(atomic_add);
189EXPORT_SYMBOL(atomic_add_ret);
190EXPORT_SYMBOL(atomic_sub);
191EXPORT_SYMBOL(atomic_sub_ret);
192EXPORT_SYMBOL(atomic64_add);
193EXPORT_SYMBOL(atomic64_add_ret);
194EXPORT_SYMBOL(atomic64_sub);
195EXPORT_SYMBOL(atomic64_sub_ret);
196#ifdef CONFIG_SMP
197EXPORT_SYMBOL(_atomic_dec_and_lock);
198#endif
199
200/* Atomic bit operations. */
201EXPORT_SYMBOL(test_and_set_bit);
202EXPORT_SYMBOL(test_and_clear_bit);
203EXPORT_SYMBOL(test_and_change_bit);
204EXPORT_SYMBOL(set_bit);
205EXPORT_SYMBOL(clear_bit);
206EXPORT_SYMBOL(change_bit);
207
208/* Bit searching */
209EXPORT_SYMBOL(find_next_bit);
210EXPORT_SYMBOL(find_next_zero_bit);
211EXPORT_SYMBOL(find_next_zero_le_bit);
212
213EXPORT_SYMBOL(ivector_table);
214EXPORT_SYMBOL(enable_irq);
215EXPORT_SYMBOL(disable_irq);
216
217EXPORT_SYMBOL(__flushw_user);
218
219EXPORT_SYMBOL(tlb_type);
220EXPORT_SYMBOL(get_fb_unmapped_area);
221EXPORT_SYMBOL(flush_icache_range);
222
223EXPORT_SYMBOL(flush_dcache_page);
224#ifdef DCACHE_ALIASING_POSSIBLE
225EXPORT_SYMBOL(__flush_dcache_range);
226#endif
227
228EXPORT_SYMBOL(mostek_lock);
229EXPORT_SYMBOL(mstk48t02_regs);
230EXPORT_SYMBOL(request_fast_irq);
231#ifdef CONFIG_SUN_AUXIO
232EXPORT_SYMBOL(auxio_set_led);
233EXPORT_SYMBOL(auxio_set_lte);
234#endif
235#ifdef CONFIG_SBUS
236EXPORT_SYMBOL(sbus_root);
237EXPORT_SYMBOL(dma_chain);
238EXPORT_SYMBOL(sbus_set_sbus64);
239EXPORT_SYMBOL(sbus_alloc_consistent);
240EXPORT_SYMBOL(sbus_free_consistent);
241EXPORT_SYMBOL(sbus_map_single);
242EXPORT_SYMBOL(sbus_unmap_single);
243EXPORT_SYMBOL(sbus_map_sg);
244EXPORT_SYMBOL(sbus_unmap_sg);
245EXPORT_SYMBOL(sbus_dma_sync_single_for_cpu);
246EXPORT_SYMBOL(sbus_dma_sync_single_for_device);
247EXPORT_SYMBOL(sbus_dma_sync_sg_for_cpu);
248EXPORT_SYMBOL(sbus_dma_sync_sg_for_device);
249#endif
250EXPORT_SYMBOL(outsb);
251EXPORT_SYMBOL(outsw);
252EXPORT_SYMBOL(outsl);
253EXPORT_SYMBOL(insb);
254EXPORT_SYMBOL(insw);
255EXPORT_SYMBOL(insl);
256#ifdef CONFIG_PCI
257EXPORT_SYMBOL(ebus_chain);
258EXPORT_SYMBOL(isa_chain);
259EXPORT_SYMBOL(pci_memspace_mask);
260EXPORT_SYMBOL(pci_alloc_consistent);
261EXPORT_SYMBOL(pci_free_consistent);
262EXPORT_SYMBOL(pci_map_single);
263EXPORT_SYMBOL(pci_unmap_single);
264EXPORT_SYMBOL(pci_map_sg);
265EXPORT_SYMBOL(pci_unmap_sg);
266EXPORT_SYMBOL(pci_dma_sync_single_for_cpu);
267EXPORT_SYMBOL(pci_dma_sync_sg_for_cpu);
268EXPORT_SYMBOL(pci_dma_supported);
269#endif
270
271/* I/O device mmaping on Sparc64. */
272EXPORT_SYMBOL(io_remap_page_range);
273EXPORT_SYMBOL(io_remap_pfn_range);
274
275/* Solaris/SunOS binary compatibility */
276EXPORT_SYMBOL(_sigpause_common);
277EXPORT_SYMBOL(verify_compat_iovec);
278
279EXPORT_SYMBOL(dump_thread);
280EXPORT_SYMBOL(dump_fpu);
281EXPORT_SYMBOL(__pte_alloc_one_kernel);
282#ifndef CONFIG_SMP
283EXPORT_SYMBOL(pgt_quicklists);
284#endif
285EXPORT_SYMBOL(put_fs_struct);
286
287/* math-emu wants this */
288EXPORT_SYMBOL(die_if_kernel);
289
290/* Kernel thread creation. */
291EXPORT_SYMBOL(kernel_thread);
292
293/* prom symbols */
294EXPORT_SYMBOL(idprom);
295EXPORT_SYMBOL(prom_root_node);
296EXPORT_SYMBOL(prom_getchild);
297EXPORT_SYMBOL(prom_getsibling);
298EXPORT_SYMBOL(prom_searchsiblings);
299EXPORT_SYMBOL(prom_firstprop);
300EXPORT_SYMBOL(prom_nextprop);
301EXPORT_SYMBOL(prom_getproplen);
302EXPORT_SYMBOL(prom_getproperty);
303EXPORT_SYMBOL(prom_node_has_property);
304EXPORT_SYMBOL(prom_setprop);
305EXPORT_SYMBOL(saved_command_line);
306EXPORT_SYMBOL(prom_getname);
307EXPORT_SYMBOL(prom_finddevice);
308EXPORT_SYMBOL(prom_feval);
309EXPORT_SYMBOL(prom_getbool);
310EXPORT_SYMBOL(prom_getstring);
311EXPORT_SYMBOL(prom_getint);
312EXPORT_SYMBOL(prom_getintdefault);
313EXPORT_SYMBOL(__prom_getchild);
314EXPORT_SYMBOL(__prom_getsibling);
315
316/* sparc library symbols */
317EXPORT_SYMBOL(strlen);
318EXPORT_SYMBOL(strnlen);
319EXPORT_SYMBOL(__strlen_user);
320EXPORT_SYMBOL(__strnlen_user);
321EXPORT_SYMBOL(strcpy);
322EXPORT_SYMBOL(strncpy);
323EXPORT_SYMBOL(strcat);
324EXPORT_SYMBOL(strncat);
325EXPORT_SYMBOL(strcmp);
326EXPORT_SYMBOL(strchr);
327EXPORT_SYMBOL(strrchr);
328EXPORT_SYMBOL(strpbrk);
329EXPORT_SYMBOL(strstr);
330
331#ifdef CONFIG_SOLARIS_EMUL_MODULE
332EXPORT_SYMBOL(linux_sparc_syscall);
333EXPORT_SYMBOL(rtrap);
334EXPORT_SYMBOL(show_regs);
335EXPORT_SYMBOL(solaris_syscall);
336EXPORT_SYMBOL(syscall_trace);
337EXPORT_SYMBOL(sunos_sys_table);
338EXPORT_SYMBOL(sys_call_table32);
339EXPORT_SYMBOL(tl0_solaris);
340EXPORT_SYMBOL(sys_sigsuspend);
341EXPORT_SYMBOL(sys_getppid);
342EXPORT_SYMBOL(sys_getpid);
343EXPORT_SYMBOL(sys_geteuid);
344EXPORT_SYMBOL(sys_getuid);
345EXPORT_SYMBOL(sys_getegid);
346EXPORT_SYMBOL(sys_getgid);
347EXPORT_SYMBOL(svr4_getcontext);
348EXPORT_SYMBOL(svr4_setcontext);
349EXPORT_SYMBOL(compat_sys_ioctl);
350EXPORT_SYMBOL(sparc32_open);
351EXPORT_SYMBOL(sys_close);
352#endif
353
354/* Special internal versions of library functions. */
355EXPORT_SYMBOL(_clear_page);
356EXPORT_SYMBOL(clear_user_page);
357EXPORT_SYMBOL(copy_user_page);
358EXPORT_SYMBOL(__bzero);
359EXPORT_SYMBOL(__memscan_zero);
360EXPORT_SYMBOL(__memscan_generic);
361EXPORT_SYMBOL(__memcmp);
362EXPORT_SYMBOL(__memset);
363EXPORT_SYMBOL(memchr);
364
365EXPORT_SYMBOL(csum_partial);
366EXPORT_SYMBOL(csum_partial_copy_nocheck);
367EXPORT_SYMBOL(__csum_partial_copy_from_user);
368EXPORT_SYMBOL(__csum_partial_copy_to_user);
369EXPORT_SYMBOL(ip_fast_csum);
370
371/* Moving data to/from/in userspace. */
372EXPORT_SYMBOL(___copy_to_user);
373EXPORT_SYMBOL(___copy_from_user);
374EXPORT_SYMBOL(___copy_in_user);
375EXPORT_SYMBOL(copy_to_user_fixup);
376EXPORT_SYMBOL(copy_from_user_fixup);
377EXPORT_SYMBOL(copy_in_user_fixup);
378EXPORT_SYMBOL(__strncpy_from_user);
379EXPORT_SYMBOL(__bzero_noasi);
380
381/* Various address conversion macros use this. */
382EXPORT_SYMBOL(phys_base);
383EXPORT_SYMBOL(pfn_base);
384EXPORT_SYMBOL(sparc64_valid_addr_bitmap);
385EXPORT_SYMBOL(page_to_pfn);
386EXPORT_SYMBOL(pfn_to_page);
387
388/* No version information on this, heavily used in inline asm,
389 * and will always be 'void __ret_efault(void)'.
390 */
391EXPORT_SYMBOL(__ret_efault);
392
393/* No version information on these, as gcc produces such symbols. */
394EXPORT_SYMBOL(memcmp);
395EXPORT_SYMBOL(memcpy);
396EXPORT_SYMBOL(memset);
397EXPORT_SYMBOL(memmove);
398EXPORT_SYMBOL(strncmp);
399
400/* Delay routines. */
401EXPORT_SYMBOL(__udelay);
402EXPORT_SYMBOL(__ndelay);
403EXPORT_SYMBOL(__const_udelay);
404EXPORT_SYMBOL(__delay);
405
406void VISenter(void);
407/* RAID code needs this */
408EXPORT_SYMBOL(VISenter);
409
410/* for input/keybdev */
411EXPORT_SYMBOL(sun_do_break);
412EXPORT_SYMBOL(serial_console);
413EXPORT_SYMBOL(stop_a_enabled);
414
415#ifdef CONFIG_DEBUG_BUGVERBOSE
416EXPORT_SYMBOL(do_BUG);
417#endif
418
419/* for ns8703 */
420EXPORT_SYMBOL(ns87303_lock);
421
422/* for solaris compat module */
423EXPORT_SYMBOL_GPL(sys_call_table);
424
425EXPORT_SYMBOL(tick_ops);
426
427EXPORT_SYMBOL(xor_vis_2);
428EXPORT_SYMBOL(xor_vis_3);
429EXPORT_SYMBOL(xor_vis_4);
430EXPORT_SYMBOL(xor_vis_5);
431
432EXPORT_SYMBOL(prom_palette);
diff --git a/arch/sparc64/kernel/starfire.c b/arch/sparc64/kernel/starfire.c
new file mode 100644
index 000000000000..ae859d40771e
--- /dev/null
+++ b/arch/sparc64/kernel/starfire.c
@@ -0,0 +1,123 @@
1/* $Id: starfire.c,v 1.10 2001/04/14 21:13:45 davem Exp $
2 * starfire.c: Starfire/E10000 support.
3 *
4 * Copyright (C) 1998 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2000 Anton Blanchard (anton@samba.org)
6 */
7
8#include <linux/kernel.h>
9#include <linux/slab.h>
10
11#include <asm/page.h>
12#include <asm/oplib.h>
13#include <asm/smp.h>
14#include <asm/upa.h>
15#include <asm/starfire.h>
16
17/*
18 * A few places around the kernel check this to see if
19 * they need to call us to do things in a Starfire specific
20 * way.
21 */
22int this_is_starfire = 0;
23
/* Probe the PROM device tree for "/ssp-serial"; its presence (node
 * neither 0 nor -1) identifies a Starfire/E10000 and sets the global
 * this_is_starfire flag other code checks.
 */
24void check_if_starfire(void)
25{
26 int ssnode = prom_finddevice("/ssp-serial");
27 if (ssnode != 0 && ssnode != -1)
28 this_is_starfire = 1;
29}
30
/* Starfire-specific per-CPU setup hook; intentionally a no-op today. */
31void starfire_cpu_setup(void)
32{
33 /* Currently, nothing to do. */
34}
35
/* Read this CPU's hardware id from a fixed UPA register address.
 * NOTE(review): 0x1fff40000d0 is a magic Starfire MMIO address with
 * no symbolic name in this file.
 */
36int starfire_hard_smp_processor_id(void)
37{
38 return upa_readl(0x1fff40000d0UL);
39}
40
41/*
42 * Each Starfire board has 32 registers which perform translation
43 * and delivery of traditional interrupt packets into the extended
44 * Starfire hardware format. Essentially UPAID's now have 2 more
45 * bits than in all previous Sun5 systems.
46 */
/* Per-board interrupt-translation state (see comment block above):
 * 32 translation register addresses plus the imap currently bound to
 * each slot, linked into the global sflist by starfire_hookup().
 */
47struct starfire_irqinfo {
48 unsigned long imap_slots[32]; /* imap bound to each treg slot, 0 = free */
49 unsigned long tregs[32]; /* MMIO addresses of the 32 translation regs */
50 struct starfire_irqinfo *next; /* next board on sflist */
51 int upaid, hwmid;
52};
53
54static struct starfire_irqinfo *sflist = NULL;
55
56/* Beam me up Scott(McNeil)y... */
/* Register a board's interrupt-translation registers.  Computes the
 * extended hardware mid from the UPA id bit-shuffle, derives each
 * slot's register address from the fixed 0x100fc000000 base, marks
 * slots already programmed by firmware as unusable, and links the
 * record onto sflist.  Halts into PROM on allocation failure.
 */
57void *starfire_hookup(int upaid)
58{
59 struct starfire_irqinfo *p;
60 unsigned long treg_base, hwmid, i;
61
62 p = kmalloc(sizeof(*p), GFP_KERNEL);
63 if (!p) {
64 prom_printf("starfire_hookup: No memory, this is insane.\n");
65 prom_halt();
66 }
67 treg_base = 0x100fc000000UL;
/* Shuffle UPA id bits into the Starfire extended hwmid encoding
 * (same transform starfire_translate() applies to target upaids).
 */
68 hwmid = ((upaid & 0x3c) << 1) |
69 ((upaid & 0x40) >> 4) |
70 (upaid & 0x3);
71 p->hwmid = hwmid;
72 treg_base += (hwmid << 33UL);
73 treg_base += 0x200UL;
74 for (i = 0; i < 32; i++) {
75 p->imap_slots[i] = 0UL;
76 p->tregs[i] = treg_base + (i * 0x10UL);
77 /* Lets play it safe and not overwrite existing mappings */
/* 0xdeadbeaf (sic) marks the slot permanently taken. */
78 if (upa_readl(p->tregs[i]) != 0)
79 p->imap_slots[i] = 0xdeadbeaf;
80 }
81 p->upaid = upaid;
82 p->next = sflist;
83 sflist = p;
84
85 return (void *) p;
86}
87
/* Bind an interrupt map register to a translation slot on its board
 * and program the slot with the (bit-shuffled) target upaid.  Returns
 * the slot index.  Halts/panics if the board is unknown or all 32
 * slots are exhausted.
 */
88unsigned int starfire_translate(unsigned long imap,
89 unsigned int upaid)
90{
91 struct starfire_irqinfo *p;
92 unsigned int bus_hwmid;
93 unsigned int i;
94
/* The board's hwmid is encoded in bits 33..39 of the imap address. */
95 bus_hwmid = (((unsigned long)imap) >> 33) & 0x7f;
96 for (p = sflist; p != NULL; p = p->next)
97 if (p->hwmid == bus_hwmid)
98 break;
99 if (p == NULL) {
100 prom_printf("XFIRE: Cannot find irqinfo for imap %016lx\n",
101 ((unsigned long)imap));
102 prom_halt();
103 }
/* Reuse the slot already holding this imap, else take the first free
 * (zero) slot.
 */
104 for (i = 0; i < 32; i++) {
105 if (p->imap_slots[i] == imap ||
106 p->imap_slots[i] == 0UL)
107 break;
108 }
109 if (i == 32) {
110 printk("starfire_translate: Are you kidding me?\n");
111 panic("Lucy in the sky....");
112 }
113 p->imap_slots[i] = imap;
114
115 /* map to real upaid */
116 upaid = (((upaid & 0x3c) << 1) |
117 ((upaid & 0x40) >> 4) |
118 (upaid & 0x3));
119
120 upa_writel(upaid, p->tregs[i]);
121
122 return i;
123}
diff --git a/arch/sparc64/kernel/sunos_ioctl32.c b/arch/sparc64/kernel/sunos_ioctl32.c
new file mode 100644
index 000000000000..87c1aeb02220
--- /dev/null
+++ b/arch/sparc64/kernel/sunos_ioctl32.c
@@ -0,0 +1,275 @@
1/* $Id: sunos_ioctl32.c,v 1.11 2000/07/30 23:12:24 davem Exp $
2 * sunos_ioctl32.c: SunOS ioctl compatibility on sparc64.
3 *
4 * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
5 * Copyright (C) 1995, 1996, 1997 David S. Miller (davem@caip.rutgers.edu)
6 */
7
8#include <asm/uaccess.h>
9
10#include <linux/sched.h>
11#include <linux/errno.h>
12#include <linux/string.h>
13#include <linux/termios.h>
14#include <linux/ioctl.h>
15#include <linux/route.h>
16#include <linux/sockios.h>
17#include <linux/if.h>
18#include <linux/netdevice.h>
19#include <linux/if_arp.h>
20#include <linux/fs.h>
21#include <linux/file.h>
22#include <linux/mm.h>
23#include <linux/smp.h>
24#include <linux/smp_lock.h>
25#include <linux/syscalls.h>
26#include <linux/compat.h>
27#include <asm/kbio.h>
28
29#define SUNOS_NR_OPEN 256
30
/* 32-bit userland layout of struct rtentry: pointers shrunk to u32
 * so SunOS/compat routing ioctls (SIOCADDRT/SIOCDELRT) can be decoded.
 */
31struct rtentry32 {
32 u32 rt_pad1;
33 struct sockaddr rt_dst; /* target address */
34 struct sockaddr rt_gateway; /* gateway addr (RTF_GATEWAY) */
35 struct sockaddr rt_genmask; /* target network mask (IP) */
36 unsigned short rt_flags;
37 short rt_pad2;
38 u32 rt_pad3;
39 unsigned char rt_tos;
40 unsigned char rt_class;
41 short rt_pad4;
42 short rt_metric; /* +1 for binary compatibility! */
43 /* char * */ u32 rt_dev; /* forcing the device at add */
44 u32 rt_mtu; /* per route MTU/Window */
45 u32 rt_window; /* Window clamping */
46 unsigned short rt_irtt; /* Initial RTT */
47
48};
49
/* 32-bit layout of struct ifmap (mem addresses narrowed to u32),
 * embedded in struct ifreq32 below.
 */
50struct ifmap32 {
51 u32 mem_start;
52 u32 mem_end;
53 unsigned short base_addr;
54 unsigned char irq;
55 unsigned char dma;
56 unsigned char port;
57};
58
/* 32-bit layout of struct ifreq for the SIOC*IF* compat ioctls;
 * pointer members use compat_caddr_t and ifmap32.
 */
59struct ifreq32 {
60#define IFHWADDRLEN 6
61#define IFNAMSIZ 16
62 union {
63 char ifrn_name[IFNAMSIZ]; /* if name, e.g. "en0" */
64 } ifr_ifrn;
65 union {
66 struct sockaddr ifru_addr;
67 struct sockaddr ifru_dstaddr;
68 struct sockaddr ifru_broadaddr;
69 struct sockaddr ifru_netmask;
70 struct sockaddr ifru_hwaddr;
71 short ifru_flags;
72 int ifru_ivalue;
73 int ifru_mtu;
74 struct ifmap32 ifru_map;
75 char ifru_slave[IFNAMSIZ]; /* Just fits the size */
76 compat_caddr_t ifru_data;
77 } ifr_ifru;
78};
79
/* 32-bit layout of struct ifconf (SIOCGIFCONF), buffer pointer as
 * compat_caddr_t.
 */
80struct ifconf32 {
81 int ifc_len; /* size of buffer */
82 compat_caddr_t ifcbuf;
83};
84
85extern asmlinkage int compat_sys_ioctl(unsigned int, unsigned int, u32);
86
/* SunOS-compat ioctl entry point: validates the fd, special-cases a
 * few tty commands, remaps SunOS networking ioctl numbers (built from
 * SunOS struct layouts via _IOW/_IOWR) onto the Linux SIOC* codes via
 * compat_sys_ioctl(), and finally falls through to compat_sys_ioctl()
 * for everything unrecognized, mapping -EINVAL to -EOPNOTSUPP as
 * SunOS binaries expect.
 */
87asmlinkage int sunos_ioctl (int fd, u32 cmd, u32 arg)
88{
89 int ret = -EBADF;
90
91 if(fd >= SUNOS_NR_OPEN)
92 goto out;
93 if(!fcheck(fd))
94 goto out;
95
/* SunOS TIOCSETD with discipline 2 means "old NTTY"; emulate by
 * forcing N_TTY through a kernel-space sys_ioctl under KERNEL_DS.
 */
96 if(cmd == TIOCSETD) {
97 mm_segment_t old_fs = get_fs();
98 int __user *p;
99 int ntty = N_TTY;
100 int tmp;
101
102 p = (int __user *) (unsigned long) arg;
103 ret = -EFAULT;
104 if(get_user(tmp, p))
105 goto out;
106 if(tmp == 2) {
107 set_fs(KERNEL_DS);
108 ret = sys_ioctl(fd, cmd, (unsigned long) &ntty);
109 set_fs(old_fs);
110 ret = (ret == -EINVAL ? -EOPNOTSUPP : ret);
111 goto out;
112 }
113 }
/* SunOS TIOCNOTTY detaches from the controlling tty; setsid() is the
 * Linux equivalent used here.
 */
114 if(cmd == TIOCNOTTY) {
115 ret = sys_setsid();
116 goto out;
117 }
/* Routing and interface ioctls: SunOS numbers -> Linux SIOC* codes. */
118 switch(cmd) {
119 case _IOW('r', 10, struct rtentry32):
120 ret = compat_sys_ioctl(fd, SIOCADDRT, arg);
121 goto out;
122 case _IOW('r', 11, struct rtentry32):
123 ret = compat_sys_ioctl(fd, SIOCDELRT, arg);
124 goto out;
125
126 case _IOW('i', 12, struct ifreq32):
127 ret = compat_sys_ioctl(fd, SIOCSIFADDR, arg);
128 goto out;
129 case _IOWR('i', 13, struct ifreq32):
130 ret = compat_sys_ioctl(fd, SIOCGIFADDR, arg);
131 goto out;
132 case _IOW('i', 14, struct ifreq32):
133 ret = compat_sys_ioctl(fd, SIOCSIFDSTADDR, arg);
134 goto out;
135 case _IOWR('i', 15, struct ifreq32):
136 ret = compat_sys_ioctl(fd, SIOCGIFDSTADDR, arg);
137 goto out;
138 case _IOW('i', 16, struct ifreq32):
139 ret = compat_sys_ioctl(fd, SIOCSIFFLAGS, arg);
140 goto out;
141 case _IOWR('i', 17, struct ifreq32):
142 ret = compat_sys_ioctl(fd, SIOCGIFFLAGS, arg);
143 goto out;
144 case _IOW('i', 18, struct ifreq32):
145 ret = compat_sys_ioctl(fd, SIOCSIFMEM, arg);
146 goto out;
147 case _IOWR('i', 19, struct ifreq32):
148 ret = compat_sys_ioctl(fd, SIOCGIFMEM, arg);
149 goto out;
150
151 case _IOWR('i', 20, struct ifconf32):
152 ret = compat_sys_ioctl(fd, SIOCGIFCONF, arg);
153 goto out;
154
/* MTU ioctls carry no pointers beyond the int, so plain sys_ioctl
 * suffices (note the native struct ifreq in the case expression).
 */
155 case _IOW('i', 21, struct ifreq): /* SIOCSIFMTU */
156 ret = sys_ioctl(fd, SIOCSIFMTU, arg);
157 goto out;
158 case _IOWR('i', 22, struct ifreq): /* SIOCGIFMTU */
159 ret = sys_ioctl(fd, SIOCGIFMTU, arg);
160 goto out;
161
162 case _IOWR('i', 23, struct ifreq32):
163 ret = compat_sys_ioctl(fd, SIOCGIFBRDADDR, arg);
164 goto out;
165 case _IOW('i', 24, struct ifreq32):
166 ret = compat_sys_ioctl(fd, SIOCSIFBRDADDR, arg);
167 goto out;
168 case _IOWR('i', 25, struct ifreq32):
169 ret = compat_sys_ioctl(fd, SIOCGIFNETMASK, arg);
170 goto out;
171 case _IOW('i', 26, struct ifreq32):
172 ret = compat_sys_ioctl(fd, SIOCSIFNETMASK, arg);
173 goto out;
174 case _IOWR('i', 27, struct ifreq32):
175 ret = compat_sys_ioctl(fd, SIOCGIFMETRIC, arg);
176 goto out;
177 case _IOW('i', 28, struct ifreq32):
178 ret = compat_sys_ioctl(fd, SIOCSIFMETRIC, arg);
179 goto out;
180
181 case _IOW('i', 30, struct arpreq):
182 ret = compat_sys_ioctl(fd, SIOCSARP, arg);
183 goto out;
184 case _IOWR('i', 31, struct arpreq):
185 ret = compat_sys_ioctl(fd, SIOCGARP, arg);
186 goto out;
187 case _IOW('i', 32, struct arpreq):
188 ret = compat_sys_ioctl(fd, SIOCDARP, arg);
189 goto out;
190
191 case _IOW('i', 40, struct ifreq32): /* SIOCUPPER */
192 case _IOW('i', 41, struct ifreq32): /* SIOCLOWER */
193 case _IOW('i', 44, struct ifreq32): /* SIOCSETSYNC */
194 case _IOW('i', 45, struct ifreq32): /* SIOCGETSYNC */
195 case _IOW('i', 46, struct ifreq32): /* SIOCSSDSTATS */
196 case _IOW('i', 47, struct ifreq32): /* SIOCSSESTATS */
197 case _IOW('i', 48, struct ifreq32): /* SIOCSPROMISC */
198 ret = -EOPNOTSUPP;
199 goto out;
200
201 case _IOW('i', 49, struct ifreq32):
202 ret = compat_sys_ioctl(fd, SIOCADDMULTI, arg);
203 goto out;
204 case _IOW('i', 50, struct ifreq32):
205 ret = compat_sys_ioctl(fd, SIOCDELMULTI, arg);
206 goto out;
207
208 /* FDDI interface ioctls, unsupported. */
209
210 case _IOW('i', 51, struct ifreq32): /* SIOCFDRESET */
211 case _IOW('i', 52, struct ifreq32): /* SIOCFDSLEEP */
212 case _IOW('i', 53, struct ifreq32): /* SIOCSTRTFMWAR */
213 case _IOW('i', 54, struct ifreq32): /* SIOCLDNSTRTFW */
214 case _IOW('i', 55, struct ifreq32): /* SIOCGETFDSTAT */
215 case _IOW('i', 56, struct ifreq32): /* SIOCFDNMIINT */
216 case _IOW('i', 57, struct ifreq32): /* SIOCFDEXUSER */
217 case _IOW('i', 58, struct ifreq32): /* SIOCFDGNETMAP */
218 case _IOW('i', 59, struct ifreq32): /* SIOCFDGIOCTL */
219 printk("FDDI ioctl, returning EOPNOTSUPP\n");
220 ret = -EOPNOTSUPP;
221 goto out;
222
223 case _IOW('t', 125, int):
224 /* More stupid tty sunos ioctls, just
225 * say it worked.
226 */
227 ret = 0;
228 goto out;
229
230 /* Non posix grp */
/* SunOS 'set pgrp' semantics: on failure the value is written back
 * as -1 by some drivers, which this detects and turns into -EIO.
 */
231 case _IOW('t', 118, int): {
232 int oldval, newval, __user *ptr;
233
234 cmd = TIOCSPGRP;
235 ptr = (int __user *) (unsigned long) arg;
236 ret = -EFAULT;
237 if(get_user(oldval, ptr))
238 goto out;
239 ret = compat_sys_ioctl(fd, cmd, arg);
240 __get_user(newval, ptr);
241 if(newval == -1) {
242 __put_user(oldval, ptr);
243 ret = -EIO;
244 }
245 if(ret == -ENOTTY)
246 ret = -EIO;
247 goto out;
248 }
249
/* Same dance for SunOS 'get pgrp' mapped onto TIOCGPGRP. */
250 case _IOR('t', 119, int): {
251 int oldval, newval, __user *ptr;
252
253 cmd = TIOCGPGRP;
254 ptr = (int __user *) (unsigned long) arg;
255 ret = -EFAULT;
256 if(get_user(oldval, ptr))
257 goto out;
258 ret = compat_sys_ioctl(fd, cmd, arg);
259 __get_user(newval, ptr);
260 if(newval == -1) {
261 __put_user(oldval, ptr);
262 ret = -EIO;
263 }
264 if(ret == -ENOTTY)
265 ret = -EIO;
266 goto out;
267 }
268 };
269
/* Anything unrecognized goes straight to the compat layer. */
270 ret = compat_sys_ioctl(fd, cmd, arg);
271 /* so stupid... */
272 ret = (ret == -EINVAL ? -EOPNOTSUPP : ret);
273out:
274 return ret;
275}
diff --git a/arch/sparc64/kernel/sys32.S b/arch/sparc64/kernel/sys32.S
new file mode 100644
index 000000000000..5a95e98c5317
--- /dev/null
+++ b/arch/sparc64/kernel/sys32.S
@@ -0,0 +1,327 @@
1/* $Id: sys32.S,v 1.12 2000/03/24 04:17:37 davem Exp $
2 * sys32.S: I-cache tricks for 32-bit compatibility layer simple
3 * conversions.
4 *
5 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
6 * Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz)
7 */
8
9#include <linux/config.h>
10#include <asm/errno.h>
11
12/* NOTE: call as jump breaks return stack, we have to avoid that */
13
14 .text
15
/* SIGNn(STUB, SYSCALL, REG1..REGn): emit a 32-byte aligned stub named
 * STUB that sign-extends the listed 32-bit argument registers to 64
 * bits ("sra REG, 0, REG") and then tail-calls the native 64-bit
 * handler SYSCALL through %g1.  Note the final sra sits in the jmpl
 * delay slot, so it still executes before SYSCALL runs.
 */
16#define SIGN1(STUB,SYSCALL,REG1) \
17 .align 32; \
18 .globl STUB; \
19STUB: sethi %hi(SYSCALL), %g1; \
20 jmpl %g1 + %lo(SYSCALL), %g0; \
21 sra REG1, 0, REG1
22
23#define SIGN2(STUB,SYSCALL,REG1,REG2) \
24 .align 32; \
25 .globl STUB; \
26STUB: sethi %hi(SYSCALL), %g1; \
27 sra REG1, 0, REG1; \
28 jmpl %g1 + %lo(SYSCALL), %g0; \
29 sra REG2, 0, REG2
30
31#define SIGN3(STUB,SYSCALL,REG1,REG2,REG3) \
32 .align 32; \
33 .globl STUB; \
34STUB: sra REG1, 0, REG1; \
35 sethi %hi(SYSCALL), %g1; \
36 sra REG2, 0, REG2; \
37 jmpl %g1 + %lo(SYSCALL), %g0; \
38 sra REG3, 0, REG3
39
40#define SIGN4(STUB,SYSCALL,REG1,REG2,REG3,REG4) \
41 .align 32; \
42 .globl STUB; \
43STUB: sra REG1, 0, REG1; \
44 sethi %hi(SYSCALL), %g1; \
45 sra REG2, 0, REG2; \
46 sra REG3, 0, REG3; \
47 jmpl %g1 + %lo(SYSCALL), %g0; \
48 sra REG4, 0, REG4
49
/* Compat entry points: each sys32_* stub sign-extends the argument
 * registers named below and jumps to the corresponding native or
 * compat_sys_* handler.  Only the registers that carry signed 32-bit
 * values are listed; the rest pass through unchanged.
 */
50SIGN1(sys32_exit, sparc_exit, %o0)
51SIGN1(sys32_exit_group, sys_exit_group, %o0)
52SIGN1(sys32_wait4, compat_sys_wait4, %o2)
53SIGN1(sys32_creat, sys_creat, %o1)
54SIGN1(sys32_mknod, sys_mknod, %o1)
55SIGN1(sys32_perfctr, sys_perfctr, %o0)
56SIGN1(sys32_umount, sys_umount, %o1)
57SIGN1(sys32_signal, sys_signal, %o0)
58SIGN1(sys32_access, sys_access, %o1)
59SIGN1(sys32_msync, sys_msync, %o2)
60SIGN2(sys32_reboot, sys_reboot, %o0, %o1)
61SIGN1(sys32_setitimer, compat_sys_setitimer, %o0)
62SIGN1(sys32_getitimer, compat_sys_getitimer, %o0)
63SIGN1(sys32_sethostname, sys_sethostname, %o1)
64SIGN1(sys32_swapon, sys_swapon, %o1)
65SIGN1(sys32_sigaction, compat_sys_sigaction, %o0)
66SIGN1(sys32_rt_sigaction, compat_sys_rt_sigaction, %o0)
67SIGN1(sys32_sigprocmask, compat_sys_sigprocmask, %o0)
68SIGN1(sys32_rt_sigprocmask, compat_sys_rt_sigprocmask, %o0)
69SIGN2(sys32_rt_sigqueueinfo, compat_sys_rt_sigqueueinfo, %o0, %o1)
70SIGN1(sys32_getrusage, compat_sys_getrusage, %o0)
71SIGN1(sys32_setxattr, sys_setxattr, %o4)
72SIGN1(sys32_lsetxattr, sys_lsetxattr, %o4)
73SIGN1(sys32_fsetxattr, sys_fsetxattr, %o4)
74SIGN1(sys32_fgetxattr, sys_fgetxattr, %o0)
75SIGN1(sys32_flistxattr, sys_flistxattr, %o0)
76SIGN1(sys32_fremovexattr, sys_fremovexattr, %o0)
77SIGN2(sys32_tkill, sys_tkill, %o0, %o1)
78SIGN1(sys32_epoll_create, sys_epoll_create, %o0)
79SIGN3(sys32_epoll_ctl, sys_epoll_ctl, %o0, %o1, %o2)
80SIGN3(sys32_epoll_wait, sys_epoll_wait, %o0, %o2, %o3)
81SIGN1(sys32_readahead, compat_sys_readahead, %o0)
82SIGN2(sys32_fadvise64, compat_sys_fadvise64, %o0, %o4)
83SIGN2(sys32_fadvise64_64, compat_sys_fadvise64_64, %o0, %o5)
84SIGN2(sys32_bdflush, sys_bdflush, %o0, %o1)
85SIGN1(sys32_mlockall, sys_mlockall, %o0)
86SIGN1(sys32_nfsservctl, compat_sys_nfsservctl, %o0)
87SIGN1(sys32_clock_settime, compat_sys_clock_settime, %o1)
88SIGN1(sys32_clock_nanosleep, compat_sys_clock_nanosleep, %o1)
89SIGN1(sys32_timer_settime, compat_sys_timer_settime, %o1)
90SIGN1(sys32_io_submit, compat_sys_io_submit, %o1)
91SIGN1(sys32_mq_open, compat_sys_mq_open, %o1)
92SIGN1(sys32_select, compat_sys_select, %o0)
93SIGN1(sys32_mkdir, sys_mkdir, %o1)
94SIGN3(sys32_futex, compat_sys_futex, %o1, %o2, %o5)
95SIGN1(sys32_sysfs, compat_sys_sysfs, %o0)
96SIGN3(sys32_ipc, compat_sys_ipc, %o1, %o2, %o3)
97SIGN2(sys32_sendfile, compat_sys_sendfile, %o0, %o1)
98SIGN2(sys32_sendfile64, compat_sys_sendfile64, %o0, %o1)
99SIGN1(sys32_prctl, sys_prctl, %o0)
100SIGN1(sys32_sched_rr_get_interval, compat_sys_sched_rr_get_interval, %o0)
101SIGN2(sys32_waitpid, sys_waitpid, %o0, %o2)
102SIGN1(sys32_getgroups, sys_getgroups, %o0)
103SIGN1(sys32_getpgid, sys_getpgid, %o0)
104SIGN2(sys32_getpriority, sys_getpriority, %o0, %o1)
105SIGN1(sys32_getsid, sys_getsid, %o0)
106SIGN2(sys32_kill, sys_kill, %o0, %o1)
107SIGN1(sys32_nice, sys_nice, %o0)
108SIGN1(sys32_lseek, sys_lseek, %o1)
109SIGN2(sys32_open, sparc32_open, %o1, %o2)
110SIGN1(sys32_readlink, sys_readlink, %o2)
111SIGN1(sys32_sched_get_priority_max, sys_sched_get_priority_max, %o0)
112SIGN1(sys32_sched_get_priority_min, sys_sched_get_priority_min, %o0)
113SIGN1(sys32_sched_getparam, sys_sched_getparam, %o0)
114SIGN1(sys32_sched_getscheduler, sys_sched_getscheduler, %o0)
115SIGN1(sys32_sched_setparam, sys_sched_setparam, %o0)
116SIGN2(sys32_sched_setscheduler, sys_sched_setscheduler, %o0, %o1)
117SIGN1(sys32_getdomainname, sys_getdomainname, %o1)
118SIGN1(sys32_setdomainname, sys_setdomainname, %o1)
119SIGN1(sys32_setgroups, sys_setgroups, %o0)
120SIGN2(sys32_setpgid, sys_setpgid, %o0, %o1)
121SIGN3(sys32_setpriority, sys_setpriority, %o0, %o1, %o2)
122SIGN1(sys32_ssetmask, sys_ssetmask, %o0)
123SIGN2(sys32_syslog, sys_syslog, %o0, %o2)
124SIGN1(sys32_umask, sys_umask, %o0)
125SIGN3(sys32_tgkill, sys_tgkill, %o0, %o1, %o2)
126SIGN1(sys32_sendto, sys_sendto, %o0)
127SIGN1(sys32_recvfrom, sys_recvfrom, %o0)
128SIGN3(sys32_socket, sys_socket, %o0, %o1, %o2)
129SIGN2(sys32_connect, sys_connect, %o0, %o2)
130SIGN2(sys32_bind, sys_bind, %o0, %o2)
131SIGN2(sys32_listen, sys_listen, %o0, %o1)
132SIGN1(sys32_recvmsg, compat_sys_recvmsg, %o0)
133SIGN1(sys32_sendmsg, compat_sys_sendmsg, %o0)
134SIGN2(sys32_shutdown, sys_shutdown, %o0, %o1)
135SIGN3(sys32_socketpair, sys_socketpair, %o0, %o1, %o2)
136SIGN1(sys32_getpeername, sys_getpeername, %o0)
137SIGN1(sys32_getsockname, sys_getsockname, %o0)
138
/* sys32_mmap2: 32-bit mmap2 passes the file offset in 4096-byte
 * pages; convert it to a byte offset (<< 12, in the delay slot) and
 * tail-call the native sys_mmap.
 */
139 .globl sys32_mmap2
140sys32_mmap2:
141 sethi %hi(sys_mmap), %g1
142 jmpl %g1 + %lo(sys_mmap), %g0
143 sllx %o5, 12, %o5

 /* sys32_socketcall: demultiplex the 32-bit socketcall(2).  Valid
  * call numbers are 1..17; anything else returns -EINVAL.  The call
  * number indexes a table of 32-byte (1 << 5) entries, see
  * __socketcall_table_begin below.
  */
145 .align 32
146 .globl sys32_socketcall
147sys32_socketcall: /* %o0=call, %o1=args */
148 cmp %o0, 1
149 bl,pn %xcc, do_einval
150 cmp %o0, 17
151 bg,pn %xcc, do_einval
152 sub %o0, 1, %o0
153 sllx %o0, 5, %o0
154 sethi %hi(__socketcall_table_begin), %g2
155 or %g2, %lo(__socketcall_table_begin), %g2
156 jmpl %g2 + %o0, %g0
157 nop
158
/* 32-bit socketcall dispatch table.  Every entry is padded with nops
 * to exactly 32 bytes -- the stride assumed by sys32_socketcall
 * above, so do NOT add or remove instructions without repadding.
 * Each entry loads the call's arguments from the 32-bit user argument
 * block at %o1 via the %asi user window (ldswa = sign-extend signed
 * int, lduwa = zero-extend unsigned/pointer word) and tail-calls the
 * native or compat handler, with the last load in the jmpl delay
 * slot.  The __ex_table record at the bottom maps any fault taken
 * inside the table to do_efault, yielding -EFAULT.
 */
159 /* Each entry is exactly 32 bytes. */
160 .align 32
161__socketcall_table_begin:
162do_sys_socket: /* sys_socket(int, int, int) */
163 ldswa [%o1 + 0x0] %asi, %o0
164 sethi %hi(sys_socket), %g1
165 ldswa [%o1 + 0x8] %asi, %o2
166 jmpl %g1 + %lo(sys_socket), %g0
167 ldswa [%o1 + 0x4] %asi, %o1
168 nop
169 nop
170 nop
171do_sys_bind: /* sys_bind(int fd, struct sockaddr *, int) */
172 ldswa [%o1 + 0x0] %asi, %o0
173 sethi %hi(sys_bind), %g1
174 ldswa [%o1 + 0x8] %asi, %o2
175 jmpl %g1 + %lo(sys_bind), %g0
176 lduwa [%o1 + 0x4] %asi, %o1
177 nop
178 nop
179 nop
180do_sys_connect: /* sys_connect(int, struct sockaddr *, int) */
181 ldswa [%o1 + 0x0] %asi, %o0
182 sethi %hi(sys_connect), %g1
183 ldswa [%o1 + 0x8] %asi, %o2
184 jmpl %g1 + %lo(sys_connect), %g0
185 lduwa [%o1 + 0x4] %asi, %o1
186 nop
187 nop
188 nop
189do_sys_listen: /* sys_listen(int, int) */
190 ldswa [%o1 + 0x0] %asi, %o0
191 sethi %hi(sys_listen), %g1
192 jmpl %g1 + %lo(sys_listen), %g0
193 ldswa [%o1 + 0x4] %asi, %o1
194 nop
195 nop
196 nop
197 nop
198do_sys_accept: /* sys_accept(int, struct sockaddr *, int *) */
199 ldswa [%o1 + 0x0] %asi, %o0
200 sethi %hi(sys_accept), %g1
201 lduwa [%o1 + 0x8] %asi, %o2
202 jmpl %g1 + %lo(sys_accept), %g0
203 lduwa [%o1 + 0x4] %asi, %o1
204 nop
205 nop
206 nop
207do_sys_getsockname: /* sys_getsockname(int, struct sockaddr *, int *) */
208 ldswa [%o1 + 0x0] %asi, %o0
209 sethi %hi(sys_getsockname), %g1
210 lduwa [%o1 + 0x8] %asi, %o2
211 jmpl %g1 + %lo(sys_getsockname), %g0
212 lduwa [%o1 + 0x4] %asi, %o1
213 nop
214 nop
215 nop
216do_sys_getpeername: /* sys_getpeername(int, struct sockaddr *, int *) */
217 ldswa [%o1 + 0x0] %asi, %o0
218 sethi %hi(sys_getpeername), %g1
219 lduwa [%o1 + 0x8] %asi, %o2
220 jmpl %g1 + %lo(sys_getpeername), %g0
221 lduwa [%o1 + 0x4] %asi, %o1
222 nop
223 nop
224 nop
225do_sys_socketpair: /* sys_socketpair(int, int, int, int *) */
226 ldswa [%o1 + 0x0] %asi, %o0
227 sethi %hi(sys_socketpair), %g1
228 ldswa [%o1 + 0x8] %asi, %o2
229 lduwa [%o1 + 0xc] %asi, %o3
230 jmpl %g1 + %lo(sys_socketpair), %g0
231 ldswa [%o1 + 0x4] %asi, %o1
232 nop
233 nop
234do_sys_send: /* sys_send(int, void *, size_t, unsigned int) */
235 ldswa [%o1 + 0x0] %asi, %o0
236 sethi %hi(sys_send), %g1
237 lduwa [%o1 + 0x8] %asi, %o2
238 lduwa [%o1 + 0xc] %asi, %o3
239 jmpl %g1 + %lo(sys_send), %g0
240 lduwa [%o1 + 0x4] %asi, %o1
241 nop
242 nop
243do_sys_recv: /* sys_recv(int, void *, size_t, unsigned int) */
244 ldswa [%o1 + 0x0] %asi, %o0
245 sethi %hi(sys_recv), %g1
246 lduwa [%o1 + 0x8] %asi, %o2
247 lduwa [%o1 + 0xc] %asi, %o3
248 jmpl %g1 + %lo(sys_recv), %g0
249 lduwa [%o1 + 0x4] %asi, %o1
250 nop
251 nop
252do_sys_sendto: /* sys_sendto(int, u32, compat_size_t, unsigned int, u32, int) */
253 ldswa [%o1 + 0x0] %asi, %o0
254 sethi %hi(sys_sendto), %g1
255 lduwa [%o1 + 0x8] %asi, %o2
256 lduwa [%o1 + 0xc] %asi, %o3
257 lduwa [%o1 + 0x10] %asi, %o4
258 ldswa [%o1 + 0x14] %asi, %o5
259 jmpl %g1 + %lo(sys_sendto), %g0
260 lduwa [%o1 + 0x4] %asi, %o1
261do_sys_recvfrom: /* sys_recvfrom(int, u32, compat_size_t, unsigned int, u32, u32) */
262 ldswa [%o1 + 0x0] %asi, %o0
263 sethi %hi(sys_recvfrom), %g1
264 lduwa [%o1 + 0x8] %asi, %o2
265 lduwa [%o1 + 0xc] %asi, %o3
266 lduwa [%o1 + 0x10] %asi, %o4
267 lduwa [%o1 + 0x14] %asi, %o5
268 jmpl %g1 + %lo(sys_recvfrom), %g0
269 lduwa [%o1 + 0x4] %asi, %o1
270do_sys_shutdown: /* sys_shutdown(int, int) */
271 ldswa [%o1 + 0x0] %asi, %o0
272 sethi %hi(sys_shutdown), %g1
273 jmpl %g1 + %lo(sys_shutdown), %g0
274 ldswa [%o1 + 0x4] %asi, %o1
275 nop
276 nop
277 nop
278 nop
279do_sys_setsockopt: /* compat_sys_setsockopt(int, int, int, char *, int) */
280 ldswa [%o1 + 0x0] %asi, %o0
281 sethi %hi(compat_sys_setsockopt), %g1
282 ldswa [%o1 + 0x8] %asi, %o2
283 lduwa [%o1 + 0xc] %asi, %o3
284 ldswa [%o1 + 0x10] %asi, %o4
285 jmpl %g1 + %lo(compat_sys_setsockopt), %g0
286 ldswa [%o1 + 0x4] %asi, %o1
287 nop
288do_sys_getsockopt: /* compat_sys_getsockopt(int, int, int, u32, u32) */
289 ldswa [%o1 + 0x0] %asi, %o0
290 sethi %hi(compat_sys_getsockopt), %g1
291 ldswa [%o1 + 0x8] %asi, %o2
292 lduwa [%o1 + 0xc] %asi, %o3
293 lduwa [%o1 + 0x10] %asi, %o4
294 jmpl %g1 + %lo(compat_sys_getsockopt), %g0
295 ldswa [%o1 + 0x4] %asi, %o1
296 nop
297do_sys_sendmsg: /* compat_sys_sendmsg(int, struct compat_msghdr *, unsigned int) */
298 ldswa [%o1 + 0x0] %asi, %o0
299 sethi %hi(compat_sys_sendmsg), %g1
300 lduwa [%o1 + 0x8] %asi, %o2
301 jmpl %g1 + %lo(compat_sys_sendmsg), %g0
302 lduwa [%o1 + 0x4] %asi, %o1
303 nop
304 nop
305 nop
306do_sys_recvmsg: /* compat_sys_recvmsg(int, struct compat_msghdr *, unsigned int) */
307 ldswa [%o1 + 0x0] %asi, %o0
308 sethi %hi(compat_sys_recvmsg), %g1
309 lduwa [%o1 + 0x8] %asi, %o2
310 jmpl %g1 + %lo(compat_sys_recvmsg), %g0
311 lduwa [%o1 + 0x4] %asi, %o1
312 nop
313 nop
314 nop
315__socketcall_table_end:
316
 /* Error trampolines: plain returns of -EINVAL / -EFAULT. */
317do_einval:
318 retl
319 mov -EINVAL, %o0
320do_efault:
321 retl
322 mov -EFAULT, %o0
323
324 .section __ex_table
325 .align 4
326 .word __socketcall_table_begin, 0, __socketcall_table_end, do_efault
327 .previous
diff --git a/arch/sparc64/kernel/sys_sparc.c b/arch/sparc64/kernel/sys_sparc.c
new file mode 100644
index 000000000000..0077f02f4b37
--- /dev/null
+++ b/arch/sparc64/kernel/sys_sparc.c
@@ -0,0 +1,723 @@
1/* $Id: sys_sparc.c,v 1.57 2002/02/09 19:49:30 davem Exp $
2 * linux/arch/sparc64/kernel/sys_sparc.c
3 *
4 * This file contains various random system calls that
5 * have a non-standard calling sequence on the Linux/sparc
6 * platform.
7 */
8
9#include <linux/config.h>
10#include <linux/errno.h>
11#include <linux/types.h>
12#include <linux/sched.h>
13#include <linux/fs.h>
14#include <linux/file.h>
15#include <linux/mm.h>
16#include <linux/sem.h>
17#include <linux/msg.h>
18#include <linux/shm.h>
19#include <linux/stat.h>
20#include <linux/mman.h>
21#include <linux/utsname.h>
22#include <linux/smp.h>
23#include <linux/smp_lock.h>
24#include <linux/slab.h>
25#include <linux/syscalls.h>
26#include <linux/ipc.h>
27#include <linux/personality.h>
28
29#include <asm/uaccess.h>
30#include <asm/ipc.h>
31#include <asm/utrap.h>
32#include <asm/perfctr.h>
33
34/* #define DEBUG_UNIMP_SYSCALL */
35
36/* XXX Make this per-binary type, this way we can detect the type of
37 * XXX a binary. Every Sparc executable calls this very early on.
38 */
/* getpagesize(2): simply report the kernel's PAGE_SIZE constant. */
39asmlinkage unsigned long sys_getpagesize(void)
40{
41	return PAGE_SIZE;
42}
43
/* Round @addr up to an SHMLBA boundary, then add the cache-colour
 * offset implied by @pgoff so aliasing constraints are respected
 * (see the MAP_SHARED check in arch_get_unmapped_area below).
 * Note @addr is evaluated twice -- do not pass expressions with
 * side effects.
 */
44#define COLOUR_ALIGN(addr,pgoff)		\
45	((((addr)+SHMLBA-1)&~(SHMLBA-1)) +	\
46	 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
47
/* arch_get_unmapped_area() - find a free virtual range for a new mapping.
 *
 * @filp:  backing file (NULL for anonymous); file-backed and shared
 *         mappings get D-cache colour alignment via COLOUR_ALIGN.
 * @addr:  caller's address hint (0 means "no preference").
 * @len:   length of the mapping.
 * @pgoff: file page offset, used for colour alignment.
 * @flags: MAP_* flags.
 *
 * Returns the chosen address, or -EINVAL/-ENOMEM on failure.
 * MAP_FIXED requests are honoured verbatim, after rejecting shared
 * mappings that would violate cache aliasing constraints.  The linear
 * scan starts at mm->free_area_cache and restarts once from
 * TASK_UNMAPPED_BASE before giving up; it also skips the sparc64
 * virtual-address hole around PAGE_OFFSET.
 */
48unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
49{
50	struct mm_struct *mm = current->mm;
51	struct vm_area_struct * vma;
52	unsigned long task_size = TASK_SIZE;
53	unsigned long start_addr;
54	int do_color_align;
55
56	if (flags & MAP_FIXED) {
57		/* We do not accept a shared mapping if it would violate
58		 * cache aliasing constraints.
59		 */
60		if ((flags & MAP_SHARED) &&
61		    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
62			return -EINVAL;
63		return addr;
64	}

	/* 32-bit tasks are confined below 0xf0000000. */
66	if (test_thread_flag(TIF_32BIT))
67		task_size = 0xf0000000UL;
68	if (len > task_size || len > -PAGE_OFFSET)
69		return -ENOMEM;
70
71	do_color_align = 0;
72	if (filp || (flags & MAP_SHARED))
73		do_color_align = 1;
74
75	if (addr) {
76		if (do_color_align)
77			addr = COLOUR_ALIGN(addr, pgoff);
78		else
79			addr = PAGE_ALIGN(addr);
80
81		vma = find_vma(mm, addr);
82		if (task_size - len >= addr &&
83		    (!vma || addr + len <= vma->vm_start))
84			return addr;
85	}
86
87	start_addr = addr = mm->free_area_cache;
88
89	task_size -= len;
90
91full_search:
92	if (do_color_align)
93		addr = COLOUR_ALIGN(addr, pgoff);
94	else
95		addr = PAGE_ALIGN(addr);
96
97	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
98		/* At this point: (!vma || addr < vma->vm_end). */
		/* Hop over the VA hole below PAGE_OFFSET in one step. */
99		if (addr < PAGE_OFFSET && -PAGE_OFFSET - len < addr) {
100			addr = PAGE_OFFSET;
101			vma = find_vma(mm, PAGE_OFFSET);
102		}
103		if (task_size < addr) {
104			if (start_addr != TASK_UNMAPPED_BASE) {
105				start_addr = addr = TASK_UNMAPPED_BASE;
106				goto full_search;
107			}
108			return -ENOMEM;
109		}
110		if (!vma || addr + len <= vma->vm_start) {
111			/*
112			 * Remember the place where we stopped the search:
113			 */
114			mm->free_area_cache = addr + len;
115			return addr;
116		}
117		addr = vma->vm_end;
118		if (do_color_align)
119			addr = COLOUR_ALIGN(addr, pgoff);
120	}
121}
122
123/* Try to align mapping such that we align it as much as possible. */
/* Framebuffer variant of get_unmapped_area(): tries progressively
 * smaller power-of-two alignment goals (4MB, 512KB, 64KB, then plain
 * page size) by over-asking for len + (goal - PAGE_SIZE) and rounding
 * the result up, so large mappings get hardware-friendly alignment
 * when the address space allows it.  MAP_FIXED requests are passed
 * through untouched; MAP_SHARED is stripped for the search.
 */
124unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, unsigned long len, unsigned long pgoff, unsigned long flags)
125{
126	unsigned long align_goal, addr = -ENOMEM;
127
128	if (flags & MAP_FIXED) {
129		/* Ok, don't mess with it. */
130		return get_unmapped_area(NULL, addr, len, pgoff, flags);
131	}
132	flags &= ~MAP_SHARED;
133
134	align_goal = PAGE_SIZE;
135	if (len >= (4UL * 1024 * 1024))
136		align_goal = (4UL * 1024 * 1024);
137	else if (len >= (512UL * 1024))
138		align_goal = (512UL * 1024);
139	else if (len >= (64UL * 1024))
140		align_goal = (64UL * 1024);
141
142	do {
143		addr = get_unmapped_area(NULL, orig_addr, len + (align_goal - PAGE_SIZE), pgoff, flags);
		/* A page-aligned (non-error) result: round up to the goal. */
144		if (!(addr & ~PAGE_MASK)) {
145			addr = (addr + (align_goal - 1UL)) & ~(align_goal - 1UL);
146			break;
147		}
148
149		if (align_goal == (4UL * 1024 * 1024))
150			align_goal = (512UL * 1024);
151		else if (align_goal == (512UL * 1024))
152			align_goal = (64UL * 1024);
153		else
154			align_goal = PAGE_SIZE;
155	} while ((addr & ~PAGE_MASK) && align_goal > PAGE_SIZE);
156
157	/* Mapping is smaller than 64K or larger areas could not
158	 * be obtained.
159	 */
160	if (addr & ~PAGE_MASK)
161		addr = get_unmapped_area(NULL, orig_addr, len, pgoff, flags);
162
163	return addr;
164}
165
166asmlinkage unsigned long sparc_brk(unsigned long brk)
167{
168 /* People could try to be nasty and use ta 0x6d in 32bit programs */
169 if (test_thread_flag(TIF_32BIT) &&
170 brk >= 0xf0000000UL)
171 return current->mm->brk;
172
173 if ((current->mm->brk & PAGE_OFFSET) != (brk & PAGE_OFFSET))
174 return current->mm->brk;
175 return sys_brk(brk);
176}
177
178/*
179 * sys_pipe() is the normal C calling standard for creating
180 * a pipe. It's not the way unix traditionally does this, though.
181 */
182asmlinkage long sparc_pipe(struct pt_regs *regs)
183{
184 int fd[2];
185 int error;
186
187 error = do_pipe(fd);
188 if (error)
189 goto out;
190 regs->u_regs[UREG_I1] = fd[1];
191 error = fd[0];
192out:
193 return error;
194}
195
196/*
197 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
198 *
199 * This is really horribly ugly.
200 */
201
/* sys_ipc() - System V IPC multiplexer for sparc64.
 *
 * @call selects the operation; the constants are ordered so that the
 * three families can be range-checked: semaphores (call <= SEMCTL),
 * message queues (call <= MSGCTL), shared memory (call <= SHMCTL).
 * Arguments are forwarded to the corresponding sys_sem*/sys_msg*/
 * sys_shm* handler; *CTL calls are tagged IPC_64 since there is no
 * old-ABI userland to stay compatible with.  Unknown calls yield
 * -ENOSYS.
 */
202asmlinkage long sys_ipc(unsigned int call, int first, unsigned long second,
203			unsigned long third, void __user *ptr, long fifth)
204{
205	int err;
206
207	/* No need for backward compatibility. We can start fresh... */
208	if (call <= SEMCTL) {
209		switch (call) {
210		case SEMOP:
211			err = sys_semtimedop(first, ptr,
212					     (unsigned)second, NULL);
213			goto out;
214		case SEMTIMEDOP:
215			err = sys_semtimedop(first, ptr, (unsigned)second,
216				(const struct timespec __user *) fifth);
217			goto out;
218		case SEMGET:
219			err = sys_semget(first, (int)second, (int)third);
220			goto out;
221		case SEMCTL: {
222			union semun fourth;
223			err = -EINVAL;
224			if (!ptr)
225				goto out;
226			err = -EFAULT;
			/* ptr points at a 32/64-bit pointer-sized pad word. */
227			if (get_user(fourth.__pad,
228				     (void __user * __user *) ptr))
229				goto out;
230			err = sys_semctl(first, (int)second | IPC_64,
231					 (int)third, fourth);
232			goto out;
233		}
234		default:
235			err = -ENOSYS;
236			goto out;
237		};
238	}
239	if (call <= MSGCTL) {
240		switch (call) {
241		case MSGSND:
242			err = sys_msgsnd(first, ptr, (size_t)second,
243					 (int)third);
244			goto out;
245		case MSGRCV:
246			err = sys_msgrcv(first, ptr, (size_t)second, fifth,
247					 (int)third);
248			goto out;
249		case MSGGET:
250			err = sys_msgget((key_t)first, (int)second);
251			goto out;
252		case MSGCTL:
253			err = sys_msgctl(first, (int)second | IPC_64, ptr);
254			goto out;
255		default:
256			err = -ENOSYS;
257			goto out;
258		};
259	}
260	if (call <= SHMCTL) {
261		switch (call) {
262		case SHMAT: {
263			ulong raddr;
264			err = do_shmat(first, ptr, (int)second, &raddr);
265			if (!err) {
				/* Attach address is returned via *third. */
266				if (put_user(raddr,
267					     (ulong __user *) third))
268					err = -EFAULT;
269			}
270			goto out;
271		}
272		case SHMDT:
273			err = sys_shmdt(ptr);
274			goto out;
275		case SHMGET:
276			err = sys_shmget(first, (size_t)second, (int)third);
277			goto out;
278		case SHMCTL:
279			err = sys_shmctl(first, (int)second | IPC_64, ptr);
280			goto out;
281		default:
282			err = -ENOSYS;
283			goto out;
284		};
285	} else {
286		err = -ENOSYS;
287	}
288out:
289	return err;
290}
291
292asmlinkage long sparc64_newuname(struct new_utsname __user *name)
293{
294 int ret = sys_newuname(name);
295
296 if (current->personality == PER_LINUX32 && !ret) {
297 ret = (copy_to_user(name->machine, "sparc\0\0", 8)
298 ? -EFAULT : 0);
299 }
300 return ret;
301}
302
303asmlinkage long sparc64_personality(unsigned long personality)
304{
305 int ret;
306
307 if (current->personality == PER_LINUX32 &&
308 personality == PER_LINUX)
309 personality = PER_LINUX32;
310 ret = sys_personality(personality);
311 if (ret == PER_LINUX32)
312 ret = PER_LINUX;
313
314 return ret;
315}
316
317/* Linux version of mmap */
/* mmap(2) for sparc64.  @off is a byte offset.  Range checks depend on
 * the task's word size: 32-bit tasks must stay below 0xf0000000,
 * 64-bit tasks must not touch the VA hole around PAGE_OFFSET.
 * MAP_EXECUTABLE and MAP_DENYWRITE are ignored.  Returns the mapped
 * address or a negative errno (-EBADF for a bad fd, -EINVAL for a bad
 * range, or whatever do_mmap() reports).
 */
318asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
319	unsigned long prot, unsigned long flags, unsigned long fd,
320	unsigned long off)
321{
322	struct file * file = NULL;
323	unsigned long retval = -EBADF;
324
325	if (!(flags & MAP_ANONYMOUS)) {
326		file = fget(fd);
327		if (!file)
328			goto out;
329	}
330	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
331	len = PAGE_ALIGN(len);
332	retval = -EINVAL;
333
334	if (test_thread_flag(TIF_32BIT)) {
335		if (len > 0xf0000000UL ||
336		    ((flags & MAP_FIXED) && addr > 0xf0000000UL - len))
337			goto out_putf;
338	} else {
339		if (len > -PAGE_OFFSET ||
340		    ((flags & MAP_FIXED) &&
341		     addr < PAGE_OFFSET && addr + len > -PAGE_OFFSET))
342			goto out_putf;
343	}
344
345	down_write(&current->mm->mmap_sem);
346	retval = do_mmap(file, addr, len, prot, flags, off);
347	up_write(&current->mm->mmap_sem);
348
349out_putf:
350	if (file)
351		fput(file);
352out:
353	return retval;
354}
355
356asmlinkage long sys64_munmap(unsigned long addr, size_t len)
357{
358 long ret;
359
360 if (len > -PAGE_OFFSET ||
361 (addr < PAGE_OFFSET && addr + len > -PAGE_OFFSET))
362 return -EINVAL;
363 down_write(&current->mm->mmap_sem);
364 ret = do_munmap(current->mm, addr, len);
365 up_write(&current->mm->mmap_sem);
366 return ret;
367}
368
369extern unsigned long do_mremap(unsigned long addr,
370 unsigned long old_len, unsigned long new_len,
371 unsigned long flags, unsigned long new_addr);
372
/* mremap(2) for 64-bit tasks (32-bit tasks get -EINVAL; they use the
 * compat path).  All old/new ranges are checked against the VA hole
 * around PAGE_OFFSET.  If the grown range would cross the hole and the
 * caller allows moving (MREMAP_MAYMOVE without MREMAP_FIXED), a fresh
 * target address is picked with get_unmapped_area() and the request is
 * converted to MREMAP_FIXED before calling do_mremap() under mmap_sem.
 */
373asmlinkage unsigned long sys64_mremap(unsigned long addr,
374	unsigned long old_len, unsigned long new_len,
375	unsigned long flags, unsigned long new_addr)
376{
377	struct vm_area_struct *vma;
378	unsigned long ret = -EINVAL;
379	if (test_thread_flag(TIF_32BIT))
380		goto out;
381	if (old_len > -PAGE_OFFSET || new_len > -PAGE_OFFSET)
382		goto out;
383	if (addr < PAGE_OFFSET && addr + old_len > -PAGE_OFFSET)
384		goto out;
385	down_write(&current->mm->mmap_sem);
386	if (flags & MREMAP_FIXED) {
387		if (new_addr < PAGE_OFFSET &&
388		    new_addr + new_len > -PAGE_OFFSET)
389			goto out_sem;
390	} else if (addr < PAGE_OFFSET && addr + new_len > -PAGE_OFFSET) {
391		unsigned long map_flags = 0;
392		struct file *file = NULL;
393
394		ret = -ENOMEM;
395		if (!(flags & MREMAP_MAYMOVE))
396			goto out_sem;
397
		/* Mirror the old vma's sharing and backing file so the
		 * replacement area is searched with the same constraints.
		 */
398		vma = find_vma(current->mm, addr);
399		if (vma) {
400			if (vma->vm_flags & VM_SHARED)
401				map_flags |= MAP_SHARED;
402			file = vma->vm_file;
403		}
404
405		/* MREMAP_FIXED checked above. */
406		new_addr = get_unmapped_area(file, addr, new_len,
407					     vma ? vma->vm_pgoff : 0,
408					     map_flags);
409		ret = new_addr;
410		if (new_addr & ~PAGE_MASK)
411			goto out_sem;
412		flags |= MREMAP_FIXED;
413	}
414	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
415out_sem:
416	up_write(&current->mm->mmap_sem);
417out:
418	return ret;
419}
420
421/* we come to here via sys_nis_syscall so it can setup the regs argument */
422asmlinkage unsigned long c_sys_nis_syscall(struct pt_regs *regs)
423{
424 static int count;
425
426 /* Don't make the system unusable, if someone goes stuck */
427 if (count++ > 5)
428 return -ENOSYS;
429
430 printk ("Unimplemented SPARC system call %ld\n",regs->u_regs[1]);
431#ifdef DEBUG_UNIMP_SYSCALL
432 show_regs (regs);
433#endif
434
435 return -ENOSYS;
436}
437
438/* #define DEBUG_SPARC_BREAKPOINT */
439
/* Trap handler for the breakpoint software trap: truncates the trap
 * PCs to 32 bits for compat tasks, then delivers SIGTRAP/TRAP_BRKPT
 * with the faulting PC as si_addr.
 */
441asmlinkage void sparc_breakpoint(struct pt_regs *regs)
442{
443	siginfo_t info;
444
445	if (test_thread_flag(TIF_32BIT)) {
446		regs->tpc &= 0xffffffff;
447		regs->tnpc &= 0xffffffff;
448	}
449#ifdef DEBUG_SPARC_BREAKPOINT
450        printk ("TRAP: Entering kernel PC=%lx, nPC=%lx\n", regs->tpc, regs->tnpc);
451#endif
452	info.si_signo = SIGTRAP;
453	info.si_errno = 0;
454	info.si_code = TRAP_BRKPT;
455	info.si_addr = (void __user *)regs->tpc;
456	info.si_trapno = 0;
457	force_sig_info(SIGTRAP, &info, current);
458#ifdef DEBUG_SPARC_BREAKPOINT
459	printk ("TRAP: Returning to space: PC=%lx nPC=%lx\n", regs->tpc, regs->tnpc);
460#endif
461}
461
462extern void check_pending(int signum);
463
/* getdomainname(2): copy up to @len bytes of the NIS domain name (the
 * copied length is clamped to the string length including its NUL).
 * Reads system_utsname under uts_sem.  Returns 0 on success, -EFAULT
 * on copy failure or when len ends up above __NEW_UTS_LEN.
 * NOTE(review): if @len is shorter than the domain name the copy is
 * not NUL-terminated -- callers appear expected to handle that.
 */
464asmlinkage long sys_getdomainname(char __user *name, int len)
465{
466        int nlen;
467	int err = -EFAULT;
468
469 	down_read(&uts_sem);
470
471	nlen = strlen(system_utsname.domainname) + 1;
472
473        if (nlen < len)
474                len = nlen;
475	if (len > __NEW_UTS_LEN)
476		goto done;
477	if (copy_to_user(name, system_utsname.domainname, len))
478		goto done;
479	err = 0;
480done:
481	up_read(&uts_sem);
482	return err;
483}
484
/* Fallback for the Solaris syscall trap when the solaris emulation
 * module is not loaded: skip the trap instruction (advance tpc/tnpc),
 * log a hint (at most 5 times), send SIGSEGV and fail with -ENOSYS.
 */
485asmlinkage long solaris_syscall(struct pt_regs *regs)
486{
487	static int count;
488
489	regs->tpc = regs->tnpc;
490	regs->tnpc += 4;
491	if (test_thread_flag(TIF_32BIT)) {
492		regs->tpc &= 0xffffffff;
493		regs->tnpc &= 0xffffffff;
494	}
495	if (++count <= 5) {
496		printk ("For Solaris binary emulation you need solaris module loaded\n");
497		show_regs (regs);
498	}
499	send_sig(SIGSEGV, current, 1);
500
501	return -ENOSYS;
502}
503
504#ifndef CONFIG_SUNOS_EMUL
/* Stub for the SunOS syscall trap when SunOS emulation is not built
 * in: skip the trap instruction, log (at most 20 times), force
 * SIGSEGV and fail with -ENOSYS.  Compiled out when CONFIG_SUNOS_EMUL
 * provides the real implementation.
 */
505asmlinkage long sunos_syscall(struct pt_regs *regs)
506{
507	static int count;
508
509	regs->tpc = regs->tnpc;
510	regs->tnpc += 4;
511	if (test_thread_flag(TIF_32BIT)) {
512		regs->tpc &= 0xffffffff;
513		regs->tnpc &= 0xffffffff;
514	}
515	if (++count <= 20)
516		printk ("SunOS binary emulation not compiled in\n");
517	force_sig(SIGSEGV, current);
518
519	return -ENOSYS;
520}
521#endif
522
/* utrap_install(2): install or query a user trap handler.
 *
 * @type:  utrap slot, must lie in
 *         [UT_INSTRUCTION_EXCEPTION, UT_TRAP_INSTRUCTION_31].
 * @new_p: new precise handler, or UTH_NOCHANGE to only query.
 * @new_d: new deferred handler (deferred utraps are not supported
 *         here: the old deferred handler reported back is always NULL).
 * @old_p/@old_d: optional user pointers receiving the previous values.
 *
 * The per-thread utraps table is lazily allocated; slot 0 holds what
 * is effectively a share count, and a shared table (utraps[0] > 1) is
 * copied before modification so other sharers are unaffected
 * (copy-on-write).  Returns 0, -EINVAL, -EFAULT or -ENOMEM.
 */
523asmlinkage long sys_utrap_install(utrap_entry_t type,
524				  utrap_handler_t new_p,
525				  utrap_handler_t new_d,
526				  utrap_handler_t __user *old_p,
527				  utrap_handler_t __user *old_d)
528{
529	if (type < UT_INSTRUCTION_EXCEPTION || type > UT_TRAP_INSTRUCTION_31)
530		return -EINVAL;
	/* Query-only path: report current handlers, change nothing. */
531	if (new_p == (utrap_handler_t)(long)UTH_NOCHANGE) {
532		if (old_p) {
533			if (!current_thread_info()->utraps) {
534				if (put_user(NULL, old_p))
535					return -EFAULT;
536			} else {
537				if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p))
538					return -EFAULT;
539			}
540		}
541		if (old_d) {
542			if (put_user(NULL, old_d))
543				return -EFAULT;
544		}
545		return 0;
546	}
547	if (!current_thread_info()->utraps) {
548		current_thread_info()->utraps =
549			kmalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long), GFP_KERNEL);
550		if (!current_thread_info()->utraps)
551			return -ENOMEM;
552		current_thread_info()->utraps[0] = 1;
553		memset(current_thread_info()->utraps+1, 0,
554		       UT_TRAP_INSTRUCTION_31*sizeof(long));
555	} else {
		/* Shared table and a real change: make a private copy. */
556		if ((utrap_handler_t)current_thread_info()->utraps[type] != new_p &&
557		    current_thread_info()->utraps[0] > 1) {
558			long *p = current_thread_info()->utraps;
559
560			current_thread_info()->utraps =
561				kmalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long),
562					GFP_KERNEL);
563			if (!current_thread_info()->utraps) {
564				current_thread_info()->utraps = p;
565				return -ENOMEM;
566			}
567			p[0]--;
568			current_thread_info()->utraps[0] = 1;
569			memcpy(current_thread_info()->utraps+1, p+1,
570			       UT_TRAP_INSTRUCTION_31*sizeof(long));
571		}
572	}
573	if (old_p) {
574		if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p))
575			return -EFAULT;
576	}
577	if (old_d) {
578		if (put_user(NULL, old_d))
579			return -EFAULT;
580	}
581	current_thread_info()->utraps[type] = (long)new_p;
582
583	return 0;
584}
585
586long sparc_memory_ordering(unsigned long model, struct pt_regs *regs)
587{
588 if (model >= 3)
589 return -EINVAL;
590 regs->tstate = (regs->tstate & ~TSTATE_MM) | (model << 14);
591 return 0;
592}
593
/* rt_sigaction(2), sparc64 flavour: takes an extra @restorer argument
 * that is stashed in k_sigaction.ka_restorer before the generic
 * do_sigaction() is called.  Returns 0 or a negative errno; the old
 * action is copied back to @oact only on success.
 */
594asmlinkage long sys_rt_sigaction(int sig,
595				 const struct sigaction __user *act,
596				 struct sigaction __user *oact,
597				 void __user *restorer,
598				 size_t sigsetsize)
599{
600	struct k_sigaction new_ka, old_ka;
601	int ret;
602
603	/* XXX: Don't preclude handling different sized sigset_t's.  */
604	if (sigsetsize != sizeof(sigset_t))
605		return -EINVAL;
606
607	if (act) {
608		new_ka.ka_restorer = restorer;
609		if (copy_from_user(&new_ka.sa, act, sizeof(*act)))
610			return -EFAULT;
611	}
612
613	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
614
615	if (!ret && oact) {
616		if (copy_to_user(oact, &old_ka.sa, sizeof(*oact)))
617			return -EFAULT;
618	}
619
620	return ret;
621}
622
623/* Invoked by rtrap code to update performance counters in
624 * user space.
625 */
/* Reads the %pic register, folds its low word into kernel_cntd0 and
 * its high word into kernel_cntd1, mirrors both totals to the
 * user-space counters registered via sys_perfctr(PERFCTR_ON), then
 * clears %pic.  The __put_user() results are ignored here --
 * presumably a faulting user counter pointer is tolerated on this
 * path (compare sys_perfctr's PERFCTR_READ, which does check).
 */
626asmlinkage void update_perfctrs(void)
627{
628	unsigned long pic, tmp;
629
630	read_pic(pic);
631	tmp = (current_thread_info()->kernel_cntd0 += (unsigned int)pic);
632	__put_user(tmp, current_thread_info()->user_cntd0);
633	tmp = (current_thread_info()->kernel_cntd1 += (pic >> 32));
634	__put_user(tmp, current_thread_info()->user_cntd1);
635	reset_pic();
636}
637
/* perfctr(2): control the UltraSPARC performance counters.
 *
 * @opcode selects the action; arg0..arg2 are opcode-specific:
 *   PERFCTR_ON     - arg2 = %pcr value, arg0/arg1 = user addresses for
 *                    counter 0/1 shadows; enables TIF_PERFCTR.
 *   PERFCTR_OFF    - disable counting and forget the user pointers.
 *   PERFCTR_READ   - accumulate %pic into the kernel totals, copy them
 *                    to the user shadows, clear %pic.
 *   PERFCTR_CLRPIC - zero the kernel totals and %pic.
 *   PERFCTR_SETPCR - arg0 = user address of a new %pcr value; also
 *                    clears totals and %pic.
 *   PERFCTR_GETPCR - arg0 = user address receiving the current %pcr.
 * All opcodes except PERFCTR_ON require counting to already be
 * enabled (TIF_PERFCTR), else -EINVAL.
 */
638asmlinkage long sys_perfctr(int opcode, unsigned long arg0, unsigned long arg1, unsigned long arg2)
639{
640	int err = 0;
641
642	switch(opcode) {
643	case PERFCTR_ON:
644		current_thread_info()->pcr_reg = arg2;
645		current_thread_info()->user_cntd0 = (u64 __user *) arg0;
646		current_thread_info()->user_cntd1 = (u64 __user *) arg1;
647		current_thread_info()->kernel_cntd0 =
648			current_thread_info()->kernel_cntd1 = 0;
649		write_pcr(arg2);
650		reset_pic();
651		set_thread_flag(TIF_PERFCTR);
652		break;
653
654	case PERFCTR_OFF:
655		err = -EINVAL;
656		if (test_thread_flag(TIF_PERFCTR)) {
657			current_thread_info()->user_cntd0 =
658				current_thread_info()->user_cntd1 = NULL;
659			current_thread_info()->pcr_reg = 0;
660			write_pcr(0);
661			clear_thread_flag(TIF_PERFCTR);
662			err = 0;
663		}
664		break;
665
666	case PERFCTR_READ: {
667		unsigned long pic, tmp;
668
669		if (!test_thread_flag(TIF_PERFCTR)) {
670			err = -EINVAL;
671			break;
672		}
		/* Low word of %pic -> cntd0, high word -> cntd1. */
673		read_pic(pic);
674		tmp = (current_thread_info()->kernel_cntd0 += (unsigned int)pic);
675		err |= __put_user(tmp, current_thread_info()->user_cntd0);
676		tmp = (current_thread_info()->kernel_cntd1 += (pic >> 32));
677		err |= __put_user(tmp, current_thread_info()->user_cntd1);
678		reset_pic();
679		break;
680	}
681
682	case PERFCTR_CLRPIC:
683		if (!test_thread_flag(TIF_PERFCTR)) {
684			err = -EINVAL;
685			break;
686		}
687		current_thread_info()->kernel_cntd0 =
688			current_thread_info()->kernel_cntd1 = 0;
689		reset_pic();
690		break;
691
692	case PERFCTR_SETPCR: {
693		u64 __user *user_pcr = (u64 __user *)arg0;
694
695		if (!test_thread_flag(TIF_PERFCTR)) {
696			err = -EINVAL;
697			break;
698		}
699		err |= __get_user(current_thread_info()->pcr_reg, user_pcr);
700		write_pcr(current_thread_info()->pcr_reg);
701		current_thread_info()->kernel_cntd0 =
702			current_thread_info()->kernel_cntd1 = 0;
703		reset_pic();
704		break;
705	}
706
707	case PERFCTR_GETPCR: {
708		u64 __user *user_pcr = (u64 __user *)arg0;
709
710		if (!test_thread_flag(TIF_PERFCTR)) {
711			err = -EINVAL;
712			break;
713		}
714		err |= __put_user(current_thread_info()->pcr_reg, user_pcr);
715		break;
716	}
717
718	default:
719		err = -EINVAL;
720		break;
721	};
722	return err;
723}
diff --git a/arch/sparc64/kernel/sys_sparc32.c b/arch/sparc64/kernel/sys_sparc32.c
new file mode 100644
index 000000000000..567c91c77b20
--- /dev/null
+++ b/arch/sparc64/kernel/sys_sparc32.c
@@ -0,0 +1,1118 @@
1/* $Id: sys_sparc32.c,v 1.184 2002/02/09 19:49:31 davem Exp $
2 * sys_sparc32.c: Conversion between 32bit and 64bit native syscalls.
3 *
4 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
5 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
6 *
7 * These routines maintain argument size conversion between 32bit and 64bit
8 * environment.
9 */
10
11#include <linux/config.h>
12#include <linux/kernel.h>
13#include <linux/sched.h>
14#include <linux/fs.h>
15#include <linux/mm.h>
16#include <linux/file.h>
17#include <linux/signal.h>
18#include <linux/resource.h>
19#include <linux/times.h>
20#include <linux/utsname.h>
21#include <linux/timex.h>
22#include <linux/smp.h>
23#include <linux/smp_lock.h>
24#include <linux/sem.h>
25#include <linux/msg.h>
26#include <linux/shm.h>
27#include <linux/slab.h>
28#include <linux/uio.h>
29#include <linux/nfs_fs.h>
30#include <linux/quota.h>
31#include <linux/module.h>
32#include <linux/sunrpc/svc.h>
33#include <linux/nfsd/nfsd.h>
34#include <linux/nfsd/cache.h>
35#include <linux/nfsd/xdr.h>
36#include <linux/nfsd/syscall.h>
37#include <linux/poll.h>
38#include <linux/personality.h>
39#include <linux/stat.h>
40#include <linux/filter.h>
41#include <linux/highmem.h>
42#include <linux/highuid.h>
43#include <linux/mman.h>
44#include <linux/ipv6.h>
45#include <linux/in.h>
46#include <linux/icmpv6.h>
47#include <linux/syscalls.h>
48#include <linux/sysctl.h>
49#include <linux/binfmts.h>
50#include <linux/dnotify.h>
51#include <linux/security.h>
52#include <linux/compat.h>
53#include <linux/vfs.h>
54#include <linux/netfilter_ipv4/ip_tables.h>
55#include <linux/ptrace.h>
56#include <linux/highuid.h>
57
58#include <asm/types.h>
59#include <asm/ipc.h>
60#include <asm/uaccess.h>
61#include <asm/fpumacro.h>
62#include <asm/semaphore.h>
63#include <asm/mmu_context.h>
64
/* 16-bit UID/GID compatibility syscalls.
 *
 * Legacy 32-bit sparc binaries use 16-bit uid_t/gid_t.  These wrappers
 * widen the 16-bit values with low2highuid()/low2highgid() (from
 * <linux/highuid.h>; presumably these map the legacy 0xffff "unchanged"
 * sentinel to -1 — confirm there) and forward to the native syscalls.
 */
asmlinkage long sys32_chown16(const char __user * filename, u16 user, u16 group)
{
	return sys_chown(filename, low2highuid(user), low2highgid(group));
}

asmlinkage long sys32_lchown16(const char __user * filename, u16 user, u16 group)
{
	return sys_lchown(filename, low2highuid(user), low2highgid(group));
}

asmlinkage long sys32_fchown16(unsigned int fd, u16 user, u16 group)
{
	return sys_fchown(fd, low2highuid(user), low2highgid(group));
}

asmlinkage long sys32_setregid16(u16 rgid, u16 egid)
{
	return sys_setregid(low2highgid(rgid), low2highgid(egid));
}

asmlinkage long sys32_setgid16(u16 gid)
{
	return sys_setgid((gid_t)gid);
}

asmlinkage long sys32_setreuid16(u16 ruid, u16 euid)
{
	return sys_setreuid(low2highuid(ruid), low2highuid(euid));
}

asmlinkage long sys32_setuid16(u16 uid)
{
	return sys_setuid((uid_t)uid);
}

asmlinkage long sys32_setresuid16(u16 ruid, u16 euid, u16 suid)
{
	return sys_setresuid(low2highuid(ruid), low2highuid(euid),
		low2highuid(suid));
}

/* Report real/effective/saved uid narrowed to 16 bits; stops at the
 * first faulting put_user() and returns its error.
 */
asmlinkage long sys32_getresuid16(u16 __user *ruid, u16 __user *euid, u16 __user *suid)
{
	int retval;

	if (!(retval = put_user(high2lowuid(current->uid), ruid)) &&
	    !(retval = put_user(high2lowuid(current->euid), euid)))
		retval = put_user(high2lowuid(current->suid), suid);

	return retval;
}

asmlinkage long sys32_setresgid16(u16 rgid, u16 egid, u16 sgid)
{
	return sys_setresgid(low2highgid(rgid), low2highgid(egid),
		low2highgid(sgid));
}

/* Report real/effective/saved gid narrowed to 16 bits; stops at the
 * first faulting put_user() and returns its error.
 */
asmlinkage long sys32_getresgid16(u16 __user *rgid, u16 __user *egid, u16 __user *sgid)
{
	int retval;

	if (!(retval = put_user(high2lowgid(current->gid), rgid)) &&
	    !(retval = put_user(high2lowgid(current->egid), egid)))
		retval = put_user(high2lowgid(current->sgid), sgid);

	return retval;
}

asmlinkage long sys32_setfsuid16(u16 uid)
{
	return sys_setfsuid((uid_t)uid);
}

asmlinkage long sys32_setfsgid16(u16 gid)
{
	return sys_setfsgid((gid_t)gid);
}
143
144static int groups16_to_user(u16 __user *grouplist, struct group_info *group_info)
145{
146 int i;
147 u16 group;
148
149 for (i = 0; i < group_info->ngroups; i++) {
150 group = (u16)GROUP_AT(group_info, i);
151 if (put_user(group, grouplist+i))
152 return -EFAULT;
153 }
154
155 return 0;
156}
157
158static int groups16_from_user(struct group_info *group_info, u16 __user *grouplist)
159{
160 int i;
161 u16 group;
162
163 for (i = 0; i < group_info->ngroups; i++) {
164 if (get_user(group, grouplist+i))
165 return -EFAULT;
166 GROUP_AT(group_info, i) = (gid_t)group;
167 }
168
169 return 0;
170}
171
/* 16-bit getgroups(2).  With gidsetsize == 0 just report the number of
 * supplementary groups; otherwise copy them out as u16 entries.  The
 * current group list is pinned with get_group_info()/put_group_info()
 * around the user copy.  Returns the group count or a negative errno.
 */
asmlinkage long sys32_getgroups16(int gidsetsize, u16 __user *grouplist)
{
	int i;

	if (gidsetsize < 0)
		return -EINVAL;

	get_group_info(current->group_info);
	i = current->group_info->ngroups;
	if (gidsetsize) {
		/* Caller's buffer too small for the whole list. */
		if (i > gidsetsize) {
			i = -EINVAL;
			goto out;
		}
		if (groups16_to_user(grouplist, current->group_info)) {
			i = -EFAULT;
			goto out;
		}
	}
out:
	put_group_info(current->group_info);
	return i;
}
195
/* 16-bit setgroups(2).  Requires CAP_SETGID.  Allocates a group_info,
 * fills it from the 16-bit user list, installs it with
 * set_current_groups(), and always drops the local reference.
 */
asmlinkage long sys32_setgroups16(int gidsetsize, u16 __user *grouplist)
{
	struct group_info *group_info;
	int retval;

	if (!capable(CAP_SETGID))
		return -EPERM;
	if ((unsigned)gidsetsize > NGROUPS_MAX)
		return -EINVAL;

	group_info = groups_alloc(gidsetsize);
	if (!group_info)
		return -ENOMEM;
	retval = groups16_from_user(group_info, grouplist);
	if (retval) {
		put_group_info(group_info);
		return retval;
	}

	retval = set_current_groups(group_info);
	put_group_info(group_info);

	return retval;
}
220
/* 16-bit get*id() family: return current credentials narrowed to the
 * legacy 16-bit types.  These cannot fail.
 */
asmlinkage long sys32_getuid16(void)
{
	return high2lowuid(current->uid);
}

asmlinkage long sys32_geteuid16(void)
{
	return high2lowuid(current->euid);
}

asmlinkage long sys32_getgid16(void)
{
	return high2lowgid(current->gid);
}

asmlinkage long sys32_getegid16(void)
{
	return high2lowgid(current->egid);
}
240
/* 32-bit timeval and related flotsam. */

/* Read a compat (2 x 32-bit) timeval from userspace into a kernel
 * struct timeval.  Returns non-zero on a bad pointer or fault.
 */
static long get_tv32(struct timeval *o, struct compat_timeval __user *i)
{
	return (!access_ok(VERIFY_READ, i, sizeof(*i)) ||
		(__get_user(o->tv_sec, &i->tv_sec) |
		 __get_user(o->tv_usec, &i->tv_usec)));
}

/* Write a kernel timeval out to userspace in compat layout.
 * Returns non-zero on a bad pointer or fault.
 */
static inline long put_tv32(struct compat_timeval __user *o, struct timeval *i)
{
	return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) ||
		(__put_user(i->tv_sec, &o->tv_sec) |
		 __put_user(i->tv_usec, &o->tv_usec)));
}
256
#ifdef CONFIG_SYSVIPC
/* Demultiplex the 32-bit sys_ipc() entry point onto the individual
 * SysV IPC syscalls.  The call number's high 16 bits carry an IPC
 * "version" for backward compatibility.  32-bit arguments arrive
 * zero-extended, so values that are really signed (ids, keys,
 * msgtyp) are explicitly cast back to int.
 */
asmlinkage long compat_sys_ipc(u32 call, u32 first, u32 second, u32 third, compat_uptr_t ptr, u32 fifth)
{
	int version;

	version = call >> 16; /* hack for backward compatibility */
	call &= 0xffff;

	switch (call) {
	case SEMTIMEDOP:
		if (fifth)
			/* sign extend semid */
			return compat_sys_semtimedop((int)first,
						     compat_ptr(ptr), second,
						     compat_ptr(fifth));
		/* else fall through for normal semop() */
	case SEMOP:
		/* struct sembuf is the same on 32 and 64bit :)) */
		/* sign extend semid */
		return sys_semtimedop((int)first, compat_ptr(ptr), second,
				      NULL);
	case SEMGET:
		/* sign extend key, nsems */
		return sys_semget((int)first, (int)second, third);
	case SEMCTL:
		/* sign extend semid, semnum */
		return compat_sys_semctl((int)first, (int)second, third,
					 compat_ptr(ptr));

	case MSGSND:
		/* sign extend msqid */
		return compat_sys_msgsnd((int)first, (int)second, third,
					 compat_ptr(ptr));
	case MSGRCV:
		/* sign extend msqid, msgtyp */
		return compat_sys_msgrcv((int)first, second, (int)fifth,
					 third, version, compat_ptr(ptr));
	case MSGGET:
		/* sign extend key */
		return sys_msgget((int)first, second);
	case MSGCTL:
		/* sign extend msqid */
		return compat_sys_msgctl((int)first, second, compat_ptr(ptr));

	case SHMAT:
		/* sign extend shmid */
		return compat_sys_shmat((int)first, second, third, version,
					compat_ptr(ptr));
	case SHMDT:
		return sys_shmdt(compat_ptr(ptr));
	case SHMGET:
		/* sign extend key_t */
		return sys_shmget((int)first, second, third);
	case SHMCTL:
		/* sign extend shmid */
		return compat_sys_shmctl((int)first, second, compat_ptr(ptr));

	default:
		return -ENOSYS;
	};

	/* Not reached; all cases above return. */
	return -ENOSYS;
}
#endif
321
322asmlinkage long sys32_truncate64(const char __user * path, unsigned long high, unsigned long low)
323{
324 if ((int)high < 0)
325 return -EINVAL;
326 else
327 return sys_truncate(path, (high << 32) | low);
328}
329
330asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long high, unsigned long low)
331{
332 if ((int)high < 0)
333 return -EINVAL;
334 else
335 return sys_ftruncate(fd, (high << 32) | low);
336}
337
/* Fill a 32-bit compat_stat from a kernel struct kstat.
 *
 * Returns -EOVERFLOW when the file size requires O_LARGEFILE or a
 * device number does not fit the legacy encoding; uaccess faults are
 * OR-accumulated in err and returned as a non-zero value.
 */
int cp_compat_stat(struct kstat *stat, struct compat_stat __user *statbuf)
{
	int err;

	if (stat->size > MAX_NON_LFS || !old_valid_dev(stat->dev) ||
	    !old_valid_dev(stat->rdev))
		return -EOVERFLOW;

	err = put_user(old_encode_dev(stat->dev), &statbuf->st_dev);
	err |= put_user(stat->ino, &statbuf->st_ino);
	err |= put_user(stat->mode, &statbuf->st_mode);
	err |= put_user(stat->nlink, &statbuf->st_nlink);
	err |= put_user(high2lowuid(stat->uid), &statbuf->st_uid);
	err |= put_user(high2lowgid(stat->gid), &statbuf->st_gid);
	err |= put_user(old_encode_dev(stat->rdev), &statbuf->st_rdev);
	err |= put_user(stat->size, &statbuf->st_size);
	err |= put_user(stat->atime.tv_sec, &statbuf->st_atime);
	err |= put_user(0, &statbuf->__unused1);
	err |= put_user(stat->mtime.tv_sec, &statbuf->st_mtime);
	err |= put_user(0, &statbuf->__unused2);
	err |= put_user(stat->ctime.tv_sec, &statbuf->st_ctime);
	err |= put_user(0, &statbuf->__unused3);
	err |= put_user(stat->blksize, &statbuf->st_blksize);
	err |= put_user(stat->blocks, &statbuf->st_blocks);
	err |= put_user(0, &statbuf->__unused4[0]);
	err |= put_user(0, &statbuf->__unused4[1]);

	return err;
}
367
/* sysfs(2) takes plain integers; zero-extension of the 32-bit
 * arguments is all the conversion needed.
 */
asmlinkage long compat_sys_sysfs(int option, u32 arg1, u32 arg2)
{
	return sys_sysfs(option, arg1, arg2);
}
372
/* 32-bit layout of struct sysinfo as expected by compat userland;
 * all memory counters are narrowed to u32.
 */
struct sysinfo32 {
	s32 uptime;
	u32 loads[3];
	u32 totalram;
	u32 freeram;
	u32 sharedram;
	u32 bufferram;
	u32 totalswap;
	u32 freeswap;
	unsigned short procs;
	unsigned short pad;
	u32 totalhigh;
	u32 freehigh;
	u32 mem_unit;
	char _f[20-2*sizeof(int)-sizeof(int)];	/* pad to match 32-bit struct size */
};
389
/* 32-bit sysinfo(2).  Calls the native sys_sysinfo() on a kernel
 * buffer (via the set_fs(KERNEL_DS) trick) and copies the result out
 * in 32-bit layout.  If totalram or totalswap would overflow 32 bits,
 * every counter is scaled down while mem_unit is grown (up to
 * PAGE_SIZE) so userland can still reconstruct the real amounts.
 */
asmlinkage long sys32_sysinfo(struct sysinfo32 __user *info)
{
	struct sysinfo s;
	int ret, err;
	int bitcount = 0;
	mm_segment_t old_fs = get_fs ();

	set_fs(KERNEL_DS);
	ret = sys_sysinfo((struct sysinfo __user *) &s);
	set_fs(old_fs);
	/* Check to see if any memory value is too large for 32-bit and
	 * scale down if needed.
	 */
	if ((s.totalram >> 32) || (s.totalswap >> 32)) {
		while (s.mem_unit < PAGE_SIZE) {
			s.mem_unit <<= 1;
			bitcount++;
		}
		s.totalram >>= bitcount;
		s.freeram >>= bitcount;
		s.sharedram >>= bitcount;
		s.bufferram >>= bitcount;
		s.totalswap >>= bitcount;
		s.freeswap >>= bitcount;
		s.totalhigh >>= bitcount;
		s.freehigh >>= bitcount;
	}

	/* First put_user validates the destination; the rest use the
	 * unchecked __put_user into the same struct.
	 */
	err = put_user (s.uptime, &info->uptime);
	err |= __put_user (s.loads[0], &info->loads[0]);
	err |= __put_user (s.loads[1], &info->loads[1]);
	err |= __put_user (s.loads[2], &info->loads[2]);
	err |= __put_user (s.totalram, &info->totalram);
	err |= __put_user (s.freeram, &info->freeram);
	err |= __put_user (s.sharedram, &info->sharedram);
	err |= __put_user (s.bufferram, &info->bufferram);
	err |= __put_user (s.totalswap, &info->totalswap);
	err |= __put_user (s.freeswap, &info->freeswap);
	err |= __put_user (s.procs, &info->procs);
	err |= __put_user (s.totalhigh, &info->totalhigh);
	err |= __put_user (s.freehigh, &info->freehigh);
	err |= __put_user (s.mem_unit, &info->mem_unit);
	if (err)
		return -EFAULT;
	return ret;
}
436
/* 32-bit sched_rr_get_interval(2): run the native syscall into a
 * kernel timespec (under KERNEL_DS), then convert it to the compat
 * layout with put_compat_timespec().
 */
asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid, struct compat_timespec __user *interval)
{
	struct timespec t;
	int ret;
	mm_segment_t old_fs = get_fs ();

	set_fs (KERNEL_DS);
	ret = sys_sched_rr_get_interval(pid, (struct timespec __user *) &t);
	set_fs (old_fs);
	if (put_compat_timespec(&t, interval))
		return -EFAULT;
	return ret;
}
450
/* 32-bit rt_sigprocmask(2): convert the compat sigset (array of u32
 * words) to the native 64-bit-word sigset, run the real syscall on
 * kernel copies via set_fs(KERNEL_DS), and convert the old mask back.
 * The switch cases fall through deliberately so all _NSIG_WORDS words
 * get converted.  Note the same kernel buffer `s` is used for both
 * the new and the returned mask; sys_rt_sigprocmask reads the input
 * before writing the output.
 */
asmlinkage long compat_sys_rt_sigprocmask(int how,
					  compat_sigset_t __user *set,
					  compat_sigset_t __user *oset,
					  compat_size_t sigsetsize)
{
	sigset_t s;
	compat_sigset_t s32;
	int ret;
	mm_segment_t old_fs = get_fs();

	if (set) {
		if (copy_from_user (&s32, set, sizeof(compat_sigset_t)))
			return -EFAULT;
		switch (_NSIG_WORDS) {
		case 4: s.sig[3] = s32.sig[6] | (((long)s32.sig[7]) << 32);
		case 3: s.sig[2] = s32.sig[4] | (((long)s32.sig[5]) << 32);
		case 2: s.sig[1] = s32.sig[2] | (((long)s32.sig[3]) << 32);
		case 1: s.sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32);
		}
	}
	set_fs (KERNEL_DS);
	ret = sys_rt_sigprocmask(how,
				 set ? (sigset_t __user *) &s : NULL,
				 oset ? (sigset_t __user *) &s : NULL,
				 sigsetsize);
	set_fs (old_fs);
	if (ret) return ret;
	if (oset) {
		switch (_NSIG_WORDS) {
		case 4: s32.sig[7] = (s.sig[3] >> 32); s32.sig[6] = s.sig[3];
		case 3: s32.sig[5] = (s.sig[2] >> 32); s32.sig[4] = s.sig[2];
		case 2: s32.sig[3] = (s.sig[1] >> 32); s32.sig[2] = s.sig[1];
		case 1: s32.sig[1] = (s.sig[0] >> 32); s32.sig[0] = s.sig[0];
		}
		if (copy_to_user (oset, &s32, sizeof(compat_sigset_t)))
			return -EFAULT;
	}
	return 0;
}
490
/* 32-bit rt_sigpending(2): fetch the pending set into a kernel sigset
 * under KERNEL_DS, then split each 64-bit word into two u32s for the
 * compat layout (the switch falls through over all _NSIG_WORDS).
 */
asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
				    compat_size_t sigsetsize)
{
	sigset_t s;
	compat_sigset_t s32;
	int ret;
	mm_segment_t old_fs = get_fs();

	set_fs (KERNEL_DS);
	ret = sys_rt_sigpending((sigset_t __user *) &s, sigsetsize);
	set_fs (old_fs);
	if (!ret) {
		switch (_NSIG_WORDS) {
		case 4: s32.sig[7] = (s.sig[3] >> 32); s32.sig[6] = s.sig[3];
		case 3: s32.sig[5] = (s.sig[2] >> 32); s32.sig[4] = s.sig[2];
		case 2: s32.sig[3] = (s.sig[1] >> 32); s32.sig[2] = s.sig[1];
		case 1: s32.sig[1] = (s.sig[0] >> 32); s32.sig[0] = s.sig[0];
		}
		if (copy_to_user (set, &s32, sizeof(compat_sigset_t)))
			return -EFAULT;
	}
	return ret;
}
514
/* 32-bit rt_sigqueueinfo(2): convert the compat siginfo to native
 * layout, then invoke the real syscall on the kernel copy under
 * KERNEL_DS.
 */
asmlinkage long compat_sys_rt_sigqueueinfo(int pid, int sig,
					   struct compat_siginfo __user *uinfo)
{
	siginfo_t info;
	int ret;
	mm_segment_t old_fs = get_fs();

	if (copy_siginfo_from_user32(&info, uinfo))
		return -EFAULT;

	set_fs (KERNEL_DS);
	ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *) &info);
	set_fs (old_fs);
	return ret;
}
530
/* 32-bit old-style sigaction().  A negative signal number apparently
 * requests "new style" signal semantics (it sets TIF_NEWSIGNALS and
 * the action is applied to -sig) — a sparc userland convention;
 * confirm against the 32-bit signal code.  Handler/restorer pointers
 * are 32-bit and widened with compat_ptr().
 */
asmlinkage long compat_sys_sigaction(int sig, struct old_sigaction32 __user *act,
				     struct old_sigaction32 __user *oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (sig < 0) {
		set_thread_flag(TIF_NEWSIGNALS);
		sig = -sig;
	}

	if (act) {
		compat_old_sigset_t mask;
		u32 u_handler, u_restorer;

		ret = get_user(u_handler, &act->sa_handler);
		new_ka.sa.sa_handler =  compat_ptr(u_handler);
		ret |= __get_user(u_restorer, &act->sa_restorer);
		new_ka.sa.sa_restorer = compat_ptr(u_restorer);
		ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
		ret |= __get_user(mask, &act->sa_mask);
		if (ret)
			return ret;
		/* Old-style sigaction carries no separate restorer
		 * trampoline argument.
		 */
		new_ka.ka_restorer = NULL;
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler), &oact->sa_handler);
		ret |= __put_user(ptr_to_compat(old_ka.sa.sa_restorer), &oact->sa_restorer);
		ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
		ret |= __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
	}

	return ret;
}
569
/* 32-bit rt_sigaction(2).  Converts the compat sigaction (32-bit
 * handler/restorer pointers, u32-word sigset) to the native form,
 * calls do_sigaction(), and converts the old action back.  The extra
 * `restorer` argument supplied by the sparc syscall stub becomes
 * ka_restorer.  The sigset switches fall through deliberately.
 */
asmlinkage long compat_sys_rt_sigaction(int sig,
					struct sigaction32 __user *act,
					struct sigaction32 __user *oact,
					void __user *restorer,
					compat_size_t sigsetsize)
{
	struct k_sigaction new_ka, old_ka;
	int ret;
	compat_sigset_t set32;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(compat_sigset_t))
		return -EINVAL;

	/* All tasks which use RT signals (effectively) use
	 * new style signals.
	 */
	set_thread_flag(TIF_NEWSIGNALS);

	if (act) {
		u32 u_handler, u_restorer;

		new_ka.ka_restorer = restorer;
		ret = get_user(u_handler, &act->sa_handler);
		new_ka.sa.sa_handler =  compat_ptr(u_handler);
		ret |= __copy_from_user(&set32, &act->sa_mask, sizeof(compat_sigset_t));
		switch (_NSIG_WORDS) {
		case 4: new_ka.sa.sa_mask.sig[3] = set32.sig[6] | (((long)set32.sig[7]) << 32);
		case 3: new_ka.sa.sa_mask.sig[2] = set32.sig[4] | (((long)set32.sig[5]) << 32);
		case 2: new_ka.sa.sa_mask.sig[1] = set32.sig[2] | (((long)set32.sig[3]) << 32);
		case 1: new_ka.sa.sa_mask.sig[0] = set32.sig[0] | (((long)set32.sig[1]) << 32);
		}
		ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
		ret |= __get_user(u_restorer, &act->sa_restorer);
		new_ka.sa.sa_restorer = compat_ptr(u_restorer);
                if (ret)
                	return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		switch (_NSIG_WORDS) {
		case 4: set32.sig[7] = (old_ka.sa.sa_mask.sig[3] >> 32); set32.sig[6] = old_ka.sa.sa_mask.sig[3];
		case 3: set32.sig[5] = (old_ka.sa.sa_mask.sig[2] >> 32); set32.sig[4] = old_ka.sa.sa_mask.sig[2];
		case 2: set32.sig[3] = (old_ka.sa.sa_mask.sig[1] >> 32); set32.sig[2] = old_ka.sa.sa_mask.sig[1];
		case 1: set32.sig[1] = (old_ka.sa.sa_mask.sig[0] >> 32); set32.sig[0] = old_ka.sa.sa_mask.sig[0];
		}
		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler), &oact->sa_handler);
		ret |= __copy_to_user(&oact->sa_mask, &set32, sizeof(compat_sigset_t));
		ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
		ret |= __put_user(ptr_to_compat(old_ka.sa.sa_restorer), &oact->sa_restorer);
		if (ret)
			ret = -EFAULT;
        }

        return ret;
}
628
/*
 * sparc32_execve() executes a new program after the asm stub has set
 * things up for us. This should basically do what I want it to.
 */
asmlinkage long sparc32_execve(struct pt_regs *regs)
{
	int error, base = 0;
	char *filename;

	/* User register window flush is done by entry.S */

	/* Check for indirect call: %g1 == 0 means the real arguments
	 * start one input register later.
	 */
	if ((u32)regs->u_regs[UREG_G1] == 0)
		base = 1;

	filename = getname(compat_ptr(regs->u_regs[base + UREG_I0]));
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;

	error = compat_do_execve(filename,
				 compat_ptr(regs->u_regs[base + UREG_I1]),
				 compat_ptr(regs->u_regs[base + UREG_I2]), regs);

	putname(filename);

	if (!error) {
		/* Successful exec: reset FPU state and drop the
		 * ptrace single-step flag for the new image.
		 */
		fprs_write(0);
		current_thread_info()->xfsr[0] = 0;
		current_thread_info()->fpsaved[0] = 0;
		regs->tstate &= ~TSTATE_PEF;
		task_lock(current);
		current->ptrace &= ~PT_DTRACE;
		task_unlock(current);
	}
out:
	return error;
}
667
668#ifdef CONFIG_MODULES
669
670asmlinkage long sys32_init_module(void __user *umod, u32 len,
671 const char __user *uargs)
672{
673 return sys_init_module(umod, len, uargs);
674}
675
676asmlinkage long sys32_delete_module(const char __user *name_user,
677 unsigned int flags)
678{
679 return sys_delete_module(name_user, flags);
680}
681
682#else /* CONFIG_MODULES */
683
684asmlinkage long sys32_init_module(const char __user *name_user,
685 struct module __user *mod_user)
686{
687 return -ENOSYS;
688}
689
690asmlinkage long sys32_delete_module(const char __user *name_user)
691{
692 return -ENOSYS;
693}
694
695#endif /* CONFIG_MODULES */
696
/* Translations due to time_t size differences.  Which affects all
   sorts of things, like timeval and itimerval.  */

extern struct timezone sys_tz;

/* 32-bit gettimeofday(2): fetch the time with do_gettimeofday() and
 * write it out in compat layout; the timezone struct is the same on
 * 32 and 64 bit so it is copied verbatim.
 */
asmlinkage long sys32_gettimeofday(struct compat_timeval __user *tv,
				   struct timezone __user *tz)
{
	if (tv) {
		struct timeval ktv;
		do_gettimeofday(&ktv);
		if (put_tv32(tv, &ktv))
			return -EFAULT;
	}
	if (tz) {
		if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
			return -EFAULT;
	}
	return 0;
}
717
/* Read a compat timeval from userspace and convert it to a kernel
 * timespec (microseconds scaled to nanoseconds).  Returns 0 or
 * -EFAULT.
 */
static inline long get_ts32(struct timespec *o, struct compat_timeval __user *i)
{
	long usec;

	if (!access_ok(VERIFY_READ, i, sizeof(*i)))
		return -EFAULT;
	if (__get_user(o->tv_sec, &i->tv_sec))
		return -EFAULT;
	if (__get_user(usec, &i->tv_usec))
		return -EFAULT;
	o->tv_nsec = usec * 1000;
	return 0;
}
731
/* 32-bit settimeofday(2): convert the compat timeval to a timespec,
 * copy the timezone verbatim, and hand both (each NULL-able) to
 * do_sys_settimeofday().
 */
asmlinkage long sys32_settimeofday(struct compat_timeval __user *tv,
				   struct timezone __user *tz)
{
	struct timespec kts;
	struct timezone ktz;

 	if (tv) {
		if (get_ts32(&kts, tv))
			return -EFAULT;
	}
	if (tz) {
		if (copy_from_user(&ktz, tz, sizeof(ktz)))
			return -EFAULT;
	}

	return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL);
}
749
/* 32-bit utimes(2): fetch the two compat timevals (atime, mtime) and
 * forward to do_utimes(); a NULL tvs means "set to current time".
 */
asmlinkage long sys32_utimes(char __user *filename,
			     struct compat_timeval __user *tvs)
{
	struct timeval ktvs[2];

	if (tvs) {
		if (get_tv32(&ktvs[0], tvs) ||
		    get_tv32(&ktvs[1], 1+tvs))
			return -EFAULT;
	}

	return do_utimes(filename, (tvs ? &ktvs[0] : NULL));
}
763
/* These are here just in case some old sparc32 binary calls it. */
asmlinkage long sys32_pause(void)
{
	/* Mark interruptible before schedule() so a signal wakes us;
	 * -ERESTARTNOHAND means the syscall is restarted only when no
	 * handler was invoked.
	 */
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}
771
/* 64-bit file offsets/lengths arrive from 32-bit userland split into
 * hi/lo register pairs; these wrappers reassemble them and call the
 * native syscalls.
 */
asmlinkage compat_ssize_t sys32_pread64(unsigned int fd,
					char __user *ubuf,
					compat_size_t count,
					unsigned long poshi,
					unsigned long poslo)
{
	return sys_pread64(fd, ubuf, count, (poshi << 32) | poslo);
}

asmlinkage compat_ssize_t sys32_pwrite64(unsigned int fd,
					 char __user *ubuf,
					 compat_size_t count,
					 unsigned long poshi,
					 unsigned long poslo)
{
	return sys_pwrite64(fd, ubuf, count, (poshi << 32) | poslo);
}

asmlinkage long compat_sys_readahead(int fd,
				     unsigned long offhi,
				     unsigned long offlo,
				     compat_size_t count)
{
	return sys_readahead(fd, (offhi << 32) | offlo, count);
}

/* The 32-bit fadvise64 variant still forwards to the 64-bit
 * sys_fadvise64_64 implementation.
 */
long compat_sys_fadvise64(int fd,
			  unsigned long offhi,
			  unsigned long offlo,
			  compat_size_t len, int advice)
{
	return sys_fadvise64_64(fd, (offhi << 32) | offlo, len, advice);
}

long compat_sys_fadvise64_64(int fd,
			     unsigned long offhi, unsigned long offlo,
			     unsigned long lenhi, unsigned long lenlo,
			     int advice)
{
	return sys_fadvise64_64(fd,
				(offhi << 32) | offlo,
				(lenhi << 32) | lenlo,
				advice);
}
816
/* 32-bit sendfile(2): round-trip the 32-bit offset through a kernel
 * off_t (under KERNEL_DS) so the native syscall can update it, then
 * write the new offset back to userspace.
 */
asmlinkage long compat_sys_sendfile(int out_fd, int in_fd,
				    compat_off_t __user *offset,
				    compat_size_t count)
{
	mm_segment_t old_fs = get_fs();
	int ret;
	off_t of;

	if (offset && get_user(of, offset))
		return -EFAULT;

	set_fs(KERNEL_DS);
	ret = sys_sendfile(out_fd, in_fd,
			   offset ? (off_t __user *) &of : NULL,
			   count);
	set_fs(old_fs);

	if (offset && put_user(of, offset))
		return -EFAULT;

	return ret;
}
839
/* 32-bit sendfile64(2): same offset round-trip as compat_sys_sendfile
 * but with a full 64-bit loff_t offset.
 */
asmlinkage long compat_sys_sendfile64(int out_fd, int in_fd,
				      compat_loff_t __user *offset,
				      compat_size_t count)
{
	mm_segment_t old_fs = get_fs();
	int ret;
	loff_t lof;

	if (offset && get_user(lof, offset))
		return -EFAULT;

	set_fs(KERNEL_DS);
	ret = sys_sendfile64(out_fd, in_fd,
			     offset ? (loff_t __user *) &lof : NULL,
			     count);
	set_fs(old_fs);

	if (offset && put_user(lof, offset))
		return -EFAULT;

	return ret;
}
862
/* Handle adjtimex compatibility. */

/* 32-bit mirror of struct timex; the anonymous :32 bitfields pad the
 * tail to match the userland structure size.
 */
struct timex32 {
	u32 modes;
	s32 offset, freq, maxerror, esterror;
	s32 status, constant, precision, tolerance;
	struct compat_timeval time;
	s32 tick;
	s32 ppsfreq, jitter, shift, stabil;
	s32 jitcnt, calcnt, errcnt, stbcnt;
	s32  :32; s32  :32; s32  :32; s32  :32;
	s32  :32; s32  :32; s32  :32; s32  :32;
	s32  :32; s32  :32; s32  :32; s32  :32;
};
877
extern int do_adjtimex(struct timex *);

/* 32-bit adjtimex(2): widen every timex32 field into a zeroed kernel
 * struct timex, call do_adjtimex(), then narrow the (possibly
 * updated) fields back out.  Any uaccess fault yields -EFAULT; a
 * fault on write-back overrides do_adjtimex()'s return value.
 */
asmlinkage long sys32_adjtimex(struct timex32 __user *utp)
{
	struct timex txc;
	int ret;

	memset(&txc, 0, sizeof(struct timex));

	if (get_user(txc.modes, &utp->modes) ||
	    __get_user(txc.offset, &utp->offset) ||
	    __get_user(txc.freq, &utp->freq) ||
	    __get_user(txc.maxerror, &utp->maxerror) ||
	    __get_user(txc.esterror, &utp->esterror) ||
	    __get_user(txc.status, &utp->status) ||
	    __get_user(txc.constant, &utp->constant) ||
	    __get_user(txc.precision, &utp->precision) ||
	    __get_user(txc.tolerance, &utp->tolerance) ||
	    __get_user(txc.time.tv_sec, &utp->time.tv_sec) ||
	    __get_user(txc.time.tv_usec, &utp->time.tv_usec) ||
	    __get_user(txc.tick, &utp->tick) ||
	    __get_user(txc.ppsfreq, &utp->ppsfreq) ||
	    __get_user(txc.jitter, &utp->jitter) ||
	    __get_user(txc.shift, &utp->shift) ||
	    __get_user(txc.stabil, &utp->stabil) ||
	    __get_user(txc.jitcnt, &utp->jitcnt) ||
	    __get_user(txc.calcnt, &utp->calcnt) ||
	    __get_user(txc.errcnt, &utp->errcnt) ||
	    __get_user(txc.stbcnt, &utp->stbcnt))
		return -EFAULT;

	ret = do_adjtimex(&txc);

	if (put_user(txc.modes, &utp->modes) ||
	    __put_user(txc.offset, &utp->offset) ||
	    __put_user(txc.freq, &utp->freq) ||
	    __put_user(txc.maxerror, &utp->maxerror) ||
	    __put_user(txc.esterror, &utp->esterror) ||
	    __put_user(txc.status, &utp->status) ||
	    __put_user(txc.constant, &utp->constant) ||
	    __put_user(txc.precision, &utp->precision) ||
	    __put_user(txc.tolerance, &utp->tolerance) ||
	    __put_user(txc.time.tv_sec, &utp->time.tv_sec) ||
	    __put_user(txc.time.tv_usec, &utp->time.tv_usec) ||
	    __put_user(txc.tick, &utp->tick) ||
	    __put_user(txc.ppsfreq, &utp->ppsfreq) ||
	    __put_user(txc.jitter, &utp->jitter) ||
	    __put_user(txc.shift, &utp->shift) ||
	    __put_user(txc.stabil, &utp->stabil) ||
	    __put_user(txc.jitcnt, &utp->jitcnt) ||
	    __put_user(txc.calcnt, &utp->calcnt) ||
	    __put_user(txc.errcnt, &utp->errcnt) ||
	    __put_user(txc.stbcnt, &utp->stbcnt))
		ret = -EFAULT;

	return ret;
}
935
/* This is just a version for 32-bit applications which does
 * not force O_LARGEFILE on.
 */

asmlinkage long sparc32_open(const char __user *filename,
			     int flags, int mode)
{
	char * tmp;
	int fd, error;

	tmp = getname(filename);
	fd = PTR_ERR(tmp);
	if (!IS_ERR(tmp)) {
		fd = get_unused_fd();
		if (fd >= 0) {
			struct file * f = filp_open(tmp, flags, mode);
			error = PTR_ERR(f);
			/* On open failure release the reserved fd and
			 * return the error from filp_open instead.
			 */
			if (IS_ERR(f))
				goto out_error;
			fd_install(fd, f);
		}
out:
		putname(tmp);
	}
	return fd;

out_error:
	put_unused_fd(fd);
	fd = error;
	goto out;
}
967
extern unsigned long do_mremap(unsigned long addr,
			       unsigned long old_len, unsigned long new_len,
			       unsigned long flags, unsigned long new_addr);

/* 32-bit mremap(2).  0xf0000000UL bounds every check — apparently the
 * top of the 32-bit task's address space on sparc64 (confirm against
 * the 32-bit TASK_SIZE/STACK_TOP definitions).  If growing in place
 * would cross that limit, a fresh area below it is searched with
 * get_unmapped_area() (preserving MAP_SHARED and the backing file)
 * and MREMAP_FIXED is forced so do_mremap() moves the mapping there.
 */
asmlinkage unsigned long sys32_mremap(unsigned long addr,
	unsigned long old_len, unsigned long new_len,
	unsigned long flags, u32 __new_addr)
{
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long new_addr = __new_addr;

	if (old_len > 0xf0000000UL || new_len > 0xf0000000UL)
		goto out;
	if (addr > 0xf0000000UL - old_len)
		goto out;
	down_write(&current->mm->mmap_sem);
	if (flags & MREMAP_FIXED) {
		if (new_addr > 0xf0000000UL - new_len)
			goto out_sem;
	} else if (addr > 0xf0000000UL - new_len) {
		unsigned long map_flags = 0;
		struct file *file = NULL;

		ret = -ENOMEM;
		if (!(flags & MREMAP_MAYMOVE))
			goto out_sem;

		vma = find_vma(current->mm, addr);
		if (vma) {
			if (vma->vm_flags & VM_SHARED)
				map_flags |= MAP_SHARED;
			file = vma->vm_file;
		}

		/* MREMAP_FIXED checked above. */
		new_addr = get_unmapped_area(file, addr, new_len,
		    vma ? vma->vm_pgoff : 0,
		    map_flags);
		ret = new_addr;
		if (new_addr & ~PAGE_MASK)
			goto out_sem;
		flags |= MREMAP_FIXED;
	}
	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
out_sem:
	up_write(&current->mm->mmap_sem);
out:
	return ret;
}
1018
/* 32-bit layout of struct __sysctl_args; all pointers are stored as
 * u32 and widened before use.
 */
struct __sysctl_args32 {
	u32 name;
	int nlen;
	u32 oldval;
	u32 oldlenp;
	u32 newval;
	u32 newlen;
	u32 __unused[4];
};
1028
/* 32-bit _sysctl(2).  do_sysctl() needs a userspace size_t *oldlenp,
 * but the compat caller supplies a u32 length; the 8-byte-aligned
 * args->__unused area in the caller's own struct is borrowed as
 * scratch space to hold a 64-bit size_t in user memory (see the
 * comment below).  The u32 length is round-tripped through it.
 */
asmlinkage long sys32_sysctl(struct __sysctl_args32 __user *args)
{
#ifndef CONFIG_SYSCTL
	return -ENOSYS;
#else
	struct __sysctl_args32 tmp;
	int error;
	size_t oldlen, __user *oldlenp = NULL;
	unsigned long addr = (((unsigned long)&args->__unused[0]) + 7UL) & ~7UL;

	if (copy_from_user(&tmp, args, sizeof(tmp)))
		return -EFAULT;

	if (tmp.oldval && tmp.oldlenp) {
		/* Duh, this is ugly and might not work if sysctl_args
		   is in read-only memory, but do_sysctl does indirectly
		   a lot of uaccess in both directions and we'd have to
		   basically copy the whole sysctl.c here, and
		   glibc's __sysctl uses rw memory for the structure
		   anyway.  */
		if (get_user(oldlen, (u32 __user *)(unsigned long)tmp.oldlenp) ||
		    put_user(oldlen, (size_t __user *)addr))
			return -EFAULT;
		oldlenp = (size_t __user *)addr;
	}

	lock_kernel();
	error = do_sysctl((int __user *)(unsigned long) tmp.name,
			  tmp.nlen,
			  (void __user *)(unsigned long) tmp.oldval,
			  oldlenp,
			  (void __user *)(unsigned long) tmp.newval,
			  tmp.newlen);
	unlock_kernel();
	if (oldlenp) {
		if (!error) {
			/* Propagate the resulting length back into the
			 * caller's u32 and restore the scratch bytes.
			 */
			if (get_user(oldlen, (size_t __user *)addr) ||
			    put_user(oldlen, (u32 __user *)(unsigned long) tmp.oldlenp))
				error = -EFAULT;
		}
		if (copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused)))
			error = -EFAULT;
	}
	return error;
#endif
}
1075
/* 32-bit lookup_dcookie(2): reassemble the 64-bit cookie from its
 * hi/lo halves.
 */
long sys32_lookup_dcookie(unsigned long cookie_high,
			  unsigned long cookie_low,
			  char __user *buf, size_t len)
{
	return sys_lookup_dcookie((cookie_high << 32) | cookie_low,
				  buf, len);
}
1083
extern asmlinkage long
sys_timer_create(clockid_t which_clock,
		 struct sigevent __user *timer_event_spec,
		 timer_t __user *created_timer_id);

/* 32-bit timer_create(2): convert the compat sigevent to native form
 * and call sys_timer_create() on kernel copies under KERNEL_DS; the
 * resulting timer id is written back after an up-front access_ok()
 * check (hence the unchecked __put_user).  A NULL sigevent is passed
 * straight through.
 */
long
sys32_timer_create(u32 clock, struct compat_sigevent __user *se32,
		   timer_t __user *timer_id)
{
	struct sigevent se;
	mm_segment_t oldfs;
	timer_t t;
	long err;

	if (se32 == NULL)
		return sys_timer_create(clock, NULL, timer_id);

	if (get_compat_sigevent(&se, se32))
		return -EFAULT;

	if (!access_ok(VERIFY_WRITE,timer_id,sizeof(timer_t)))
		return -EFAULT;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	err = sys_timer_create(clock,
			(struct sigevent __user *) &se,
			(timer_t __user *) &t);
	set_fs(oldfs);

	if (!err)
		err = __put_user (t, timer_id);

	return err;
}
diff --git a/arch/sparc64/kernel/sys_sunos32.c b/arch/sparc64/kernel/sys_sunos32.c
new file mode 100644
index 000000000000..d0592ed54ea5
--- /dev/null
+++ b/arch/sparc64/kernel/sys_sunos32.c
@@ -0,0 +1,1343 @@
1/* $Id: sys_sunos32.c,v 1.64 2002/02/09 19:49:31 davem Exp $
2 * sys_sunos32.c: SunOS binary compatibility layer on sparc64.
3 *
4 * Copyright (C) 1995, 1996, 1997 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
6 *
7 * Based upon preliminary work which is:
8 *
9 * Copyright (C) 1995 Adrian M. Rodriguez (adrian@remus.rutgers.edu)
10 */
11
12#include <linux/kernel.h>
13#include <linux/sched.h>
14#include <linux/types.h>
15#include <linux/compat.h>
16#include <linux/mman.h>
17#include <linux/mm.h>
18#include <linux/swap.h>
19#include <linux/fs.h>
20#include <linux/file.h>
21#include <linux/resource.h>
22#include <linux/ipc.h>
23#include <linux/shm.h>
24#include <linux/msg.h>
25#include <linux/sem.h>
26#include <linux/signal.h>
27#include <linux/uio.h>
28#include <linux/utsname.h>
29#include <linux/major.h>
30#include <linux/stat.h>
31#include <linux/slab.h>
32#include <linux/pagemap.h>
33#include <linux/errno.h>
34#include <linux/smp.h>
35#include <linux/smp_lock.h>
36#include <linux/syscalls.h>
37
38#include <asm/uaccess.h>
39#include <asm/page.h>
40#include <asm/pgtable.h>
41#include <asm/pconf.h>
42#include <asm/idprom.h> /* for gethostid() */
43#include <asm/unistd.h>
44#include <asm/system.h>
45
46/* For the nfs mount emulation */
47#include <linux/socket.h>
48#include <linux/in.h>
49#include <linux/nfs.h>
50#include <linux/nfs2.h>
51#include <linux/nfs_mount.h>
52
53/* for sunos_select */
54#include <linux/time.h>
55#include <linux/personality.h>
56
57/* For SOCKET_I */
58#include <linux/socket.h>
59#include <net/sock.h>
60#include <net/compat.h>
61
62#define SUNOS_NR_OPEN 256
63
/* SunOS mmap() emulation.
 *
 * Differences from a plain mmap handled here:
 *  - MAP_NORESERVE is not implemented; it is stripped with a
 *    rate-limited warning.
 *  - mapping the char device MEM_MAJOR minor 5 (presumably
 *    /dev/zero -- verify against the device table) is rewritten
 *    as an anonymous mapping.
 *  - without MAP_FIXED the address hint is discarded; with
 *    MAP_FIXED the [addr, addr+len) range must stay below the
 *    0xf0000000 32-bit task limit.
 *  - without the SunOS _MAP_NEW flag a successful mapping returns
 *    0 rather than the mapped address (old-style mmap semantics).
 */
asmlinkage u32 sunos_mmap(u32 addr, u32 len, u32 prot, u32 flags, u32 fd, u32 off)
{
	struct file *file = NULL;
	unsigned long retval, ret_type;

	if (flags & MAP_NORESERVE) {
		static int cnt;
		if (cnt++ < 10)
			printk("%s: unimplemented SunOS MAP_NORESERVE mmap() flag\n",
			       current->comm);
		flags &= ~MAP_NORESERVE;
	}
	retval = -EBADF;
	if (!(flags & MAP_ANONYMOUS)) {
		struct inode * inode;
		if (fd >= SUNOS_NR_OPEN)
			goto out;
		file = fget(fd);
		if (!file)
			goto out;
		inode = file->f_dentry->d_inode;
		if (imajor(inode) == MEM_MAJOR && iminor(inode) == 5) {
			/* Anonymous mapping: drop the file reference. */
			flags |= MAP_ANONYMOUS;
			fput(file);
			file = NULL;
		}
	}

	retval = -EINVAL;
	if (!(flags & MAP_FIXED))
		addr = 0;
	else if (len > 0xf0000000 || addr > 0xf0000000 - len)
		goto out_putf;
	/* Remember whether the caller used new-style mmap before the
	 * private flag bit is cleared for do_mmap(). */
	ret_type = flags & _MAP_NEW;
	flags &= ~_MAP_NEW;

	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
	down_write(&current->mm->mmap_sem);
	retval = do_mmap(file,
			 (unsigned long) addr, (unsigned long) len,
			 (unsigned long) prot, (unsigned long) flags,
			 (unsigned long) off);
	up_write(&current->mm->mmap_sem);
	if (!ret_type)
		/* Old-style mmap: success is reported as 0; addresses
		 * at/above 0xf0000000 are error codes and pass through. */
		retval = ((retval < 0xf0000000) ? 0 : retval);
out_putf:
	if (file)
		fput(file);
out:
	return (u32) retval;
}
115
116asmlinkage int sunos_mctl(u32 addr, u32 len, int function, u32 arg)
117{
118 return 0;
119}
120
/* SunOS brk() emulation: move the data-segment break to 'baddr'.
 * Returns 0 on success, -ENOMEM on failure.  Mirrors the generic
 * brk logic but adds a crude free-memory heuristic before growing.
 */
asmlinkage int sunos_brk(u32 baddr)
{
	int freepages, retval = -ENOMEM;
	unsigned long rlim;
	unsigned long newbrk, oldbrk, brk = (unsigned long) baddr;

	down_write(&current->mm->mmap_sem);
	/* The break may never drop below the end of the text segment. */
	if (brk < current->mm->end_code)
		goto out;
	newbrk = PAGE_ALIGN(brk);
	oldbrk = PAGE_ALIGN(current->mm->brk);
	retval = 0;
	if (oldbrk == newbrk) {
		/* Same page: just record the new break. */
		current->mm->brk = brk;
		goto out;
	}
	/* Always allow shrinking brk. */
	if (brk <= current->mm->brk) {
		current->mm->brk = brk;
		do_munmap(current->mm, newbrk, oldbrk-newbrk);
		goto out;
	}
	/* Check against rlimit and stack.. */
	retval = -ENOMEM;
	rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
	if (rlim >= RLIM_INFINITY)
		rlim = ~0;
	if (brk - current->mm->end_code > rlim)
		goto out;
	/* Check against existing mmap mappings. */
	if (find_vma_intersection(current->mm, oldbrk, newbrk+PAGE_SIZE))
		goto out;
	/* stupid algorithm to decide if we have enough memory: while
	 * simple, it hopefully works in most obvious cases.. Easy to
	 * fool it, but this should catch most mistakes.
	 */
	freepages = get_page_cache_size();
	freepages >>= 1;
	freepages += nr_free_pages();
	freepages += nr_swap_pages;
	freepages -= num_physpages >> 4;
	freepages -= (newbrk-oldbrk) >> PAGE_SHIFT;
	if (freepages < 0)
		goto out;
	/* Ok, we have probably got enough memory - let it rip. */
	current->mm->brk = brk;
	do_brk(oldbrk, newbrk-oldbrk);
	retval = 0;
out:
	up_write(&current->mm->mmap_sem);
	return retval;
}
173
174asmlinkage u32 sunos_sbrk(int increment)
175{
176 int error, oldbrk;
177
178 /* This should do it hopefully... */
179 oldbrk = (int)current->mm->brk;
180 error = sunos_brk(((int) current->mm->brk) + increment);
181 if (!error)
182 error = oldbrk;
183 return error;
184}
185
186asmlinkage u32 sunos_sstk(int increment)
187{
188 printk("%s: Call to sunos_sstk(increment<%d>) is unsupported\n",
189 current->comm, increment);
190
191 return (u32)-1;
192}
193
/* Give hints to the kernel as to what paging strategy to use...
 * Completely bogus, don't remind me.
 */
#define VA_NORMAL     0 /* Normal vm usage expected */
#define VA_ABNORMAL   1 /* Abnormal/random vm usage probable */
#define VA_SEQUENTIAL 2 /* Accesses will be of a sequential nature */
#define VA_INVALIDATE 3 /* Page table entries should be flushed ??? */
/* Printable names for the hints above, indexed by hint value;
 * used only for the log message in sunos_vadvise(). */
static char *vstrings[] = {
	"VA_NORMAL",
	"VA_ABNORMAL",
	"VA_SEQUENTIAL",
	"VA_INVALIDATE",
};
207
208asmlinkage void sunos_vadvise(u32 strategy)
209{
210 static int count;
211
212 /* I wanna see who uses this... */
213 if (count++ < 5)
214 printk("%s: Advises us to use %s paging strategy\n",
215 current->comm,
216 strategy <= 3 ? vstrings[strategy] : "BOGUS");
217}
218
219/* This just wants the soft limit (ie. rlim_cur element) of the RLIMIT_NOFILE
220 * resource limit and is for backwards compatibility with older sunos
221 * revs.
222 */
223asmlinkage int sunos_getdtablesize(void)
224{
225 return SUNOS_NR_OPEN;
226}
227
228
229#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
230
231asmlinkage u32 sunos_sigblock(u32 blk_mask)
232{
233 u32 old;
234
235 spin_lock_irq(&current->sighand->siglock);
236 old = (u32) current->blocked.sig[0];
237 current->blocked.sig[0] |= (blk_mask & _BLOCKABLE);
238 recalc_sigpending();
239 spin_unlock_irq(&current->sighand->siglock);
240 return old;
241}
242
243asmlinkage u32 sunos_sigsetmask(u32 newmask)
244{
245 u32 retval;
246
247 spin_lock_irq(&current->sighand->siglock);
248 retval = (u32) current->blocked.sig[0];
249 current->blocked.sig[0] = (newmask & _BLOCKABLE);
250 recalc_sigpending();
251 spin_unlock_irq(&current->sighand->siglock);
252 return retval;
253}
254
/* SunOS getdents is very similar to the newer Linux (iBCS2 compliant) */
/* getdents system call, the format of the structure just has a different */
/* layout (d_off+d_ino instead of d_ino+d_off) */
struct sunos_dirent {
	s32		d_off;		/* offset of the following entry (back-patched) */
	u32		d_ino;		/* inode number */
	u16		d_reclen;	/* total record length, s32-aligned */
	u16		d_namlen;	/* name length, excluding the NUL */
	char		d_name[1];	/* NUL-terminated name (old-style flexible array) */
};

/* Cursor state shared between sunos_getdents() and its
 * sunos_filldir() callback. */
struct sunos_dirent_callback {
	struct sunos_dirent __user *curr;	/* next free slot in the user buffer */
	struct sunos_dirent __user *previous;	/* last emitted record, for d_off patching */
	int count;				/* bytes still free in the user buffer */
	int error;				/* sticky error reported to the caller */
};

/* Byte offset of d_name within a record; the pointer is used for
 * address arithmetic only and is never dereferenced. */
#define NAME_OFFSET(de) ((int) ((de)->d_name - (char __user *) (de)))
/* Round a record length up to s32 alignment. */
#define ROUND_UP(x) (((x)+sizeof(s32)-1) & ~(sizeof(s32)-1))
275
/* vfs_readdir() callback: emit one SunOS dirent into the user buffer
 * and back-patch the previous record's d_off with this entry's
 * offset.  Individual put_user() return values are deliberately
 * ignored (historical behaviour); only the bulk name copy is checked.
 * Returns 0 on success, -EINVAL when the entry no longer fits,
 * -EFAULT on a faulting name copy.
 */
static int sunos_filldir(void * __buf, const char * name, int namlen,
			 loff_t offset, ino_t ino, unsigned int d_type)
{
	struct sunos_dirent __user *dirent;
	struct sunos_dirent_callback * buf = (struct sunos_dirent_callback *) __buf;
	/* NAME_OFFSET() only does pointer arithmetic on the (still
	 * uninitialized) pointer; it never dereferences it. */
	int reclen = ROUND_UP(NAME_OFFSET(dirent) + namlen + 1);

	buf->error = -EINVAL;	/* only used if we fail.. */
	if (reclen > buf->count)
		return -EINVAL;
	dirent = buf->previous;
	if (dirent)
		put_user(offset, &dirent->d_off);
	dirent = buf->curr;
	buf->previous = dirent;
	put_user(ino, &dirent->d_ino);
	put_user(namlen, &dirent->d_namlen);
	put_user(reclen, &dirent->d_reclen);
	if (copy_to_user(dirent->d_name, name, namlen))
		return -EFAULT;
	put_user(0, dirent->d_name + namlen);
	/* Advance the cursor past the record just written. */
	dirent = (void __user *) dirent + reclen;
	buf->curr = dirent;
	buf->count -= reclen;
	return 0;
}
302
303asmlinkage int sunos_getdents(unsigned int fd, void __user *dirent, int cnt)
304{
305 struct file * file;
306 struct sunos_dirent __user *lastdirent;
307 struct sunos_dirent_callback buf;
308 int error = -EBADF;
309
310 if (fd >= SUNOS_NR_OPEN)
311 goto out;
312
313 file = fget(fd);
314 if (!file)
315 goto out;
316
317 error = -EINVAL;
318 if (cnt < (sizeof(struct sunos_dirent) + 255))
319 goto out_putf;
320
321 buf.curr = (struct sunos_dirent __user *) dirent;
322 buf.previous = NULL;
323 buf.count = cnt;
324 buf.error = 0;
325
326 error = vfs_readdir(file, sunos_filldir, &buf);
327 if (error < 0)
328 goto out_putf;
329
330 lastdirent = buf.previous;
331 error = buf.error;
332 if (lastdirent) {
333 put_user(file->f_pos, &lastdirent->d_off);
334 error = cnt - buf.count;
335 }
336
337out_putf:
338 fput(file);
339out:
340 return error;
341}
342
/* Old sunos getdirentries, severely broken compatibility stuff here. */
struct sunos_direntry {
	u32		d_ino;		/* inode number */
	u16		d_reclen;	/* total record length, s32-aligned */
	u16		d_namlen;	/* name length, excluding the NUL */
	char		d_name[1];	/* NUL-terminated name (old-style flexible array) */
};

/* Cursor state shared between sunos_getdirentries() and its
 * sunos_filldirentry() callback. */
struct sunos_direntry_callback {
	struct sunos_direntry __user *curr;	/* next free slot in the user buffer */
	struct sunos_direntry __user *previous;	/* last emitted record */
	int count;				/* bytes still free in the user buffer */
	int error;				/* sticky error reported to the caller */
};
357
358static int sunos_filldirentry(void * __buf, const char * name, int namlen,
359 loff_t offset, ino_t ino, unsigned int d_type)
360{
361 struct sunos_direntry __user *dirent;
362 struct sunos_direntry_callback * buf =
363 (struct sunos_direntry_callback *) __buf;
364 int reclen = ROUND_UP(NAME_OFFSET(dirent) + namlen + 1);
365
366 buf->error = -EINVAL; /* only used if we fail.. */
367 if (reclen > buf->count)
368 return -EINVAL;
369 dirent = buf->previous;
370 dirent = buf->curr;
371 buf->previous = dirent;
372 put_user(ino, &dirent->d_ino);
373 put_user(namlen, &dirent->d_namlen);
374 put_user(reclen, &dirent->d_reclen);
375 if (copy_to_user(dirent->d_name, name, namlen))
376 return -EFAULT;
377 put_user(0, dirent->d_name + namlen);
378 dirent = (void __user *) dirent + reclen;
379 buf->curr = dirent;
380 buf->count -= reclen;
381 return 0;
382}
383
384asmlinkage int sunos_getdirentries(unsigned int fd,
385 void __user *dirent,
386 int cnt,
387 unsigned int __user *basep)
388{
389 struct file * file;
390 struct sunos_direntry __user *lastdirent;
391 int error = -EBADF;
392 struct sunos_direntry_callback buf;
393
394 if (fd >= SUNOS_NR_OPEN)
395 goto out;
396
397 file = fget(fd);
398 if (!file)
399 goto out;
400
401 error = -EINVAL;
402 if (cnt < (sizeof(struct sunos_direntry) + 255))
403 goto out_putf;
404
405 buf.curr = (struct sunos_direntry __user *) dirent;
406 buf.previous = NULL;
407 buf.count = cnt;
408 buf.error = 0;
409
410 error = vfs_readdir(file, sunos_filldirentry, &buf);
411 if (error < 0)
412 goto out_putf;
413
414 lastdirent = buf.previous;
415 error = buf.error;
416 if (lastdirent) {
417 put_user(file->f_pos, basep);
418 error = cnt - buf.count;
419 }
420
421out_putf:
422 fput(file);
423out:
424 return error;
425}
426
/* SunOS struct utsname: fixed-width fields whose sizes are part of
 * the SunOS ABI (mostly 8 characters + NUL). */
struct sunos_utsname {
	char sname[9];	/* OS name */
	char nname[9];	/* node (host) name */
	char nnext[56];	/* node name continuation */
	char rel[9];	/* release */
	char ver[9];	/* version */
	char mach[9];	/* machine type */
};
435
/* SunOS uname(): copy the Linux utsname fields into the SunOS
 * layout.  All copies are attempted even after a failure (the
 * results are OR-accumulated) and only nname gets an explicit
 * terminating NUL here -- NOTE(review): the other fields rely on
 * the source strings fitting in 8 bytes; verify termination
 * expectations against SunOS callers.
 */
asmlinkage int sunos_uname(struct sunos_utsname __user *name)
{
	int ret;

	down_read(&uts_sem);
	ret = copy_to_user(&name->sname[0], &system_utsname.sysname[0],
			   sizeof(name->sname) - 1);
	ret |= copy_to_user(&name->nname[0], &system_utsname.nodename[0],
			    sizeof(name->nname) - 1);
	ret |= put_user('\0', &name->nname[8]);
	ret |= copy_to_user(&name->rel[0], &system_utsname.release[0],
			    sizeof(name->rel) - 1);
	ret |= copy_to_user(&name->ver[0], &system_utsname.version[0],
			    sizeof(name->ver) - 1);
	ret |= copy_to_user(&name->mach[0], &system_utsname.machine[0],
			    sizeof(name->mach) - 1);
	up_read(&uts_sem);
	return (ret ? -EFAULT : 0);
}
455
/* Handler for unimplemented SunOS syscalls: deliver SIGSYS to the
 * caller with the faulting PC in si_addr and the syscall number
 * (from register %g1) in si_trapno, then return -ENOSYS.  The first
 * few offenders get a register dump logged.
 */
asmlinkage int sunos_nosys(void)
{
	struct pt_regs *regs;
	siginfo_t info;
	static int cnt;

	regs = current_thread_info()->kregs;
	if (test_thread_flag(TIF_32BIT)) {
		/* 32-bit tasks only see the low 32 bits of the PC. */
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGSYS;
	info.si_errno = 0;
	info.si_code = __SI_FAULT|0x100;
	info.si_addr = (void __user *)regs->tpc;
	info.si_trapno = regs->u_regs[UREG_G1];	/* attempted syscall number */
	send_sig_info(SIGSYS, &info, current);
	if (cnt++ < 4) {
		printk("Process makes ni_syscall number %d, register dump:\n",
		       (int) regs->u_regs[UREG_G1]);
		show_regs(regs);
	}
	return -ENOSYS;
}
480
481/* This is not a real and complete implementation yet, just to keep
482 * the easy SunOS binaries happy.
483 */
484asmlinkage int sunos_fpathconf(int fd, int name)
485{
486 int ret;
487
488 switch(name) {
489 case _PCONF_LINK:
490 ret = LINK_MAX;
491 break;
492 case _PCONF_CANON:
493 ret = MAX_CANON;
494 break;
495 case _PCONF_INPUT:
496 ret = MAX_INPUT;
497 break;
498 case _PCONF_NAME:
499 ret = NAME_MAX;
500 break;
501 case _PCONF_PATH:
502 ret = PATH_MAX;
503 break;
504 case _PCONF_PIPE:
505 ret = PIPE_BUF;
506 break;
507 case _PCONF_CHRESTRICT: /* XXX Investigate XXX */
508 ret = 1;
509 break;
510 case _PCONF_NOTRUNC: /* XXX Investigate XXX */
511 case _PCONF_VDISABLE:
512 ret = 0;
513 break;
514 default:
515 ret = -EINVAL;
516 break;
517 }
518 return ret;
519}
520
521asmlinkage int sunos_pathconf(u32 u_path, int name)
522{
523 int ret;
524
525 ret = sunos_fpathconf(0, name); /* XXX cheese XXX */
526 return ret;
527}
528
529asmlinkage int sunos_select(int width, u32 inp, u32 outp, u32 exp, u32 tvp_x)
530{
531 int ret;
532
533 /* SunOS binaries expect that select won't change the tvp contents */
534 ret = compat_sys_select(width, compat_ptr(inp), compat_ptr(outp),
535 compat_ptr(exp), compat_ptr(tvp_x));
536 if (ret == -EINTR && tvp_x) {
537 struct compat_timeval __user *tvp = compat_ptr(tvp_x);
538 time_t sec, usec;
539
540 __get_user(sec, &tvp->tv_sec);
541 __get_user(usec, &tvp->tv_usec);
542 if (sec == 0 && usec == 0)
543 ret = 0;
544 }
545 return ret;
546}
547
548asmlinkage void sunos_nop(void)
549{
550 return;
551}
552
#if 0 /* This code doesn't translate user pointers correctly,
       * disable for now. -DaveM
       */

/* XXXXXXXXXX SunOS mount/umount. XXXXXXXXXXX */
#define SMNT_RDONLY       1
#define SMNT_NOSUID       2
#define SMNT_NEWTYPE      4
#define SMNT_GRPID        8
#define SMNT_REMOUNT      16
#define SMNT_NOSUB        32
#define SMNT_MULTI        64
#define SMNT_SYS5         128

/* Opaque NFS file handle, as SunOS passes it. */
struct sunos_fh_t {
	char fh_data [NFS_FHSIZE];
};

/* SunOS nfs mount arguments.  Note the raw (untranslated 32-bit)
 * user pointers -- the reason this whole block is compiled out. */
struct sunos_nfs_mount_args {
	struct sockaddr_in *addr; /* file server address */
	struct nfs_fh *fh;	/* File handle to be mounted */
	int flags;	/* flags */
	int wsize;	/* write size in bytes */
	int rsize;	/* read size in bytes */
	int timeo;	/* initial timeout in .1 secs */
	int retrans;	/* times to retry send */
	char *hostname;	/* server's hostname */
	int acregmin;	/* attr cache file min secs */
	int acregmax;	/* attr cache file max secs */
	int acdirmin;	/* attr cache dir min secs */
	int acdirmax;	/* attr cache dir max secs */
	char *netname;	/* server's netname */
};
586
587
/* Bind the socket on a local reserved port and connect it to the
 * remote server.  This on Linux/i386 is done by the mount program,
 * not by the kernel.
 *
 * Returns 1 on success, 0 on any failure (caller maps to -ENXIO).
 */
/* XXXXXXXXXXXXXXXXXXXX */
static int
sunos_nfs_get_server_fd (int fd, struct sockaddr_in *addr)
{
	struct sockaddr_in local;
	struct sockaddr_in server;
	int try_port;
	int ret;
	struct socket *socket;
	struct inode *inode;
	struct file *file;

	file = fget(fd);
	if (!file)
		return 0;

	inode = file->f_dentry->d_inode;

	socket = SOCKET_I(inode);
	local.sin_family = AF_INET;
	local.sin_addr.s_addr = INADDR_ANY;

	/* IPPORT_RESERVED = 1024, can't find the definition in the kernel */
	try_port = 1024;
	do {
		/* Walk downwards through the reserved port range
		 * until a bind succeeds or half the range is used. */
		local.sin_port = htons (--try_port);
		ret = socket->ops->bind(socket, (struct sockaddr*)&local,
					sizeof(local));
	} while (ret && try_port > (1024 / 2));

	if (ret) {
		fput(file);
		return 0;
	}

	server.sin_family = AF_INET;
	server.sin_addr = addr->sin_addr;
	server.sin_port = NFS_PORT;

	/* Call sys_connect */
	ret = socket->ops->connect (socket, (struct sockaddr *) &server,
				    sizeof (server), file->f_flags);
	fput(file);
	if (ret < 0)
		return 0;
	return 1;
}
639
/* XXXXXXXXXXXXXXXXXXXX */
/* Return 'value' if non-zero, otherwise the fallback 'def_value'. */
static int get_default (int value, int def_value)
{
	if (value)
		return value;
	else
		return def_value;
}
648
/* XXXXXXXXXXXXXXXXXXXX */
/* Translate SunOS NFS mount arguments into a Linux nfs_mount_data
 * and perform the mount.  NOTE(review): server_fd is not closed on
 * the getname()/get_zeroed_page() failure paths below -- confirm
 * before ever re-enabling this block.
 */
static int sunos_nfs_mount(char *dir_name, int linux_flags, void __user *data)
{
	int server_fd, err;
	char *the_name, *mount_page;
	struct nfs_mount_data linux_nfs_mount;
	struct sunos_nfs_mount_args sunos_mount;

	/* Ok, here comes the fun part: Linux's nfs mount needs a
	 * socket connection to the server, but SunOS mount does not
	 * require this, so we use the information on the destination
	 * address to create a socket and bind it to a reserved
	 * port on this system
	 */
	if (copy_from_user(&sunos_mount, data, sizeof(sunos_mount)))
		return -EFAULT;

	server_fd = sys_socket (AF_INET, SOCK_DGRAM, IPPROTO_UDP);
	if (server_fd < 0)
		return -ENXIO;

	if (copy_from_user(&linux_nfs_mount.addr, sunos_mount.addr,
			   sizeof(*sunos_mount.addr)) ||
	    copy_from_user(&linux_nfs_mount.root, sunos_mount.fh,
			   sizeof(*sunos_mount.fh))) {
		sys_close (server_fd);
		return -EFAULT;
	}

	if (!sunos_nfs_get_server_fd (server_fd, &linux_nfs_mount.addr)){
		sys_close (server_fd);
		return -ENXIO;
	}

	/* Now, bind it to a locally reserved port */
	linux_nfs_mount.version = NFS_MOUNT_VERSION;
	linux_nfs_mount.flags = sunos_mount.flags;
	linux_nfs_mount.fd = server_fd;

	linux_nfs_mount.rsize = get_default (sunos_mount.rsize, 8192);
	linux_nfs_mount.wsize = get_default (sunos_mount.wsize, 8192);
	linux_nfs_mount.timeo = get_default (sunos_mount.timeo, 10);
	linux_nfs_mount.retrans = sunos_mount.retrans;

	linux_nfs_mount.acregmin = sunos_mount.acregmin;
	linux_nfs_mount.acregmax = sunos_mount.acregmax;
	linux_nfs_mount.acdirmin = sunos_mount.acdirmin;
	linux_nfs_mount.acdirmax = sunos_mount.acdirmax;

	the_name = getname(sunos_mount.hostname);
	if (IS_ERR(the_name))
		return PTR_ERR(the_name);

	strlcpy(linux_nfs_mount.hostname, the_name,
		sizeof(linux_nfs_mount.hostname));
	putname (the_name);

	mount_page = (char *) get_zeroed_page(GFP_KERNEL);
	if (!mount_page)
		return -ENOMEM;

	memcpy(mount_page, &linux_nfs_mount, sizeof(linux_nfs_mount));

	err = do_mount("", dir_name, "nfs", linux_flags, mount_page);

	free_page((unsigned long) mount_page);
	return err;
}
717
/* XXXXXXXXXXXXXXXXXXXX */
/* SunOS mount(): translate SMNT_* flags to MS_* and dispatch by
 * filesystem type string.  For disk filesystems 'data' is the
 * device filename; for nfs it is a sunos_nfs_mount_args.
 */
asmlinkage int
sunos_mount(char *type, char *dir, int flags, void *data)
{
	int linux_flags = 0;
	int ret = -EINVAL;
	char *dev_fname = 0;
	char *dir_page, *type_page;

	if (!capable (CAP_SYS_ADMIN))
		return -EPERM;

	/* We don't handle the integer fs type */
	if ((flags & SMNT_NEWTYPE) == 0)
		goto out;

	/* Do not allow for those flags we don't support */
	if (flags & (SMNT_GRPID|SMNT_NOSUB|SMNT_MULTI|SMNT_SYS5))
		goto out;

	if (flags & SMNT_REMOUNT)
		linux_flags |= MS_REMOUNT;
	if (flags & SMNT_RDONLY)
		linux_flags |= MS_RDONLY;
	if (flags & SMNT_NOSUID)
		linux_flags |= MS_NOSUID;

	dir_page = getname(dir);
	ret = PTR_ERR(dir_page);
	if (IS_ERR(dir_page))
		goto out;

	type_page = getname(type);
	ret = PTR_ERR(type_page);
	if (IS_ERR(type_page))
		goto out1;

	if (strcmp(type_page, "ext2") == 0) {
		dev_fname = getname(data);
	} else if (strcmp(type_page, "iso9660") == 0) {
		dev_fname = getname(data);
	} else if (strcmp(type_page, "minix") == 0) {
		dev_fname = getname(data);
	} else if (strcmp(type_page, "nfs") == 0) {
		ret = sunos_nfs_mount (dir_page, flags, data);
		goto out2;
	} else if (strcmp(type_page, "ufs") == 0) {
		printk("Warning: UFS filesystem mounts unsupported.\n");
		ret = -ENODEV;
		goto out2;
	} else if (strcmp(type_page, "proc")) {
		/* Anything else but "proc" is rejected. */
		ret = -ENODEV;
		goto out2;
	}
	ret = PTR_ERR(dev_fname);
	if (IS_ERR(dev_fname))
		goto out2;
	lock_kernel();
	ret = do_mount(dev_fname, dir_page, type_page, linux_flags, NULL);
	unlock_kernel();
	if (dev_fname)
		putname(dev_fname);
out2:
	putname(type_page);
out1:
	putname(dir_page);
out:
	return ret;
}
#endif
788
789asmlinkage int sunos_setpgrp(pid_t pid, pid_t pgid)
790{
791 int ret;
792
793 /* So stupid... */
794 if ((!pid || pid == current->pid) &&
795 !pgid) {
796 sys_setsid();
797 ret = 0;
798 } else {
799 ret = sys_setpgid(pid, pgid);
800 }
801 return ret;
802}
803
804/* So stupid... */
805extern long compat_sys_wait4(compat_pid_t, compat_uint_t __user *, int,
806 struct compat_rusage __user *);
807
808asmlinkage int sunos_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options, struct compat_rusage __user *ru)
809{
810 int ret;
811
812 ret = compat_sys_wait4((pid ? pid : ((compat_pid_t)-1)),
813 stat_addr, options, ru);
814 return ret;
815}
816
817extern int kill_pg(int, int, int);
818asmlinkage int sunos_killpg(int pgrp, int sig)
819{
820 return kill_pg(pgrp, sig, 0);
821}
822
823asmlinkage int sunos_audit(void)
824{
825 printk ("sys_audit\n");
826 return -1;
827}
828
829asmlinkage u32 sunos_gethostid(void)
830{
831 u32 ret;
832
833 ret = (((u32)idprom->id_machtype << 24) | ((u32)idprom->id_sernum));
834
835 return ret;
836}
837
838/* sysconf options, for SunOS compatibility */
839#define _SC_ARG_MAX 1
840#define _SC_CHILD_MAX 2
841#define _SC_CLK_TCK 3
842#define _SC_NGROUPS_MAX 4
843#define _SC_OPEN_MAX 5
844#define _SC_JOB_CONTROL 6
845#define _SC_SAVED_IDS 7
846#define _SC_VERSION 8
847
848asmlinkage s32 sunos_sysconf (int name)
849{
850 s32 ret;
851
852 switch (name){
853 case _SC_ARG_MAX:
854 ret = ARG_MAX;
855 break;
856 case _SC_CHILD_MAX:
857 ret = CHILD_MAX;
858 break;
859 case _SC_CLK_TCK:
860 ret = HZ;
861 break;
862 case _SC_NGROUPS_MAX:
863 ret = NGROUPS_MAX;
864 break;
865 case _SC_OPEN_MAX:
866 ret = OPEN_MAX;
867 break;
868 case _SC_JOB_CONTROL:
869 ret = 1; /* yes, we do support job control */
870 break;
871 case _SC_SAVED_IDS:
872 ret = 1; /* yes, we do support saved uids */
873 break;
874 case _SC_VERSION:
875 /* mhm, POSIX_VERSION is in /usr/include/unistd.h
876 * should it go on /usr/include/linux?
877 */
878 ret = 199009;
879 break;
880 default:
881 ret = -1;
882 break;
883 };
884 return ret;
885}
886
887asmlinkage int sunos_semsys(int op, u32 arg1, u32 arg2, u32 arg3, void __user *ptr)
888{
889 union semun arg4;
890 int ret;
891
892 switch (op) {
893 case 0:
894 /* Most arguments match on a 1:1 basis but cmd doesn't */
895 switch(arg3) {
896 case 4:
897 arg3=GETPID; break;
898 case 5:
899 arg3=GETVAL; break;
900 case 6:
901 arg3=GETALL; break;
902 case 3:
903 arg3=GETNCNT; break;
904 case 7:
905 arg3=GETZCNT; break;
906 case 8:
907 arg3=SETVAL; break;
908 case 9:
909 arg3=SETALL; break;
910 }
911 /* sys_semctl(): */
912 /* value to modify semaphore to */
913 arg4.__pad = ptr;
914 ret = sys_semctl((int)arg1, (int)arg2, (int)arg3, arg4);
915 break;
916 case 1:
917 /* sys_semget(): */
918 ret = sys_semget((key_t)arg1, (int)arg2, (int)arg3);
919 break;
920 case 2:
921 /* sys_semop(): */
922 ret = sys_semop((int)arg1, (struct sembuf __user *)(unsigned long)arg2,
923 (unsigned int) arg3);
924 break;
925 default:
926 ret = -EINVAL;
927 break;
928 };
929 return ret;
930}
931
/* 32-bit message buffer: type tag followed by the payload bytes. */
struct msgbuf32 {
	s32 mtype;
	char mtext[1];
};

/* 32-bit SysV IPC permission block, matching the compat ABI. */
struct ipc_perm32
{
	key_t		key;
	compat_uid_t	uid;
	compat_gid_t	gid;
	compat_uid_t	cuid;
	compat_gid_t	cgid;
	compat_mode_t	mode;
	unsigned short	seq;
};

/* 32-bit msqid_ds layout used by sunos_msqid_get()/_put(). */
struct msqid_ds32
{
	struct ipc_perm32 msg_perm;
	u32 msg_first;		/* opaque in this layer */
	u32 msg_last;		/* opaque in this layer */
	compat_time_t msg_stime;
	compat_time_t msg_rtime;
	compat_time_t msg_ctime;
	u32 wwait;		/* opaque in this layer */
	u32 rwait;		/* opaque in this layer */
	unsigned short msg_cbytes;
	unsigned short msg_qnum;
	unsigned short msg_qbytes;
	compat_ipc_pid_t msg_lspid;
	compat_ipc_pid_t msg_lrpid;
};
964
965static inline int sunos_msqid_get(struct msqid_ds32 __user *user,
966 struct msqid_ds *kern)
967{
968 if (get_user(kern->msg_perm.key, &user->msg_perm.key) ||
969 __get_user(kern->msg_perm.uid, &user->msg_perm.uid) ||
970 __get_user(kern->msg_perm.gid, &user->msg_perm.gid) ||
971 __get_user(kern->msg_perm.cuid, &user->msg_perm.cuid) ||
972 __get_user(kern->msg_perm.cgid, &user->msg_perm.cgid) ||
973 __get_user(kern->msg_stime, &user->msg_stime) ||
974 __get_user(kern->msg_rtime, &user->msg_rtime) ||
975 __get_user(kern->msg_ctime, &user->msg_ctime) ||
976 __get_user(kern->msg_ctime, &user->msg_cbytes) ||
977 __get_user(kern->msg_ctime, &user->msg_qnum) ||
978 __get_user(kern->msg_ctime, &user->msg_qbytes) ||
979 __get_user(kern->msg_ctime, &user->msg_lspid) ||
980 __get_user(kern->msg_ctime, &user->msg_lrpid))
981 return -EFAULT;
982 return 0;
983}
984
985static inline int sunos_msqid_put(struct msqid_ds32 __user *user,
986 struct msqid_ds *kern)
987{
988 if (put_user(kern->msg_perm.key, &user->msg_perm.key) ||
989 __put_user(kern->msg_perm.uid, &user->msg_perm.uid) ||
990 __put_user(kern->msg_perm.gid, &user->msg_perm.gid) ||
991 __put_user(kern->msg_perm.cuid, &user->msg_perm.cuid) ||
992 __put_user(kern->msg_perm.cgid, &user->msg_perm.cgid) ||
993 __put_user(kern->msg_stime, &user->msg_stime) ||
994 __put_user(kern->msg_rtime, &user->msg_rtime) ||
995 __put_user(kern->msg_ctime, &user->msg_ctime) ||
996 __put_user(kern->msg_ctime, &user->msg_cbytes) ||
997 __put_user(kern->msg_ctime, &user->msg_qnum) ||
998 __put_user(kern->msg_ctime, &user->msg_qbytes) ||
999 __put_user(kern->msg_ctime, &user->msg_lspid) ||
1000 __put_user(kern->msg_ctime, &user->msg_lrpid))
1001 return -EFAULT;
1002 return 0;
1003}
1004
1005static inline int sunos_msgbuf_get(struct msgbuf32 __user *user, struct msgbuf *kern, int len)
1006{
1007 if (get_user(kern->mtype, &user->mtype) ||
1008 __copy_from_user(kern->mtext, &user->mtext, len))
1009 return -EFAULT;
1010 return 0;
1011}
1012
1013static inline int sunos_msgbuf_put(struct msgbuf32 __user *user, struct msgbuf *kern, int len)
1014{
1015 if (put_user(kern->mtype, &user->mtype) ||
1016 __copy_to_user(user->mtext, kern->mtext, len))
1017 return -EFAULT;
1018 return 0;
1019}
1020
1021asmlinkage int sunos_msgsys(int op, u32 arg1, u32 arg2, u32 arg3, u32 arg4)
1022{
1023 struct sparc_stackf32 __user *sp;
1024 struct msqid_ds kds;
1025 struct msgbuf *kmbuf;
1026 mm_segment_t old_fs = get_fs();
1027 u32 arg5;
1028 int rval;
1029
1030 switch(op) {
1031 case 0:
1032 rval = sys_msgget((key_t)arg1, (int)arg2);
1033 break;
1034 case 1:
1035 if (!sunos_msqid_get((struct msqid_ds32 __user *)(unsigned long)arg3, &kds)) {
1036 set_fs(KERNEL_DS);
1037 rval = sys_msgctl((int)arg1, (int)arg2,
1038 (struct msqid_ds __user *)(unsigned long)arg3);
1039 set_fs(old_fs);
1040 if (!rval)
1041 rval = sunos_msqid_put((struct msqid_ds32 __user *)(unsigned long)arg3,
1042 &kds);
1043 } else
1044 rval = -EFAULT;
1045 break;
1046 case 2:
1047 rval = -EFAULT;
1048 kmbuf = (struct msgbuf *)kmalloc(sizeof(struct msgbuf) + arg3,
1049 GFP_KERNEL);
1050 if (!kmbuf)
1051 break;
1052 sp = (struct sparc_stackf32 __user *)
1053 (current_thread_info()->kregs->u_regs[UREG_FP] & 0xffffffffUL);
1054 if (get_user(arg5, &sp->xxargs[0])) {
1055 rval = -EFAULT;
1056 kfree(kmbuf);
1057 break;
1058 }
1059 set_fs(KERNEL_DS);
1060 rval = sys_msgrcv((int)arg1, (struct msgbuf __user *) kmbuf,
1061 (size_t)arg3,
1062 (long)arg4, (int)arg5);
1063 set_fs(old_fs);
1064 if (!rval)
1065 rval = sunos_msgbuf_put((struct msgbuf32 __user *)(unsigned long)arg2,
1066 kmbuf, arg3);
1067 kfree(kmbuf);
1068 break;
1069 case 3:
1070 rval = -EFAULT;
1071 kmbuf = (struct msgbuf *)kmalloc(sizeof(struct msgbuf) + arg3,
1072 GFP_KERNEL);
1073 if (!kmbuf || sunos_msgbuf_get((struct msgbuf32 __user *)(unsigned long)arg2,
1074 kmbuf, arg3))
1075 break;
1076 set_fs(KERNEL_DS);
1077 rval = sys_msgsnd((int)arg1, (struct msgbuf __user *) kmbuf,
1078 (size_t)arg3, (int)arg4);
1079 set_fs(old_fs);
1080 kfree(kmbuf);
1081 break;
1082 default:
1083 rval = -EINVAL;
1084 break;
1085 }
1086 return rval;
1087}
1088
/* 32-bit shmid_ds layout used by sunos_shmid_get()/_put(). */
struct shmid_ds32 {
	struct ipc_perm32	shm_perm;
	int			shm_segsz;
	compat_time_t		shm_atime;
	compat_time_t		shm_dtime;
	compat_time_t		shm_ctime;
	compat_ipc_pid_t	shm_cpid;
	compat_ipc_pid_t	shm_lpid;
	unsigned short		shm_nattch;
};
1099
/* Copy a 32-bit shmid_ds in from userspace into a kernel
 * struct shmid_ds.  Returns 0 on success, -EFAULT on any fault.
 */
static inline int sunos_shmid_get(struct shmid_ds32 __user *user,
				  struct shmid_ds *kern)
{
	if (get_user(kern->shm_perm.key, &user->shm_perm.key) ||
	    __get_user(kern->shm_perm.uid, &user->shm_perm.uid) ||
	    __get_user(kern->shm_perm.gid, &user->shm_perm.gid) ||
	    __get_user(kern->shm_perm.cuid, &user->shm_perm.cuid) ||
	    __get_user(kern->shm_perm.cgid, &user->shm_perm.cgid) ||
	    __get_user(kern->shm_segsz, &user->shm_segsz) ||
	    __get_user(kern->shm_atime, &user->shm_atime) ||
	    __get_user(kern->shm_dtime, &user->shm_dtime) ||
	    __get_user(kern->shm_ctime, &user->shm_ctime) ||
	    __get_user(kern->shm_cpid, &user->shm_cpid) ||
	    __get_user(kern->shm_lpid, &user->shm_lpid) ||
	    __get_user(kern->shm_nattch, &user->shm_nattch))
		return -EFAULT;
	return 0;
}
1118
/* Copy a kernel struct shmid_ds back out to a 32-bit userspace
 * shmid_ds.  Returns 0 on success, -EFAULT on any fault.
 */
static inline int sunos_shmid_put(struct shmid_ds32 __user *user,
				  struct shmid_ds *kern)
{
	if (put_user(kern->shm_perm.key, &user->shm_perm.key) ||
	    __put_user(kern->shm_perm.uid, &user->shm_perm.uid) ||
	    __put_user(kern->shm_perm.gid, &user->shm_perm.gid) ||
	    __put_user(kern->shm_perm.cuid, &user->shm_perm.cuid) ||
	    __put_user(kern->shm_perm.cgid, &user->shm_perm.cgid) ||
	    __put_user(kern->shm_segsz, &user->shm_segsz) ||
	    __put_user(kern->shm_atime, &user->shm_atime) ||
	    __put_user(kern->shm_dtime, &user->shm_dtime) ||
	    __put_user(kern->shm_ctime, &user->shm_ctime) ||
	    __put_user(kern->shm_cpid, &user->shm_cpid) ||
	    __put_user(kern->shm_lpid, &user->shm_lpid) ||
	    __put_user(kern->shm_nattch, &user->shm_nattch))
		return -EFAULT;
	return 0;
}
1137
/* Demultiplex the SunOS shmsys() call:
 *   op 0 -> shmat, op 1 -> shmctl (with 32<->64 bit shmid_ds
 *   translation through a kernel copy under KERNEL_DS),
 *   op 2 -> shmdt, op 3 -> shmget.
 */
asmlinkage int sunos_shmsys(int op, u32 arg1, u32 arg2, u32 arg3)
{
	struct shmid_ds ksds;
	unsigned long raddr;
	mm_segment_t old_fs = get_fs();
	int rval;

	switch(op) {
	case 0:
		/* do_shmat(): attach a shared memory area */
		rval = do_shmat((int)arg1,(char __user *)(unsigned long)arg2,(int)arg3,&raddr);
		if (!rval)
			rval = (int) raddr;
		break;
	case 1:
		/* sys_shmctl(): modify shared memory area attr. */
		if (!sunos_shmid_get((struct shmid_ds32 __user *)(unsigned long)arg3, &ksds)) {
			set_fs(KERNEL_DS);
			rval = sys_shmctl((int) arg1,(int) arg2,
					  (struct shmid_ds __user *) &ksds);
			set_fs(old_fs);
			if (!rval)
				rval = sunos_shmid_put((struct shmid_ds32 __user *)(unsigned long)arg3,
						       &ksds);
		} else
			rval = -EFAULT;
		break;
	case 2:
		/* sys_shmdt(): detach a shared memory area */
		rval = sys_shmdt((char __user *)(unsigned long)arg1);
		break;
	case 3:
		/* sys_shmget(): get a shared memory area */
		rval = sys_shmget((key_t)arg1,(int)arg2,(int)arg3);
		break;
	default:
		rval = -EINVAL;
		break;
	};
	return rval;
}
1179
1180extern asmlinkage long sparc32_open(const char __user * filename, int flags, int mode);
1181
1182asmlinkage int sunos_open(u32 fname, int flags, int mode)
1183{
1184 const char __user *filename = compat_ptr(fname);
1185
1186 return sparc32_open(filename, flags, mode);
1187}
1188
1189#define SUNOS_EWOULDBLOCK 35
1190
1191/* see the sunos man page read(2v) for an explanation
1192 of this garbage. We use O_NDELAY to mark
1193 file descriptors that have been set non-blocking
1194 using 4.2BSD style calls. (tridge) */
1195
1196static inline int check_nonblock(int ret, int fd)
1197{
1198 if (ret == -EAGAIN) {
1199 struct file * file = fget(fd);
1200 if (file) {
1201 if (file->f_flags & O_NDELAY)
1202 ret = -SUNOS_EWOULDBLOCK;
1203 fput(file);
1204 }
1205 }
1206 return ret;
1207}
1208
1209asmlinkage int sunos_read(unsigned int fd, char __user *buf, u32 count)
1210{
1211 int ret;
1212
1213 ret = check_nonblock(sys_read(fd, buf, count), fd);
1214 return ret;
1215}
1216
1217asmlinkage int sunos_readv(u32 fd, void __user *vector, s32 count)
1218{
1219 int ret;
1220
1221 ret = check_nonblock(compat_sys_readv(fd, vector, count), fd);
1222 return ret;
1223}
1224
1225asmlinkage int sunos_write(unsigned int fd, char __user *buf, u32 count)
1226{
1227 int ret;
1228
1229 ret = check_nonblock(sys_write(fd, buf, count), fd);
1230 return ret;
1231}
1232
1233asmlinkage int sunos_writev(u32 fd, void __user *vector, s32 count)
1234{
1235 int ret;
1236
1237 ret = check_nonblock(compat_sys_writev(fd, vector, count), fd);
1238 return ret;
1239}
1240
1241asmlinkage int sunos_recv(u32 __fd, void __user *ubuf, int size, unsigned flags)
1242{
1243 int ret, fd = (int) __fd;
1244
1245 ret = check_nonblock(sys_recv(fd, ubuf, size, flags), fd);
1246 return ret;
1247}
1248
1249asmlinkage int sunos_send(u32 __fd, void __user *buff, int len, unsigned flags)
1250{
1251 int ret, fd = (int) __fd;
1252
1253 ret = check_nonblock(sys_send(fd, buff, len, flags), fd);
1254 return ret;
1255}
1256
1257asmlinkage int sunos_accept(u32 __fd, struct sockaddr __user *sa, int __user *addrlen)
1258{
1259 int ret, fd = (int) __fd;
1260
1261 while (1) {
1262 ret = check_nonblock(sys_accept(fd, sa, addrlen), fd);
1263 if (ret != -ENETUNREACH && ret != -EHOSTUNREACH)
1264 break;
1265 }
1266 return ret;
1267}
1268
1269#define SUNOS_SV_INTERRUPT 2
1270
/* SunOS sigaction(2): convert a 32-bit old_sigaction32 to a kernel
 * k_sigaction, install it via do_sigaction(), and copy the previous
 * handler back out in the same 32-bit layout.
 *
 * Returns 0 on success, -EFAULT on a faulting user access, or the
 * error from do_sigaction().
 */
asmlinkage int sunos_sigaction (int sig,
				struct old_sigaction32 __user *act,
				struct old_sigaction32 __user *oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (act) {
		compat_old_sigset_t mask;
		u32 u_handler;

		if (get_user(u_handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags))
			return -EFAULT;
		new_ka.sa.sa_handler = compat_ptr(u_handler);
		/* NOTE(review): return value ignored; on a fault, mask is
		 * left uninitialized here — verify this is intentional.
		 */
		__get_user(mask, &act->sa_mask);
		new_ka.sa.sa_restorer = NULL;
		new_ka.ka_restorer = NULL;
		siginitset(&new_ka.sa.sa_mask, mask);
		/* Flip bit 2 — presumably converts between the SunOS
		 * SV_INTERRUPT flag and the opposite-sense Linux restart
		 * semantics; confirm against the sparc SA_* values.
		 */
		new_ka.sa.sa_flags ^= SUNOS_SV_INTERRUPT;
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		/* Flip the same bit back for the value reported to user. */
		old_ka.sa.sa_flags ^= SUNOS_SV_INTERRUPT;
		if (put_user(ptr_to_compat(old_ka.sa.sa_handler), &oact->sa_handler) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags))
			return -EFAULT;
		/* NOTE(review): result of this store is ignored, unlike the
		 * two above — a fault here goes unreported.
		 */
		__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
	}

	return ret;
}
1305
1306asmlinkage int sunos_setsockopt(u32 __fd, u32 __level, u32 __optname,
1307 char __user *optval, u32 __optlen)
1308{
1309 int fd = (int) __fd;
1310 int level = (int) __level;
1311 int optname = (int) __optname;
1312 int optlen = (int) __optlen;
1313 int tr_opt = optname;
1314 int ret;
1315
1316 if (level == SOL_IP) {
1317 /* Multicast socketopts (ttl, membership) */
1318 if (tr_opt >=2 && tr_opt <= 6)
1319 tr_opt += 30;
1320 }
1321 ret = sys_setsockopt(fd, level, tr_opt,
1322 optval, optlen);
1323 return ret;
1324}
1325
1326asmlinkage int sunos_getsockopt(u32 __fd, u32 __level, u32 __optname,
1327 char __user *optval, int __user *optlen)
1328{
1329 int fd = (int) __fd;
1330 int level = (int) __level;
1331 int optname = (int) __optname;
1332 int tr_opt = optname;
1333 int ret;
1334
1335 if (level == SOL_IP) {
1336 /* Multicast socketopts (ttl, membership) */
1337 if (tr_opt >=2 && tr_opt <= 6)
1338 tr_opt += 30;
1339 }
1340 ret = compat_sys_getsockopt(fd, level, tr_opt,
1341 optval, optlen);
1342 return ret;
1343}
diff --git a/arch/sparc64/kernel/systbls.S b/arch/sparc64/kernel/systbls.S
new file mode 100644
index 000000000000..48170f77fff1
--- /dev/null
+++ b/arch/sparc64/kernel/systbls.S
@@ -0,0 +1,251 @@
1/* $Id: systbls.S,v 1.81 2002/02/08 03:57:14 davem Exp $
2 * systbls.S: System call entry point tables for OS compatibility.
3 * The native Linux system call table lives here also.
4 *
5 * Copyright (C) 1995, 1996 David S. Miller (davem@caip.rutgers.edu)
6 * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
7 *
8 * Based upon preliminary work which is:
9 *
10 * Copyright (C) 1995 Adrian M. Rodriguez (adrian@remus.rutgers.edu)
11 */
12
13#include <linux/config.h>
14
15 .text
16 .align 4
17
18#ifdef CONFIG_COMPAT
19 /* First, the 32-bit Linux native syscall table. */
20
	/* 32-bit Linux native syscall table.  Entry order IS the ABI:
	 * slot N dispatches syscall number N, five entries per .word
	 * line, numbered by the interspersed comments.  Do not reorder.
	 */
	.globl sys_call_table32
sys_call_table32:
/*0*/	.word sys_restart_syscall, sys32_exit, sys_fork, sys_read, sys_write
/*5*/	.word sys32_open, sys_close, sys32_wait4, sys32_creat, sys_link
/*10*/	.word sys_unlink, sunos_execv, sys_chdir, sys32_chown16, sys32_mknod
/*15*/	.word sys_chmod, sys32_lchown16, sparc_brk, sys32_perfctr, sys32_lseek
/*20*/	.word sys_getpid, sys_capget, sys_capset, sys32_setuid16, sys32_getuid16
/*25*/	.word compat_sys_time, sys_ptrace, sys_alarm, sys32_sigaltstack, sys32_pause
/*30*/	.word compat_sys_utime, sys_lchown, sys_fchown, sys32_access, sys32_nice
	.word sys_chown, sys_sync, sys32_kill, compat_sys_newstat, sys32_sendfile
/*40*/	.word compat_sys_newlstat, sys_dup, sys_pipe, compat_sys_times, sys_getuid
	.word sys32_umount, sys32_setgid16, sys32_getgid16, sys32_signal, sys32_geteuid16
/*50*/	.word sys32_getegid16, sys_acct, sys_nis_syscall, sys_getgid, compat_sys_ioctl
	.word sys32_reboot, sys32_mmap2, sys_symlink, sys32_readlink, sys32_execve
/*60*/	.word sys32_umask, sys_chroot, compat_sys_newfstat, sys_fstat64, sys_getpagesize
	.word sys32_msync, sys_vfork, sys32_pread64, sys32_pwrite64, sys_geteuid
/*70*/	.word sys_getegid, sys_mmap, sys_setreuid, sys_munmap, sys_mprotect
	.word sys_madvise, sys_vhangup, sys32_truncate64, sys_mincore, sys32_getgroups16
/*80*/	.word sys32_setgroups16, sys_getpgrp, sys32_setgroups, sys32_setitimer, sys32_ftruncate64
	.word sys32_swapon, sys32_getitimer, sys_setuid, sys32_sethostname, sys_setgid
/*90*/	.word sys_dup2, sys_setfsuid, compat_sys_fcntl, sys32_select, sys_setfsgid
	.word sys_fsync, sys32_setpriority, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall
/*100*/	.word sys32_getpriority, sys32_rt_sigreturn, sys32_rt_sigaction, sys32_rt_sigprocmask, sys32_rt_sigpending
	.word compat_sys_rt_sigtimedwait, sys32_rt_sigqueueinfo, sys32_rt_sigsuspend, sys_setresuid, sys_getresuid
/*110*/	.word sys_setresgid, sys_getresgid, sys_setregid, sys_nis_syscall, sys_nis_syscall
	.word sys32_getgroups, sys32_gettimeofday, sys32_getrusage, sys_nis_syscall, sys_getcwd
/*120*/	.word compat_sys_readv, compat_sys_writev, sys32_settimeofday, sys32_fchown16, sys_fchmod
	.word sys_nis_syscall, sys32_setreuid16, sys32_setregid16, sys_rename, sys_truncate
/*130*/	.word sys_ftruncate, sys_flock, sys_lstat64, sys_nis_syscall, sys_nis_syscall
	.word sys_nis_syscall, sys32_mkdir, sys_rmdir, sys32_utimes, sys_stat64
/*140*/	.word sys32_sendfile64, sys_nis_syscall, sys32_futex, sys_gettid, compat_sys_getrlimit
	.word compat_sys_setrlimit, sys_pivot_root, sys32_prctl, sys_pciconfig_read, sys_pciconfig_write
/*150*/	.word sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_poll, sys_getdents64
	.word compat_sys_fcntl64, sys_ni_syscall, compat_sys_statfs, compat_sys_fstatfs, sys_oldumount
/*160*/	.word compat_sys_sched_setaffinity, compat_sys_sched_getaffinity, sys32_getdomainname, sys32_setdomainname, sys_nis_syscall
	.word sys_quotactl, sys_set_tid_address, compat_sys_mount, sys_ustat, sys32_setxattr
/*170*/	.word sys32_lsetxattr, sys32_fsetxattr, sys_getxattr, sys_lgetxattr, compat_sys_getdents
	.word sys_setsid, sys_fchdir, sys32_fgetxattr, sys_listxattr, sys_llistxattr
/*180*/	.word sys32_flistxattr, sys_removexattr, sys_lremovexattr, compat_sys_sigpending, sys_ni_syscall
	.word sys32_setpgid, sys32_fremovexattr, sys32_tkill, sys32_exit_group, sparc64_newuname
/*190*/	.word sys32_init_module, sparc64_personality, sys_remap_file_pages, sys32_epoll_create, sys32_epoll_ctl
	.word sys32_epoll_wait, sys_nis_syscall, sys_getppid, sys32_sigaction, sys_sgetmask
/*200*/	.word sys32_ssetmask, sys_sigsuspend, compat_sys_newlstat, sys_uselib, compat_sys_old_readdir
	.word sys32_readahead, sys32_socketcall, sys32_syslog, sys32_lookup_dcookie, sys32_fadvise64
/*210*/	.word sys32_fadvise64_64, sys32_tgkill, sys32_waitpid, sys_swapoff, sys32_sysinfo
	.word sys32_ipc, sys32_sigreturn, sys_clone, sys_nis_syscall, sys32_adjtimex
/*220*/	.word sys32_sigprocmask, sys_ni_syscall, sys32_delete_module, sys_ni_syscall, sys32_getpgid
	.word sys32_bdflush, sys32_sysfs, sys_nis_syscall, sys32_setfsuid16, sys32_setfsgid16
/*230*/	.word sys32_select, compat_sys_time, sys_nis_syscall, compat_sys_stime, compat_sys_statfs64
	.word compat_sys_fstatfs64, sys_llseek, sys_mlock, sys_munlock, sys32_mlockall
/*240*/	.word sys_munlockall, sys32_sched_setparam, sys32_sched_getparam, sys32_sched_setscheduler, sys32_sched_getscheduler
	.word sys_sched_yield, sys32_sched_get_priority_max, sys32_sched_get_priority_min, sys32_sched_rr_get_interval, compat_sys_nanosleep
/*250*/	.word sys32_mremap, sys32_sysctl, sys32_getsid, sys_fdatasync, sys32_nfsservctl
	.word sys_ni_syscall, sys32_clock_settime, compat_sys_clock_gettime, compat_sys_clock_getres, sys32_clock_nanosleep
/*260*/	.word compat_sys_sched_getaffinity, compat_sys_sched_setaffinity, sys32_timer_settime, compat_sys_timer_gettime, sys_timer_getoverrun
	.word sys_timer_delete, sys32_timer_create, sys_ni_syscall, compat_sys_io_setup, sys_io_destroy
/*270*/	.word sys32_io_submit, sys_io_cancel, compat_sys_io_getevents, sys32_mq_open, sys_mq_unlink
	.word sys_mq_timedsend, sys_mq_timedreceive, compat_sys_mq_notify, compat_sys_mq_getsetattr, compat_sys_waitid
/*280*/	.word sys_ni_syscall, sys_add_key, sys_request_key, sys_keyctl
81#endif /* CONFIG_COMPAT */
82
83 /* Now the 64-bit native Linux syscall table. */
84
	/* 64-bit native Linux syscall table.  sys_call_table is an alias
	 * for sys_call_table64; slot order is the ABI — do not reorder.
	 */
	.align 4
	.globl sys_call_table64, sys_call_table
sys_call_table64:
sys_call_table:
/*0*/	.word sys_restart_syscall, sparc_exit, sys_fork, sys_read, sys_write
/*5*/	.word sys_open, sys_close, sys_wait4, sys_creat, sys_link
/*10*/	.word sys_unlink, sys_nis_syscall, sys_chdir, sys_chown, sys_mknod
/*15*/	.word sys_chmod, sys_lchown, sparc_brk, sys_perfctr, sys_lseek
/*20*/	.word sys_getpid, sys_capget, sys_capset, sys_setuid, sys_getuid
/*25*/	.word sys_nis_syscall, sys_ptrace, sys_alarm, sys_sigaltstack, sys_nis_syscall
/*30*/	.word sys_utime, sys_nis_syscall, sys_nis_syscall, sys_access, sys_nice
	.word sys_nis_syscall, sys_sync, sys_kill, sys_newstat, sys_sendfile64
/*40*/	.word sys_newlstat, sys_dup, sys_pipe, sys_times, sys_nis_syscall
	.word sys_umount, sys_setgid, sys_getgid, sys_signal, sys_geteuid
/*50*/	.word sys_getegid, sys_acct, sys_memory_ordering, sys_nis_syscall, sys_ioctl
	.word sys_reboot, sys_nis_syscall, sys_symlink, sys_readlink, sys_execve
/*60*/	.word sys_umask, sys_chroot, sys_newfstat, sys_nis_syscall, sys_getpagesize
	.word sys_msync, sys_vfork, sys_pread64, sys_pwrite64, sys_nis_syscall
/*70*/	.word sys_nis_syscall, sys_mmap, sys_nis_syscall, sys64_munmap, sys_mprotect
	.word sys_madvise, sys_vhangup, sys_nis_syscall, sys_mincore, sys_getgroups
/*80*/	.word sys_setgroups, sys_getpgrp, sys_nis_syscall, sys_setitimer, sys_nis_syscall
	.word sys_swapon, sys_getitimer, sys_nis_syscall, sys_sethostname, sys_nis_syscall
/*90*/	.word sys_dup2, sys_nis_syscall, sys_fcntl, sys_select, sys_nis_syscall
	.word sys_fsync, sys_setpriority, sys_socket, sys_connect, sys_accept
/*100*/	.word sys_getpriority, sys_rt_sigreturn, sys_rt_sigaction, sys_rt_sigprocmask, sys_rt_sigpending
	.word sys_rt_sigtimedwait, sys_rt_sigqueueinfo, sys_rt_sigsuspend, sys_setresuid, sys_getresuid
/*110*/	.word sys_setresgid, sys_getresgid, sys_nis_syscall, sys_recvmsg, sys_sendmsg
	.word sys_nis_syscall, sys_gettimeofday, sys_getrusage, sys_getsockopt, sys_getcwd
/*120*/	.word sys_readv, sys_writev, sys_settimeofday, sys_fchown, sys_fchmod
	.word sys_recvfrom, sys_setreuid, sys_setregid, sys_rename, sys_truncate
/*130*/	.word sys_ftruncate, sys_flock, sys_nis_syscall, sys_sendto, sys_shutdown
	.word sys_socketpair, sys_mkdir, sys_rmdir, sys_utimes, sys_nis_syscall
/*140*/	.word sys_sendfile64, sys_getpeername, sys_futex, sys_gettid, sys_getrlimit
	.word sys_setrlimit, sys_pivot_root, sys_prctl, sys_pciconfig_read, sys_pciconfig_write
/*150*/	.word sys_getsockname, sys_nis_syscall, sys_nis_syscall, sys_poll, sys_getdents64
	.word sys_nis_syscall, sys_ni_syscall, sys_statfs, sys_fstatfs, sys_oldumount
/*160*/	.word sys_sched_setaffinity, sys_sched_getaffinity, sys_getdomainname, sys_setdomainname, sys_utrap_install
	.word sys_quotactl, sys_set_tid_address, sys_mount, sys_ustat, sys_setxattr
/*170*/	.word sys_lsetxattr, sys_fsetxattr, sys_getxattr, sys_lgetxattr, sys_getdents
	.word sys_setsid, sys_fchdir, sys_fgetxattr, sys_listxattr, sys_llistxattr
/*180*/	.word sys_flistxattr, sys_removexattr, sys_lremovexattr, sys_nis_syscall, sys_ni_syscall
	.word sys_setpgid, sys_fremovexattr, sys_tkill, sys_exit_group, sparc64_newuname
/*190*/	.word sys_init_module, sparc64_personality, sys_remap_file_pages, sys_epoll_create, sys_epoll_ctl
	.word sys_epoll_wait, sys_nis_syscall, sys_getppid, sys_nis_syscall, sys_sgetmask
/*200*/	.word sys_ssetmask, sys_nis_syscall, sys_newlstat, sys_uselib, sys_nis_syscall
	.word sys_readahead, sys_socketcall, sys_syslog, sys_lookup_dcookie, sys_fadvise64
/*210*/	.word sys_fadvise64_64, sys_tgkill, sys_waitpid, sys_swapoff, sys_sysinfo
	.word sys_ipc, sys_nis_syscall, sys_clone, sys_nis_syscall, sys_adjtimex
/*220*/	.word sys_nis_syscall, sys_ni_syscall, sys_delete_module, sys_ni_syscall, sys_getpgid
	.word sys_bdflush, sys_sysfs, sys_nis_syscall, sys_setfsuid, sys_setfsgid
/*230*/	.word sys_select, sys_nis_syscall, sys_nis_syscall, sys_stime, sys_statfs64
	.word sys_fstatfs64, sys_llseek, sys_mlock, sys_munlock, sys_mlockall
/*240*/	.word sys_munlockall, sys_sched_setparam, sys_sched_getparam, sys_sched_setscheduler, sys_sched_getscheduler
	.word sys_sched_yield, sys_sched_get_priority_max, sys_sched_get_priority_min, sys_sched_rr_get_interval, sys_nanosleep
/*250*/	.word sys64_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_nfsservctl
	.word sys_ni_syscall, sys_clock_settime, sys_clock_gettime, sys_clock_getres, sys_clock_nanosleep
/*260*/	.word sys_sched_getaffinity, sys_sched_setaffinity, sys_timer_settime, sys_timer_gettime, sys_timer_getoverrun
	.word sys_timer_delete, sys_timer_create, sys_ni_syscall, sys_io_setup, sys_io_destroy
/*270*/	.word sys_io_submit, sys_io_cancel, sys_io_getevents, sys_mq_open, sys_mq_unlink
	.word sys_mq_timedsend, sys_mq_timedreceive, sys_mq_notify, sys_mq_getsetattr, sys_waitid
/*280*/	.word sys_nis_syscall, sys_add_key, sys_request_key, sys_keyctl
146
147#if defined(CONFIG_SUNOS_EMUL) || defined(CONFIG_SOLARIS_EMUL) || \
148 defined(CONFIG_SOLARIS_EMUL_MODULE)
149 /* Now the 32-bit SunOS syscall table. */
150
	/* 32-bit SunOS emulation syscall table: three entries per line,
	 * slot order is the SunOS ABI.  Unimplemented slots point at
	 * sunos_nosys.  Do not reorder entries.
	 */
	.align 4
	.globl sunos_sys_table
sunos_sys_table:
/*0*/	.word sunos_indir, sys32_exit, sys_fork
	.word sunos_read, sunos_write, sunos_open
	.word sys_close, sunos_wait4, sys_creat
	.word sys_link, sys_unlink, sunos_execv
	.word sys_chdir, sunos_nosys, sys32_mknod
	.word sys_chmod, sys32_lchown16, sunos_brk
	.word sunos_nosys, sys32_lseek, sunos_getpid
	.word sunos_nosys, sunos_nosys, sunos_nosys
	.word sunos_getuid, sunos_nosys, sys_ptrace
	.word sunos_nosys, sunos_nosys, sunos_nosys
	.word sunos_nosys, sunos_nosys, sunos_nosys
	.word sys_access, sunos_nosys, sunos_nosys
	.word sys_sync, sys_kill, compat_sys_newstat
	.word sunos_nosys, compat_sys_newlstat, sys_dup
	.word sys_pipe, sunos_nosys, sunos_nosys
	.word sunos_nosys, sunos_nosys, sunos_getgid
	.word sunos_nosys, sunos_nosys
/*50*/	.word sunos_nosys, sys_acct, sunos_nosys
	.word sunos_mctl, sunos_ioctl, sys_reboot
	.word sunos_nosys, sys_symlink, sys_readlink
	.word sys32_execve, sys_umask, sys_chroot
	.word compat_sys_newfstat, sunos_nosys, sys_getpagesize
	.word sys_msync, sys_vfork, sunos_nosys
	.word sunos_nosys, sunos_sbrk, sunos_sstk
	.word sunos_mmap, sunos_vadvise, sys_munmap
	.word sys_mprotect, sys_madvise, sys_vhangup
	.word sunos_nosys, sys_mincore, sys32_getgroups16
	.word sys32_setgroups16, sys_getpgrp, sunos_setpgrp
	.word compat_sys_setitimer, sunos_nosys, sys_swapon
	.word compat_sys_getitimer, sys_gethostname, sys_sethostname
	.word sunos_getdtablesize, sys_dup2, sunos_nop
	.word compat_sys_fcntl, sunos_select, sunos_nop
	.word sys_fsync, sys32_setpriority, sys32_socket
	.word sys32_connect, sunos_accept
/*100*/	.word sys_getpriority, sunos_send, sunos_recv
	.word sunos_nosys, sys32_bind, sunos_setsockopt
	.word sys32_listen, sunos_nosys, sunos_sigaction
	.word sunos_sigblock, sunos_sigsetmask, sys_sigpause
	.word sys32_sigstack, sys32_recvmsg, sys32_sendmsg
	.word sunos_nosys, sys32_gettimeofday, compat_sys_getrusage
	.word sunos_getsockopt, sunos_nosys, sunos_readv
	.word sunos_writev, sys32_settimeofday, sys32_fchown16
	.word sys_fchmod, sys32_recvfrom, sys32_setreuid16
	.word sys32_setregid16, sys_rename, sys_truncate
	.word sys_ftruncate, sys_flock, sunos_nosys
	.word sys32_sendto, sys32_shutdown, sys32_socketpair
	.word sys_mkdir, sys_rmdir, sys32_utimes
	.word sys32_sigreturn, sunos_nosys, sys32_getpeername
	.word sunos_gethostid, sunos_nosys, compat_sys_getrlimit
	.word compat_sys_setrlimit, sunos_killpg, sunos_nosys
	.word sunos_nosys, sunos_nosys
/*150*/	.word sys32_getsockname, sunos_nosys, sunos_nosys
	.word sys_poll, sunos_nosys, sunos_nosys
	.word sunos_getdirentries, compat_sys_statfs, compat_sys_fstatfs
	.word sys_oldumount, sunos_nosys, sunos_nosys
	.word sys_getdomainname, sys_setdomainname
	.word sunos_nosys, sys_quotactl, sunos_nosys
	.word sunos_nosys, sys_ustat, sunos_semsys
	.word sunos_nosys, sunos_shmsys, sunos_audit
	.word sunos_nosys, sunos_getdents, sys_setsid
	.word sys_fchdir, sunos_nosys, sunos_nosys
	.word sunos_nosys, sunos_nosys, sunos_nosys
	.word sunos_nosys, compat_sys_sigpending, sunos_nosys
	.word sys_setpgid, sunos_pathconf, sunos_fpathconf
	.word sunos_sysconf, sunos_uname, sunos_nosys
	.word sunos_nosys, sunos_nosys, sunos_nosys
	.word sunos_nosys, sunos_nosys, sunos_nosys
	.word sunos_nosys, sunos_nosys, sunos_nosys
/*200*/	.word sunos_nosys, sunos_nosys, sunos_nosys
	.word sunos_nosys, sunos_nosys, sunos_nosys
	.word sunos_nosys, sunos_nosys, sunos_nosys
	.word sunos_nosys, sunos_nosys, sunos_nosys
	.word sunos_nosys, sunos_nosys, sunos_nosys
	.word sunos_nosys, sunos_nosys, sunos_nosys
	.word sunos_nosys, sunos_nosys, sunos_nosys
	.word sunos_nosys, sunos_nosys, sunos_nosys
	.word sunos_nosys, sunos_nosys, sunos_nosys
	.word sunos_nosys, sunos_nosys, sunos_nosys
	.word sunos_nosys, sunos_nosys, sunos_nosys
	.word sunos_nosys, sunos_nosys, sunos_nosys
	.word sunos_nosys, sunos_nosys, sunos_nosys
	.word sunos_nosys, sunos_nosys, sunos_nosys
	.word sunos_nosys, sunos_nosys, sunos_nosys
	.word sunos_nosys, sunos_nosys, sunos_nosys
	.word sunos_nosys, sunos_nosys
/*250*/	.word sunos_nosys, sunos_nosys, sunos_nosys
	.word sunos_nosys, sunos_nosys, sunos_nosys
	.word sunos_nosys, sunos_nosys, sunos_nosys
	.word sunos_nosys, sunos_nosys, sunos_nosys
	.word sunos_nosys, sunos_nosys, sunos_nosys
	.word sunos_nosys, sunos_nosys, sunos_nosys
	.word sunos_nosys, sunos_nosys, sunos_nosys
	.word sunos_nosys, sunos_nosys, sunos_nosys
	.word sunos_nosys, sunos_nosys, sunos_nosys
	.word sunos_nosys, sunos_nosys, sunos_nosys
	.word sunos_nosys, sunos_nosys, sunos_nosys
	.word sunos_nosys
251#endif
diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c
new file mode 100644
index 000000000000..6a717d4d2bc5
--- /dev/null
+++ b/arch/sparc64/kernel/time.c
@@ -0,0 +1,1195 @@
1/* $Id: time.c,v 1.42 2002/01/23 14:33:55 davem Exp $
2 * time.c: UltraSparc timer and TOD clock support.
3 *
4 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
6 *
7 * Based largely on code which is:
8 *
9 * Copyright (C) 1996 Thomas K. Dyas (tdyas@eden.rutgers.edu)
10 */
11
12#include <linux/config.h>
13#include <linux/errno.h>
14#include <linux/module.h>
15#include <linux/sched.h>
16#include <linux/kernel.h>
17#include <linux/param.h>
18#include <linux/string.h>
19#include <linux/mm.h>
20#include <linux/interrupt.h>
21#include <linux/time.h>
22#include <linux/timex.h>
23#include <linux/init.h>
24#include <linux/ioport.h>
25#include <linux/mc146818rtc.h>
26#include <linux/delay.h>
27#include <linux/profile.h>
28#include <linux/bcd.h>
29#include <linux/jiffies.h>
30#include <linux/cpufreq.h>
31#include <linux/percpu.h>
32#include <linux/profile.h>
33
34#include <asm/oplib.h>
35#include <asm/mostek.h>
36#include <asm/timer.h>
37#include <asm/irq.h>
38#include <asm/io.h>
39#include <asm/sbus.h>
40#include <asm/fhc.h>
41#include <asm/pbm.h>
42#include <asm/ebus.h>
43#include <asm/isa.h>
44#include <asm/starfire.h>
45#include <asm/smp.h>
46#include <asm/sections.h>
47#include <asm/cpudata.h>
48
49DEFINE_SPINLOCK(mostek_lock);
50DEFINE_SPINLOCK(rtc_lock);
51unsigned long mstk48t02_regs = 0UL;
52#ifdef CONFIG_PCI
53unsigned long ds1287_regs = 0UL;
54#endif
55
56extern unsigned long wall_jiffies;
57
58u64 jiffies_64 = INITIAL_JIFFIES;
59
60EXPORT_SYMBOL(jiffies_64);
61
62static unsigned long mstk48t08_regs = 0UL;
63static unsigned long mstk48t59_regs = 0UL;
64
65static int set_rtc_mmss(unsigned long);
66
/* Placeholder ->get_tick used before the real tick source is probed;
 * always reports tick value zero.  __init: discarded after boot.
 */
static __init unsigned long dummy_get_tick(void)
{
	return 0;
}
71
/* Boot-time tick-ops stub; only ->get_tick is populated. */
static __initdata struct sparc64_tick_ops dummy_tick_ops = {
	.get_tick	= dummy_get_tick,
};
75
76struct sparc64_tick_ops *tick_ops = &dummy_tick_ops;
77
78#define TICK_PRIV_BIT (1UL << 63)
79
80#ifdef CONFIG_SMP
81unsigned long profile_pc(struct pt_regs *regs)
82{
83 unsigned long pc = instruction_pointer(regs);
84
85 if (in_lock_functions(pc))
86 return regs->u_regs[UREG_RETPC];
87 return pc;
88}
89EXPORT_SYMBOL(profile_pc);
90#endif
91
/* Clear the %tick privileged bit so userland can read the tick
 * register for profiling.  Entire write sequence sits on one I-cache
 * line (.align 64) and is followed by a dummy read back of %tick,
 * working around the BB_ERRATA_1 Blackbird bug.
 */
static void tick_disable_protection(void)
{
	/* Set things up so user can access tick register for profiling
	 * purposes.  Also workaround BB_ERRATA_1 by doing a dummy
	 * read back of %tick after writing it.
	 */
	__asm__ __volatile__(
	"	ba,pt	%%xcc, 1f\n"
	"	 nop\n"
	"	.align	64\n"
	"1:	rd	%%tick, %%g2\n"
	"	add	%%g2, 6, %%g2\n"
	"	andn	%%g2, %0, %%g2\n"
	"	wrpr	%%g2, 0, %%tick\n"
	"	rdpr	%%tick, %%g0"
	: /* no outputs */
	: "r" (TICK_PRIV_BIT)
	: "g2");
}
111
/* Arm the %tick_cmpr comparator to fire `offset` ticks from now,
 * after dropping tick protection.  The wr/rd pair is aligned to an
 * I-cache line — same Blackbird %tick_cmpr write workaround as in
 * tick_add_compare().
 */
static void tick_init_tick(unsigned long offset)
{
	tick_disable_protection();

	__asm__ __volatile__(
	"	rd	%%tick, %%g1\n"
	"	andn	%%g1, %1, %%g1\n"
	"	ba,pt	%%xcc, 1f\n"
	"	 add	%%g1, %0, %%g1\n"
	"	.align	64\n"
	"1:	wr	%%g1, 0x0, %%tick_cmpr\n"
	"	rd	%%tick_cmpr, %%g0"
	: /* no outputs */
	: "r" (offset), "r" (TICK_PRIV_BIT)
	: "g1");
}
128
/* Read the current %tick value with the privileged bit masked off. */
static unsigned long tick_get_tick(void)
{
	unsigned long ret;

	__asm__ __volatile__("rd	%%tick, %0\n\t"
			     "mov	%0, %0"
			     : "=r" (ret));

	return ret & ~TICK_PRIV_BIT;
}
139
/* Read the current %tick_cmpr comparator value, unmasked. */
static unsigned long tick_get_compare(void)
{
	unsigned long ret;

	__asm__ __volatile__("rd	%%tick_cmpr, %0\n\t"
			     "mov	%0, %0"
			     : "=r" (ret));

	return ret;
}
150
/* Advance %tick_cmpr by `adj` ticks and return the new comparator
 * value.  The write is placed at the start of an I-cache line and
 * followed by a dummy read back — the Blackbird %tick_cmpr errata
 * workaround described in the comment below.
 */
static unsigned long tick_add_compare(unsigned long adj)
{
	unsigned long new_compare;

	/* Workaround for Spitfire Errata (#54 I think??), I discovered
	 * this via Sun BugID 4008234, mentioned in Solaris-2.5.1 patch
	 * number 103640.
	 *
	 * On Blackbird writes to %tick_cmpr can fail, the
	 * workaround seems to be to execute the wr instruction
	 * at the start of an I-cache line, and perform a dummy
	 * read back from %tick_cmpr right after writing to it. -DaveM
	 */
	__asm__ __volatile__("rd	%%tick_cmpr, %0\n\t"
			     "ba,pt	%%xcc, 1f\n\t"
			     " add	%0, %1, %0\n\t"
			     ".align	64\n"
			     "1:\n\t"
			     "wr	%0, 0, %%tick_cmpr\n\t"
			     "rd	%%tick_cmpr, %%g0"
			     : "=&r" (new_compare)
			     : "r" (adj));

	return new_compare;
}
176
/* Advance %tick itself by `adj` and re-arm %tick_cmpr at the new
 * (unprivileged-masked) tick plus `offset`.  Returns the new %tick.
 * The %tick_cmpr write uses the same aligned-write-plus-dummy-read
 * Blackbird workaround as tick_add_compare().
 */
static unsigned long tick_add_tick(unsigned long adj, unsigned long offset)
{
	unsigned long new_tick, tmp;

	/* Also need to handle Blackbird bug here too. */
	__asm__ __volatile__("rd	%%tick, %0\n\t"
			     "add	%0, %2, %0\n\t"
			     "wrpr	%0, 0, %%tick\n\t"
			     "andn	%0, %4, %1\n\t"
			     "ba,pt	%%xcc, 1f\n\t"
			     " add	%1, %3, %1\n\t"
			     ".align	64\n"
			     "1:\n\t"
			     "wr	%1, 0, %%tick_cmpr\n\t"
			     "rd	%%tick_cmpr, %%g0"
			     : "=&r" (new_tick), "=&r" (tmp)
			     : "r" (adj), "r" (offset), "r" (TICK_PRIV_BIT));

	return new_tick;
}
197
/* Tick ops backed by the privileged %tick/%tick_cmpr registers;
 * timer softint delivered on bit 0.
 */
static struct sparc64_tick_ops tick_operations = {
	.init_tick	= tick_init_tick,
	.get_tick	= tick_get_tick,
	.get_compare	= tick_get_compare,
	.add_tick	= tick_add_tick,
	.add_compare	= tick_add_compare,
	.softint_mask	= 1UL << 0,
};
206
/* STICK variant of init_tick: drop protection on %tick, clear the
 * privileged bit in the STICK register (%asr24), then arm the STICK
 * comparator (%asr25) `offset` ticks ahead.
 */
static void stick_init_tick(unsigned long offset)
{
	tick_disable_protection();

	/* Let the user get at STICK too. */
	__asm__ __volatile__(
	"	rd	%%asr24, %%g2\n"
	"	andn	%%g2, %0, %%g2\n"
	"	wr	%%g2, 0, %%asr24"
	: /* no outputs */
	: "r" (TICK_PRIV_BIT)
	: "g1", "g2");

	__asm__ __volatile__(
	"	rd	%%asr24, %%g1\n"
	"	andn	%%g1, %1, %%g1\n"
	"	add	%%g1, %0, %%g1\n"
	"	wr	%%g1, 0x0, %%asr25"
	: /* no outputs */
	: "r" (offset), "r" (TICK_PRIV_BIT)
	: "g1");
}
229
/* Read STICK (%asr24) with the privileged bit masked off. */
static unsigned long stick_get_tick(void)
{
	unsigned long ret;

	__asm__ __volatile__("rd	%%asr24, %0"
			     : "=r" (ret));

	return ret & ~TICK_PRIV_BIT;
}
239
/* Read the STICK comparator (%asr25), unmasked. */
static unsigned long stick_get_compare(void)
{
	unsigned long ret;

	__asm__ __volatile__("rd	%%asr25, %0"
			     : "=r" (ret));

	return ret;
}
249
/* Advance STICK (%asr24) by `adj` and re-arm the comparator (%asr25)
 * at new-tick-masked plus `offset`.  Returns the new STICK value.
 * No Blackbird workaround needed for the %asr registers.
 */
static unsigned long stick_add_tick(unsigned long adj, unsigned long offset)
{
	unsigned long new_tick, tmp;

	__asm__ __volatile__("rd	%%asr24, %0\n\t"
			     "add	%0, %2, %0\n\t"
			     "wr	%0, 0, %%asr24\n\t"
			     "andn	%0, %4, %1\n\t"
			     "add	%1, %3, %1\n\t"
			     "wr	%1, 0, %%asr25"
			     : "=&r" (new_tick), "=&r" (tmp)
			     : "r" (adj), "r" (offset), "r" (TICK_PRIV_BIT));

	return new_tick;
}
265
/* Advance the STICK comparator (%asr25) by `adj`; returns the new
 * comparator value.
 */
static unsigned long stick_add_compare(unsigned long adj)
{
	unsigned long new_compare;

	__asm__ __volatile__("rd	%%asr25, %0\n\t"
			     "add	%0, %1, %0\n\t"
			     "wr	%0, 0, %%asr25"
			     : "=&r" (new_compare)
			     : "r" (adj));

	return new_compare;
}
278
/* Tick ops backed by the STICK registers (%asr24/%asr25); timer
 * softint delivered on bit 16.
 */
static struct sparc64_tick_ops stick_operations = {
	.init_tick	= stick_init_tick,
	.get_tick	= stick_get_tick,
	.get_compare	= stick_get_compare,
	.add_tick	= stick_add_tick,
	.add_compare	= stick_add_compare,
	.softint_mask	= 1UL << 16,
};
287
288/* On Hummingbird the STICK/STICK_CMPR register is implemented
289 * in I/O space. There are two 64-bit registers each, the
290 * first holds the low 32-bits of the value and the second holds
291 * the high 32-bits.
292 *
293 * Since STICK is constantly updating, we have to access it carefully.
294 *
295 * The sequence we use to read is:
296 * 1) read low
297 * 2) read high
298 * 3) read low again, if it rolled over increment high by 1
299 *
300 * Writing STICK safely is also tricky:
301 * 1) write low to zero
302 * 2) write high
303 * 3) write low
304 */
305#define HBIRD_STICKCMP_ADDR 0x1fe0000f060UL
306#define HBIRD_STICK_ADDR 0x1fe0000f070UL
307
/* Read the Hummingbird STICK value from I/O space via physical-bypass
 * loads (ASI_PHYS_BYPASS_EC_E).  Implements the safe read sequence
 * from the block comment above: low, high, low again — if the second
 * low read is below the first, the counter wrapped, so bump high.
 */
static unsigned long __hbird_read_stick(void)
{
	unsigned long ret, tmp1, tmp2, tmp3;
	unsigned long addr = HBIRD_STICK_ADDR;

	__asm__ __volatile__("ldxa	[%1] %5, %2\n\t"
			     "add	%1, 0x8, %1\n\t"
			     "ldxa	[%1] %5, %3\n\t"
			     "sub	%1, 0x8, %1\n\t"
			     "ldxa	[%1] %5, %4\n\t"
			     "cmp	%4, %2\n\t"
			     "blu,a,pn	%%xcc, 1f\n\t"
			     " add	%3, 1, %3\n"
			     "1:\n\t"
			     "sllx	%3, 32, %3\n\t"
			     "or	%3, %4, %0\n\t"
			     : "=&r" (ret), "=&r" (addr),
			       "=&r" (tmp1), "=&r" (tmp2), "=&r" (tmp3)
			     : "i" (ASI_PHYS_BYPASS_EC_E), "1" (addr));

	return ret;
}
330
/* Read the Hummingbird STICK comparator: low then high 32-bit halves
 * from I/O space, recombined into one 64-bit value.  No wrap check is
 * done here (unlike __hbird_read_stick) since the comparator does not
 * tick on its own.
 */
static unsigned long __hbird_read_compare(void)
{
	unsigned long low, high;
	unsigned long addr = HBIRD_STICKCMP_ADDR;

	__asm__ __volatile__("ldxa	[%2] %3, %0\n\t"
			     "add	%2, 0x8, %2\n\t"
			     "ldxa	[%2] %3, %1"
			     : "=&r" (low), "=&r" (high), "=&r" (addr)
			     : "i" (ASI_PHYS_BYPASS_EC_E), "2" (addr));

	return (high << 32UL) | low;
}
344
/* Write the Hummingbird STICK value using the safe sequence from the
 * block comment above: zero the low half first, write high, then the
 * real low — so the counter never briefly holds a too-large value.
 */
static void __hbird_write_stick(unsigned long val)
{
	unsigned long low = (val & 0xffffffffUL);
	unsigned long high = (val >> 32UL);
	unsigned long addr = HBIRD_STICK_ADDR;

	__asm__ __volatile__("stxa	%%g0, [%0] %4\n\t"
			     "add	%0, 0x8, %0\n\t"
			     "stxa	%3, [%0] %4\n\t"
			     "sub	%0, 0x8, %0\n\t"
			     "stxa	%2, [%0] %4"
			     : "=&r" (addr)
			     : "0" (addr), "r" (low), "r" (high),
			       "i" (ASI_PHYS_BYPASS_EC_E));
}
360
/* Write the Hummingbird STICK comparator: high half first (addr
 * starts at +0x8), then the low half.
 */
static void __hbird_write_compare(unsigned long val)
{
	unsigned long low = (val & 0xffffffffUL);
	unsigned long high = (val >> 32UL);
	unsigned long addr = HBIRD_STICKCMP_ADDR + 0x8UL;

	__asm__ __volatile__("stxa	%3, [%0] %4\n\t"
			     "sub	%0, 0x8, %0\n\t"
			     "stxa	%2, [%0] %4"
			     : "=&r" (addr)
			     : "0" (addr), "r" (low), "r" (high),
			       "i" (ASI_PHYS_BYPASS_EC_E));
}
374
/* Hummingbird init_tick: drop tick protection, kick the STICK mux by
 * rewriting the current STICK value (see XXX note), then arm the
 * comparator `offset` ahead of the masked current value.
 */
static void hbtick_init_tick(unsigned long offset)
{
	unsigned long val;

	tick_disable_protection();

	/* XXX This seems to be necessary to 'jumpstart' Hummingbird
	 * XXX into actually sending STICK interrupts.  I think because
	 * XXX of how we store %tick_cmpr in head.S this somehow resets the
	 * XXX {TICK + STICK} interrupt mux.  -DaveM
	 */
	__hbird_write_stick(__hbird_read_stick());

	val  = __hbird_read_stick() & ~TICK_PRIV_BIT;
	__hbird_write_compare(val + offset);
}
391
/* Current Hummingbird STICK value, privileged bit masked off. */
static unsigned long hbtick_get_tick(void)
{
	return __hbird_read_stick() & ~TICK_PRIV_BIT;
}
396
/* Current Hummingbird STICK comparator value. */
static unsigned long hbtick_get_compare(void)
{
	return __hbird_read_compare();
}
401
/* Advance the Hummingbird STICK by `adj` and re-arm the comparator at
 * the masked new value plus `offset`.  Returns the new (masked) tick.
 */
static unsigned long hbtick_add_tick(unsigned long adj, unsigned long offset)
{
	unsigned long val;

	val = __hbird_read_stick() + adj;
	__hbird_write_stick(val);

	val &= ~TICK_PRIV_BIT;
	__hbird_write_compare(val + offset);

	return val;
}
414
/* Advance the Hummingbird STICK comparator by `adj` (privileged bit
 * cleared); returns the value written.
 */
static unsigned long hbtick_add_compare(unsigned long adj)
{
	unsigned long val = __hbird_read_compare() + adj;

	val &= ~TICK_PRIV_BIT;
	__hbird_write_compare(val);

	return val;
}
424
/* Tick ops for Hummingbird's memory-mapped STICK registers; timer
 * softint delivered on bit 0.
 */
static struct sparc64_tick_ops hbtick_operations = {
	.init_tick	= hbtick_init_tick,
	.get_tick	= hbtick_get_tick,
	.get_compare	= hbtick_get_compare,
	.add_tick	= hbtick_add_tick,
	.add_compare	= hbtick_add_compare,
	.softint_mask	= 1UL << 0,
};
433
434/* timer_interrupt() needs to keep up the real-time clock,
435 * as well as call the "do_timer()" routine every clocktick
436 *
437 * NOTE: On SUN5 systems the ticker interrupt comes in using 2
438 * interrupts, one at level14 and one with softint bit 0.
439 */
/* Tick-counter increments per timer interrupt (clock / HZ),
 * computed in sparc64_init_timers(). */
440unsigned long timer_tick_offset;
/* Compare value programmed for the next timer interrupt. */
441unsigned long timer_tick_compare;
442
/* Scaled ticks->nanoseconds factor for sched_clock(); set in time_init(). */
443static unsigned long timer_ticks_per_nsec_quotient;
444
/* Tick period expressed in microseconds. */
445#define TICK_SIZE (tick_nsec / 1000)
446
/* Periodically write the software clock back to the TOD chip: only when
 * NTP reports the clock synchronized (STA_UNSYNC clear), at most once
 * every ~11 minutes, and only near a half-second boundary so the chip's
 * whole-second update lands on time.
 */
447static inline void timer_check_rtc(void)
448{
449 /* last time the cmos clock got updated */
450 static long last_rtc_update;
451
452 /* Determine when to update the Mostek clock. */
453 if ((time_status & STA_UNSYNC) == 0 &&
454 xtime.tv_sec > last_rtc_update + 660 &&
455 (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
456 (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
457 if (set_rtc_mmss(xtime.tv_sec) == 0)
458 last_rtc_update = xtime.tv_sec;
459 else
460 last_rtc_update = xtime.tv_sec - 600;
461 /* do it again in 60 s */
462 }
463}
464
/* Master timer interrupt handler.  Accounts ticks and keeps re-arming
 * the compare register until the newly programmed compare lies in the
 * future, so no tick is lost when processing overruns a period.  The
 * add_compare/get_tick pair runs with PSTATE_IE cleared so it cannot
 * itself be interrupted.
 */
465static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs * regs)
466{
467 unsigned long ticks, pstate;
468
469 write_seqlock(&xtime_lock);
470
471 do {
472#ifndef CONFIG_SMP
473 profile_tick(CPU_PROFILING, regs);
474 update_process_times(user_mode(regs));
475#endif
476 do_timer(regs);
477
478 /* Guarantee that the following sequences execute
479 * uninterrupted.
480 */
481 __asm__ __volatile__("rdpr %%pstate, %0\n\t"
482 "wrpr %0, %1, %%pstate"
483 : "=r" (pstate)
484 : "i" (PSTATE_IE));
485
486 timer_tick_compare = tick_ops->add_compare(timer_tick_offset);
487 ticks = tick_ops->get_tick();
488
489 /* Restore PSTATE_IE. */
490 __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
491 : /* no outputs */
492 : "r" (pstate));
493 } while (time_after_eq(ticks, timer_tick_compare));
494
495 timer_check_rtc();
496
497 write_sequnlock(&xtime_lock);
498
499 return IRQ_HANDLED;
500}
501
502#ifdef CONFIG_SMP
/* SMP tick path: accounts the tick and refreshes the software copy of
 * timer_tick_compare, but deliberately never reprograms the hardware
 * compare register (see comment below).
 */
503void timer_tick_interrupt(struct pt_regs *regs)
504{
505 write_seqlock(&xtime_lock);
506
507 do_timer(regs);
508
509 /*
510 * Only keep timer_tick_offset uptodate, but don't set TICK_CMPR.
511 */
512 timer_tick_compare = tick_ops->get_compare() + timer_tick_offset;
513
514 timer_check_rtc();
515
516 write_sequnlock(&xtime_lock);
517}
518#endif
519
520/* Kick start a stopped clock (procedure from the Sun NVRAM/hostid FAQ).
 * Sets the kick-start bit, waits ~3 seconds-register rollovers for the
 * oscillator to come up, then clears kick-start and writes a fixed
 * "valid" date (1996-08-01) so the chip contents are sane.
 */
521static void __init kick_start_clock(void)
522{
523 unsigned long regs = mstk48t02_regs;
524 u8 sec, tmp;
525 int i, count;
526
527 prom_printf("CLOCK: Clock was stopped. Kick start ");
528
529 spin_lock_irq(&mostek_lock);
530
531 /* Turn on the kick start bit to start the oscillator. */
532 tmp = mostek_read(regs + MOSTEK_CREG);
533 tmp |= MSTK_CREG_WRITE;
534 mostek_write(regs + MOSTEK_CREG, tmp);
535 tmp = mostek_read(regs + MOSTEK_SEC);
536 tmp &= ~MSTK_STOP;
537 mostek_write(regs + MOSTEK_SEC, tmp);
538 tmp = mostek_read(regs + MOSTEK_HOUR);
539 tmp |= MSTK_KICK_START;
540 mostek_write(regs + MOSTEK_HOUR, tmp);
541 tmp = mostek_read(regs + MOSTEK_CREG);
542 tmp &= ~MSTK_CREG_WRITE;
543 mostek_write(regs + MOSTEK_CREG, tmp);
544
545 spin_unlock_irq(&mostek_lock);
546
547 /* Delay to allow the clock oscillator to start. */
548 sec = MSTK_REG_SEC(regs);
549 for (i = 0; i < 3; i++) {
550 while (sec == MSTK_REG_SEC(regs))
551 for (count = 0; count < 100000; count++)
552 /* nothing */ ;
553 prom_printf(".");
554 sec = MSTK_REG_SEC(regs);
555 }
556 prom_printf("\n");
557
558 spin_lock_irq(&mostek_lock);
559
560 /* Turn off kick start and set a "valid" time and date. */
561 tmp = mostek_read(regs + MOSTEK_CREG);
562 tmp |= MSTK_CREG_WRITE;
563 mostek_write(regs + MOSTEK_CREG, tmp);
564 tmp = mostek_read(regs + MOSTEK_HOUR);
565 tmp &= ~MSTK_KICK_START;
566 mostek_write(regs + MOSTEK_HOUR, tmp);
567 MSTK_SET_REG_SEC(regs,0);
568 MSTK_SET_REG_MIN(regs,0);
569 MSTK_SET_REG_HOUR(regs,0);
570 MSTK_SET_REG_DOW(regs,5);
571 MSTK_SET_REG_DOM(regs,1);
572 MSTK_SET_REG_MONTH(regs,8);
573 MSTK_SET_REG_YEAR(regs,1996 - MSTK_YEAR_ZERO);
574 tmp = mostek_read(regs + MOSTEK_CREG);
575 tmp &= ~MSTK_CREG_WRITE;
576 mostek_write(regs + MOSTEK_CREG, tmp);
577
578 spin_unlock_irq(&mostek_lock);
579
580 /* Ensure the kick start bit is off. If it isn't, turn it off. */
581 while (mostek_read(regs + MOSTEK_HOUR) & MSTK_KICK_START) {
582 prom_printf("CLOCK: Kick start still on!\n");
583
584 spin_lock_irq(&mostek_lock);
585
586 tmp = mostek_read(regs + MOSTEK_CREG);
587 tmp |= MSTK_CREG_WRITE;
588 mostek_write(regs + MOSTEK_CREG, tmp);
589
590 tmp = mostek_read(regs + MOSTEK_HOUR);
591 tmp &= ~MSTK_KICK_START;
592 mostek_write(regs + MOSTEK_HOUR, tmp);
593
594 tmp = mostek_read(regs + MOSTEK_CREG);
595 tmp &= ~MSTK_CREG_WRITE;
596 mostek_write(regs + MOSTEK_CREG, tmp);
597
598 spin_unlock_irq(&mostek_lock);
599 }
600
601 prom_printf("CLOCK: Kick start procedure successful.\n");
602}
603
604/* Return nonzero if the clock chip battery is low.  Detection: write
 * the complement of an EEPROM byte and read it back -- on a low battery
 * the chip blocks the write, so the byte reads back unchanged.
 */
605static int __init has_low_battery(void)
606{
607 unsigned long regs = mstk48t02_regs;
608 u8 data1, data2;
609
610 spin_lock_irq(&mostek_lock);
611
612 data1 = mostek_read(regs + MOSTEK_EEPROM); /* Read some data. */
613 mostek_write(regs + MOSTEK_EEPROM, ~data1); /* Write back the complement. */
614 data2 = mostek_read(regs + MOSTEK_EEPROM); /* Read back the complement. */
615 mostek_write(regs + MOSTEK_EEPROM, data1); /* Restore original value. */
616
617 spin_unlock_irq(&mostek_lock);
618
619 return (data1 == data2); /* Was the write blocked? */
620}
621
622/* Read the time of day from the Mostek chip (preferred) or the Dallas
 * DS12887 CMOS RTC and initialize xtime / wall_to_monotonic from it.
 * Requires clock_probe() to have mapped the chip registers already;
 * halts via the PROM if neither register set is mapped.
 */
623static void __init set_system_time(void)
624{
625 unsigned int year, mon, day, hour, min, sec;
626 unsigned long mregs = mstk48t02_regs;
627#ifdef CONFIG_PCI
628 unsigned long dregs = ds1287_regs;
629#else
630 unsigned long dregs = 0UL;
631#endif
632 u8 tmp;
633
634 if (!mregs && !dregs) {
635 prom_printf("Something wrong, clock regs not mapped yet.\n");
636 prom_halt();
637 }
638
639 if (mregs) {
640 spin_lock_irq(&mostek_lock);
641
642 /* Traditional Mostek chip. */
643 tmp = mostek_read(mregs + MOSTEK_CREG);
644 tmp |= MSTK_CREG_READ;
645 mostek_write(mregs + MOSTEK_CREG, tmp);
646
647 sec = MSTK_REG_SEC(mregs);
648 min = MSTK_REG_MIN(mregs);
649 hour = MSTK_REG_HOUR(mregs);
650 day = MSTK_REG_DOM(mregs);
651 mon = MSTK_REG_MONTH(mregs);
652 year = MSTK_CVT_YEAR( MSTK_REG_YEAR(mregs) );
653 } else {
654 int i;
655
656 /* Dallas 12887 RTC chip. */
657
658 /* Stolen from arch/i386/kernel/time.c, see there for
659 * credits and descriptive comments.
660 */
/* Wait for an update-in-progress cycle to begin... */
661 for (i = 0; i < 1000000; i++) {
662 if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP)
663 break;
664 udelay(10);
665 }
/* ...and then end, so the registers below are stable. */
666 for (i = 0; i < 1000000; i++) {
667 if (!(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP))
668 break;
669 udelay(10);
670 }
/* Re-read until seconds are stable across the whole sample. */
671 do {
672 sec = CMOS_READ(RTC_SECONDS);
673 min = CMOS_READ(RTC_MINUTES);
674 hour = CMOS_READ(RTC_HOURS);
675 day = CMOS_READ(RTC_DAY_OF_MONTH);
676 mon = CMOS_READ(RTC_MONTH);
677 year = CMOS_READ(RTC_YEAR);
678 } while (sec != CMOS_READ(RTC_SECONDS));
679 if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
680 BCD_TO_BIN(sec);
681 BCD_TO_BIN(min);
682 BCD_TO_BIN(hour);
683 BCD_TO_BIN(day);
684 BCD_TO_BIN(mon);
685 BCD_TO_BIN(year);
686 }
/* Two-digit year: values before 1970 are taken as 20xx. */
687 if ((year += 1900) < 1970)
688 year += 100;
689 }
690
691 xtime.tv_sec = mktime(year, mon, day, hour, min, sec);
692 xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
693 set_normalized_timespec(&wall_to_monotonic,
694 -xtime.tv_sec, -xtime.tv_nsec);
695
696 if (mregs) {
697 tmp = mostek_read(mregs + MOSTEK_CREG);
698 tmp &= ~MSTK_CREG_READ;
699 mostek_write(mregs + MOSTEK_CREG, tmp);
700
701 spin_unlock_irq(&mostek_lock);
702 }
703}
704
705void __init clock_probe(void)
706{
707 struct linux_prom_registers clk_reg[2];
708 char model[128];
709 int node, busnd = -1, err;
710 unsigned long flags;
711 struct linux_central *cbus;
712#ifdef CONFIG_PCI
713 struct linux_ebus *ebus = NULL;
714 struct sparc_isa_bridge *isa_br = NULL;
715#endif
716 static int invoked;
717
718 if (invoked)
719 return;
720 invoked = 1;
721
722
723 if (this_is_starfire) {
724 /* davem suggests we keep this within the 4M locked kernel image */
725 static char obp_gettod[256];
726 static u32 unix_tod;
727
728 sprintf(obp_gettod, "h# %08x unix-gettod",
729 (unsigned int) (long) &unix_tod);
730 prom_feval(obp_gettod);
731 xtime.tv_sec = unix_tod;
732 xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
733 set_normalized_timespec(&wall_to_monotonic,
734 -xtime.tv_sec, -xtime.tv_nsec);
735 return;
736 }
737
738 local_irq_save(flags);
739
740 cbus = central_bus;
741 if (cbus != NULL)
742 busnd = central_bus->child->prom_node;
743
744 /* Check FHC Central then EBUSs then ISA bridges then SBUSs.
745 * That way we handle the presence of multiple properly.
746 *
747 * As a special case, machines with Central must provide the
748 * timer chip there.
749 */
750#ifdef CONFIG_PCI
751 if (ebus_chain != NULL) {
752 ebus = ebus_chain;
753 if (busnd == -1)
754 busnd = ebus->prom_node;
755 }
756 if (isa_chain != NULL) {
757 isa_br = isa_chain;
758 if (busnd == -1)
759 busnd = isa_br->prom_node;
760 }
761#endif
762 if (sbus_root != NULL && busnd == -1)
763 busnd = sbus_root->prom_node;
764
765 if (busnd == -1) {
766 prom_printf("clock_probe: problem, cannot find bus to search.\n");
767 prom_halt();
768 }
769
770 node = prom_getchild(busnd);
771
772 while (1) {
773 if (!node)
774 model[0] = 0;
775 else
776 prom_getstring(node, "model", model, sizeof(model));
777 if (strcmp(model, "mk48t02") &&
778 strcmp(model, "mk48t08") &&
779 strcmp(model, "mk48t59") &&
780 strcmp(model, "m5819") &&
781 strcmp(model, "m5819p") &&
782 strcmp(model, "m5823") &&
783 strcmp(model, "ds1287")) {
784 if (cbus != NULL) {
785 prom_printf("clock_probe: Central bus lacks timer chip.\n");
786 prom_halt();
787 }
788
789 if (node != 0)
790 node = prom_getsibling(node);
791#ifdef CONFIG_PCI
792 while ((node == 0) && ebus != NULL) {
793 ebus = ebus->next;
794 if (ebus != NULL) {
795 busnd = ebus->prom_node;
796 node = prom_getchild(busnd);
797 }
798 }
799 while ((node == 0) && isa_br != NULL) {
800 isa_br = isa_br->next;
801 if (isa_br != NULL) {
802 busnd = isa_br->prom_node;
803 node = prom_getchild(busnd);
804 }
805 }
806#endif
807 if (node == 0) {
808 prom_printf("clock_probe: Cannot find timer chip\n");
809 prom_halt();
810 }
811 continue;
812 }
813
814 err = prom_getproperty(node, "reg", (char *)clk_reg,
815 sizeof(clk_reg));
816 if(err == -1) {
817 prom_printf("clock_probe: Cannot get Mostek reg property\n");
818 prom_halt();
819 }
820
821 if (cbus != NULL) {
822 apply_fhc_ranges(central_bus->child, clk_reg, 1);
823 apply_central_ranges(central_bus, clk_reg, 1);
824 }
825#ifdef CONFIG_PCI
826 else if (ebus != NULL) {
827 struct linux_ebus_device *edev;
828
829 for_each_ebusdev(edev, ebus)
830 if (edev->prom_node == node)
831 break;
832 if (edev == NULL) {
833 if (isa_chain != NULL)
834 goto try_isa_clock;
835 prom_printf("%s: Mostek not probed by EBUS\n",
836 __FUNCTION__);
837 prom_halt();
838 }
839
840 if (!strcmp(model, "ds1287") ||
841 !strcmp(model, "m5819") ||
842 !strcmp(model, "m5819p") ||
843 !strcmp(model, "m5823")) {
844 ds1287_regs = edev->resource[0].start;
845 } else {
846 mstk48t59_regs = edev->resource[0].start;
847 mstk48t02_regs = mstk48t59_regs + MOSTEK_48T59_48T02;
848 }
849 break;
850 }
851 else if (isa_br != NULL) {
852 struct sparc_isa_device *isadev;
853
854try_isa_clock:
855 for_each_isadev(isadev, isa_br)
856 if (isadev->prom_node == node)
857 break;
858 if (isadev == NULL) {
859 prom_printf("%s: Mostek not probed by ISA\n");
860 prom_halt();
861 }
862 if (!strcmp(model, "ds1287") ||
863 !strcmp(model, "m5819") ||
864 !strcmp(model, "m5819p") ||
865 !strcmp(model, "m5823")) {
866 ds1287_regs = isadev->resource.start;
867 } else {
868 mstk48t59_regs = isadev->resource.start;
869 mstk48t02_regs = mstk48t59_regs + MOSTEK_48T59_48T02;
870 }
871 break;
872 }
873#endif
874 else {
875 if (sbus_root->num_sbus_ranges) {
876 int nranges = sbus_root->num_sbus_ranges;
877 int rngc;
878
879 for (rngc = 0; rngc < nranges; rngc++)
880 if (clk_reg[0].which_io ==
881 sbus_root->sbus_ranges[rngc].ot_child_space)
882 break;
883 if (rngc == nranges) {
884 prom_printf("clock_probe: Cannot find ranges for "
885 "clock regs.\n");
886 prom_halt();
887 }
888 clk_reg[0].which_io =
889 sbus_root->sbus_ranges[rngc].ot_parent_space;
890 clk_reg[0].phys_addr +=
891 sbus_root->sbus_ranges[rngc].ot_parent_base;
892 }
893 }
894
895 if(model[5] == '0' && model[6] == '2') {
896 mstk48t02_regs = (((u64)clk_reg[0].phys_addr) |
897 (((u64)clk_reg[0].which_io)<<32UL));
898 } else if(model[5] == '0' && model[6] == '8') {
899 mstk48t08_regs = (((u64)clk_reg[0].phys_addr) |
900 (((u64)clk_reg[0].which_io)<<32UL));
901 mstk48t02_regs = mstk48t08_regs + MOSTEK_48T08_48T02;
902 } else {
903 mstk48t59_regs = (((u64)clk_reg[0].phys_addr) |
904 (((u64)clk_reg[0].which_io)<<32UL));
905 mstk48t02_regs = mstk48t59_regs + MOSTEK_48T59_48T02;
906 }
907 break;
908 }
909
910 if (mstk48t02_regs != 0UL) {
911 /* Report a low battery voltage condition. */
912 if (has_low_battery())
913 prom_printf("NVRAM: Low battery voltage!\n");
914
915 /* Kick start the clock if it is completely stopped. */
916 if (mostek_read(mstk48t02_regs + MOSTEK_SEC) & MSTK_STOP)
917 kick_start_clock();
918 }
919
920 set_system_time();
921
922 local_irq_restore(flags);
923}
924
925/* Gets the master TICK_INT timer going: selects the tick source
 * (Hummingbird STICK on Ultra-IIe, plain TICK on other spitfire-class
 * chips, STICK otherwise), reads its frequency from the PROM, and
 * computes timer_tick_offset.  Returns the clock frequency read from
 * the PROM property.
 */
926static unsigned long sparc64_init_timers(void)
927{
928 unsigned long clock;
929 int node;
930#ifdef CONFIG_SMP
931 extern void smp_tick_init(void);
932#endif
933
934 if (tlb_type == spitfire) {
935 unsigned long ver, manuf, impl;
936
937 __asm__ __volatile__ ("rdpr %%ver, %0"
938 : "=&r" (ver));
939 manuf = ((ver >> 48) & 0xffff);
940 impl = ((ver >> 32) & 0xffff);
941 if (manuf == 0x17 && impl == 0x13) {
942 /* Hummingbird, aka Ultra-IIe */
943 tick_ops = &hbtick_operations;
944 node = prom_root_node;
945 clock = prom_getint(node, "stick-frequency");
946 } else {
947 tick_ops = &tick_operations;
948 cpu_find_by_instance(0, &node, NULL);
949 clock = prom_getint(node, "clock-frequency");
950 }
951 } else {
952 tick_ops = &stick_operations;
953 node = prom_root_node;
954 clock = prom_getint(node, "stick-frequency");
955 }
956 timer_tick_offset = clock / HZ;
957
958#ifdef CONFIG_SMP
959 smp_tick_init();
960#endif
961
962 return clock;
963}
964
/* Register 'cfunc' as the timer interrupt handler and program the first
 * compare value via tick_ops->init_tick() with PSTATE_IE held off, then
 * enable local interrupts.  Halts via the PROM if the IRQ cannot be
 * registered.
 */
965static void sparc64_start_timers(irqreturn_t (*cfunc)(int, void *, struct pt_regs *))
966{
967 unsigned long pstate;
968 int err;
969
970 /* Register IRQ handler. */
971 err = request_irq(build_irq(0, 0, 0UL, 0UL), cfunc, SA_STATIC_ALLOC,
972 "timer", NULL);
973
974 if (err) {
975 prom_printf("Serious problem, cannot register TICK_INT\n");
976 prom_halt();
977 }
978
979 /* Guarantee that the following sequences execute
980 * uninterrupted.
981 */
982 __asm__ __volatile__("rdpr %%pstate, %0\n\t"
983 "wrpr %0, %1, %%pstate"
984 : "=r" (pstate)
985 : "i" (PSTATE_IE));
986
987 tick_ops->init_tick(timer_tick_offset);
988
989 /* Restore PSTATE_IE. */
990 __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
991 : /* no outputs */
992 : "r" (pstate));
993
994 local_irq_enable();
995}
996
/* Per-cpu reference values captured at the first cpufreq transition,
 * used to rescale udelay_val and clock_tick as the frequency changes.
 */
997struct freq_table {
998 unsigned long udelay_val_ref;
999 unsigned long clock_tick_ref;
1000 unsigned int ref_freq;
1001};
1002static DEFINE_PER_CPU(struct freq_table, sparc64_freq_table) = { 0, 0, 0 };
1003
/* Clock tick rate for 'cpu': the cpufreq reference value when one has
 * been recorded, otherwise the probed per-cpu clock_tick.
 */
1004unsigned long sparc64_get_clock_tick(unsigned int cpu)
1005{
1006 struct freq_table *ft = &per_cpu(sparc64_freq_table, cpu);
1007
1008 if (ft->clock_tick_ref)
1009 return ft->clock_tick_ref;
1010 return cpu_data(cpu).clock_tick;
1011}
1012
1013#ifdef CONFIG_CPU_FREQ
1014
/* cpufreq transition hook: capture reference udelay/clock_tick values
 * on first use, then rescale the per-cpu copies to the new frequency.
 * Scaling happens before an increase and after a decrease (and on
 * resume) so delays are never too short.
 */
1015static int sparc64_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
1016 void *data)
1017{
1018 struct cpufreq_freqs *freq = data;
1019 unsigned int cpu = freq->cpu;
1020 struct freq_table *ft = &per_cpu(sparc64_freq_table, cpu);
1021
1022 if (!ft->ref_freq) {
1023 ft->ref_freq = freq->old;
1024 ft->udelay_val_ref = cpu_data(cpu).udelay_val;
1025 ft->clock_tick_ref = cpu_data(cpu).clock_tick;
1026 }
1027 if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
1028 (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
1029 (val == CPUFREQ_RESUMECHANGE)) {
1030 cpu_data(cpu).udelay_val =
1031 cpufreq_scale(ft->udelay_val_ref,
1032 ft->ref_freq,
1033 freq->new);
1034 cpu_data(cpu).clock_tick =
1035 cpufreq_scale(ft->clock_tick_ref,
1036 ft->ref_freq,
1037 freq->new);
1038 }
1039
1040 return 0;
1041}
1042
1043static struct notifier_block sparc64_cpufreq_notifier_block = {
1044 .notifier_call = sparc64_cpufreq_notifier
1045};
1046
1047#endif /* CONFIG_CPU_FREQ */
1048
/* Time interpolator backed by the CPU tick counter; .frequency is
 * filled in by time_init() once the clock rate is known.
 */
1049static struct time_interpolator sparc64_cpu_interpolator = {
1050 .source = TIME_SOURCE_CPU,
1051 .shift = 16,
1052 .mask = 0xffffffffffffffffLL
1053};
1054
1055/* The quotient formula is taken from the IA64 port. */
1056#define SPARC64_NSEC_PER_CYC_SHIFT 30UL
/* Probe the timers, register the tick-backed time interpolator, start
 * the timer interrupt, precompute the sched_clock() scale factor, and
 * (when configured) hook the cpufreq transition notifier.
 */
1057void __init time_init(void)
1058{
1059 unsigned long clock = sparc64_init_timers();
1060
1061 sparc64_cpu_interpolator.frequency = clock;
1062 register_time_interpolator(&sparc64_cpu_interpolator);
1063
1064 /* Now that the interpolator is registered, it is
1065 * safe to start the timer ticking.
1066 */
1067 sparc64_start_timers(timer_interrupt);
1068
/* Rounded fixed-point nsec-per-tick factor used by sched_clock(). */
1069 timer_ticks_per_nsec_quotient =
1070 (((NSEC_PER_SEC << SPARC64_NSEC_PER_CYC_SHIFT) +
1071 (clock / 2)) / clock);
1072
1073#ifdef CONFIG_CPU_FREQ
1074 cpufreq_register_notifier(&sparc64_cpufreq_notifier_block,
1075 CPUFREQ_TRANSITION_NOTIFIER);
1076#endif
1077}
1078
/* Scheduler clock: current tick count scaled to nanoseconds with the
 * fixed-point quotient computed in time_init().
 */
1079unsigned long long sched_clock(void)
1080{
1081 unsigned long ticks = tick_ops->get_tick();
1082
1083 return (ticks * timer_ticks_per_nsec_quotient)
1084 >> SPARC64_NSEC_PER_CYC_SHIFT;
1085}
1086
/* Write the minutes and seconds of 'nowtime' back to the TOD chip
 * (Mostek when mapped, otherwise DS12887 via the CMOS accessors).
 * Only minutes/seconds are touched so hour/timezone handling is never
 * disturbed; returns 0 on success, -1 when no chip is mapped or the
 * chip has drifted more than 30 minutes from 'nowtime'.
 */
1087static int set_rtc_mmss(unsigned long nowtime)
1088{
1089 int real_seconds, real_minutes, chip_minutes;
1090 unsigned long mregs = mstk48t02_regs;
1091#ifdef CONFIG_PCI
1092 unsigned long dregs = ds1287_regs;
1093#else
1094 unsigned long dregs = 0UL;
1095#endif
1096 unsigned long flags;
1097 u8 tmp;
1098
1099 /*
1100 * Not having a register set can lead to trouble.
1101 * Also starfire doesn't have a tod clock.
1102 */
1103 if (!mregs && !dregs)
1104 return -1;
1105
1106 if (mregs) {
1107 spin_lock_irqsave(&mostek_lock, flags);
1108
1109 /* Read the current RTC minutes. */
1110 tmp = mostek_read(mregs + MOSTEK_CREG);
1111 tmp |= MSTK_CREG_READ;
1112 mostek_write(mregs + MOSTEK_CREG, tmp);
1113
1114 chip_minutes = MSTK_REG_MIN(mregs);
1115
1116 tmp = mostek_read(mregs + MOSTEK_CREG);
1117 tmp &= ~MSTK_CREG_READ;
1118 mostek_write(mregs + MOSTEK_CREG, tmp);
1119
1120 /*
1121 * since we're only adjusting minutes and seconds,
1122 * don't interfere with hour overflow. This avoids
1123 * messing with unknown time zones but requires your
1124 * RTC not to be off by more than 15 minutes
1125 */
1126 real_seconds = nowtime % 60;
1127 real_minutes = nowtime / 60;
1128 if (((abs(real_minutes - chip_minutes) + 15)/30) & 1)
1129 real_minutes += 30; /* correct for half hour time zone */
1130 real_minutes %= 60;
1131
1132 if (abs(real_minutes - chip_minutes) < 30) {
1133 tmp = mostek_read(mregs + MOSTEK_CREG);
1134 tmp |= MSTK_CREG_WRITE;
1135 mostek_write(mregs + MOSTEK_CREG, tmp);
1136
1137 MSTK_SET_REG_SEC(mregs,real_seconds);
1138 MSTK_SET_REG_MIN(mregs,real_minutes);
1139
1140 tmp = mostek_read(mregs + MOSTEK_CREG);
1141 tmp &= ~MSTK_CREG_WRITE;
1142 mostek_write(mregs + MOSTEK_CREG, tmp);
1143
1144 spin_unlock_irqrestore(&mostek_lock, flags);
1145
1146 return 0;
1147 } else {
1148 spin_unlock_irqrestore(&mostek_lock, flags);
1149
1150 return -1;
1151 }
1152 } else {
1153 int retval = 0;
1154 unsigned char save_control, save_freq_select;
1155
1156 /* Stolen from arch/i386/kernel/time.c, see there for
1157 * credits and descriptive comments.
1158 */
1159 spin_lock_irqsave(&rtc_lock, flags);
1160 save_control = CMOS_READ(RTC_CONTROL); /* tell the clock it's being set */
1161 CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
1162
1163 save_freq_select = CMOS_READ(RTC_FREQ_SELECT); /* stop and reset prescaler */
1164 CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
1165
1166 chip_minutes = CMOS_READ(RTC_MINUTES);
1167 if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
1168 BCD_TO_BIN(chip_minutes);
1169 real_seconds = nowtime % 60;
1170 real_minutes = nowtime / 60;
1171 if (((abs(real_minutes - chip_minutes) + 15)/30) & 1)
1172 real_minutes += 30;
1173 real_minutes %= 60;
1174
1175 if (abs(real_minutes - chip_minutes) < 30) {
1176 if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
1177 BIN_TO_BCD(real_seconds);
1178 BIN_TO_BCD(real_minutes);
1179 }
1180 CMOS_WRITE(real_seconds,RTC_SECONDS);
1181 CMOS_WRITE(real_minutes,RTC_MINUTES);
1182 } else {
1183 printk(KERN_WARNING
1184 "set_rtc_mmss: can't update from %d to %d\n",
1185 chip_minutes, real_minutes);
1186 retval = -1;
1187 }
1188
/* Restore control/divider state regardless of success. */
1189 CMOS_WRITE(save_control, RTC_CONTROL);
1190 CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
1191 spin_unlock_irqrestore(&rtc_lock, flags);
1192
1193 return retval;
1194 }
1195}
diff --git a/arch/sparc64/kernel/trampoline.S b/arch/sparc64/kernel/trampoline.S
new file mode 100644
index 000000000000..2c8f9344b4ee
--- /dev/null
+++ b/arch/sparc64/kernel/trampoline.S
@@ -0,0 +1,368 @@
1/* $Id: trampoline.S,v 1.26 2002/02/09 19:49:30 davem Exp $
2 * trampoline.S: Jump start slave processors on sparc64.
3 *
4 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
5 */
6
7#include <asm/head.h>
8#include <asm/asi.h>
9#include <asm/lsu.h>
10#include <asm/dcr.h>
11#include <asm/dcu.h>
12#include <asm/pstate.h>
13#include <asm/page.h>
14#include <asm/pgtable.h>
15#include <asm/spitfire.h>
16#include <asm/processor.h>
17#include <asm/thread_info.h>
18#include <asm/mmu.h>
19
/* Secondary-CPU startup trampoline.  A slave cpu enters at
 * sparc64_cpu_startup with %o0 pointing at its thread descriptor.  It
 * configures per-cpu-type cache/MMU control state, calls into OBP by
 * hand (serialized by prom_entry_lock) to lock the kernel image into
 * its I/D TLBs, establishes trap globals, trap table and a stack, then
 * falls into smp_callin()/cpu_idle().
 */
20 .data
21 .align 8
22call_method:
23 .asciz "call-method"
24 .align 8
25itlb_load:
26 .asciz "SUNW,itlb-load"
27 .align 8
28dtlb_load:
29 .asciz "SUNW,dtlb-load"
30
31 .text
32 .align 8
33 .globl sparc64_cpu_startup, sparc64_cpu_startup_end
34sparc64_cpu_startup:
35 flushw
36
37 BRANCH_IF_CHEETAH_BASE(g1,g5,cheetah_startup)
38 BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g5,cheetah_plus_startup)
39
40 ba,pt %xcc, spitfire_startup
41 nop
42
43cheetah_plus_startup:
44 /* Preserve OBP chosen DCU and DCR register settings. */
45 ba,pt %xcc, cheetah_generic_startup
46 nop
47
48cheetah_startup:
49 mov DCR_BPE | DCR_RPE | DCR_SI | DCR_IFPOE | DCR_MS, %g1
50 wr %g1, %asr18
51
52 sethi %uhi(DCU_ME|DCU_RE|DCU_HPE|DCU_SPE|DCU_SL|DCU_WE), %g5
53 or %g5, %ulo(DCU_ME|DCU_RE|DCU_HPE|DCU_SPE|DCU_SL|DCU_WE), %g5
54 sllx %g5, 32, %g5
55 or %g5, DCU_DM | DCU_IM | DCU_DC | DCU_IC, %g5
56 stxa %g5, [%g0] ASI_DCU_CONTROL_REG
57 membar #Sync
58
/* Clear all TSB extension registers on cheetah-class cpus. */
59cheetah_generic_startup:
60 mov TSB_EXTENSION_P, %g3
61 stxa %g0, [%g3] ASI_DMMU
62 stxa %g0, [%g3] ASI_IMMU
63 membar #Sync
64
65 mov TSB_EXTENSION_S, %g3
66 stxa %g0, [%g3] ASI_DMMU
67 membar #Sync
68
69 mov TSB_EXTENSION_N, %g3
70 stxa %g0, [%g3] ASI_DMMU
71 stxa %g0, [%g3] ASI_IMMU
72 membar #Sync
73
74 /* Disable STICK_INT interrupts. */
75 sethi %hi(0x80000000), %g5
76 sllx %g5, 32, %g5
77 wr %g5, %asr25
78
79 ba,pt %xcc, startup_continue
80 nop
81
82spitfire_startup:
83 mov (LSU_CONTROL_IC | LSU_CONTROL_DC | LSU_CONTROL_IM | LSU_CONTROL_DM), %g1
84 stxa %g1, [%g0] ASI_LSU_CONTROL
85 membar #Sync
86
87startup_continue:
88 wrpr %g0, 15, %pil
89
90 sethi %hi(0x80000000), %g2
91 sllx %g2, 32, %g2
92 wr %g2, 0, %tick_cmpr
93
94 /* Call OBP by hand to lock KERNBASE into i/d tlbs.
95 * We lock 2 consequetive entries if we are 'bigkernel'.
96 */
97 mov %o0, %l0
98
/* Spin on prom_entry_lock: only one cpu may call OBP at a time. */
99 sethi %hi(prom_entry_lock), %g2
1001: ldstub [%g2 + %lo(prom_entry_lock)], %g1
101 brnz,pn %g1, 1b
102 membar #StoreLoad | #StoreStore
103
104 sethi %hi(p1275buf), %g2
105 or %g2, %lo(p1275buf), %g2
106 ldx [%g2 + 0x10], %l2
107 mov %sp, %l1
108 add %l2, -(192 + 128), %sp
109 flushw
110
/* Build the "call-method" argument array for SUNW,itlb-load of
 * KERNBASE (TLB entry 15 on spitfire, 63 on cheetah) and call the
 * PROM entry stashed in p1275buf.
 */
111 sethi %hi(call_method), %g2
112 or %g2, %lo(call_method), %g2
113 stx %g2, [%sp + 2047 + 128 + 0x00]
114 mov 5, %g2
115 stx %g2, [%sp + 2047 + 128 + 0x08]
116 mov 1, %g2
117 stx %g2, [%sp + 2047 + 128 + 0x10]
118 sethi %hi(itlb_load), %g2
119 or %g2, %lo(itlb_load), %g2
120 stx %g2, [%sp + 2047 + 128 + 0x18]
121 sethi %hi(mmu_ihandle_cache), %g2
122 lduw [%g2 + %lo(mmu_ihandle_cache)], %g2
123 stx %g2, [%sp + 2047 + 128 + 0x20]
124 sethi %hi(KERNBASE), %g2
125 stx %g2, [%sp + 2047 + 128 + 0x28]
126 sethi %hi(kern_locked_tte_data), %g2
127 ldx [%g2 + %lo(kern_locked_tte_data)], %g2
128 stx %g2, [%sp + 2047 + 128 + 0x30]
129
130 mov 15, %g2
131 BRANCH_IF_ANY_CHEETAH(g1,g5,1f)
132
133 mov 63, %g2
1341:
135 stx %g2, [%sp + 2047 + 128 + 0x38]
136 sethi %hi(p1275buf), %g2
137 or %g2, %lo(p1275buf), %g2
138 ldx [%g2 + 0x08], %o1
139 call %o1
140 add %sp, (2047 + 128), %o0
141
/* bigkernel: lock a second 4MB itlb entry for KERNBASE + 0x400000. */
142 sethi %hi(bigkernel), %g2
143 lduw [%g2 + %lo(bigkernel)], %g2
144 cmp %g2, 0
145 be,pt %icc, do_dtlb
146 nop
147
148 sethi %hi(call_method), %g2
149 or %g2, %lo(call_method), %g2
150 stx %g2, [%sp + 2047 + 128 + 0x00]
151 mov 5, %g2
152 stx %g2, [%sp + 2047 + 128 + 0x08]
153 mov 1, %g2
154 stx %g2, [%sp + 2047 + 128 + 0x10]
155 sethi %hi(itlb_load), %g2
156 or %g2, %lo(itlb_load), %g2
157 stx %g2, [%sp + 2047 + 128 + 0x18]
158 sethi %hi(mmu_ihandle_cache), %g2
159 lduw [%g2 + %lo(mmu_ihandle_cache)], %g2
160 stx %g2, [%sp + 2047 + 128 + 0x20]
161 sethi %hi(KERNBASE + 0x400000), %g2
162 stx %g2, [%sp + 2047 + 128 + 0x28]
163 sethi %hi(kern_locked_tte_data), %g2
164 ldx [%g2 + %lo(kern_locked_tte_data)], %g2
165 sethi %hi(0x400000), %g1
166 add %g2, %g1, %g2
167 stx %g2, [%sp + 2047 + 128 + 0x30]
168
169 mov 14, %g2
170 BRANCH_IF_ANY_CHEETAH(g1,g5,1f)
171
172 mov 62, %g2
1731:
174 stx %g2, [%sp + 2047 + 128 + 0x38]
175 sethi %hi(p1275buf), %g2
176 or %g2, %lo(p1275buf), %g2
177 ldx [%g2 + 0x08], %o1
178 call %o1
179 add %sp, (2047 + 128), %o0
180
/* Same sequence again for the data TLB (SUNW,dtlb-load). */
181do_dtlb:
182 sethi %hi(call_method), %g2
183 or %g2, %lo(call_method), %g2
184 stx %g2, [%sp + 2047 + 128 + 0x00]
185 mov 5, %g2
186 stx %g2, [%sp + 2047 + 128 + 0x08]
187 mov 1, %g2
188 stx %g2, [%sp + 2047 + 128 + 0x10]
189 sethi %hi(dtlb_load), %g2
190 or %g2, %lo(dtlb_load), %g2
191 stx %g2, [%sp + 2047 + 128 + 0x18]
192 sethi %hi(mmu_ihandle_cache), %g2
193 lduw [%g2 + %lo(mmu_ihandle_cache)], %g2
194 stx %g2, [%sp + 2047 + 128 + 0x20]
195 sethi %hi(KERNBASE), %g2
196 stx %g2, [%sp + 2047 + 128 + 0x28]
197 sethi %hi(kern_locked_tte_data), %g2
198 ldx [%g2 + %lo(kern_locked_tte_data)], %g2
199 stx %g2, [%sp + 2047 + 128 + 0x30]
200
201 mov 15, %g2
202 BRANCH_IF_ANY_CHEETAH(g1,g5,1f)
203
204 mov 63, %g2
2051:
206
207 stx %g2, [%sp + 2047 + 128 + 0x38]
208 sethi %hi(p1275buf), %g2
209 or %g2, %lo(p1275buf), %g2
210 ldx [%g2 + 0x08], %o1
211 call %o1
212 add %sp, (2047 + 128), %o0
213
214 sethi %hi(bigkernel), %g2
215 lduw [%g2 + %lo(bigkernel)], %g2
216 cmp %g2, 0
217 be,pt %icc, do_unlock
218 nop
219
220 sethi %hi(call_method), %g2
221 or %g2, %lo(call_method), %g2
222 stx %g2, [%sp + 2047 + 128 + 0x00]
223 mov 5, %g2
224 stx %g2, [%sp + 2047 + 128 + 0x08]
225 mov 1, %g2
226 stx %g2, [%sp + 2047 + 128 + 0x10]
227 sethi %hi(dtlb_load), %g2
228 or %g2, %lo(dtlb_load), %g2
229 stx %g2, [%sp + 2047 + 128 + 0x18]
230 sethi %hi(mmu_ihandle_cache), %g2
231 lduw [%g2 + %lo(mmu_ihandle_cache)], %g2
232 stx %g2, [%sp + 2047 + 128 + 0x20]
233 sethi %hi(KERNBASE + 0x400000), %g2
234 stx %g2, [%sp + 2047 + 128 + 0x28]
235 sethi %hi(kern_locked_tte_data), %g2
236 ldx [%g2 + %lo(kern_locked_tte_data)], %g2
237 sethi %hi(0x400000), %g1
238 add %g2, %g1, %g2
239 stx %g2, [%sp + 2047 + 128 + 0x30]
240
241 mov 14, %g2
242 BRANCH_IF_ANY_CHEETAH(g1,g5,1f)
243
244 mov 62, %g2
2451:
246
247 stx %g2, [%sp + 2047 + 128 + 0x38]
248 sethi %hi(p1275buf), %g2
249 or %g2, %lo(p1275buf), %g2
250 ldx [%g2 + 0x08], %o1
251 call %o1
252 add %sp, (2047 + 128), %o0
253
/* Release the OBP mutex and restore this cpu's original stack. */
254do_unlock:
255 sethi %hi(prom_entry_lock), %g2
256 stb %g0, [%g2 + %lo(prom_entry_lock)]
257 membar #StoreStore | #StoreLoad
258
259 mov %l1, %sp
260 flushw
261
262 mov %l0, %o0
263
264 wrpr %g0, (PSTATE_PRIV | PSTATE_PEF), %pstate
265 wr %g0, 0, %fprs
266
267 /* XXX Buggy PROM... */
268 srl %o0, 0, %o0
269 ldx [%o0], %g6
270
271 wr %g0, ASI_P, %asi
272
273 mov PRIMARY_CONTEXT, %g7
274 stxa %g0, [%g7] ASI_DMMU
275 membar #Sync
276 mov SECONDARY_CONTEXT, %g7
277 stxa %g0, [%g7] ASI_DMMU
278 membar #Sync
279
/* Carve the initial kernel stack out of the thread_info block. */
280 mov 1, %g5
281 sllx %g5, THREAD_SHIFT, %g5
282 sub %g5, (STACKFRAME_SZ + STACK_BIAS), %g5
283 add %g6, %g5, %sp
284 mov 0, %fp
285
286 wrpr %g0, 0, %wstate
287 wrpr %g0, 0, %tl
288
289 /* Setup the trap globals, then we can resurface. */
290 rdpr %pstate, %o1
291 mov %g6, %o2
292 wrpr %o1, PSTATE_AG, %pstate
293 sethi %hi(sparc64_ttable_tl0), %g5
294 wrpr %g5, %tba
295 mov %o2, %g6
296
297 wrpr %o1, PSTATE_MG, %pstate
298#define KERN_HIGHBITS ((_PAGE_VALID|_PAGE_SZ4MB)^0xfffff80000000000)
299#define KERN_LOWBITS (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W)
300
301 mov TSB_REG, %g1
302 stxa %g0, [%g1] ASI_DMMU
303 membar #Sync
304 mov TLB_SFSR, %g1
305 sethi %uhi(KERN_HIGHBITS), %g2
306 or %g2, %ulo(KERN_HIGHBITS), %g2
307 sllx %g2, 32, %g2
308 or %g2, KERN_LOWBITS, %g2
309
310 BRANCH_IF_ANY_CHEETAH(g3,g7,9f)
311
312 ba,pt %xcc, 1f
313 nop
314
3159:
316 sethi %uhi(VPTE_BASE_CHEETAH), %g3
317 or %g3, %ulo(VPTE_BASE_CHEETAH), %g3
318 ba,pt %xcc, 2f
319 sllx %g3, 32, %g3
3201:
321 sethi %uhi(VPTE_BASE_SPITFIRE), %g3
322 or %g3, %ulo(VPTE_BASE_SPITFIRE), %g3
323 sllx %g3, 32, %g3
324
3252:
326 clr %g7
327#undef KERN_HIGHBITS
328#undef KERN_LOWBITS
329
330 wrpr %o1, 0x0, %pstate
331 ldx [%g6 + TI_TASK], %g4
332
333 wrpr %g0, 0, %wstate
334
335 call init_irqwork_curcpu
336 nop
337
338 BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g2,g3,1f)
339 ba,pt %xcc, 2f
340 nop
341
3421: /* Start using proper page size encodings in ctx register. */
343 sethi %uhi(CTX_CHEETAH_PLUS_NUC), %g3
344 mov PRIMARY_CONTEXT, %g1
345 sllx %g3, 32, %g3
346 sethi %hi(CTX_CHEETAH_PLUS_CTX0), %g2
347 or %g3, %g2, %g3
348 stxa %g3, [%g1] ASI_DMMU
349 membar #Sync
350
3512:
352 rdpr %pstate, %o1
353 or %o1, PSTATE_IE, %o1
354 wrpr %o1, 0, %pstate
355
356 call prom_set_trap_table
357 sethi %hi(sparc64_ttable_tl0), %o0
358
/* Hand off to generic SMP bringup; cpu_idle should never return. */
359 call smp_callin
360 nop
361 call cpu_idle
362 mov 0, %o0
363 call cpu_panic
364 nop
3651: b,a,pt %xcc, 1b
366
367 .align 8
368sparc64_cpu_startup_end:
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
new file mode 100644
index 000000000000..56b203a2af69
--- /dev/null
+++ b/arch/sparc64/kernel/traps.c
@@ -0,0 +1,2118 @@
1/* $Id: traps.c,v 1.85 2002/02/09 19:49:31 davem Exp $
2 * arch/sparc64/kernel/traps.c
3 *
4 * Copyright (C) 1995,1997 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1997,1999,2000 Jakub Jelinek (jakub@redhat.com)
6 */
7
8/*
9 * I like traps on v9, :))))
10 */
11
12#include <linux/config.h>
13#include <linux/module.h>
14#include <linux/sched.h> /* for jiffies */
15#include <linux/kernel.h>
16#include <linux/kallsyms.h>
17#include <linux/signal.h>
18#include <linux/smp.h>
19#include <linux/smp_lock.h>
20#include <linux/mm.h>
21#include <linux/init.h>
22
23#include <asm/delay.h>
24#include <asm/system.h>
25#include <asm/ptrace.h>
26#include <asm/oplib.h>
27#include <asm/page.h>
28#include <asm/pgtable.h>
29#include <asm/unistd.h>
30#include <asm/uaccess.h>
31#include <asm/fpumacro.h>
32#include <asm/lsu.h>
33#include <asm/dcu.h>
34#include <asm/estate.h>
35#include <asm/chafsr.h>
36#include <asm/psrcompat.h>
37#include <asm/processor.h>
38#include <asm/timer.h>
39#include <asm/kdebug.h>
40#ifdef CONFIG_KMOD
41#include <linux/kmod.h>
42#endif
43
/* Chain of callbacks invoked on die events (fatal traps); consumers
 * register via register_die_notifier() below.  The spinlock only
 * serializes registration, not chain traversal.
 */
struct notifier_block *sparc64die_chain;
static DEFINE_SPINLOCK(die_notifier_lock);

/* Add @nb to the sparc64 die notifier chain.
 * Returns the notifier_chain_register() result (0 on success).
 */
int register_die_notifier(struct notifier_block *nb)
{
	int err = 0;
	unsigned long flags;
	spin_lock_irqsave(&die_notifier_lock, flags);
	err = notifier_chain_register(&sparc64die_chain, nb);
	spin_unlock_irqrestore(&die_notifier_lock, flags);
	return err;
}
56
/* When an irrecoverable trap occurs at tl > 0, the trap entry
 * code logs the trap state registers at every level in the trap
 * stack.  It is found at (pt_regs + sizeof(pt_regs)) and the layout
 * is as follows:
 */
struct tl1_traplog {
	struct {
		unsigned long tstate;	/* saved TSTATE at this level */
		unsigned long tpc;	/* saved TPC */
		unsigned long tnpc;	/* saved TNPC */
		unsigned long tt;	/* saved trap type register */
	} trapstack[4];
	unsigned long tl;		/* trap level the error occurred at */
};
71
72static void dump_tl1_traplog(struct tl1_traplog *p)
73{
74 int i;
75
76 printk("TRAPLOG: Error at trap level 0x%lx, dumping track stack.\n",
77 p->tl);
78 for (i = 0; i < 4; i++) {
79 printk(KERN_CRIT
80 "TRAPLOG: Trap level %d TSTATE[%016lx] TPC[%016lx] "
81 "TNPC[%016lx] TT[%lx]\n",
82 i + 1,
83 p->trapstack[i].tstate, p->trapstack[i].tpc,
84 p->trapstack[i].tnpc, p->trapstack[i].tt);
85 }
86}
87
/* Entry point for the "call debug" software trap: just notify any
 * registered die-chain listeners (debugger hooks).
 */
void do_call_debug(struct pt_regs *regs)
{
	notify_die(DIE_CALL, "debug call", regs, 0, 255, SIGINT);
}
92
93void bad_trap(struct pt_regs *regs, long lvl)
94{
95 char buffer[32];
96 siginfo_t info;
97
98 if (notify_die(DIE_TRAP, "bad trap", regs,
99 0, lvl, SIGTRAP) == NOTIFY_STOP)
100 return;
101
102 if (lvl < 0x100) {
103 sprintf(buffer, "Bad hw trap %lx at tl0\n", lvl);
104 die_if_kernel(buffer, regs);
105 }
106
107 lvl -= 0x100;
108 if (regs->tstate & TSTATE_PRIV) {
109 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
110 die_if_kernel(buffer, regs);
111 }
112 if (test_thread_flag(TIF_32BIT)) {
113 regs->tpc &= 0xffffffff;
114 regs->tnpc &= 0xffffffff;
115 }
116 info.si_signo = SIGILL;
117 info.si_errno = 0;
118 info.si_code = ILL_ILLTRP;
119 info.si_addr = (void __user *)regs->tpc;
120 info.si_trapno = lvl;
121 force_sig_info(SIGILL, &info, current);
122}
123
124void bad_trap_tl1(struct pt_regs *regs, long lvl)
125{
126 char buffer[32];
127
128 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
129 0, lvl, SIGTRAP) == NOTIFY_STOP)
130 return;
131
132 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
133
134 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
135 die_if_kernel (buffer, regs);
136}
137
#ifdef CONFIG_DEBUG_BUGVERBOSE
/* Called by the BUG() macro expansion to report the file/line of the
 * failed assertion before the trap fires.
 */
void do_BUG(const char *file, int line)
{
	bust_spinlocks(1);	/* make sure the message gets out */
	printk("kernel BUG at %s:%d!\n", file, line);
}
#endif
145
146void instruction_access_exception(struct pt_regs *regs,
147 unsigned long sfsr, unsigned long sfar)
148{
149 siginfo_t info;
150
151 if (notify_die(DIE_TRAP, "instruction access exception", regs,
152 0, 0x8, SIGTRAP) == NOTIFY_STOP)
153 return;
154
155 if (regs->tstate & TSTATE_PRIV) {
156 printk("instruction_access_exception: SFSR[%016lx] SFAR[%016lx], going.\n",
157 sfsr, sfar);
158 die_if_kernel("Iax", regs);
159 }
160 if (test_thread_flag(TIF_32BIT)) {
161 regs->tpc &= 0xffffffff;
162 regs->tnpc &= 0xffffffff;
163 }
164 info.si_signo = SIGSEGV;
165 info.si_errno = 0;
166 info.si_code = SEGV_MAPERR;
167 info.si_addr = (void __user *)regs->tpc;
168 info.si_trapno = 0;
169 force_sig_info(SIGSEGV, &info, current);
170}
171
/* TL>0 variant: dump the logged trap stack, then defer to the TL==0
 * handler above for the actual reporting/signalling.
 */
void instruction_access_exception_tl1(struct pt_regs *regs,
				      unsigned long sfsr, unsigned long sfar)
{
	if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	instruction_access_exception(regs, sfsr, sfar);
}
182
/* TL==0 data access exception.  Kernel faults are first run through
 * the exception tables (uaccess fixups); a kernel fault with no fixup
 * entry is fatal.  User faults become SIGSEGV at the faulting address
 * (SFAR).
 */
void data_access_exception(struct pt_regs *regs,
			   unsigned long sfsr, unsigned long sfar)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, "data access exception", regs,
		       0, 0x30, SIGTRAP) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV) {
		/* Test if this comes from uaccess places. */
		unsigned long fixup;
		unsigned long g2 = regs->u_regs[UREG_G2];

		if ((fixup = search_extables_range(regs->tpc, &g2))) {
			/* Ouch, somebody is trying ugly VM hole tricks on us... */
#ifdef DEBUG_EXCEPTIONS
			printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc);
			printk("EX_TABLE: insn<%016lx> fixup<%016lx> "
			       "g2<%016lx>\n", regs->tpc, fixup, g2);
#endif
			/* Resume at the fixup handler with updated %g2. */
			regs->tpc = fixup;
			regs->tnpc = regs->tpc + 4;
			regs->u_regs[UREG_G2] = g2;
			return;
		}
		/* No fixup entry: unrecoverable kernel fault. */
		printk("data_access_exception: SFSR[%016lx] SFAR[%016lx], going.\n",
		       sfsr, sfar);
		die_if_kernel("Dax", regs);
	}

	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_MAPERR;
	info.si_addr = (void __user *)sfar;
	info.si_trapno = 0;
	force_sig_info(SIGSEGV, &info, current);
}
222
223#ifdef CONFIG_PCI
224/* This is really pathetic... */
225extern volatile int pci_poke_in_progress;
226extern volatile int pci_poke_cpu;
227extern volatile int pci_poke_faulted;
228#endif
229
/* When access exceptions happen, we must do this.  Invalidates every
 * I-cache and D-cache tag via the diagnostic put interfaces, then
 * re-enables both caches in the LSU control register.
 * Spitfire-only; BUG()s on other cpu types.
 */
static void spitfire_clean_and_reenable_l1_caches(void)
{
	unsigned long va;

	if (tlb_type != spitfire)
		BUG();

	/* Clean 'em. */
	for (va = 0; va < (PAGE_SIZE << 1); va += 32) {
		spitfire_put_icache_tag(va, 0x0);
		spitfire_put_dcache_tag(va, 0x0);
	}

	/* Re-enable I/D caches and their parity checking in the LSU. */
	__asm__ __volatile__("flush %%g6\n\t"
			     "membar #Sync\n\t"
			     "stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (LSU_CONTROL_IC | LSU_CONTROL_DC |
				    LSU_CONTROL_IM | LSU_CONTROL_DM),
			       "i" (ASI_LSU_CONTROL)
			     : "memory");
}
255
256void do_iae(struct pt_regs *regs)
257{
258 siginfo_t info;
259
260 spitfire_clean_and_reenable_l1_caches();
261
262 if (notify_die(DIE_TRAP, "instruction access exception", regs,
263 0, 0x8, SIGTRAP) == NOTIFY_STOP)
264 return;
265
266 info.si_signo = SIGBUS;
267 info.si_errno = 0;
268 info.si_code = BUS_OBJERR;
269 info.si_addr = (void *)0;
270 info.si_trapno = 0;
271 force_sig_info(SIGBUS, &info, current);
272}
273
274void do_dae(struct pt_regs *regs)
275{
276 siginfo_t info;
277
278#ifdef CONFIG_PCI
279 if (pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
280 spitfire_clean_and_reenable_l1_caches();
281
282 pci_poke_faulted = 1;
283
284 /* Why the fuck did they have to change this? */
285 if (tlb_type == cheetah || tlb_type == cheetah_plus)
286 regs->tpc += 4;
287
288 regs->tnpc = regs->tpc + 4;
289 return;
290 }
291#endif
292 spitfire_clean_and_reenable_l1_caches();
293
294 if (notify_die(DIE_TRAP, "data access exception", regs,
295 0, 0x30, SIGTRAP) == NOTIFY_STOP)
296 return;
297
298 info.si_signo = SIGBUS;
299 info.si_errno = 0;
300 info.si_code = BUS_OBJERR;
301 info.si_addr = (void *)0;
302 info.si_trapno = 0;
303 force_sig_info(SIGBUS, &info, current);
304}
305
/* Syndrome decode table for correctable ECC errors, indexed by the
 * low 8 bits of a UDB error status register; the resulting code is
 * handed to prom_getunumber() in cee_log() below to name the failing
 * memory module.
 */
static char ecc_syndrome_table[] = {
	0x4c, 0x40, 0x41, 0x48, 0x42, 0x48, 0x48, 0x49,
	0x43, 0x48, 0x48, 0x49, 0x48, 0x49, 0x49, 0x4a,
	0x44, 0x48, 0x48, 0x20, 0x48, 0x39, 0x4b, 0x48,
	0x48, 0x25, 0x31, 0x48, 0x28, 0x48, 0x48, 0x2c,
	0x45, 0x48, 0x48, 0x21, 0x48, 0x3d, 0x04, 0x48,
	0x48, 0x4b, 0x35, 0x48, 0x2d, 0x48, 0x48, 0x29,
	0x48, 0x00, 0x01, 0x48, 0x0a, 0x48, 0x48, 0x4b,
	0x0f, 0x48, 0x48, 0x4b, 0x48, 0x49, 0x49, 0x48,
	0x46, 0x48, 0x48, 0x2a, 0x48, 0x3b, 0x27, 0x48,
	0x48, 0x4b, 0x33, 0x48, 0x22, 0x48, 0x48, 0x2e,
	0x48, 0x19, 0x1d, 0x48, 0x1b, 0x4a, 0x48, 0x4b,
	0x1f, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
	0x48, 0x4b, 0x24, 0x48, 0x07, 0x48, 0x48, 0x36,
	0x4b, 0x48, 0x48, 0x3e, 0x48, 0x30, 0x38, 0x48,
	0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x16, 0x48,
	0x48, 0x12, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
	0x47, 0x48, 0x48, 0x2f, 0x48, 0x3f, 0x4b, 0x48,
	0x48, 0x06, 0x37, 0x48, 0x23, 0x48, 0x48, 0x2b,
	0x48, 0x05, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x32,
	0x26, 0x48, 0x48, 0x3a, 0x48, 0x34, 0x3c, 0x48,
	0x48, 0x11, 0x15, 0x48, 0x13, 0x4a, 0x48, 0x4b,
	0x17, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
	0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x1e, 0x48,
	0x48, 0x1a, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
	0x48, 0x08, 0x0d, 0x48, 0x02, 0x48, 0x48, 0x49,
	0x03, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x4b, 0x48,
	0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x10, 0x48,
	0x48, 0x14, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
	0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x18, 0x48,
	0x48, 0x1c, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
	0x4a, 0x0c, 0x09, 0x48, 0x0e, 0x48, 0x48, 0x4b,
	0x0b, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x4b, 0x4a
};
340
/* cee_trap in entry.S encodes AFSR/UDBH/UDBL error status
 * in the following format.  The AFAR is left as is, with
 * reserved bits cleared, and is a raw 40-bit physical
 * address.
 */
/* Upper data buffer status: bits 43+ of the encoded word. */
#define CE_STATUS_UDBH_UE		(1UL << (43 + 9))
#define CE_STATUS_UDBH_CE		(1UL << (43 + 8))
#define CE_STATUS_UDBH_ESYNDR		(0xffUL << 43)
#define CE_STATUS_UDBH_SHIFT		43
/* Lower data buffer status: bits 33+ of the encoded word. */
#define CE_STATUS_UDBL_UE		(1UL << (33 + 9))
#define CE_STATUS_UDBL_CE		(1UL << (33 + 8))
#define CE_STATUS_UDBL_ESYNDR		(0xffUL << 33)
#define CE_STATUS_UDBL_SHIFT		33
/* Raw AFSR contents occupy the low 33 bits. */
#define CE_STATUS_AFSR_MASK		(0x1ffffffffUL)
#define CE_STATUS_AFSR_ME		(1UL << 32)
#define CE_STATUS_AFSR_PRIV		(1UL << 31)
#define CE_STATUS_AFSR_ISAP		(1UL << 30)
#define CE_STATUS_AFSR_ETP		(1UL << 29)
#define CE_STATUS_AFSR_IVUE		(1UL << 28)
#define CE_STATUS_AFSR_TO		(1UL << 27)
#define CE_STATUS_AFSR_BERR		(1UL << 26)
#define CE_STATUS_AFSR_LDP		(1UL << 25)
#define CE_STATUS_AFSR_CP		(1UL << 24)
#define CE_STATUS_AFSR_WP		(1UL << 23)
#define CE_STATUS_AFSR_EDP		(1UL << 22)
#define CE_STATUS_AFSR_UE		(1UL << 21)
#define CE_STATUS_AFSR_CE		(1UL << 20)
#define CE_STATUS_AFSR_ETS		(0xfUL << 16)
#define CE_STATUS_AFSR_ETS_SHIFT	16
#define CE_STATUS_AFSR_PSYND		(0xffffUL << 0)
#define CE_STATUS_AFSR_PSYND_SHIFT	0

/* Layout of Ecache TAG Parity Syndrome of AFSR */
#define AFSR_ETSYNDROME_7_0		0x1UL /* E$-tag bus bits  <7:0> */
#define AFSR_ETSYNDROME_15_8		0x2UL /* E$-tag bus bits <15:8> */
#define AFSR_ETSYNDROME_21_16		0x4UL /* E$-tag bus bits <21:16> */
#define AFSR_ETSYNDROME_24_22		0x8UL /* E$-tag bus bits <24:22> */
378
379static char *syndrome_unknown = "<Unknown>";
380
381asmlinkage void cee_log(unsigned long ce_status,
382 unsigned long afar,
383 struct pt_regs *regs)
384{
385 char memmod_str[64];
386 char *p;
387 unsigned short scode, udb_reg;
388
389 printk(KERN_WARNING "CPU[%d]: Correctable ECC Error "
390 "AFSR[%lx] AFAR[%016lx] UDBL[%lx] UDBH[%lx]\n",
391 smp_processor_id(),
392 (ce_status & CE_STATUS_AFSR_MASK),
393 afar,
394 ((ce_status >> CE_STATUS_UDBL_SHIFT) & 0x3ffUL),
395 ((ce_status >> CE_STATUS_UDBH_SHIFT) & 0x3ffUL));
396
397 udb_reg = ((ce_status >> CE_STATUS_UDBL_SHIFT) & 0x3ffUL);
398 if (udb_reg & (1 << 8)) {
399 scode = ecc_syndrome_table[udb_reg & 0xff];
400 if (prom_getunumber(scode, afar,
401 memmod_str, sizeof(memmod_str)) == -1)
402 p = syndrome_unknown;
403 else
404 p = memmod_str;
405 printk(KERN_WARNING "CPU[%d]: UDBL Syndrome[%x] "
406 "Memory Module \"%s\"\n",
407 smp_processor_id(), scode, p);
408 }
409
410 udb_reg = ((ce_status >> CE_STATUS_UDBH_SHIFT) & 0x3ffUL);
411 if (udb_reg & (1 << 8)) {
412 scode = ecc_syndrome_table[udb_reg & 0xff];
413 if (prom_getunumber(scode, afar,
414 memmod_str, sizeof(memmod_str)) == -1)
415 p = syndrome_unknown;
416 else
417 p = memmod_str;
418 printk(KERN_WARNING "CPU[%d]: UDBH Syndrome[%x] "
419 "Memory Module \"%s\"\n",
420 smp_processor_id(), scode, p);
421 }
422}
423
/* Cheetah error trap handling. */
static unsigned long ecache_flush_physbase;	/* phys base of flush area */
static unsigned long ecache_flush_linesize;	/* smallest E$ line size */
static unsigned long ecache_flush_size;		/* 2x largest E$ size */

/* WARNING: The error trap handlers in assembly know the precise
 * layout of the following structure.
 *
 * C-level handlers below use this information to log the error
 * and then determine how to recover (if possible).
 */
struct cheetah_err_info {
/*0x00*/u64 afsr;
/*0x08*/u64 afar;

	/* D-cache state */
/*0x10*/u64 dcache_data[4];	/* The actual data	*/
/*0x30*/u64 dcache_index;	/* D-cache index	*/
/*0x38*/u64 dcache_tag;		/* D-cache tag/valid	*/
/*0x40*/u64 dcache_utag;	/* D-cache microtag	*/
/*0x48*/u64 dcache_stag;	/* D-cache snooptag	*/

	/* I-cache state */
/*0x50*/u64 icache_data[8];	/* The actual insns + predecode	*/
/*0x90*/u64 icache_index;	/* I-cache index	*/
/*0x98*/u64 icache_tag;		/* I-cache phys tag	*/
/*0xa0*/u64 icache_utag;	/* I-cache microtag	*/
/*0xa8*/u64 icache_stag;	/* I-cache snooptag	*/
/*0xb0*/u64 icache_upper;	/* I-cache upper-tag	*/
/*0xb8*/u64 icache_lower;	/* I-cache lower-tag	*/

	/* E-cache state */
/*0xc0*/u64 ecache_data[4];	/* 32 bytes from staging registers */
/*0xe0*/u64 ecache_index;	/* E-cache index	*/
/*0xe8*/u64 ecache_tag;		/* E-cache tag/state	*/

	/* Pad record out to a power-of-two size (32 u64s). */
/*0xf0*/u64 __pad[32 - 30];
};
/* Sentinel AFSR value meaning "slot free" in the error log. */
#define CHAFSR_INVALID		((u64)-1L)
463
/* This table is ordered in priority of errors and matches the
 * AFAR overwrite policy as well.
 */

struct afsr_error_table {
	unsigned long mask;	/* AFSR bit(s) identifying this error */
	const char *name;	/* human readable description */
};

static const char CHAFSR_PERR_msg[] =
	"System interface protocol error";
static const char CHAFSR_IERR_msg[] =
	"Internal processor error";
static const char CHAFSR_ISAP_msg[] =
	"System request parity error on incoming addresss";
static const char CHAFSR_UCU_msg[] =
	"Uncorrectable E-cache ECC error for ifetch/data";
static const char CHAFSR_UCC_msg[] =
	"SW Correctable E-cache ECC error for ifetch/data";
static const char CHAFSR_UE_msg[] =
	"Uncorrectable system bus data ECC error for read";
static const char CHAFSR_EDU_msg[] =
	"Uncorrectable E-cache ECC error for stmerge/blkld";
static const char CHAFSR_EMU_msg[] =
	"Uncorrectable system bus MTAG error";
static const char CHAFSR_WDU_msg[] =
	"Uncorrectable E-cache ECC error for writeback";
static const char CHAFSR_CPU_msg[] =
	"Uncorrectable ECC error for copyout";
static const char CHAFSR_CE_msg[] =
	"HW corrected system bus data ECC error for read";
static const char CHAFSR_EDC_msg[] =
	"HW corrected E-cache ECC error for stmerge/blkld";
static const char CHAFSR_EMC_msg[] =
	"HW corrected system bus MTAG ECC error";
static const char CHAFSR_WDC_msg[] =
	"HW corrected E-cache ECC error for writeback";
static const char CHAFSR_CPC_msg[] =
	"HW corrected ECC error for copyout";
static const char CHAFSR_TO_msg[] =
	"Unmapped error from system bus";
static const char CHAFSR_BERR_msg[] =
	"Bus error response from system bus";
static const char CHAFSR_IVC_msg[] =
	"HW corrected system bus data ECC error for ivec read";
static const char CHAFSR_IVU_msg[] =
	"Uncorrectable system bus data ECC error for ivec read";
/* Plain Cheetah (UltraSPARC-III) AFSR decode table. */
static struct afsr_error_table __cheetah_error_table[] = {
	{ CHAFSR_PERR, CHAFSR_PERR_msg },
	{ CHAFSR_IERR, CHAFSR_IERR_msg },
	{ CHAFSR_ISAP, CHAFSR_ISAP_msg },
	{ CHAFSR_UCU, CHAFSR_UCU_msg },
	{ CHAFSR_UCC, CHAFSR_UCC_msg },
	{ CHAFSR_UE, CHAFSR_UE_msg },
	{ CHAFSR_EDU, CHAFSR_EDU_msg },
	{ CHAFSR_EMU, CHAFSR_EMU_msg },
	{ CHAFSR_WDU, CHAFSR_WDU_msg },
	{ CHAFSR_CPU, CHAFSR_CPU_msg },
	{ CHAFSR_CE, CHAFSR_CE_msg },
	{ CHAFSR_EDC, CHAFSR_EDC_msg },
	{ CHAFSR_EMC, CHAFSR_EMC_msg },
	{ CHAFSR_WDC, CHAFSR_WDC_msg },
	{ CHAFSR_CPC, CHAFSR_CPC_msg },
	{ CHAFSR_TO, CHAFSR_TO_msg },
	{ CHAFSR_BERR, CHAFSR_BERR_msg },
	/* These two do not update the AFAR. */
	{ CHAFSR_IVC, CHAFSR_IVC_msg },
	{ CHAFSR_IVU, CHAFSR_IVU_msg },
	{ 0, NULL },
};
static const char CHPAFSR_DTO_msg[] =
	"System bus unmapped error for prefetch/storequeue-read";
static const char CHPAFSR_DBERR_msg[] =
	"System bus error for prefetch/storequeue-read";
static const char CHPAFSR_THCE_msg[] =
	"Hardware corrected E-cache Tag ECC error";
static const char CHPAFSR_TSCE_msg[] =
	"SW handled correctable E-cache Tag ECC error";
static const char CHPAFSR_TUE_msg[] =
	"Uncorrectable E-cache Tag ECC error";
static const char CHPAFSR_DUE_msg[] =
	"System bus uncorrectable data ECC error due to prefetch/store-fill";
/* Cheetah+ AFSR decode table: plain Cheetah errors plus the CHPAFSR
 * extensions.
 */
static struct afsr_error_table __cheetah_plus_error_table[] = {
	{ CHAFSR_PERR, CHAFSR_PERR_msg },
	{ CHAFSR_IERR, CHAFSR_IERR_msg },
	{ CHAFSR_ISAP, CHAFSR_ISAP_msg },
	{ CHAFSR_UCU, CHAFSR_UCU_msg },
	{ CHAFSR_UCC, CHAFSR_UCC_msg },
	{ CHAFSR_UE, CHAFSR_UE_msg },
	{ CHAFSR_EDU, CHAFSR_EDU_msg },
	{ CHAFSR_EMU, CHAFSR_EMU_msg },
	{ CHAFSR_WDU, CHAFSR_WDU_msg },
	{ CHAFSR_CPU, CHAFSR_CPU_msg },
	{ CHAFSR_CE, CHAFSR_CE_msg },
	{ CHAFSR_EDC, CHAFSR_EDC_msg },
	{ CHAFSR_EMC, CHAFSR_EMC_msg },
	{ CHAFSR_WDC, CHAFSR_WDC_msg },
	{ CHAFSR_CPC, CHAFSR_CPC_msg },
	{ CHAFSR_TO, CHAFSR_TO_msg },
	{ CHAFSR_BERR, CHAFSR_BERR_msg },
	{ CHPAFSR_DTO, CHPAFSR_DTO_msg },
	{ CHPAFSR_DBERR, CHPAFSR_DBERR_msg },
	{ CHPAFSR_THCE, CHPAFSR_THCE_msg },
	{ CHPAFSR_TSCE, CHPAFSR_TSCE_msg },
	{ CHPAFSR_TUE, CHPAFSR_TUE_msg },
	{ CHPAFSR_DUE, CHPAFSR_DUE_msg },
	/* These two do not update the AFAR. */
	{ CHAFSR_IVC, CHAFSR_IVC_msg },
	{ CHAFSR_IVU, CHAFSR_IVU_msg },
	{ 0, NULL },
};
static const char JPAFSR_JETO_msg[] =
	"System interface protocol error, hw timeout caused";
static const char JPAFSR_SCE_msg[] =
	"Parity error on system snoop results";
static const char JPAFSR_JEIC_msg[] =
	"System interface protocol error, illegal command detected";
static const char JPAFSR_JEIT_msg[] =
	"System interface protocol error, illegal ADTYPE detected";
static const char JPAFSR_OM_msg[] =
	"Out of range memory error has occurred";
static const char JPAFSR_ETP_msg[] =
	"Parity error on L2 cache tag SRAM";
static const char JPAFSR_UMS_msg[] =
	"Error due to unsupported store";
static const char JPAFSR_RUE_msg[] =
	"Uncorrectable ECC error from remote cache/memory";
static const char JPAFSR_RCE_msg[] =
	"Correctable ECC error from remote cache/memory";
static const char JPAFSR_BP_msg[] =
	"JBUS parity error on returned read data";
static const char JPAFSR_WBP_msg[] =
	"JBUS parity error on data for writeback or block store";
static const char JPAFSR_FRC_msg[] =
	"Foreign read to DRAM incurring correctable ECC error";
static const char JPAFSR_FRU_msg[] =
	"Foreign read to DRAM incurring uncorrectable ECC error";
/* Jalapeno (JBUS) AFSR decode table. */
static struct afsr_error_table __jalapeno_error_table[] = {
	{ JPAFSR_JETO, JPAFSR_JETO_msg },
	{ JPAFSR_SCE, JPAFSR_SCE_msg },
	{ JPAFSR_JEIC, JPAFSR_JEIC_msg },
	{ JPAFSR_JEIT, JPAFSR_JEIT_msg },
	{ CHAFSR_PERR, CHAFSR_PERR_msg },
	{ CHAFSR_IERR, CHAFSR_IERR_msg },
	{ CHAFSR_ISAP, CHAFSR_ISAP_msg },
	{ CHAFSR_UCU, CHAFSR_UCU_msg },
	{ CHAFSR_UCC, CHAFSR_UCC_msg },
	{ CHAFSR_UE, CHAFSR_UE_msg },
	{ CHAFSR_EDU, CHAFSR_EDU_msg },
	{ JPAFSR_OM, JPAFSR_OM_msg },
	{ CHAFSR_WDU, CHAFSR_WDU_msg },
	{ CHAFSR_CPU, CHAFSR_CPU_msg },
	{ CHAFSR_CE, CHAFSR_CE_msg },
	{ CHAFSR_EDC, CHAFSR_EDC_msg },
	{ JPAFSR_ETP, JPAFSR_ETP_msg },
	{ CHAFSR_WDC, CHAFSR_WDC_msg },
	{ CHAFSR_CPC, CHAFSR_CPC_msg },
	{ CHAFSR_TO, CHAFSR_TO_msg },
	{ CHAFSR_BERR, CHAFSR_BERR_msg },
	{ JPAFSR_UMS, JPAFSR_UMS_msg },
	{ JPAFSR_RUE, JPAFSR_RUE_msg },
	{ JPAFSR_RCE, JPAFSR_RCE_msg },
	{ JPAFSR_BP, JPAFSR_BP_msg },
	{ JPAFSR_WBP, JPAFSR_WBP_msg },
	{ JPAFSR_FRC, JPAFSR_FRC_msg },
	{ JPAFSR_FRU, JPAFSR_FRU_msg },
	/* This one does not update the AFAR. */
	{ CHAFSR_IVU, CHAFSR_IVU_msg },
	{ 0, NULL },
};
/* Table and error-bit mask matching this cpu; chosen at boot by
 * cheetah_ecache_flush_init().
 */
static struct afsr_error_table *cheetah_error_table;
static unsigned long cheetah_afsr_errors;

/* This is allocated at boot time based upon the largest hardware
 * cpu ID in the system.  We allocate two entries per cpu, one for
 * TL==0 logging and one for TL >= 1 logging.
 */
struct cheetah_err_info *cheetah_error_log;
642
643static __inline__ struct cheetah_err_info *cheetah_get_error_log(unsigned long afsr)
644{
645 struct cheetah_err_info *p;
646 int cpu = smp_processor_id();
647
648 if (!cheetah_error_log)
649 return NULL;
650
651 p = cheetah_error_log + (cpu * 2);
652 if ((afsr & CHAFSR_TL1) != 0UL)
653 p++;
654
655 return p;
656}
657
658extern unsigned int tl0_icpe[], tl1_icpe[];
659extern unsigned int tl0_dcpe[], tl1_dcpe[];
660extern unsigned int tl0_fecc[], tl1_fecc[];
661extern unsigned int tl0_cee[], tl1_cee[];
662extern unsigned int tl0_iae[], tl1_iae[];
663extern unsigned int tl0_dae[], tl1_dae[];
664extern unsigned int cheetah_plus_icpe_trap_vector[], cheetah_plus_icpe_trap_vector_tl1[];
665extern unsigned int cheetah_plus_dcpe_trap_vector[], cheetah_plus_dcpe_trap_vector_tl1[];
666extern unsigned int cheetah_fecc_trap_vector[], cheetah_fecc_trap_vector_tl1[];
667extern unsigned int cheetah_cee_trap_vector[], cheetah_cee_trap_vector_tl1[];
668extern unsigned int cheetah_deferred_trap_vector[], cheetah_deferred_trap_vector_tl1[];
669
670void __init cheetah_ecache_flush_init(void)
671{
672 unsigned long largest_size, smallest_linesize, order, ver;
673 int node, i, instance;
674
675 /* Scan all cpu device tree nodes, note two values:
676 * 1) largest E-cache size
677 * 2) smallest E-cache line size
678 */
679 largest_size = 0UL;
680 smallest_linesize = ~0UL;
681
682 instance = 0;
683 while (!cpu_find_by_instance(instance, &node, NULL)) {
684 unsigned long val;
685
686 val = prom_getintdefault(node, "ecache-size",
687 (2 * 1024 * 1024));
688 if (val > largest_size)
689 largest_size = val;
690 val = prom_getintdefault(node, "ecache-line-size", 64);
691 if (val < smallest_linesize)
692 smallest_linesize = val;
693 instance++;
694 }
695
696 if (largest_size == 0UL || smallest_linesize == ~0UL) {
697 prom_printf("cheetah_ecache_flush_init: Cannot probe cpu E-cache "
698 "parameters.\n");
699 prom_halt();
700 }
701
702 ecache_flush_size = (2 * largest_size);
703 ecache_flush_linesize = smallest_linesize;
704
705 /* Discover a physically contiguous chunk of physical
706 * memory in 'sp_banks' of size ecache_flush_size calculated
707 * above. Store the physical base of this area at
708 * ecache_flush_physbase.
709 */
710 for (node = 0; ; node++) {
711 if (sp_banks[node].num_bytes == 0)
712 break;
713 if (sp_banks[node].num_bytes >= ecache_flush_size) {
714 ecache_flush_physbase = sp_banks[node].base_addr;
715 break;
716 }
717 }
718
719 /* Note: Zero would be a valid value of ecache_flush_physbase so
720 * don't use that as the success test. :-)
721 */
722 if (sp_banks[node].num_bytes == 0) {
723 prom_printf("cheetah_ecache_flush_init: Cannot find %d byte "
724 "contiguous physical memory.\n", ecache_flush_size);
725 prom_halt();
726 }
727
728 /* Now allocate error trap reporting scoreboard. */
729 node = NR_CPUS * (2 * sizeof(struct cheetah_err_info));
730 for (order = 0; order < MAX_ORDER; order++) {
731 if ((PAGE_SIZE << order) >= node)
732 break;
733 }
734 cheetah_error_log = (struct cheetah_err_info *)
735 __get_free_pages(GFP_KERNEL, order);
736 if (!cheetah_error_log) {
737 prom_printf("cheetah_ecache_flush_init: Failed to allocate "
738 "error logging scoreboard (%d bytes).\n", node);
739 prom_halt();
740 }
741 memset(cheetah_error_log, 0, PAGE_SIZE << order);
742
743 /* Mark all AFSRs as invalid so that the trap handler will
744 * log new new information there.
745 */
746 for (i = 0; i < 2 * NR_CPUS; i++)
747 cheetah_error_log[i].afsr = CHAFSR_INVALID;
748
749 __asm__ ("rdpr %%ver, %0" : "=r" (ver));
750 if ((ver >> 32) == 0x003e0016) {
751 cheetah_error_table = &__jalapeno_error_table[0];
752 cheetah_afsr_errors = JPAFSR_ERRORS;
753 } else if ((ver >> 32) == 0x003e0015) {
754 cheetah_error_table = &__cheetah_plus_error_table[0];
755 cheetah_afsr_errors = CHPAFSR_ERRORS;
756 } else {
757 cheetah_error_table = &__cheetah_error_table[0];
758 cheetah_afsr_errors = CHAFSR_ERRORS;
759 }
760
761 /* Now patch trap tables. */
762 memcpy(tl0_fecc, cheetah_fecc_trap_vector, (8 * 4));
763 memcpy(tl1_fecc, cheetah_fecc_trap_vector_tl1, (8 * 4));
764 memcpy(tl0_cee, cheetah_cee_trap_vector, (8 * 4));
765 memcpy(tl1_cee, cheetah_cee_trap_vector_tl1, (8 * 4));
766 memcpy(tl0_iae, cheetah_deferred_trap_vector, (8 * 4));
767 memcpy(tl1_iae, cheetah_deferred_trap_vector_tl1, (8 * 4));
768 memcpy(tl0_dae, cheetah_deferred_trap_vector, (8 * 4));
769 memcpy(tl1_dae, cheetah_deferred_trap_vector_tl1, (8 * 4));
770 if (tlb_type == cheetah_plus) {
771 memcpy(tl0_dcpe, cheetah_plus_dcpe_trap_vector, (8 * 4));
772 memcpy(tl1_dcpe, cheetah_plus_dcpe_trap_vector_tl1, (8 * 4));
773 memcpy(tl0_icpe, cheetah_plus_icpe_trap_vector, (8 * 4));
774 memcpy(tl1_icpe, cheetah_plus_icpe_trap_vector_tl1, (8 * 4));
775 }
776 flushi(PAGE_OFFSET);
777}
778
/* Displacement-flush the entire E-cache by reading ecache_flush_size
 * bytes (2x the largest E-cache) from the reserved physical area, one
 * line at a time, via ASI_PHYS_USE_EC.
 */
static void cheetah_flush_ecache(void)
{
	unsigned long flush_base = ecache_flush_physbase;
	unsigned long flush_linesize = ecache_flush_linesize;
	unsigned long flush_size = ecache_flush_size;

	/* Loads from [flush_base + flush_size - linesize] down to
	 * [flush_base]; the load sits in the branch delay slot.
	 */
	__asm__ __volatile__("1: subcc	%0, %4, %0\n\t"
			     "   bne,pt	%%xcc, 1b\n\t"
			     "    ldxa	[%2 + %0] %3, %%g0\n\t"
			     : "=&r" (flush_size)
			     : "0" (flush_size), "r" (flush_base),
			       "i" (ASI_PHYS_USE_EC), "r" (flush_linesize));
}
792
/* Displacement-flush a single E-cache line: read the two physical
 * addresses in the flush area that index the same line (the address
 * and its alias half-a-flush-area away).
 */
static void cheetah_flush_ecache_line(unsigned long physaddr)
{
	unsigned long alias;

	physaddr &= ~(8UL - 1UL);	/* 8-byte align */
	physaddr = (ecache_flush_physbase +
		    (physaddr & ((ecache_flush_size>>1UL) - 1UL)));
	alias = physaddr + (ecache_flush_size >> 1UL);
	__asm__ __volatile__("ldxa [%0] %2, %%g0\n\t"
			     "ldxa [%1] %2, %%g0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (physaddr), "r" (alias),
			       "i" (ASI_PHYS_USE_EC));
}
808
/* Unfortunately, the diagnostic access to the I-cache tags we need to
 * use to clear the thing interferes with I-cache coherency transactions.
 *
 * So we must only flush the I-cache when it is disabled.
 */
static void __cheetah_flush_icache(void)
{
	unsigned long i;

	/* Clear the valid bits in all the tags.
	 * 32K of tags, one store per 32-byte line; (2 << 3) selects
	 * the tag-valid field in the diagnostic address.
	 */
	for (i = 0; i < (1 << 15); i += (1 << 5)) {
		__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "r" (i | (2 << 3)), "i" (ASI_IC_TAG));
	}
}
826
/* Safe I-cache flush: disable the I-cache in the DCU control register,
 * invalidate all tags, then restore the previous DCU value (see the
 * comment above __cheetah_flush_icache for why the disable is needed).
 */
static void cheetah_flush_icache(void)
{
	unsigned long dcu_save;

	/* Save current DCU, disable I-cache. */
	__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
			     "or %0, %2, %%g1\n\t"
			     "stxa %%g1, [%%g0] %1\n\t"
			     "membar #Sync"
			     : "=r" (dcu_save)
			     : "i" (ASI_DCU_CONTROL_REG), "i" (DCU_IC)
			     : "g1");

	__cheetah_flush_icache();

	/* Restore DCU register */
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (dcu_save), "i" (ASI_DCU_CONTROL_REG));
}
848
/* Invalidate the whole D-cache by zeroing every tag via the
 * diagnostic ASI (64K of tags, one store per 32-byte line).
 */
static void cheetah_flush_dcache(void)
{
	unsigned long i;

	for (i = 0; i < (1 << 16); i += (1 << 5)) {
		__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "r" (i), "i" (ASI_DCACHE_TAG));
	}
}
860
/* In order to make the even parity correct we must do two things.
 * First, we clear DC_data_parity and set DC_utag to an appropriate value.
 * Next, we clear out all 32-bytes of data for that line.  Data of
 * all-zero + tag parity value of zero == correct parity.
 */
static void cheetah_plus_zap_dcache_parity(void)
{
	unsigned long i;

	for (i = 0; i < (1 << 16); i += (1 << 5)) {
		unsigned long tag = (i >> 14);	/* utag value for this line */
		unsigned long j;

		/* Write the microtag for this line... */
		__asm__ __volatile__("membar	#Sync\n\t"
				     "stxa	%0, [%1] %2\n\t"
				     "membar	#Sync"
				     : /* no outputs */
				     : "r" (tag), "r" (i),
				       "i" (ASI_DCACHE_UTAG));
		/* ...then zero each 8-byte word of the line's data. */
		for (j = i; j < i + (1 << 5); j += (1 << 3))
			__asm__ __volatile__("membar	#Sync\n\t"
					     "stxa	%%g0, [%0] %1\n\t"
					     "membar	#Sync"
					     : /* no outputs */
					     : "r" (j), "i" (ASI_DCACHE_DATA));
	}
}
888
889/* Conversion tables used to frob Cheetah AFSR syndrome values into
890 * something palatable to the memory controller driver get_unumber
891 * routine.
892 */
893#define MT0 137
894#define MT1 138
895#define MT2 139
896#define NONE 254
897#define MTC0 140
898#define MTC1 141
899#define MTC2 142
900#define MTC3 143
901#define C0 128
902#define C1 129
903#define C2 130
904#define C3 131
905#define C4 132
906#define C5 133
907#define C6 134
908#define C7 135
909#define C8 136
910#define M2 144
911#define M3 145
912#define M4 146
913#define M 147
914static unsigned char cheetah_ecc_syntab[] = {
915/*00*/NONE, C0, C1, M2, C2, M2, M3, 47, C3, M2, M2, 53, M2, 41, 29, M,
916/*01*/C4, M, M, 50, M2, 38, 25, M2, M2, 33, 24, M2, 11, M, M2, 16,
917/*02*/C5, M, M, 46, M2, 37, 19, M2, M, 31, 32, M, 7, M2, M2, 10,
918/*03*/M2, 40, 13, M2, 59, M, M2, 66, M, M2, M2, 0, M2, 67, 71, M,
919/*04*/C6, M, M, 43, M, 36, 18, M, M2, 49, 15, M, 63, M2, M2, 6,
920/*05*/M2, 44, 28, M2, M, M2, M2, 52, 68, M2, M2, 62, M2, M3, M3, M4,
921/*06*/M2, 26, 106, M2, 64, M, M2, 2, 120, M, M2, M3, M, M3, M3, M4,
922/*07*/116, M2, M2, M3, M2, M3, M, M4, M2, 58, 54, M2, M, M4, M4, M3,
923/*08*/C7, M2, M, 42, M, 35, 17, M2, M, 45, 14, M2, 21, M2, M2, 5,
924/*09*/M, 27, M, M, 99, M, M, 3, 114, M2, M2, 20, M2, M3, M3, M,
925/*0a*/M2, 23, 113, M2, 112, M2, M, 51, 95, M, M2, M3, M2, M3, M3, M2,
926/*0b*/103, M, M2, M3, M2, M3, M3, M4, M2, 48, M, M, 73, M2, M, M3,
927/*0c*/M2, 22, 110, M2, 109, M2, M, 9, 108, M2, M, M3, M2, M3, M3, M,
928/*0d*/102, M2, M, M, M2, M3, M3, M, M2, M3, M3, M2, M, M4, M, M3,
929/*0e*/98, M, M2, M3, M2, M, M3, M4, M2, M3, M3, M4, M3, M, M, M,
930/*0f*/M2, M3, M3, M, M3, M, M, M, 56, M4, M, M3, M4, M, M, M,
931/*10*/C8, M, M2, 39, M, 34, 105, M2, M, 30, 104, M, 101, M, M, 4,
932/*11*/M, M, 100, M, 83, M, M2, 12, 87, M, M, 57, M2, M, M3, M,
933/*12*/M2, 97, 82, M2, 78, M2, M2, 1, 96, M, M, M, M, M, M3, M2,
934/*13*/94, M, M2, M3, M2, M, M3, M, M2, M, 79, M, 69, M, M4, M,
935/*14*/M2, 93, 92, M, 91, M, M2, 8, 90, M2, M2, M, M, M, M, M4,
936/*15*/89, M, M, M3, M2, M3, M3, M, M, M, M3, M2, M3, M2, M, M3,
937/*16*/86, M, M2, M3, M2, M, M3, M, M2, M, M3, M, M3, M, M, M3,
938/*17*/M, M, M3, M2, M3, M2, M4, M, 60, M, M2, M3, M4, M, M, M2,
939/*18*/M2, 88, 85, M2, 84, M, M2, 55, 81, M2, M2, M3, M2, M3, M3, M4,
940/*19*/77, M, M, M, M2, M3, M, M, M2, M3, M3, M4, M3, M2, M, M,
941/*1a*/74, M, M2, M3, M, M, M3, M, M, M, M3, M, M3, M, M4, M3,
942/*1b*/M2, 70, 107, M4, 65, M2, M2, M, 127, M, M, M, M2, M3, M3, M,
943/*1c*/80, M2, M2, 72, M, 119, 118, M, M2, 126, 76, M, 125, M, M4, M3,
944/*1d*/M2, 115, 124, M, 75, M, M, M3, 61, M, M4, M, M4, M, M, M,
945/*1e*/M, 123, 122, M4, 121, M4, M, M3, 117, M2, M2, M3, M4, M3, M, M,
946/*1f*/111, M, M, M, M4, M3, M3, M, M, M, M3, M, M3, M2, M, M
947};
/* M-tag syndrome decode table, indexed by the 4-bit M_SYND field of
 * the AFSR (see cheetah_log_errors()).  Each entry names the mtag bit
 * in error; NONE marks syndromes with no single-bit decode.
 */
static unsigned char cheetah_mtag_syntab[] = {
	NONE, MTC0,
	MTC1, NONE,
	MTC2, NONE,
	NONE, MT0,
	MTC3, NONE,
	NONE, MT1,
	NONE, MT2,
	NONE, NONE
};
958
959/* Return the highest priority error conditon mentioned. */
960static __inline__ unsigned long cheetah_get_hipri(unsigned long afsr)
961{
962 unsigned long tmp = 0;
963 int i;
964
965 for (i = 0; cheetah_error_table[i].mask; i++) {
966 if ((tmp = (afsr & cheetah_error_table[i].mask)) != 0UL)
967 return tmp;
968 }
969 return tmp;
970}
971
972static const char *cheetah_get_string(unsigned long bit)
973{
974 int i;
975
976 for (i = 0; cheetah_error_table[i].mask; i++) {
977 if ((bit & cheetah_error_table[i].mask) != 0UL)
978 return cheetah_error_table[i].name;
979 }
980 return "???";
981}
982
983extern int chmc_getunumber(int, unsigned long, char *, int);
984
/* Dump a full report of a Cheetah error trap to the kernel log.
 *
 * @regs:        trap-time register state
 * @info:        per-cpu error log snapshot (cache tag/data captures)
 * @afsr:        Asynchronous Fault Status Register value at trap time
 * @afar:        Asynchronous Fault Address Register value at trap time
 * @recoverable: non-zero if the caller believes execution can continue;
 *               selects KERN_WARNING vs. KERN_CRIT for every line
 */
static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *info,
			       unsigned long afsr, unsigned long afar, int recoverable)
{
	unsigned long hipri;
	char unum[256];

	printk("%s" "ERROR(%d): Cheetah error trap taken afsr[%016lx] afar[%016lx] TL1(%d)\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       afsr, afar,
	       (afsr & CHAFSR_TL1) ? 1 : 0);
	printk("%s" "ERROR(%d): TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       regs->tpc, regs->tnpc, regs->tstate);
	printk("%s" "ERROR(%d): M_SYND(%lx),  E_SYND(%lx)%s%s\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
	       (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT,
	       (afsr & CHAFSR_ME) ? ", Multiple Errors" : "",
	       (afsr & CHAFSR_PRIV) ? ", Privileged" : "");
	/* Name the single highest-priority error bit. */
	hipri = cheetah_get_hipri(afsr);
	printk("%s" "ERROR(%d): Highest priority error (%016lx) \"%s\"\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       hipri, cheetah_get_string(hipri));

	/* Try to get unumber if relevant.  The syndrome tables map the
	 * raw AFSR syndrome field to a bit number the memory controller
	 * driver (chmc) can translate to a DIMM unum string.
	 */
#define ESYND_ERRORS	(CHAFSR_IVC | CHAFSR_IVU | \
			 CHAFSR_CPC | CHAFSR_CPU | \
			 CHAFSR_UE  | CHAFSR_CE  | \
			 CHAFSR_EDC | CHAFSR_EDU | \
			 CHAFSR_UCC | CHAFSR_UCU | \
			 CHAFSR_WDU | CHAFSR_WDC)
#define MSYND_ERRORS	(CHAFSR_EMC | CHAFSR_EMU)
	if (afsr & ESYND_ERRORS) {
		int syndrome;
		int ret;

		syndrome = (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT;
		syndrome = cheetah_ecc_syntab[syndrome];
		ret = chmc_getunumber(syndrome, afar, unum, sizeof(unum));
		if (ret != -1)
			printk("%s" "ERROR(%d): AFAR E-syndrome [%s]\n",
			       (recoverable ? KERN_WARNING : KERN_CRIT),
			       smp_processor_id(), unum);
	} else if (afsr & MSYND_ERRORS) {
		int syndrome;
		int ret;

		syndrome = (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT;
		syndrome = cheetah_mtag_syntab[syndrome];
		ret = chmc_getunumber(syndrome, afar, unum, sizeof(unum));
		if (ret != -1)
			printk("%s" "ERROR(%d): AFAR M-syndrome [%s]\n",
			       (recoverable ? KERN_WARNING : KERN_CRIT),
			       smp_processor_id(), unum);
	}

	/* Now dump the cache snapshots. */
	printk("%s" "ERROR(%d): D-cache idx[%x] tag[%016lx] utag[%016lx] stag[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->dcache_index,
	       info->dcache_tag,
	       info->dcache_utag,
	       info->dcache_stag);
	printk("%s" "ERROR(%d): D-cache data0[%016lx] data1[%016lx] data2[%016lx] data3[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->dcache_data[0],
	       info->dcache_data[1],
	       info->dcache_data[2],
	       info->dcache_data[3]);
	printk("%s" "ERROR(%d): I-cache idx[%x] tag[%016lx] utag[%016lx] stag[%016lx] "
	       "u[%016lx] l[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->icache_index,
	       info->icache_tag,
	       info->icache_utag,
	       info->icache_stag,
	       info->icache_upper,
	       info->icache_lower);
	printk("%s" "ERROR(%d): I-cache INSN0[%016lx] INSN1[%016lx] INSN2[%016lx] INSN3[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->icache_data[0],
	       info->icache_data[1],
	       info->icache_data[2],
	       info->icache_data[3]);
	printk("%s" "ERROR(%d): I-cache INSN4[%016lx] INSN5[%016lx] INSN6[%016lx] INSN7[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->icache_data[4],
	       info->icache_data[5],
	       info->icache_data[6],
	       info->icache_data[7]);
	printk("%s" "ERROR(%d): E-cache idx[%x] tag[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->ecache_index, info->ecache_tag);
	printk("%s" "ERROR(%d): E-cache data0[%016lx] data1[%016lx] data2[%016lx] data3[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->ecache_data[0],
	       info->ecache_data[1],
	       info->ecache_data[2],
	       info->ecache_data[3]);

	/* Report every remaining error bit beyond the highest-priority
	 * one, each by name, highest priority first.
	 */
	afsr = (afsr & ~hipri) & cheetah_afsr_errors;
	while (afsr != 0UL) {
		unsigned long bit = cheetah_get_hipri(afsr);

		printk("%s" "ERROR: Multiple-error (%016lx) \"%s\"\n",
		       (recoverable ? KERN_WARNING : KERN_CRIT),
		       bit, cheetah_get_string(bit));

		afsr &= ~bit;
	}

	if (!recoverable)
		printk(KERN_CRIT "ERROR: This condition is not recoverable.\n");
}
1099
/* Re-read the AFSR and report whether any error bits are currently
 * accrued.  If LOGP is non-NULL, the raw AFSR/AFAR values are saved
 * into it.  The value read is then stored back to the AFSR, which
 * clears the accrued error bits (write-one-clear register semantics).
 * Returns non-zero when errors were present.
 */
static int cheetah_recheck_errors(struct cheetah_err_info *logp)
{
	unsigned long afsr, afar;
	int ret = 0;

	__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
			     : "=r" (afsr)
			     : "i" (ASI_AFSR));
	if ((afsr & cheetah_afsr_errors) != 0) {
		if (logp != NULL) {
			/* Only bother reading the AFAR when an error is
			 * actually pending.
			 */
			__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
					     : "=r" (afar)
					     : "i" (ASI_AFAR));
			logp->afsr = afsr;
			logp->afar = afar;
		}
		ret = 1;
	}
	/* Write the accrued bits back to clear them. */
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync\n\t"
			     : : "r" (afsr), "i" (ASI_AFSR));

	return ret;
}
1124
/* Fast-ECC error trap handler.  The trap entry code has disabled the
 * I/D caches and error reporting; we flush all caches, log the error,
 * re-enable everything, and panic if the condition is not recoverable.
 */
void cheetah_fecc_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable;

	/* Flush E-cache */
	cheetah_flush_ecache();

	p = cheetah_get_error_log(afsr);
	if (!p) {
		/* Error hit before the logging area was set up; all we
		 * can do is report via the PROM and halt.
		 */
		prom_printf("ERROR: Early Fast-ECC error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	cheetah_flush_icache();
	cheetah_flush_dcache();

	/* Re-enable I-cache/D-cache: read-modify-write the DCU control
	 * register, OR-ing in the enable bits.
	 */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_DCU_CONTROL_REG),
			       "i" (DCU_DC | DCU_IC)
			     : "g1");

	/* Re-enable error reporting (both correctable and
	 * non-correctable error traps).
	 */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR.  What we are looking for here is whether a new
	 * error was logged while we had error reporting traps disabled.
	 */
	if (cheetah_recheck_errors(&local_snapshot)) {
		unsigned long new_afsr = local_snapshot.afsr;

		/* If we got a new asynchronous error, die... */
		if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
				CHAFSR_WDU | CHAFSR_CPU |
				CHAFSR_IVU | CHAFSR_UE |
				CHAFSR_BERR | CHAFSR_TO))
			recoverable = 0;
	}

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	if (!recoverable)
		panic("Irrecoverable Fast-ECC error trap.\n");

	/* Flush E-cache to kick the error trap handlers out. */
	cheetah_flush_ecache();
}
1210
/* Try to fix a correctable error by pushing the line out from
 * the E-cache.  Recheck error reporting registers to see if the
 * problem is intermittent.
 *
 * Returns 0 if the displacement sequence triggered no new error
 * (intermittent problem), 1 if a retry of the load came back clean,
 * and 2 if errors persisted even after the retry.
 */
static int cheetah_fix_ce(unsigned long physaddr)
{
	unsigned long orig_estate;
	unsigned long alias1, alias2;
	int ret;

	/* Make sure correctable error traps are disabled.  The original
	 * enable mask is kept in orig_estate and restored on exit.
	 */
	__asm__ __volatile__("ldxa [%%g0] %2, %0\n\t"
			     "andn %0, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %2\n\t"
			     "membar #Sync"
			     : "=&r" (orig_estate)
			     : "i" (ESTATE_ERROR_CEEN),
			       "i" (ASI_ESTATE_ERROR_EN)
			     : "g1");

	/* We calculate alias addresses that will force the
	 * cache line in question out of the E-cache.  Then
	 * we bring it back in with an atomic instruction so
	 * that we get it in some modified/exclusive state,
	 * then we displace it again to try and get proper ECC
	 * pushed back into the system.
	 */
	physaddr &= ~(8UL - 1UL);	/* 8-byte align */
	alias1 = (ecache_flush_physbase +
		  (physaddr & ((ecache_flush_size >> 1) - 1)));
	alias2 = alias1 + (ecache_flush_size >> 1);
	__asm__ __volatile__("ldxa [%0] %3, %%g0\n\t"		/* displace via alias 1 */
			     "ldxa [%1] %3, %%g0\n\t"		/* displace via alias 2 */
			     "casxa [%2] %3, %%g0, %%g0\n\t"	/* atomic reload of the line */
			     "membar #StoreLoad | #StoreStore\n\t"
			     "ldxa [%0] %3, %%g0\n\t"		/* displace it again */
			     "ldxa [%1] %3, %%g0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (alias1), "r" (alias2),
			       "r" (physaddr), "i" (ASI_PHYS_USE_EC));

	/* Did that trigger another error? */
	if (cheetah_recheck_errors(NULL)) {
		/* Try one more time. */
		__asm__ __volatile__("ldxa [%0] %1, %%g0\n\t"
				     "membar #Sync"
				     : : "r" (physaddr), "i" (ASI_PHYS_USE_EC));
		if (cheetah_recheck_errors(NULL))
			ret = 2;
		else
			ret = 1;
	} else {
		/* No new error, intermittent problem. */
		ret = 0;
	}

	/* Restore error enables. */
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : : "r" (orig_estate), "i" (ASI_ESTATE_ERROR_EN));

	return ret;
}
1275
1276/* Return non-zero if PADDR is a valid physical memory address. */
1277static int cheetah_check_main_memory(unsigned long paddr)
1278{
1279 int i;
1280
1281 for (i = 0; ; i++) {
1282 if (sp_banks[i].num_bytes == 0)
1283 break;
1284 if (paddr >= sp_banks[i].base_addr &&
1285 paddr < (sp_banks[i].base_addr + sp_banks[i].num_bytes))
1286 return 1;
1287 }
1288 return 0;
1289}
1290
/* Correctable-ECC error trap handler.  Unlike the Fast-ECC path, the
 * trap entry code only disabled the I-cache.  For a correctable error
 * in main memory we attempt an in-place scrub via cheetah_fix_ce(),
 * flush whatever caches could hold the bad line, log, and panic only
 * if the AFSR shows a fatal condition.
 */
void cheetah_cee_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable, is_memory;

	p = cheetah_get_error_log(afsr);
	if (!p) {
		/* Error hit before logging was initialized; report via
		 * the PROM and halt.
		 */
		prom_printf("ERROR: Early CEE error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	is_memory = cheetah_check_main_memory(afar);

	if (is_memory && (afsr & CHAFSR_CE) != 0UL) {
		/* XXX Might want to log the results of this operation
		 * XXX somewhere... -DaveM
		 */
		cheetah_fix_ce(afar);
	}

	{
		int flush_all, flush_line;

		/* If the only error pending is a single correctable
		 * cache error, displacing just that line suffices;
		 * anything more and the whole E-cache is flushed.
		 */
		flush_all = flush_line = 0;
		if ((afsr & CHAFSR_EDC) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_EDC)
				flush_line = 1;
			else
				flush_all = 1;
		} else if ((afsr & CHAFSR_CPC) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_CPC)
				flush_line = 1;
			else
				flush_all = 1;
		}

		/* Trap handler only disabled I-cache, flush it. */
		cheetah_flush_icache();

		/* Re-enable I-cache */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				       "i" (DCU_IC)
				     : "g1");

		if (flush_all)
			cheetah_flush_ecache();
		else if (flush_line)
			cheetah_flush_ecache_line(afar);
	}

	/* Re-enable error reporting (correctable error traps only on
	 * this path).
	 */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			       "i" (ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR */
	(void) cheetah_recheck_errors(&local_snapshot);

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	if (!recoverable)
		panic("Irrecoverable Correctable-ECC error trap.\n");
}
1390
/* Deferred (asynchronous) error trap handler.  We log the error and
 * try hard to continue: a fault from user mode, or from kernel mode
 * at a PC covered by the exception tables, is survivable once the
 * affected page is pinned out of circulation; everything else panics.
 */
void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable, is_memory;

#ifdef CONFIG_PCI
	/* Check for the special PCI poke sequence.  A poke probe
	 * expects a possible master abort: clean the caches and error
	 * state, flag the fault, and skip the faulting instruction
	 * instead of logging anything.
	 */
	if (pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
		cheetah_flush_icache();
		cheetah_flush_dcache();

		/* Re-enable I-cache/D-cache */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				       "i" (DCU_DC | DCU_IC)
				     : "g1");

		/* Re-enable error reporting */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_ESTATE_ERROR_EN),
				       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
				     : "g1");

		/* Clear any accrued error state before resuming. */
		(void) cheetah_recheck_errors(NULL);

		pci_poke_faulted = 1;
		regs->tpc += 4;
		regs->tnpc = regs->tpc + 4;
		return;
	}
#endif

	p = cheetah_get_error_log(afsr);
	if (!p) {
		/* Error hit before logging was initialized; report via
		 * the PROM and halt.
		 */
		prom_printf("ERROR: Early deferred error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	is_memory = cheetah_check_main_memory(afar);

	{
		int flush_all, flush_line;

		/* A lone EDU or BERR taints at most one line; anything
		 * more means the whole E-cache gets flushed.
		 */
		flush_all = flush_line = 0;
		if ((afsr & CHAFSR_EDU) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_EDU)
				flush_line = 1;
			else
				flush_all = 1;
		} else if ((afsr & CHAFSR_BERR) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_BERR)
				flush_line = 1;
			else
				flush_all = 1;
		}

		cheetah_flush_icache();
		cheetah_flush_dcache();

		/* Re-enable I/D caches */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				       "i" (DCU_IC | DCU_DC)
				     : "g1");

		if (flush_all)
			cheetah_flush_ecache();
		else if (flush_line)
			cheetah_flush_ecache_line(afar);
	}

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR.  What we are looking for here is whether a new
	 * error was logged while we had error reporting traps disabled.
	 */
	if (cheetah_recheck_errors(&local_snapshot)) {
		unsigned long new_afsr = local_snapshot.afsr;

		/* If we got a new asynchronous error, die... */
		if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
				CHAFSR_WDU | CHAFSR_CPU |
				CHAFSR_IVU | CHAFSR_UE |
				CHAFSR_BERR | CHAFSR_TO))
			recoverable = 0;
	}

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	/* "Recoverable" here means we try to yank the page from ever
	 * being newly used again.  This depends upon a few things:
	 * 1) Must be main memory, and AFAR must be valid.
	 * 2) If we trapped from user, OK.
	 * 3) Else, if we trapped from kernel we must find exception
	 *    table entry (ie. we have to have been accessing user
	 *    space).
	 *
	 * If AFAR is not in main memory, or we trapped from kernel
	 * and cannot find an exception table entry, it is unacceptable
	 * to try and continue.
	 */
	if (recoverable && is_memory) {
		if ((regs->tstate & TSTATE_PRIV) == 0UL) {
			/* OK, usermode access. */
			recoverable = 1;
		} else {
			unsigned long g2 = regs->u_regs[UREG_G2];
			unsigned long fixup = search_extables_range(regs->tpc, &g2);

			if (fixup != 0UL) {
				/* OK, kernel access to userspace. */
				recoverable = 1;

			} else {
				/* BAD, privileged state is corrupted. */
				recoverable = 0;
			}

			if (recoverable) {
				/* Pin the bad page: the extra reference is
				 * deliberately never dropped, keeping the
				 * page out of circulation.
				 */
				if (pfn_valid(afar >> PAGE_SHIFT))
					get_page(pfn_to_page(afar >> PAGE_SHIFT));
				else
					recoverable = 0;

				/* Only perform fixup if we still have a
				 * recoverable condition.
				 */
				if (recoverable) {
					regs->tpc = fixup;
					regs->tnpc = regs->tpc + 4;
					regs->u_regs[UREG_G2] = g2;
				}
			}
		}
	} else {
		recoverable = 0;
	}

	if (!recoverable)
		panic("Irrecoverable deferred error trap.\n");
}
1578
/* Handle a D/I cache parity error trap.  TYPE is encoded as:
 *
 *	Bit0: 0=dcache,1=icache
 *	Bit1: 0=recoverable,1=unrecoverable
 *
 * The hardware has disabled both the I-cache and D-cache in
 * the %dcr register.
 */
void cheetah_plus_parity_error(int type, struct pt_regs *regs)
{
	/* Invalidate the side that took the parity hit; the D-cache
	 * additionally needs its stored parity state zapped.  The
	 * D-cache is flushed in both cases.
	 */
	if (type & 0x1)
		__cheetah_flush_icache();
	else
		cheetah_plus_zap_dcache_parity();
	cheetah_flush_dcache();

	/* Re-enable I-cache/D-cache */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_DCU_CONTROL_REG),
			       "i" (DCU_DC | DCU_IC)
			     : "g1");

	/* Unrecoverable (bit1 set): report and panic. */
	if (type & 0x2) {
		printk(KERN_EMERG "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
		       smp_processor_id(),
		       (type & 0x1) ? 'I' : 'D',
		       regs->tpc);
		panic("Irrecoverable Cheetah+ parity error.");
	}

	printk(KERN_WARNING "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
	       smp_processor_id(),
	       (type & 0x1) ? 'I' : 'D',
	       regs->tpc);
}
1618
/* Common tail for the FPU exception traps.  From kernel mode we just
 * skip the faulting instruction (advance %tpc/%tnpc).  From user mode
 * we decode the saved %fsr to pick the most specific SIGFPE si_code
 * and deliver the signal.
 */
void do_fpe_common(struct pt_regs *regs)
{
	if (regs->tstate & TSTATE_PRIV) {
		regs->tpc = regs->tnpc;
		regs->tnpc += 4;
	} else {
		unsigned long fsr = current_thread_info()->xfsr[0];
		siginfo_t info;

		if (test_thread_flag(TIF_32BIT)) {
			/* 32-bit tasks only have 32-bit PCs. */
			regs->tpc &= 0xffffffff;
			regs->tnpc &= 0xffffffff;
		}
		info.si_signo = SIGFPE;
		info.si_errno = 0;
		info.si_addr = (void __user *)regs->tpc;
		info.si_trapno = 0;
		info.si_code = __SI_FAULT;	/* fallback when no cexc bit decodes */
		/* fsr bits 16:14 are the trap type; value 1 is an IEEE
		 * exception (cf. the unfinished/unimplemented_FPop values
		 * tested in do_fpother()).  Decode the cexc bits, highest
		 * priority (invalid operation) first.
		 */
		if ((fsr & 0x1c000) == (1 << 14)) {
			if (fsr & 0x10)
				info.si_code = FPE_FLTINV;
			else if (fsr & 0x08)
				info.si_code = FPE_FLTOVF;
			else if (fsr & 0x04)
				info.si_code = FPE_FLTUND;
			else if (fsr & 0x02)
				info.si_code = FPE_FLTDIV;
			else if (fsr & 0x01)
				info.si_code = FPE_FLTRES;
		}
		force_sig_info(SIGFPE, &info, current);
	}
}
1652
1653void do_fpieee(struct pt_regs *regs)
1654{
1655 if (notify_die(DIE_TRAP, "fpu exception ieee", regs,
1656 0, 0x24, SIGFPE) == NOTIFY_STOP)
1657 return;
1658
1659 do_fpe_common(regs);
1660}
1661
1662extern int do_mathemu(struct pt_regs *, struct fpustate *);
1663
1664void do_fpother(struct pt_regs *regs)
1665{
1666 struct fpustate *f = FPUSTATE;
1667 int ret = 0;
1668
1669 if (notify_die(DIE_TRAP, "fpu exception other", regs,
1670 0, 0x25, SIGFPE) == NOTIFY_STOP)
1671 return;
1672
1673 switch ((current_thread_info()->xfsr[0] & 0x1c000)) {
1674 case (2 << 14): /* unfinished_FPop */
1675 case (3 << 14): /* unimplemented_FPop */
1676 ret = do_mathemu(regs, f);
1677 break;
1678 }
1679 if (ret)
1680 return;
1681 do_fpe_common(regs);
1682}
1683
1684void do_tof(struct pt_regs *regs)
1685{
1686 siginfo_t info;
1687
1688 if (notify_die(DIE_TRAP, "tagged arithmetic overflow", regs,
1689 0, 0x26, SIGEMT) == NOTIFY_STOP)
1690 return;
1691
1692 if (regs->tstate & TSTATE_PRIV)
1693 die_if_kernel("Penguin overflow trap from kernel mode", regs);
1694 if (test_thread_flag(TIF_32BIT)) {
1695 regs->tpc &= 0xffffffff;
1696 regs->tnpc &= 0xffffffff;
1697 }
1698 info.si_signo = SIGEMT;
1699 info.si_errno = 0;
1700 info.si_code = EMT_TAGOVF;
1701 info.si_addr = (void __user *)regs->tpc;
1702 info.si_trapno = 0;
1703 force_sig_info(SIGEMT, &info, current);
1704}
1705
1706void do_div0(struct pt_regs *regs)
1707{
1708 siginfo_t info;
1709
1710 if (notify_die(DIE_TRAP, "integer division by zero", regs,
1711 0, 0x28, SIGFPE) == NOTIFY_STOP)
1712 return;
1713
1714 if (regs->tstate & TSTATE_PRIV)
1715 die_if_kernel("TL0: Kernel divide by zero.", regs);
1716 if (test_thread_flag(TIF_32BIT)) {
1717 regs->tpc &= 0xffffffff;
1718 regs->tnpc &= 0xffffffff;
1719 }
1720 info.si_signo = SIGFPE;
1721 info.si_errno = 0;
1722 info.si_code = FPE_INTDIV;
1723 info.si_addr = (void __user *)regs->tpc;
1724 info.si_trapno = 0;
1725 force_sig_info(SIGFPE, &info, current);
1726}
1727
/* Dump the nine kernel instructions around PC: three before, the
 * faulting one (wrapped in '<'/'>'), and five after.  PC must be
 * 4-byte aligned or nothing is printed.
 */
void instruction_dump(unsigned int *pc)
{
	int i;

	if (((unsigned long) pc) & 3)
		return;

	printk("Instruction DUMP:");
	for (i = -3; i < 6; i++) {
		char lhs = (i == 0) ? '<' : ' ';
		char rhs = (i == 0) ? '>' : ' ';

		printk("%c%08x%c", lhs, pc[i], rhs);
	}
	printk("\n");
}
1740
/* Dump the nine user-space instructions around PC (three before, five
 * after).  PC must be 4-byte aligned and readable via copy_from_user()
 * or nothing is printed.
 *
 * NOTE(review): the bracket convention here is inverted relative to
 * instruction_dump() above -- every word EXCEPT the faulting one
 * (buf[3]) is wrapped in '<'/'>'.  If the intent was to match the
 * kernel-mode dump, the ternaries should read i==3?'<':' ' and
 * i==3?'>':' ' -- confirm intent before changing the output format.
 */
static void user_instruction_dump (unsigned int __user *pc)
{
	int i;
	unsigned int buf[9];
	
	if ((((unsigned long) pc) & 3))
		return;
		
	if (copy_from_user(buf, pc - 3, sizeof(buf)))
		return;

	printk("Instruction DUMP:");
	for (i = 0; i < 9; i++)
		printk("%c%08x%c",i==3?' ':'<',buf[i],i==3?' ':'>');
	printk("\n");
}
1757
/* Print a kernel backtrace for TSK starting from stack pointer _KSP.
 * Walks the chain of saved frame pointers in the on-stack register
 * windows, bounded to the task's own thread area and at most 16 frames.
 */
void show_stack(struct task_struct *tsk, unsigned long *_ksp)
{
	unsigned long pc, fp, thread_base, ksp;
	struct thread_info *tp = tsk->thread_info;
	struct reg_window *rw;
	int count = 0;

	ksp = (unsigned long) _ksp;

	/* Make sure our own register windows are spilled to the stack
	 * before we start reading it.
	 */
	if (tp == current_thread_info())
		flushw_all();

	fp = ksp + STACK_BIAS;
	thread_base = (unsigned long) tp;

	printk("Call Trace:");
#ifdef CONFIG_KALLSYMS
	/* With kallsyms each frame is printed on its own line below. */
	printk("\n");
#endif
	do {
		/* Bogus frame pointer? */
		if (fp < (thread_base + sizeof(struct thread_info)) ||
		    fp >= (thread_base + THREAD_SIZE))
			break;
		rw = (struct reg_window *)fp;
		pc = rw->ins[7];	/* saved return address (%i7) */
		printk(" [%016lx] ", pc);
		print_symbol("%s\n", pc);
		fp = rw->ins[6] + STACK_BIAS;	/* saved frame pointer (%i6) */
	} while (++count < 16);
#ifndef CONFIG_KALLSYMS
	printk("\n");
#endif
}
1792
/* Print a backtrace of the current task, starting from our own
 * frame pointer (%fp).
 */
void dump_stack(void)
{
	unsigned long *ksp;

	__asm__ __volatile__("mov %%fp, %0"
			     : "=r" (ksp));
	show_stack(current, ksp);
}

EXPORT_SYMBOL(dump_stack);
1803
1804static inline int is_kernel_stack(struct task_struct *task,
1805 struct reg_window *rw)
1806{
1807 unsigned long rw_addr = (unsigned long) rw;
1808 unsigned long thread_base, thread_end;
1809
1810 if (rw_addr < PAGE_OFFSET) {
1811 if (task != &init_task)
1812 return 0;
1813 }
1814
1815 thread_base = (unsigned long) task->thread_info;
1816 thread_end = thread_base + sizeof(union thread_union);
1817 if (rw_addr >= thread_base &&
1818 rw_addr < thread_end &&
1819 !(rw_addr & 0x7UL))
1820 return 1;
1821
1822 return 0;
1823}
1824
1825static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
1826{
1827 unsigned long fp = rw->ins[6];
1828
1829 if (!fp)
1830 return NULL;
1831
1832 return (struct reg_window *) (fp + STACK_BIAS);
1833}
1834
/* Terminate the current task after an unrecoverable trap: print the
 * oops banner and register dump, then a bounded backtrace (kernel
 * mode) or an instruction dump (user mode).  Despite the name this
 * always kills the task -- do_exit(SIGKILL) if it was in privileged
 * state, do_exit(SIGSEGV) otherwise.
 */
void die_if_kernel(char *str, struct pt_regs *regs)
{
	static int die_counter;	/* distinguishes successive oopses in the log */
	extern void __show_regs(struct pt_regs * regs);
	extern void smp_report_regs(void);
	int count = 0;
	
	/* Amuse the user. */
	printk(
"              \\|/ ____ \\|/\n"
"              \"@'/ .. \\`@\"\n"
"              /_| \\__/ |_\\\n"
"                 \\__U_/\n");

	printk("%s(%d): %s [#%d]\n", current->comm, current->pid, str, ++die_counter);
	notify_die(DIE_OOPS, str, regs, 0, 255, SIGSEGV);
	/* Spill all register windows so the stack walk below sees them. */
	__asm__ __volatile__("flushw");
	__show_regs(regs);
	if (regs->tstate & TSTATE_PRIV) {
		struct reg_window *rw = (struct reg_window *)
			(regs->u_regs[UREG_FP] + STACK_BIAS);

		/* Stop the back trace when we hit userland or we
		 * find some badly aligned kernel stack.  Bounded to
		 * 30 frames.
		 */
		while (rw &&
		       count++ < 30&&
		       is_kernel_stack(current, rw)) {
			printk("Caller[%016lx]", rw->ins[7]);
			print_symbol(": %s", rw->ins[7]);
			printk("\n");

			rw = kernel_stack_up(rw);
		}
		instruction_dump ((unsigned int *) regs->tpc);
	} else {
		if (test_thread_flag(TIF_32BIT)) {
			/* 32-bit tasks only have 32-bit PCs. */
			regs->tpc &= 0xffffffff;
			regs->tnpc &= 0xffffffff;
		}
		user_instruction_dump ((unsigned int __user *) regs->tpc);
	}
#ifdef CONFIG_SMP
	smp_report_regs();
#endif

	if (regs->tstate & TSTATE_PRIV)
		do_exit(SIGKILL);
	do_exit(SIGSEGV);
}
1885
extern int handle_popc(u32 insn, struct pt_regs *regs);
extern int handle_ldf_stq(u32 insn, struct pt_regs *regs);

/* Illegal instruction trap (trap type 0x10).  Two opcode groups are
 * emulated in software (POPC, and the quad LDF/STQ forms); anything
 * else gets SIGILL.  A kernel-mode illegal instruction is fatal.
 */
void do_illegal_instruction(struct pt_regs *regs)
{
	unsigned long pc = regs->tpc;
	unsigned long tstate = regs->tstate;
	u32 insn;
	siginfo_t info;

	if (notify_die(DIE_TRAP, "illegal instruction", regs,
		       0, 0x10, SIGILL) == NOTIFY_STOP)
		return;

	if (tstate & TSTATE_PRIV)
		die_if_kernel("Kernel illegal instruction", regs);
	if (test_thread_flag(TIF_32BIT))
		pc = (u32)pc;	/* 32-bit tasks only have 32-bit PCs */
	/* get_user() yields 0 or -EFAULT here, so this reads "if the
	 * opcode fetch from userspace succeeded".
	 */
	if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
		if ((insn & 0xc1ffc000) == 0x81700000) /* POPC */ {
			if (handle_popc(insn, regs))
				return;
		} else if ((insn & 0xc1580000) == 0xc1100000) /* LDQ/STQ */ {
			if (handle_ldf_stq(insn, regs))
				return;
		}
	}
	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_ILLOPC;
	info.si_addr = (void __user *)pc;
	info.si_trapno = 0;
	force_sig_info(SIGILL, &info, current);
}
1920
/* Memory-address-not-aligned trap (trap type 0x34).  Kernel-mode
 * faults are handed to the unaligned-access emulator; user-mode
 * faults get SIGBUS/BUS_ADRALN with the faulting address (SFAR).
 */
void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, "memory address unaligned", regs,
		       0, 0x34, SIGSEGV) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV) {
		extern void kernel_unaligned_trap(struct pt_regs *regs,
						  unsigned int insn,
						  unsigned long sfar,
						  unsigned long sfsr);

		/* Fetch the faulting opcode and let the emulator fix it up. */
		kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc),
				      sfar, sfsr);
		return;
	}
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRALN;
	info.si_addr = (void __user *)sfar;
	info.si_trapno = 0;
	force_sig_info(SIGBUS, &info, current);
}
1946
1947void do_privop(struct pt_regs *regs)
1948{
1949 siginfo_t info;
1950
1951 if (notify_die(DIE_TRAP, "privileged operation", regs,
1952 0, 0x11, SIGILL) == NOTIFY_STOP)
1953 return;
1954
1955 if (test_thread_flag(TIF_32BIT)) {
1956 regs->tpc &= 0xffffffff;
1957 regs->tnpc &= 0xffffffff;
1958 }
1959 info.si_signo = SIGILL;
1960 info.si_errno = 0;
1961 info.si_code = ILL_PRVOPC;
1962 info.si_addr = (void __user *)regs->tpc;
1963 info.si_trapno = 0;
1964 force_sig_info(SIGILL, &info, current);
1965}
1966
/* Privileged action trap: handled identically to a privileged
 * opcode trap (SIGILL/ILL_PRVOPC).
 */
void do_privact(struct pt_regs *regs)
{
	do_privop(regs);
}
1971
/* Trap level 1 stuff or other traps we should never see...
 * Each handler below is a should-never-happen trap: dump the TL1
 * trap log where applicable and kill the offender via die_if_kernel().
 */
void do_cee(struct pt_regs *regs)
{
	die_if_kernel("TL0: Cache Error Exception", regs);
}
1977
/* TL1 cache error exception: dump the TL1 trap log, then die. */
void do_cee_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Cache Error Exception", regs);
}
1983
/* TL1 data access exception: dump the TL1 trap log, then die. */
void do_dae_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Data Access Exception", regs);
}
1989
/* TL1 instruction access exception: dump the TL1 trap log, then die. */
void do_iae_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Instruction Access Exception", regs);
}
1995
/* TL1 integer divide-by-zero: dump the TL1 trap log, then die. */
void do_div0_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: DIV0 Exception", regs);
}
2001
/* TL1 FPU-disabled trap: dump the TL1 trap log, then die. */
void do_fpdis_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: FPU Disabled", regs);
}
2007
/* TL1 FPU IEEE exception: dump the TL1 trap log, then die. */
void do_fpieee_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: FPU IEEE Exception", regs);
}
2013
/* TL1 FPU "other" exception: dump the TL1 trap log, then die. */
void do_fpother_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: FPU Other Exception", regs);
}
2019
/* TL1 illegal instruction: dump the TL1 trap log, then die. */
void do_ill_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Illegal Instruction Exception", regs);
}
2025
/* Interrupt taken at TL1: dump the TL1 trap log, then die. */
void do_irq_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: IRQ Exception", regs);
}
2031
/* TL1 misaligned LDDF: dump the TL1 trap log, then die. */
void do_lddfmna_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: LDDF Exception", regs);
}
2037
/* TL1 misaligned STDF: dump the TL1 trap log, then die. */
void do_stdfmna_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: STDF Exception", regs);
}
2043
/* TL0 physical watchpoint hit: unexpected, fatal. */
void do_paw(struct pt_regs *regs)
{
	die_if_kernel("TL0: Phys Watchpoint Exception", regs);
}
2048
/* TL1 physical watchpoint hit: dump the TL1 trap log, then die. */
void do_paw_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Phys Watchpoint Exception", regs);
}
2054
/* TL0 virtual watchpoint hit: unexpected, fatal. */
void do_vaw(struct pt_regs *regs)
{
	die_if_kernel("TL0: Virt Watchpoint Exception", regs);
}
2059
/* TL1 virtual watchpoint hit: dump the TL1 trap log, then die. */
void do_vaw_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Virt Watchpoint Exception", regs);
}
2065
/* TL1 tag overflow: dump the TL1 trap log, then die. */
void do_tof_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Tag Overflow Exception", regs);
}
2071
2072void do_getpsr(struct pt_regs *regs)
2073{
2074 regs->u_regs[UREG_I0] = tstate_to_psr(regs->tstate);
2075 regs->tpc = regs->tnpc;
2076 regs->tnpc += 4;
2077 if (test_thread_flag(TIF_32BIT)) {
2078 regs->tpc &= 0xffffffff;
2079 regs->tnpc &= 0xffffffff;
2080 }
2081}
2082
extern void thread_info_offsets_are_bolixed_dave(void);

/* Only invoked on boot processor. */
void __init trap_init(void)
{
	/* Compile time sanity check.  The TI_* constants are used by
	 * assembly code to index into struct thread_info; if any of
	 * them drifts from the real structure offset, reference a
	 * function that is presumably never defined so the build fails
	 * at link time -- TODO confirm it is indeed left undefined.
	 */
	if (TI_TASK != offsetof(struct thread_info, task) ||
	    TI_FLAGS != offsetof(struct thread_info, flags) ||
	    TI_CPU != offsetof(struct thread_info, cpu) ||
	    TI_FPSAVED != offsetof(struct thread_info, fpsaved) ||
	    TI_KSP != offsetof(struct thread_info, ksp) ||
	    TI_FAULT_ADDR != offsetof(struct thread_info, fault_address) ||
	    TI_KREGS != offsetof(struct thread_info, kregs) ||
	    TI_UTRAPS != offsetof(struct thread_info, utraps) ||
	    TI_EXEC_DOMAIN != offsetof(struct thread_info, exec_domain) ||
	    TI_REG_WINDOW != offsetof(struct thread_info, reg_window) ||
	    TI_RWIN_SPTRS != offsetof(struct thread_info, rwbuf_stkptrs) ||
	    TI_GSR != offsetof(struct thread_info, gsr) ||
	    TI_XFSR != offsetof(struct thread_info, xfsr) ||
	    TI_USER_CNTD0 != offsetof(struct thread_info, user_cntd0) ||
	    TI_USER_CNTD1 != offsetof(struct thread_info, user_cntd1) ||
	    TI_KERN_CNTD0 != offsetof(struct thread_info, kernel_cntd0) ||
	    TI_KERN_CNTD1 != offsetof(struct thread_info, kernel_cntd1) ||
	    TI_PCR != offsetof(struct thread_info, pcr_reg) ||
	    TI_CEE_STUFF != offsetof(struct thread_info, cee_stuff) ||
	    TI_PRE_COUNT != offsetof(struct thread_info, preempt_count) ||
	    TI_FPREGS != offsetof(struct thread_info, fpregs) ||
	    (TI_FPREGS & (64 - 1)))	/* fpregs must stay 64-byte aligned */
		thread_info_offsets_are_bolixed_dave();

	/* Attach to the address space of init_task.  On SMP we
	 * do this in smp.c:smp_callin for other cpus.
	 */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
}
diff --git a/arch/sparc64/kernel/ttable.S b/arch/sparc64/kernel/ttable.S
new file mode 100644
index 000000000000..491bb3681f9d
--- /dev/null
+++ b/arch/sparc64/kernel/ttable.S
@@ -0,0 +1,280 @@
1/* $Id: ttable.S,v 1.38 2002/02/09 19:49:30 davem Exp $
2 * ttable.S: Sparc V9 Trap Table(s) with SpitFire/Cheetah extensions.
3 *
4 * Copyright (C) 1996, 2001 David S. Miller (davem@caip.rutgers.edu)
5 */
6
7#include <linux/config.h>
8
9 .globl sparc64_ttable_tl0, sparc64_ttable_tl1
10 .globl tl0_icpe, tl1_icpe
11 .globl tl0_dcpe, tl1_dcpe
12 .globl tl0_fecc, tl1_fecc
13 .globl tl0_cee, tl1_cee
14 .globl tl0_iae, tl1_iae
15 .globl tl0_dae, tl1_dae
16
17sparc64_ttable_tl0:
18tl0_resv000: BOOT_KERNEL BTRAP(0x1) BTRAP(0x2) BTRAP(0x3)
19tl0_resv004: BTRAP(0x4) BTRAP(0x5) BTRAP(0x6) BTRAP(0x7)
20tl0_iax: membar #Sync
21 TRAP_NOSAVE_7INSNS(__do_instruction_access_exception)
22tl0_resv009: BTRAP(0x9)
23tl0_iae: TRAP(do_iae)
24tl0_resv00b: BTRAP(0xb) BTRAP(0xc) BTRAP(0xd) BTRAP(0xe) BTRAP(0xf)
25tl0_ill: membar #Sync
26 TRAP_7INSNS(do_illegal_instruction)
27tl0_privop: TRAP(do_privop)
28tl0_resv012: BTRAP(0x12) BTRAP(0x13) BTRAP(0x14) BTRAP(0x15) BTRAP(0x16) BTRAP(0x17)
29tl0_resv018: BTRAP(0x18) BTRAP(0x19) BTRAP(0x1a) BTRAP(0x1b) BTRAP(0x1c) BTRAP(0x1d)
30tl0_resv01e: BTRAP(0x1e) BTRAP(0x1f)
31tl0_fpdis: TRAP_NOSAVE(do_fpdis)
32tl0_fpieee: TRAP_SAVEFPU(do_fpieee)
33tl0_fpother: TRAP_NOSAVE(do_fpother_check_fitos)
34tl0_tof: TRAP(do_tof)
35tl0_cwin: CLEAN_WINDOW
36tl0_div0: TRAP(do_div0)
37tl0_resv029: BTRAP(0x29) BTRAP(0x2a) BTRAP(0x2b) BTRAP(0x2c) BTRAP(0x2d) BTRAP(0x2e)
38tl0_resv02f: BTRAP(0x2f)
39tl0_dax: TRAP_NOSAVE(__do_data_access_exception)
40tl0_resv031: BTRAP(0x31)
41tl0_dae: TRAP(do_dae)
42tl0_resv033: BTRAP(0x33)
43tl0_mna: TRAP_NOSAVE(do_mna)
44tl0_lddfmna: TRAP_NOSAVE(do_lddfmna)
45tl0_stdfmna: TRAP_NOSAVE(do_stdfmna)
46tl0_privact: TRAP_NOSAVE(__do_privact)
47tl0_resv038: BTRAP(0x38) BTRAP(0x39) BTRAP(0x3a) BTRAP(0x3b) BTRAP(0x3c) BTRAP(0x3d)
48tl0_resv03e: BTRAP(0x3e) BTRAP(0x3f) BTRAP(0x40)
49#ifdef CONFIG_SMP
50tl0_irq1: TRAP_IRQ(smp_call_function_client, 1)
51tl0_irq2: TRAP_IRQ(smp_receive_signal_client, 2)
52tl0_irq3: TRAP_IRQ(smp_penguin_jailcell, 3)
53#else
54tl0_irq1: BTRAP(0x41)
55tl0_irq2: BTRAP(0x42)
56tl0_irq3: BTRAP(0x43)
57#endif
58tl0_irq4: TRAP_IRQ(handler_irq, 4)
59tl0_irq5: TRAP_IRQ(handler_irq, 5) TRAP_IRQ(handler_irq, 6)
60tl0_irq7: TRAP_IRQ(handler_irq, 7) TRAP_IRQ(handler_irq, 8)
61tl0_irq9: TRAP_IRQ(handler_irq, 9) TRAP_IRQ(handler_irq, 10)
62tl0_irq11: TRAP_IRQ(handler_irq, 11) TRAP_IRQ(handler_irq, 12)
63tl0_irq13: TRAP_IRQ(handler_irq, 13)
64#ifndef CONFIG_SMP
65tl0_irq14: TRAP_IRQ(handler_irq, 14)
66#else
67tl0_irq14: TICK_SMP_IRQ
68#endif
69tl0_irq15: TRAP_IRQ(handler_irq, 15)
70tl0_resv050: BTRAP(0x50) BTRAP(0x51) BTRAP(0x52) BTRAP(0x53) BTRAP(0x54) BTRAP(0x55)
71tl0_resv056: BTRAP(0x56) BTRAP(0x57) BTRAP(0x58) BTRAP(0x59) BTRAP(0x5a) BTRAP(0x5b)
72tl0_resv05c: BTRAP(0x5c) BTRAP(0x5d) BTRAP(0x5e) BTRAP(0x5f)
73tl0_ivec: TRAP_IVEC
74tl0_paw: TRAP(do_paw)
75tl0_vaw: TRAP(do_vaw)
76tl0_cee: TRAP_NOSAVE(cee_trap)
77tl0_iamiss:
78#include "itlb_base.S"
79tl0_damiss:
80#include "dtlb_base.S"
81tl0_daprot:
82#include "dtlb_prot.S"
83tl0_fecc: BTRAP(0x70) /* Fast-ECC on Cheetah */
84tl0_dcpe: BTRAP(0x71) /* D-cache Parity Error on Cheetah+ */
85tl0_icpe: BTRAP(0x72) /* I-cache Parity Error on Cheetah+ */
86tl0_resv073: BTRAP(0x73) BTRAP(0x74) BTRAP(0x75)
87tl0_resv076: BTRAP(0x76) BTRAP(0x77) BTRAP(0x78) BTRAP(0x79) BTRAP(0x7a) BTRAP(0x7b)
88tl0_resv07c: BTRAP(0x7c) BTRAP(0x7d) BTRAP(0x7e) BTRAP(0x7f)
89tl0_s0n: SPILL_0_NORMAL
90tl0_s1n: SPILL_1_NORMAL
91tl0_s2n: SPILL_2_NORMAL
92tl0_s3n: SPILL_3_NORMAL
93tl0_s4n: SPILL_4_NORMAL
94tl0_s5n: SPILL_5_NORMAL
95tl0_s6n: SPILL_6_NORMAL
96tl0_s7n: SPILL_7_NORMAL
97tl0_s0o: SPILL_0_OTHER
98tl0_s1o: SPILL_1_OTHER
99tl0_s2o: SPILL_2_OTHER
100tl0_s3o: SPILL_3_OTHER
101tl0_s4o: SPILL_4_OTHER
102tl0_s5o: SPILL_5_OTHER
103tl0_s6o: SPILL_6_OTHER
104tl0_s7o: SPILL_7_OTHER
105tl0_f0n: FILL_0_NORMAL
106tl0_f1n: FILL_1_NORMAL
107tl0_f2n: FILL_2_NORMAL
108tl0_f3n: FILL_3_NORMAL
109tl0_f4n: FILL_4_NORMAL
110tl0_f5n: FILL_5_NORMAL
111tl0_f6n: FILL_6_NORMAL
112tl0_f7n: FILL_7_NORMAL
113tl0_f0o: FILL_0_OTHER
114tl0_f1o: FILL_1_OTHER
115tl0_f2o: FILL_2_OTHER
116tl0_f3o: FILL_3_OTHER
117tl0_f4o: FILL_4_OTHER
118tl0_f5o: FILL_5_OTHER
119tl0_f6o: FILL_6_OTHER
120tl0_f7o: FILL_7_OTHER
121tl0_sunos: SUNOS_SYSCALL_TRAP
122tl0_bkpt: BREAKPOINT_TRAP
123tl0_divz: TRAP(do_div0)
124tl0_flushw: FLUSH_WINDOW_TRAP
125tl0_resv104: BTRAP(0x104) BTRAP(0x105) BTRAP(0x106) BTRAP(0x107)
126 .globl tl0_solaris
127tl0_solaris: SOLARIS_SYSCALL_TRAP
128tl0_netbsd: NETBSD_SYSCALL_TRAP
129tl0_resv10a: BTRAP(0x10a) BTRAP(0x10b) BTRAP(0x10c) BTRAP(0x10d) BTRAP(0x10e)
130tl0_resv10f: BTRAP(0x10f)
131tl0_linux32: LINUX_32BIT_SYSCALL_TRAP
132tl0_oldlinux64: LINUX_64BIT_SYSCALL_TRAP
133tl0_resv112: TRAP_UTRAP(UT_TRAP_INSTRUCTION_18,0x112) TRAP_UTRAP(UT_TRAP_INSTRUCTION_19,0x113)
134tl0_resv114: TRAP_UTRAP(UT_TRAP_INSTRUCTION_20,0x114) TRAP_UTRAP(UT_TRAP_INSTRUCTION_21,0x115)
135tl0_resv116: TRAP_UTRAP(UT_TRAP_INSTRUCTION_22,0x116) TRAP_UTRAP(UT_TRAP_INSTRUCTION_23,0x117)
136tl0_resv118: TRAP_UTRAP(UT_TRAP_INSTRUCTION_24,0x118) TRAP_UTRAP(UT_TRAP_INSTRUCTION_25,0x119)
137tl0_resv11a: TRAP_UTRAP(UT_TRAP_INSTRUCTION_26,0x11a) TRAP_UTRAP(UT_TRAP_INSTRUCTION_27,0x11b)
138tl0_resv11c: TRAP_UTRAP(UT_TRAP_INSTRUCTION_28,0x11c) TRAP_UTRAP(UT_TRAP_INSTRUCTION_29,0x11d)
139tl0_resv11e: TRAP_UTRAP(UT_TRAP_INSTRUCTION_30,0x11e) TRAP_UTRAP(UT_TRAP_INSTRUCTION_31,0x11f)
140tl0_getcc: GETCC_TRAP
141tl0_setcc: SETCC_TRAP
142tl0_getpsr: TRAP(do_getpsr)
143tl0_resv123: BTRAP(0x123) BTRAP(0x124) BTRAP(0x125) BTRAP(0x126)
144tl0_solindir: INDIRECT_SOLARIS_SYSCALL(156)
145tl0_resv128: BTRAP(0x128) BTRAP(0x129) BTRAP(0x12a) BTRAP(0x12b) BTRAP(0x12c)
146tl0_resv12d: BTRAP(0x12d) BTRAP(0x12e) BTRAP(0x12f) BTRAP(0x130) BTRAP(0x131)
147tl0_resv132: BTRAP(0x132) BTRAP(0x133) BTRAP(0x134) BTRAP(0x135) BTRAP(0x136)
148tl0_resv137: BTRAP(0x137) BTRAP(0x138) BTRAP(0x139) BTRAP(0x13a) BTRAP(0x13b)
149tl0_resv13c: BTRAP(0x13c) BTRAP(0x13d) BTRAP(0x13e) BTRAP(0x13f) BTRAP(0x140)
150tl0_resv141: BTRAP(0x141) BTRAP(0x142) BTRAP(0x143) BTRAP(0x144) BTRAP(0x145)
151tl0_resv146: BTRAP(0x146) BTRAP(0x147) BTRAP(0x148) BTRAP(0x149) BTRAP(0x14a)
152tl0_resv14b: BTRAP(0x14b) BTRAP(0x14c) BTRAP(0x14d) BTRAP(0x14e) BTRAP(0x14f)
153tl0_resv150: BTRAP(0x150) BTRAP(0x151) BTRAP(0x152) BTRAP(0x153) BTRAP(0x154)
154tl0_resv155: BTRAP(0x155) BTRAP(0x156) BTRAP(0x157) BTRAP(0x158) BTRAP(0x159)
155tl0_resv15a: BTRAP(0x15a) BTRAP(0x15b) BTRAP(0x15c) BTRAP(0x15d) BTRAP(0x15e)
156tl0_resv15f: BTRAP(0x15f) BTRAP(0x160) BTRAP(0x161) BTRAP(0x162) BTRAP(0x163)
157tl0_resv164: BTRAP(0x164) BTRAP(0x165) BTRAP(0x166) BTRAP(0x167) BTRAP(0x168)
158tl0_resv169: BTRAP(0x169) BTRAP(0x16a) BTRAP(0x16b) BTRAP(0x16c)
159tl0_linux64: LINUX_64BIT_SYSCALL_TRAP
160tl0_gsctx: TRAP(sparc64_get_context) TRAP(sparc64_set_context)
161tl0_resv170: KPROBES_TRAP(0x170) KPROBES_TRAP(0x171) BTRAP(0x172)
162tl0_resv173: BTRAP(0x173) BTRAP(0x174) BTRAP(0x175) BTRAP(0x176) BTRAP(0x177)
163tl0_resv178: BTRAP(0x178) BTRAP(0x179) BTRAP(0x17a) BTRAP(0x17b) BTRAP(0x17c)
164tl0_resv17d: BTRAP(0x17d) BTRAP(0x17e) BTRAP(0x17f)
165#define BTRAPS(x) BTRAP(x) BTRAP(x+1) BTRAP(x+2) BTRAP(x+3) BTRAP(x+4) BTRAP(x+5) BTRAP(x+6) BTRAP(x+7)
166tl0_resv180: BTRAPS(0x180) BTRAPS(0x188)
167tl0_resv190: BTRAPS(0x190) BTRAPS(0x198)
168tl0_resv1a0: BTRAPS(0x1a0) BTRAPS(0x1a8)
169tl0_resv1b0: BTRAPS(0x1b0) BTRAPS(0x1b8)
170tl0_resv1c0: BTRAPS(0x1c0) BTRAPS(0x1c8)
171tl0_resv1d0: BTRAPS(0x1d0) BTRAPS(0x1d8)
172tl0_resv1e0: BTRAPS(0x1e0) BTRAPS(0x1e8)
173tl0_resv1f0: BTRAPS(0x1f0) BTRAPS(0x1f8)
174
175sparc64_ttable_tl1:
176tl1_resv000: BOOT_KERNEL BTRAPTL1(0x1) BTRAPTL1(0x2) BTRAPTL1(0x3)
177tl1_resv004: BTRAPTL1(0x4) BTRAPTL1(0x5) BTRAPTL1(0x6) BTRAPTL1(0x7)
178tl1_iax: TRAP_NOSAVE(__do_instruction_access_exception_tl1)
179tl1_resv009: BTRAPTL1(0x9)
180tl1_iae: TRAPTL1(do_iae_tl1)
181tl1_resv00b: BTRAPTL1(0xb) BTRAPTL1(0xc) BTRAPTL1(0xd) BTRAPTL1(0xe) BTRAPTL1(0xf)
182tl1_ill: TRAPTL1(do_ill_tl1)
183tl1_privop: BTRAPTL1(0x11)
184tl1_resv012: BTRAPTL1(0x12) BTRAPTL1(0x13) BTRAPTL1(0x14) BTRAPTL1(0x15)
185tl1_resv016: BTRAPTL1(0x16) BTRAPTL1(0x17) BTRAPTL1(0x18) BTRAPTL1(0x19)
186tl1_resv01a: BTRAPTL1(0x1a) BTRAPTL1(0x1b) BTRAPTL1(0x1c) BTRAPTL1(0x1d)
187tl1_resv01e: BTRAPTL1(0x1e) BTRAPTL1(0x1f)
188tl1_fpdis: TRAP_NOSAVE(do_fpdis)
189tl1_fpieee: TRAPTL1(do_fpieee_tl1)
190tl1_fpother: TRAPTL1(do_fpother_tl1)
191tl1_tof: TRAPTL1(do_tof_tl1)
192tl1_cwin: CLEAN_WINDOW
193tl1_div0: TRAPTL1(do_div0_tl1)
194tl1_resv029: BTRAPTL1(0x29) BTRAPTL1(0x2a) BTRAPTL1(0x2b) BTRAPTL1(0x2c)
195tl1_resv02d: BTRAPTL1(0x2d) BTRAPTL1(0x2e) BTRAPTL1(0x2f)
196tl1_dax: TRAP_NOSAVE(__do_data_access_exception_tl1)
197tl1_resv031: BTRAPTL1(0x31)
198tl1_dae: TRAPTL1(do_dae_tl1)
199tl1_resv033: BTRAPTL1(0x33)
200tl1_mna: TRAP_NOSAVE(do_mna)
201tl1_lddfmna: TRAPTL1(do_lddfmna_tl1)
202tl1_stdfmna: TRAPTL1(do_stdfmna_tl1)
203tl1_privact: BTRAPTL1(0x37)
204tl1_resv038: BTRAPTL1(0x38) BTRAPTL1(0x39) BTRAPTL1(0x3a) BTRAPTL1(0x3b)
205tl1_resv03c: BTRAPTL1(0x3c) BTRAPTL1(0x3d) BTRAPTL1(0x3e) BTRAPTL1(0x3f)
206tl1_resv040: BTRAPTL1(0x40)
207tl1_irq1: TRAP_IRQ(do_irq_tl1, 1) TRAP_IRQ(do_irq_tl1, 2) TRAP_IRQ(do_irq_tl1, 3)
208tl1_irq4: TRAP_IRQ(do_irq_tl1, 4) TRAP_IRQ(do_irq_tl1, 5) TRAP_IRQ(do_irq_tl1, 6)
209tl1_irq7: TRAP_IRQ(do_irq_tl1, 7) TRAP_IRQ(do_irq_tl1, 8) TRAP_IRQ(do_irq_tl1, 9)
210tl1_irq10: TRAP_IRQ(do_irq_tl1, 10) TRAP_IRQ(do_irq_tl1, 11)
211tl1_irq12: TRAP_IRQ(do_irq_tl1, 12) TRAP_IRQ(do_irq_tl1, 13)
212tl1_irq14: TRAP_IRQ(do_irq_tl1, 14) TRAP_IRQ(do_irq_tl1, 15)
213tl1_resv050: BTRAPTL1(0x50) BTRAPTL1(0x51) BTRAPTL1(0x52) BTRAPTL1(0x53)
214tl1_resv054: BTRAPTL1(0x54) BTRAPTL1(0x55) BTRAPTL1(0x56) BTRAPTL1(0x57)
215tl1_resv058: BTRAPTL1(0x58) BTRAPTL1(0x59) BTRAPTL1(0x5a) BTRAPTL1(0x5b)
216tl1_resv05c: BTRAPTL1(0x5c) BTRAPTL1(0x5d) BTRAPTL1(0x5e) BTRAPTL1(0x5f)
217tl1_ivec: TRAP_IVEC
218tl1_paw: TRAPTL1(do_paw_tl1)
219tl1_vaw: TRAPTL1(do_vaw_tl1)
220
221 /* The grotty trick to save %g1 into current->thread.cee_stuff
222 * is because when we take this trap we could be interrupting trap
223 * code already using the trap alternate global registers.
224 *
225 * We cross our fingers and pray that this store/load does
226 * not cause yet another CEE trap.
227 */
228tl1_cee: membar #Sync
229 stx %g1, [%g6 + TI_CEE_STUFF]
230 ldxa [%g0] ASI_AFSR, %g1
231 membar #Sync
232 stxa %g1, [%g0] ASI_AFSR
233 membar #Sync
234 ldx [%g6 + TI_CEE_STUFF], %g1
235 retry
236
237tl1_iamiss: BTRAPTL1(0x64) BTRAPTL1(0x65) BTRAPTL1(0x66) BTRAPTL1(0x67)
238tl1_damiss:
239#include "dtlb_backend.S"
240tl1_daprot:
241#include "dtlb_prot.S"
242tl1_fecc: BTRAPTL1(0x70) /* Fast-ECC on Cheetah */
243tl1_dcpe: BTRAPTL1(0x71) /* D-cache Parity Error on Cheetah+ */
244tl1_icpe: BTRAPTL1(0x72) /* I-cache Parity Error on Cheetah+ */
245tl1_resv073: BTRAPTL1(0x73)
246tl1_resv074: BTRAPTL1(0x74) BTRAPTL1(0x75) BTRAPTL1(0x76) BTRAPTL1(0x77)
247tl1_resv078: BTRAPTL1(0x78) BTRAPTL1(0x79) BTRAPTL1(0x7a) BTRAPTL1(0x7b)
248tl1_resv07c: BTRAPTL1(0x7c) BTRAPTL1(0x7d) BTRAPTL1(0x7e) BTRAPTL1(0x7f)
249tl1_s0n: SPILL_0_NORMAL
250tl1_s1n: SPILL_1_NORMAL
251tl1_s2n: SPILL_2_NORMAL
252tl1_s3n: SPILL_3_NORMAL
253tl1_s4n: SPILL_4_NORMAL
254tl1_s5n: SPILL_5_NORMAL
255tl1_s6n: SPILL_6_NORMAL
256tl1_s7n: SPILL_7_NORMAL
257tl1_s0o: SPILL_0_OTHER
258tl1_s1o: SPILL_1_OTHER
259tl1_s2o: SPILL_2_OTHER
260tl1_s3o: SPILL_3_OTHER
261tl1_s4o: SPILL_4_OTHER
262tl1_s5o: SPILL_5_OTHER
263tl1_s6o: SPILL_6_OTHER
264tl1_s7o: SPILL_7_OTHER
265tl1_f0n: FILL_0_NORMAL
266tl1_f1n: FILL_1_NORMAL
267tl1_f2n: FILL_2_NORMAL
268tl1_f3n: FILL_3_NORMAL
269tl1_f4n: FILL_4_NORMAL
270tl1_f5n: FILL_5_NORMAL
271tl1_f6n: FILL_6_NORMAL
272tl1_f7n: FILL_7_NORMAL
273tl1_f0o: FILL_0_OTHER
274tl1_f1o: FILL_1_OTHER
275tl1_f2o: FILL_2_OTHER
276tl1_f3o: FILL_3_OTHER
277tl1_f4o: FILL_4_OTHER
278tl1_f5o: FILL_5_OTHER
279tl1_f6o: FILL_6_OTHER
280tl1_f7o: FILL_7_OTHER
diff --git a/arch/sparc64/kernel/unaligned.c b/arch/sparc64/kernel/unaligned.c
new file mode 100644
index 000000000000..4372bf32ecf6
--- /dev/null
+++ b/arch/sparc64/kernel/unaligned.c
@@ -0,0 +1,729 @@
1/* $Id: unaligned.c,v 1.24 2002/02/09 19:49:31 davem Exp $
2 * unaligned.c: Unaligned load/store trap handling with special
3 * cases for the kernel to do them more quickly.
4 *
5 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
6 * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
7 */
8
9
10#include <linux/kernel.h>
11#include <linux/sched.h>
12#include <linux/mm.h>
13#include <linux/module.h>
14#include <asm/asi.h>
15#include <asm/ptrace.h>
16#include <asm/pstate.h>
17#include <asm/processor.h>
18#include <asm/system.h>
19#include <asm/uaccess.h>
20#include <linux/smp.h>
21#include <linux/smp_lock.h>
22#include <linux/bitops.h>
23#include <asm/fpumacro.h>
24
25/* #define DEBUG_MNA */
26
/* Data-transfer direction decoded from a trapped load/store opcode. */
27enum direction {
28 load, /* ld, ldd, ldh, ldsh */
29 store, /* st, std, sth, stsh */
30 both, /* Swap, ldstub, cas, ... */
31 fpld, /* floating-point register load */
32 fpst, /* floating-point register store */
33 invalid,
34};
35
36#ifdef DEBUG_MNA
37static char *dirstrings[] = {
38 "load", "store", "both", "fpload", "fpstore", "invalid"
39};
40#endif
41
42static inline enum direction decode_direction(unsigned int insn)
43{
44 unsigned long tmp = (insn >> 21) & 1;
45
46 if (!tmp)
47 return load;
48 else {
49 switch ((insn>>19)&0xf) {
50 case 15: /* swap* */
51 return both;
52 default:
53 return store;
54 }
55 }
56}
57
58/* 16 = double-word, 8 = extra-word, 4 = word, 2 = half-word */
59static inline int decode_access_size(unsigned int insn)
60{
61 unsigned int tmp;
62
63 tmp = ((insn >> 19) & 0xf);
64 if (tmp == 11 || tmp == 14) /* ldx/stx */
65 return 8;
66 tmp &= 3;
67 if (!tmp)
68 return 4;
69 else if (tmp == 3)
70 return 16; /* ldd/std - Although it is actually 8 */
71 else if (tmp == 2)
72 return 2;
73 else {
74 printk("Impossible unaligned trap. insn=%08x\n", insn);
75 die_if_kernel("Byte sized unaligned access?!?!", current_thread_info()->kregs);
76
77 /* GCC should never warn that control reaches the end
78 * of this function without returning a value because
79 * die_if_kernel() is marked with attribute 'noreturn'.
80 * Alas, some versions do...
81 */
82
83 return 0;
84 }
85}
86
87static inline int decode_asi(unsigned int insn, struct pt_regs *regs)
88{
89 if (insn & 0x800000) {
90 if (insn & 0x2000)
91 return (unsigned char)(regs->tstate >> 24); /* %asi */
92 else
93 return (unsigned char)(insn >> 5); /* imm_asi */
94 } else
95 return ASI_P;
96}
97
/* Non-zero (the raw bit 22) when the load sign-extends, zero for an
 * unsigned load.
 */
static inline int decode_signedness(unsigned int insn)
{
	unsigned int sign_bit = insn & 0x400000;

	return sign_bit;
}
103
/* If any operand register lives in a register window (>= %l0, i.e.
 * reg number 16+), spill all windows to the stack so fetch_reg* can
 * read them from memory.  Kernel traps flush with the flushw insn;
 * user traps go through flushw_user().
 */
104static inline void maybe_flush_windows(unsigned int rs1, unsigned int rs2,
105 unsigned int rd, int from_kernel)
106{
107 if (rs2 >= 16 || rs1 >= 16 || rd >= 16) {
108 if (from_kernel != 0)
109 __asm__ __volatile__("flushw");
110 else
111 flushw_user();
112 }
113}
114
/* Sign-extend the 13-bit immediate field (simm13) of a memory
 * instruction to a full long.
 *
 * The previous "imm << 51 >> 51" relied on left-shifting a negative
 * signed value (undefined behavior, C11 6.5.7) and on an arithmetic
 * right shift of a negative value (implementation-defined).  The
 * mask/xor/subtract form below is fully portable and yields the same
 * result for every input: bits above 12 are ignored and bit 12 is
 * propagated as the sign.
 */
static inline long sign_extend_imm13(long imm)
{
	imm &= 0x1fff;			/* isolate the 13-bit field */
	return (imm ^ 0x1000) - 0x1000;	/* fold bit 12 into the sign */
}
119
/* Read integer register 'reg' for the trapped context: %g0-%o7 come
 * straight from pt_regs (%g0 reads as zero); %l0-%i7 are read from
 * the register window previously spilled to the stack frame (kernel
 * 64-bit frame, 32-bit user frame, or 64-bit user frame with
 * STACK_BIAS).
 *
 * NOTE(review): the get_user() return values are ignored, so 'value'
 * may be unread stack garbage if the user frame faults — presumably
 * the caller's subsequent access faults the same way; confirm.
 */
120static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs)
121{
122 unsigned long value;
123
124 if (reg < 16)
125 return (!reg ? 0 : regs->u_regs[reg]);
126 if (regs->tstate & TSTATE_PRIV) {
127 struct reg_window *win;
128 win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
129 value = win->locals[reg - 16];
130 } else if (test_thread_flag(TIF_32BIT)) {
131 struct reg_window32 __user *win32;
132 win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
133 get_user(value, &win32->locals[reg - 16]);
134 } else {
135 struct reg_window __user *win;
136 win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS);
137 get_user(value, &win->locals[reg - 16]);
138 }
139 return value;
140}
141
/* Return the address where integer register 'reg' of the trapped
 * context is stored: inside pt_regs for %g/%o registers, otherwise
 * inside the window save area on the stack.  For a 32-bit user task
 * the returned pointer addresses a 32-bit slot cast to unsigned
 * long * — callers must account for the narrower width.
 */
142static unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs)
143{
144 if (reg < 16)
145 return &regs->u_regs[reg];
146 if (regs->tstate & TSTATE_PRIV) {
147 struct reg_window *win;
148 win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
149 return &win->locals[reg - 16];
150 } else if (test_thread_flag(TIF_32BIT)) {
151 struct reg_window32 *win32;
152 win32 = (struct reg_window32 *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
153 return (unsigned long *)&win32->locals[reg - 16];
154 } else {
155 struct reg_window *win;
156 win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
157 return &win->locals[reg - 16];
158 }
159}
160
/* Compute the memory address a trapped load/store referenced:
 * rs1 + simm13 for the immediate form, rs1 + rs2 otherwise.
 * Windows are flushed first so windowed source registers can be
 * fetched from the stack.
 */
161unsigned long compute_effective_address(struct pt_regs *regs,
162 unsigned int insn, unsigned int rd)
163{
164 unsigned int rs1 = (insn >> 14) & 0x1f;
165 unsigned int rs2 = insn & 0x1f;
166 int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
167
168 if (insn & 0x2000) {
169 maybe_flush_windows(rs1, 0, rd, from_kernel);
170 return (fetch_reg(rs1, regs) + sign_extend_imm13(insn));
171 } else {
172 maybe_flush_windows(rs1, rs2, rd, from_kernel);
173 return (fetch_reg(rs1, regs) + fetch_reg(rs2, regs));
174 }
175}
176
177/* This is just to make gcc think die_if_kernel does return... */
/* Wrapper kept out-of-line (__attribute_used__) so the fatal path
 * survives optimization; never returns when called from kernel mode.
 */
178static void __attribute_used__ unaligned_panic(char *str, struct pt_regs *regs)
179{
180 die_if_kernel(str, regs);
181}
182
/* Emulate an unaligned integer load byte-by-byte with lduba through
 * the given ASI, assembling 2-, 4-, 8- (and 16-as-two-8) byte values
 * and storing the result (sign-extended when is_signed) through
 * dest_reg.  Every lduba has an __ex_table entry branching to errh on
 * fault; %asi is restored to ASI_AIUS afterwards.
 */
183#define do_integer_load(dest_reg, size, saddr, is_signed, asi, errh) ({ \
184__asm__ __volatile__ ( \
185 "wr %4, 0, %%asi\n\t" \
186 "cmp %1, 8\n\t" \
187 "bge,pn %%icc, 9f\n\t" \
188 " cmp %1, 4\n\t" \
189 "be,pt %%icc, 6f\n" \
190"4:\t" " lduba [%2] %%asi, %%l1\n" \
191"5:\t" "lduba [%2 + 1] %%asi, %%l2\n\t" \
192 "sll %%l1, 8, %%l1\n\t" \
193 "brz,pt %3, 3f\n\t" \
194 " add %%l1, %%l2, %%l1\n\t" \
195 "sllx %%l1, 48, %%l1\n\t" \
196 "srax %%l1, 48, %%l1\n" \
197"3:\t" "ba,pt %%xcc, 0f\n\t" \
198 " stx %%l1, [%0]\n" \
199"6:\t" "lduba [%2 + 1] %%asi, %%l2\n\t" \
200 "sll %%l1, 24, %%l1\n" \
201"7:\t" "lduba [%2 + 2] %%asi, %%g7\n\t" \
202 "sll %%l2, 16, %%l2\n" \
203"8:\t" "lduba [%2 + 3] %%asi, %%g1\n\t" \
204 "sll %%g7, 8, %%g7\n\t" \
205 "or %%l1, %%l2, %%l1\n\t" \
206 "or %%g7, %%g1, %%g7\n\t" \
207 "or %%l1, %%g7, %%l1\n\t" \
208 "brnz,a,pt %3, 3f\n\t" \
209 " sra %%l1, 0, %%l1\n" \
210"3:\t" "ba,pt %%xcc, 0f\n\t" \
211 " stx %%l1, [%0]\n" \
212"9:\t" "lduba [%2] %%asi, %%l1\n" \
213"10:\t" "lduba [%2 + 1] %%asi, %%l2\n\t" \
214 "sllx %%l1, 56, %%l1\n" \
215"11:\t" "lduba [%2 + 2] %%asi, %%g7\n\t" \
216 "sllx %%l2, 48, %%l2\n" \
217"12:\t" "lduba [%2 + 3] %%asi, %%g1\n\t" \
218 "sllx %%g7, 40, %%g7\n\t" \
219 "sllx %%g1, 32, %%g1\n\t" \
220 "or %%l1, %%l2, %%l1\n\t" \
221 "or %%g7, %%g1, %%g7\n" \
222"13:\t" "lduba [%2 + 4] %%asi, %%l2\n\t" \
223 "or %%l1, %%g7, %%g7\n" \
224"14:\t" "lduba [%2 + 5] %%asi, %%g1\n\t" \
225 "sllx %%l2, 24, %%l2\n" \
226"15:\t" "lduba [%2 + 6] %%asi, %%l1\n\t" \
227 "sllx %%g1, 16, %%g1\n\t" \
228 "or %%g7, %%l2, %%g7\n" \
229"16:\t" "lduba [%2 + 7] %%asi, %%l2\n\t" \
230 "sllx %%l1, 8, %%l1\n\t" \
231 "or %%g7, %%g1, %%g7\n\t" \
232 "or %%l1, %%l2, %%l1\n\t" \
233 "or %%g7, %%l1, %%g7\n\t" \
234 "cmp %1, 8\n\t" \
235 "be,a,pt %%icc, 0f\n\t" \
236 " stx %%g7, [%0]\n\t" \
237 "srlx %%g7, 32, %%l1\n\t" \
238 "sra %%g7, 0, %%g7\n\t" \
239 "stx %%l1, [%0]\n\t" \
240 "stx %%g7, [%0 + 8]\n" \
241"0:\n\t" \
242 "wr %%g0, %5, %%asi\n\n\t" \
243 ".section __ex_table\n\t" \
244 ".word 4b, " #errh "\n\t" \
245 ".word 5b, " #errh "\n\t" \
246 ".word 6b, " #errh "\n\t" \
247 ".word 7b, " #errh "\n\t" \
248 ".word 8b, " #errh "\n\t" \
249 ".word 9b, " #errh "\n\t" \
250 ".word 10b, " #errh "\n\t" \
251 ".word 11b, " #errh "\n\t" \
252 ".word 12b, " #errh "\n\t" \
253 ".word 13b, " #errh "\n\t" \
254 ".word 14b, " #errh "\n\t" \
255 ".word 15b, " #errh "\n\t" \
256 ".word 16b, " #errh "\n\n\t" \
257 ".previous\n\t" \
258 : : "r" (dest_reg), "r" (size), "r" (saddr), "r" (is_signed), \
259 "r" (asi), "i" (ASI_AIUS) \
260 : "l1", "l2", "g7", "g1", "cc"); \
261})
262
/* Emulate an unaligned store: load the 8-byte source value from
 * src_val, then scatter it byte-by-byte with stba through the given
 * ASI for size 2, 4 or 8.  Faulting stores branch to errh via
 * __ex_table; %asi is restored to ASI_AIUS afterwards.
 */
263#define store_common(dst_addr, size, src_val, asi, errh) ({ \
264__asm__ __volatile__ ( \
265 "wr %3, 0, %%asi\n\t" \
266 "ldx [%2], %%l1\n" \
267 "cmp %1, 2\n\t" \
268 "be,pn %%icc, 2f\n\t" \
269 " cmp %1, 4\n\t" \
270 "be,pt %%icc, 1f\n\t" \
271 " srlx %%l1, 24, %%l2\n\t" \
272 "srlx %%l1, 56, %%g1\n\t" \
273 "srlx %%l1, 48, %%g7\n" \
274"4:\t" "stba %%g1, [%0] %%asi\n\t" \
275 "srlx %%l1, 40, %%g1\n" \
276"5:\t" "stba %%g7, [%0 + 1] %%asi\n\t" \
277 "srlx %%l1, 32, %%g7\n" \
278"6:\t" "stba %%g1, [%0 + 2] %%asi\n" \
279"7:\t" "stba %%g7, [%0 + 3] %%asi\n\t" \
280 "srlx %%l1, 16, %%g1\n" \
281"8:\t" "stba %%l2, [%0 + 4] %%asi\n\t" \
282 "srlx %%l1, 8, %%g7\n" \
283"9:\t" "stba %%g1, [%0 + 5] %%asi\n" \
284"10:\t" "stba %%g7, [%0 + 6] %%asi\n\t" \
285 "ba,pt %%xcc, 0f\n" \
286"11:\t" " stba %%l1, [%0 + 7] %%asi\n" \
287"1:\t" "srl %%l1, 16, %%g7\n" \
288"12:\t" "stba %%l2, [%0] %%asi\n\t" \
289 "srl %%l1, 8, %%l2\n" \
290"13:\t" "stba %%g7, [%0 + 1] %%asi\n" \
291"14:\t" "stba %%l2, [%0 + 2] %%asi\n\t" \
292 "ba,pt %%xcc, 0f\n" \
293"15:\t" " stba %%l1, [%0 + 3] %%asi\n" \
294"2:\t" "srl %%l1, 8, %%l2\n" \
295"16:\t" "stba %%l2, [%0] %%asi\n" \
296"17:\t" "stba %%l1, [%0 + 1] %%asi\n" \
297"0:\n\t" \
298 "wr %%g0, %4, %%asi\n\n\t" \
299 ".section __ex_table\n\t" \
300 ".word 4b, " #errh "\n\t" \
301 ".word 5b, " #errh "\n\t" \
302 ".word 6b, " #errh "\n\t" \
303 ".word 7b, " #errh "\n\t" \
304 ".word 8b, " #errh "\n\t" \
305 ".word 9b, " #errh "\n\t" \
306 ".word 10b, " #errh "\n\t" \
307 ".word 11b, " #errh "\n\t" \
308 ".word 12b, " #errh "\n\t" \
309 ".word 13b, " #errh "\n\t" \
310 ".word 14b, " #errh "\n\t" \
311 ".word 15b, " #errh "\n\t" \
312 ".word 16b, " #errh "\n\t" \
313 ".word 17b, " #errh "\n\n\t" \
314 ".previous\n\t" \
315 : : "r" (dst_addr), "r" (size), "r" (src_val), "r" (asi), "i" (ASI_AIUS)\
316 : "l1", "l2", "g7", "g1", "cc"); \
317})
318
/* Store from an integer register: size 16 (ldd/std encoding) is
 * collapsed into one 8-byte store built from the even/odd register
 * pair; %g0 stores zero.  Delegates the actual byte stores to
 * store_common().
 */
319#define do_integer_store(reg_num, size, dst_addr, regs, asi, errh) ({ \
320 unsigned long zero = 0; \
321 unsigned long *src_val = &zero; \
322 \
323 if (size == 16) { \
324 size = 8; \
325 zero = (((long)(reg_num ? \
326 (unsigned)fetch_reg(reg_num, regs) : 0)) << 32) | \
327 (unsigned)fetch_reg(reg_num + 1, regs); \
328 } else if (reg_num) src_val = fetch_reg_addr(reg_num, regs); \
329 store_common(dst_addr, size, src_val, asi, errh); \
330})
331
332extern void smp_capture(void);
333extern void smp_release(void);
334
/* Emulate an unaligned swap-style atomic by capturing all other cpus
 * and disabling local interrupts around a load+store pair.
 *
 * NOTE(review): this passes only 5 arguments to the 6-parameter
 * do_integer_load() macro (the 'asi' argument is missing), so the
 * macro cannot expand; it never breaks the build only because the
 * sole caller is under "#if 0".  Fix the arity before re-enabling.
 */
335#define do_atomic(srcdest_reg, mem, errh) ({ \
336 unsigned long flags, tmp; \
337 \
338 smp_capture(); \
339 local_irq_save(flags); \
340 tmp = *srcdest_reg; \
341 do_integer_load(srcdest_reg, 4, mem, 0, errh); \
342 store_common(mem, 4, &tmp, errh); \
343 local_irq_restore(flags); \
344 smp_release(); \
345})
346
347static inline void advance(struct pt_regs *regs)
348{
349 regs->tpc = regs->tnpc;
350 regs->tnpc += 4;
351 if (test_thread_flag(TIF_32BIT)) {
352 regs->tpc &= 0xffffffff;
353 regs->tnpc &= 0xffffffff;
354 }
355}
356
/* True when bit 24 of the instruction selects the FP register file. */
static inline int floating_point_load_or_store_p(unsigned int insn)
{
	return (insn & (1u << 24)) != 0;
}
361
/* The kernel never issues FP loads/stores itself, so an in-kernel
 * unaligned trap on one cannot be emulated here.
 */
static inline int ok_for_kernel(unsigned int insn)
{
	if (floating_point_load_or_store_p(insn))
		return 0;
	return 1;
}
366
/* Fault handler reached (via the asm label in kernel_unaligned_trap)
 * when the byte-wise emulation itself faults.  With an exception-table
 * fixup for the trapped pc we redirect execution there and restore
 * %asi; without one we print an oops-style report and die.
 */
367void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn) __asm__ ("kernel_mna_trap_fault");
368
369void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn)
370{
371 unsigned long g2 = regs->u_regs [UREG_G2];
372 unsigned long fixup = search_extables_range(regs->tpc, &g2);
373
374 if (!fixup) {
375 unsigned long address = compute_effective_address(regs, insn, ((insn >> 25) & 0x1f));
376 if (address < PAGE_SIZE) {
377 printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference in mna handler");
378 } else
379 printk(KERN_ALERT "Unable to handle kernel paging request in mna handler");
380 printk(KERN_ALERT " at virtual address %016lx\n",address);
381 printk(KERN_ALERT "current->{mm,active_mm}->context = %016lx\n",
382 (current->mm ? CTX_HWBITS(current->mm->context) :
383 CTX_HWBITS(current->active_mm->context)));
384 printk(KERN_ALERT "current->{mm,active_mm}->pgd = %016lx\n",
385 (current->mm ? (unsigned long) current->mm->pgd :
386 (unsigned long) current->active_mm->pgd));
387 die_if_kernel("Oops", regs);
388 /* Not reached */
389 }
	/* Resume at the fixup stub; g2 may have been updated by the
	 * range search.
	 */
390 regs->tpc = fixup;
391 regs->tnpc = regs->tpc + 4;
392 regs->u_regs [UREG_G2] = g2;
393
	/* Reset the saved %asi field in TSTATE back to ASI_AIUS. */
394 regs->tstate &= ~TSTATE_ASI;
395 regs->tstate |= (ASI_AIUS << 24UL);
396}
397
/* Entry point for an unaligned access taken in kernel mode: decode
 * the instruction, emulate plain integer loads/stores byte-by-byte,
 * and refuse FP or atomic accesses (the kernel never issues those
 * unaligned).
 */
398asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn, unsigned long sfar, unsigned long sfsr)
399{
400 enum direction dir = decode_direction(insn);
401 int size = decode_access_size(insn);
402
403 if (!ok_for_kernel(insn) || dir == both) {
404 printk("Unsupported unaligned load/store trap for kernel at <%016lx>.\n",
405 regs->tpc);
406 unaligned_panic("Kernel does fpu/atomic unaligned load/store.", regs);
407
		/* Never executed as straight-line code: this asm only
		 * emits the "kernel_unaligned_trap_fault" label that the
		 * emulation macros' __ex_table entries branch to, which
		 * then tail-calls kernel_mna_trap_fault(regs, insn).
		 */
408 __asm__ __volatile__ ("\n"
409"kernel_unaligned_trap_fault:\n\t"
410 "mov %0, %%o0\n\t"
411 "call kernel_mna_trap_fault\n\t"
412 " mov %1, %%o1\n\t"
413 :
414 : "r" (regs), "r" (insn)
415 : "o0", "o1", "o2", "o3", "o4", "o5", "o7",
416 "g1", "g2", "g3", "g4", "g7", "cc");
417 } else {
418 unsigned long addr = compute_effective_address(regs, insn, ((insn >> 25) & 0x1f));
419
420#ifdef DEBUG_MNA
421 printk("KMNA: pc=%016lx [dir=%s addr=%016lx size=%d] retpc[%016lx]\n",
422 regs->tpc, dirstrings[dir], addr, size, regs->u_regs[UREG_RETPC]);
423#endif
424 switch (dir) {
425 case load:
426 do_integer_load(fetch_reg_addr(((insn>>25)&0x1f), regs),
427 size, (unsigned long *) addr,
428 decode_signedness(insn), decode_asi(insn, regs),
429 kernel_unaligned_trap_fault);
430 break;
431
432 case store:
433 do_integer_store(((insn>>25)&0x1f), size,
434 (unsigned long *) addr, regs,
435 decode_asi(insn, regs),
436 kernel_unaligned_trap_fault);
437 break;
438#if 0 /* unsupported */
439 case both:
440 do_atomic(fetch_reg_addr(((insn>>25)&0x1f), regs),
441 (unsigned long *) addr,
442 kernel_unaligned_trap_fault);
443 break;
444#endif
445 default:
446 panic("Impossible kernel unaligned trap.");
447 /* Not reached... */
448 }
		/* Skip the emulated instruction. */
449 advance(regs);
450 }
451}
452
/* Population count of each 4-bit nibble value (0..15). */
453static char popc_helper[] = {
4540, 1, 1, 2, 1, 2, 2, 3,
4551, 2, 2, 3, 2, 3, 3, 4,
456};
457
/* Emulate the popc instruction: count set bits in the source operand
 * (immediate or register) nibble-by-nibble via popc_helper, then write
 * the count into rd — directly into pt_regs for %g/%o, or into the
 * caller's window save area on the stack for %l/%i.  Returns 1 and
 * advances past the instruction.
 */
458int handle_popc(u32 insn, struct pt_regs *regs)
459{
460 u64 value;
461 int ret, i, rd = ((insn >> 25) & 0x1f);
462 int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
463
464 if (insn & 0x2000) {
465 maybe_flush_windows(0, 0, rd, from_kernel);
466 value = sign_extend_imm13(insn);
467 } else {
468 maybe_flush_windows(0, insn & 0x1f, rd, from_kernel);
469 value = fetch_reg(insn & 0x1f, regs);
470 }
	/* 64 bits = 16 nibbles. */
471 for (ret = 0, i = 0; i < 16; i++) {
472 ret += popc_helper[value & 0xf];
473 value >>= 4;
474 }
475 if (rd < 16) {
		/* Writes to %g0 are discarded. */
476 if (rd)
477 regs->u_regs[rd] = ret;
478 } else {
479 if (test_thread_flag(TIF_32BIT)) {
480 struct reg_window32 __user *win32;
481 win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
482 put_user(ret, &win32->locals[rd - 16]);
483 } else {
484 struct reg_window __user *win;
485 win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS);
486 put_user(ret, &win->locals[rd - 16]);
487 }
488 }
489 advance(regs);
490 return 1;
491}
492
493extern void do_fpother(struct pt_regs *regs);
494extern void do_privact(struct pt_regs *regs);
495extern void data_access_exception(struct pt_regs *regs,
496 unsigned long sfsr,
497 unsigned long sfar);

/* Emulate user FP quad stores (STQ) and FP loads (LDF/LDDF/LDQF)
 * through alternate ASIs, 32 bits at a time via put_user/get_user,
 * handling little-endian ASIs by byte-swapping and honoring the
 * non-faulting (NF) ASI variants.  Rejects privileged ASIs (< 0x80)
 * with a privilege trap and unknown ASIs with a data access
 * exception.  Returns 1 after advancing past the instruction, 0 when
 * an invalid FP register was signalled via do_fpother().
 */
499int handle_ldf_stq(u32 insn, struct pt_regs *regs)
500{
501 unsigned long addr = compute_effective_address(regs, insn, 0);
502 int freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
503 struct fpustate *f = FPUSTATE;
504 int asi = decode_asi(insn, regs);
505 int flag = (freg < 32) ? FPRS_DL : FPRS_DU;
506
507 save_and_clear_fpu();
	/* Clear the ftt field of the saved %fsr. */
508 current_thread_info()->xfsr[0] &= ~0x1c000;
	/* Quad registers must be 4-aligned register numbers. */
509 if (freg & 3) {
510 current_thread_info()->xfsr[0] |= (6 << 14) /* invalid_fp_register */;
511 do_fpother(regs);
512 return 0;
513 }
514 if (insn & 0x200000) {
515 /* STQ */
516 u64 first = 0, second = 0;
517
		/* Use zeros unless this half of the FP file is live. */
518 if (current_thread_info()->fpsaved[0] & flag) {
519 first = *(u64 *)&f->regs[freg];
520 second = *(u64 *)&f->regs[freg+2];
521 }
522 if (asi < 0x80) {
523 do_privact(regs);
524 return 1;
525 }
526 switch (asi) {
527 case ASI_P:
528 case ASI_S: break;
529 case ASI_PL:
530 case ASI_SL:
531 {
532 /* Need to convert endians */
533 u64 tmp = __swab64p(&first);
534
535 first = __swab64p(&second);
536 second = tmp;
537 break;
538 }
539 default:
540 data_access_exception(regs, 0, addr);
541 return 1;
542 }
543 if (put_user (first >> 32, (u32 __user *)addr) ||
544 __put_user ((u32)first, (u32 __user *)(addr + 4)) ||
545 __put_user (second >> 32, (u32 __user *)(addr + 8)) ||
546 __put_user ((u32)second, (u32 __user *)(addr + 12))) {
547 data_access_exception(regs, 0, addr);
548 return 1;
549 }
550 } else {
551 /* LDF, LDDF, LDQF */
552 u32 data[4] __attribute__ ((aligned(8)));
553 int size, i;
554 int err;
555
556 if (asi < 0x80) {
557 do_privact(regs);
558 return 1;
559 } else if (asi > ASI_SNFL) {
560 data_access_exception(regs, 0, addr);
561 return 1;
562 }
		/* size in 32-bit words: LDF=1, LDQF=4, LDDF=2. */
563 switch (insn & 0x180000) {
564 case 0x000000: size = 1; break;
565 case 0x100000: size = 4; break;
566 default: size = 2; break;
567 }
568 for (i = 0; i < size; i++)
569 data[i] = 0;
570
571 err = get_user (data[0], (u32 __user *) addr);
572 if (!err) {
573 for (i = 1; i < size; i++)
574 err |= __get_user (data[i], (u32 __user *)(addr + 4*i));
575 }
		/* Non-faulting ASIs silently load zeros on fault. */
576 if (err && !(asi & 0x2 /* NF */)) {
577 data_access_exception(regs, 0, addr);
578 return 1;
579 }
580 if (asi & 0x8) /* Little */ {
581 u64 tmp;
582
583 switch (size) {
584 case 1: data[0] = le32_to_cpup(data + 0); break;
585 default:*(u64 *)(data + 0) = le64_to_cpup((u64 *)(data + 0));
586 break;
587 case 4: tmp = le64_to_cpup((u64 *)(data + 0));
588 *(u64 *)(data + 0) = le64_to_cpup((u64 *)(data + 2));
589 *(u64 *)(data + 2) = tmp;
590 break;
591 }
592 }
		/* First FP use since context switch: initialize fsr/gsr. */
593 if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) {
594 current_thread_info()->fpsaved[0] = FPRS_FEF;
595 current_thread_info()->gsr[0] = 0;
596 }
		/* Zero the untouched half of the register file before
		 * marking it live.
		 */
597 if (!(current_thread_info()->fpsaved[0] & flag)) {
598 if (freg < 32)
599 memset(f->regs, 0, 32*sizeof(u32));
600 else
601 memset(f->regs+32, 0, 32*sizeof(u32));
602 }
603 memcpy(f->regs + freg, data, size * 4);
604 current_thread_info()->fpsaved[0] |= flag;
605 }
606 advance(regs);
607 return 1;
608}
609
/* Emulate a non-faulting load that did fault: the architecture says
 * it simply loads zero into rd (and rd+1 for the ldd encoding).
 * Writes go directly for kernel/pt_regs registers, via put_user into
 * the user window save area otherwise (32-bit slots for 32-bit
 * tasks).
 */
610void handle_ld_nf(u32 insn, struct pt_regs *regs)
611{
612 int rd = ((insn >> 25) & 0x1f);
613 int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
614 unsigned long *reg;
615
616 maybe_flush_windows(0, 0, rd, from_kernel);
617 reg = fetch_reg_addr(rd, regs);
618 if (from_kernel || rd < 16) {
619 reg[0] = 0;
620 if ((insn & 0x780000) == 0x180000)
621 reg[1] = 0;
622 } else if (test_thread_flag(TIF_32BIT)) {
623 put_user(0, (int __user *) reg);
624 if ((insn & 0x780000) == 0x180000)
625 put_user(0, ((int __user *) reg) + 1);
626 } else {
627 put_user(0, (unsigned long __user *) reg);
628 if ((insn & 0x780000) == 0x180000)
629 put_user(0, (unsigned long __user *) reg + 1);
630 }
631 advance(regs);
632}
633
/* Emulate a user lddf that trapped on a 4-byte-aligned (but not
 * 8-byte-aligned) address: read the two 32-bit halves separately,
 * reassemble (byte-swapping for little-endian ASIs, zero-filling for
 * non-faulting ASIs on fault) and deposit into the FP register file.
 * Invalid ASIs or unreadable addresses raise a data access exception.
 */
634void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
635{
636 unsigned long pc = regs->tpc;
637 unsigned long tstate = regs->tstate;
638 u32 insn;
639 u32 first, second;
640 u64 value;
641 u8 asi, freg;
642 int flag;
643 struct fpustate *f = FPUSTATE;
644
645 if (tstate & TSTATE_PRIV)
646 die_if_kernel("lddfmna from kernel", regs);
647 if (test_thread_flag(TIF_32BIT))
648 pc = (u32)pc;
	/* NOTE(review): get_user returns 0 or -EFAULT, so this reads as
	 * "insn fetched OK" — the unusual comparison is kept as-is.
	 */
649 if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
		/* The trapping ASI was latched in the SFSR. */
650 asi = sfsr >> 16;
651 if ((asi > ASI_SNFL) ||
652 (asi < ASI_P))
653 goto daex;
654 if (get_user(first, (u32 __user *)sfar) ||
655 get_user(second, (u32 __user *)(sfar + 4))) {
656 if (asi & 0x2) /* NF */ {
657 first = 0; second = 0;
658 } else
659 goto daex;
660 }
661 save_and_clear_fpu();
662 freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
663 value = (((u64)first) << 32) | second;
664 if (asi & 0x8) /* Little */
665 value = __swab64p(&value);
666 flag = (freg < 32) ? FPRS_DL : FPRS_DU;
		/* First FP use since context switch: initialize fsr/gsr. */
667 if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) {
668 current_thread_info()->fpsaved[0] = FPRS_FEF;
669 current_thread_info()->gsr[0] = 0;
670 }
		/* Zero the untouched half of the FP file before use. */
671 if (!(current_thread_info()->fpsaved[0] & flag)) {
672 if (freg < 32)
673 memset(f->regs, 0, 32*sizeof(u32));
674 else
675 memset(f->regs+32, 0, 32*sizeof(u32));
676 }
677 *(u64 *)(f->regs + freg) = value;
678 current_thread_info()->fpsaved[0] |= flag;
679 } else {
680daex: data_access_exception(regs, sfsr, sfar);
681 return;
682 }
683 advance(regs);
684 return;
685}
686
/* Emulate a user stdf that trapped on a 4-byte-aligned (but not
 * 8-byte-aligned) address: fetch the FP register value (zero when
 * that half of the FP file is not live), byte-swap for little-endian
 * ASIs, and store it as two 32-bit halves.  Invalid ASIs or
 * unwritable addresses raise a data access exception.
 */
687void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
688{
689 unsigned long pc = regs->tpc;
690 unsigned long tstate = regs->tstate;
691 u32 insn;
692 u64 value;
693 u8 asi, freg;
694 int flag;
695 struct fpustate *f = FPUSTATE;
696
697 if (tstate & TSTATE_PRIV)
698 die_if_kernel("stdfmna from kernel", regs);
699 if (test_thread_flag(TIF_32BIT))
700 pc = (u32)pc;
	/* NOTE(review): get_user returns 0 or -EFAULT, so this reads as
	 * "insn fetched OK" — the unusual comparison is kept as-is.
	 */
701 if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
702 freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
		/* The trapping ASI was latched in the SFSR. */
703 asi = sfsr >> 16;
704 value = 0;
705 flag = (freg < 32) ? FPRS_DL : FPRS_DU;
706 if ((asi > ASI_SNFL) ||
707 (asi < ASI_P))
708 goto daex;
709 save_and_clear_fpu();
710 if (current_thread_info()->fpsaved[0] & flag)
711 value = *(u64 *)&f->regs[freg];
712 switch (asi) {
713 case ASI_P:
714 case ASI_S: break;
715 case ASI_PL:
716 case ASI_SL:
717 value = __swab64p(&value); break;
718 default: goto daex;
719 }
720 if (put_user (value >> 32, (u32 __user *) sfar) ||
721 __put_user ((u32)value, (u32 __user *)(sfar + 4)))
722 goto daex;
723 } else {
724daex: data_access_exception(regs, sfsr, sfar);
725 return;
726 }
727 advance(regs);
728 return;
729}
diff --git a/arch/sparc64/kernel/us2e_cpufreq.c b/arch/sparc64/kernel/us2e_cpufreq.c
new file mode 100644
index 000000000000..7aae0a18aabe
--- /dev/null
+++ b/arch/sparc64/kernel/us2e_cpufreq.c
@@ -0,0 +1,400 @@
1/* us2e_cpufreq.c: UltraSPARC-IIe cpu frequency support
2 *
3 * Copyright (C) 2003 David S. Miller (davem@redhat.com)
4 *
5 * Many thanks to Dominik Brodowski for fixing up the cpufreq
6 * infrastructure in order to make this driver easier to implement.
7 */
8
9#include <linux/kernel.h>
10#include <linux/module.h>
11#include <linux/sched.h>
12#include <linux/smp.h>
13#include <linux/cpufreq.h>
14#include <linux/threads.h>
15#include <linux/slab.h>
16#include <linux/delay.h>
17#include <linux/init.h>
18
19#include <asm/asi.h>
20#include <asm/timer.h>
21
22static struct cpufreq_driver *cpufreq_us2e_driver;
23
24struct us2e_freq_percpu_info {
25 struct cpufreq_frequency_table table[6];
26};
27
28/* Indexed by cpu number. */
29static struct us2e_freq_percpu_info *us2e_freq_table;
30
31#define HBIRD_MEM_CNTL0_ADDR 0x1fe0000f010UL
32#define HBIRD_ESTAR_MODE_ADDR 0x1fe0000f080UL
33
34/* UltraSPARC-IIe has five dividers: 1, 2, 4, 6, and 8. These are controlled
35 * in the ESTAR mode control register.
36 */
37#define ESTAR_MODE_DIV_1 0x0000000000000000UL
38#define ESTAR_MODE_DIV_2 0x0000000000000001UL
39#define ESTAR_MODE_DIV_4 0x0000000000000003UL
40#define ESTAR_MODE_DIV_6 0x0000000000000002UL
41#define ESTAR_MODE_DIV_8 0x0000000000000004UL
42#define ESTAR_MODE_DIV_MASK 0x0000000000000007UL
43
44#define MCTRL0_SREFRESH_ENAB 0x0000000000010000UL
45#define MCTRL0_REFR_COUNT_MASK 0x0000000000007f00UL
46#define MCTRL0_REFR_COUNT_SHIFT 8
47#define MCTRL0_REFR_INTERVAL 7800
48#define MCTRL0_REFR_CLKS_P_CNT 64
49
/* Read a Hummingbird (UltraSPARC-IIe) control register at physical
 * address 'addr' using a cache-bypassing ldxa.
 */
static unsigned long read_hbreg(unsigned long addr)
{
	unsigned long ret;

	__asm__ __volatile__("ldxa [%1] %2, %0"
			     : "=&r" (ret)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
	return ret;
}
59
/* Write a Hummingbird control register.  The membar #Sync orders the
 * store before anything that follows.  A write to the E-Star mode
 * register changes the CPU clock divider, so we spin ~1us afterwards
 * to give the PLL time to re-lock (per the comment: 16 clock cycles).
 */
static void write_hbreg(unsigned long addr, unsigned long val)
{
	__asm__ __volatile__("stxa %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)
			     : "memory");
	if (addr == HBIRD_ESTAR_MODE_ADDR) {
		/* Need to wait 16 clock cycles for the PLL to lock. */
		udelay(1);
	}
}
72
73static void self_refresh_ctl(int enable)
74{
75 unsigned long mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR);
76
77 if (enable)
78 mctrl |= MCTRL0_SREFRESH_ENAB;
79 else
80 mctrl &= ~MCTRL0_SREFRESH_ENAB;
81 write_hbreg(HBIRD_MEM_CNTL0_ADDR, mctrl);
82 (void) read_hbreg(HBIRD_MEM_CNTL0_ADDR);
83}
84
/* Reprogram the DRAM refresh count in memory control register 0 for the
 * new clock divisor.  refr_count is derived from the refresh interval
 * (MCTRL0_REFR_INTERVAL, in ns — TODO confirm units against the IIe
 * manual) and the divided clock rate.
 *
 * When the CPU is slowing down and self-refresh is not enabled, we must
 * stall until both the old and the new refresh counts have expired
 * before letting the caller proceed.
 */
static void frob_mem_refresh(int cpu_slowing_down,
			     unsigned long clock_tick,
			     unsigned long old_divisor, unsigned long divisor)
{
	unsigned long old_refr_count, refr_count, mctrl;


	refr_count = (clock_tick * MCTRL0_REFR_INTERVAL);
	refr_count /= (MCTRL0_REFR_CLKS_P_CNT * divisor * 1000000000UL);

	mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR);
	old_refr_count = (mctrl & MCTRL0_REFR_COUNT_MASK)
		>> MCTRL0_REFR_COUNT_SHIFT;

	mctrl &= ~MCTRL0_REFR_COUNT_MASK;
	mctrl |= refr_count << MCTRL0_REFR_COUNT_SHIFT;
	write_hbreg(HBIRD_MEM_CNTL0_ADDR, mctrl);
	mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR);

	if (cpu_slowing_down && !(mctrl & MCTRL0_SREFRESH_ENAB)) {
		unsigned long usecs;

		/* We have to wait for both refresh counts (old
		 * and new) to go to zero.
		 */
		usecs = (MCTRL0_REFR_CLKS_P_CNT *
			 (refr_count + old_refr_count) *
			 1000000UL *
			 old_divisor) / clock_tick;
		udelay(usecs + 1UL);
	}
}
117
/* Move the CPU clock divider from old_divisor to divisor.  The order of
 * self-refresh, refresh-count, and E-Star mode register updates follows
 * the IIe manual's state transition diagram; transitions between
 * divisor 1 and divisors > 2 must pass through divisor 2, hence the
 * recursive two-step calls.  Runs with local interrupts disabled so the
 * sequence is not interleaved with anything else on this cpu.
 */
static void us2e_transition(unsigned long estar, unsigned long new_bits,
			    unsigned long clock_tick,
			    unsigned long old_divisor, unsigned long divisor)
{
	unsigned long flags;

	local_irq_save(flags);

	estar &= ~ESTAR_MODE_DIV_MASK;

	/* This is based upon the state transition diagram in the IIe manual. */
	if (old_divisor == 2 && divisor == 1) {
		self_refresh_ctl(0);
		write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
		frob_mem_refresh(0, clock_tick, old_divisor, divisor);
	} else if (old_divisor == 1 && divisor == 2) {
		frob_mem_refresh(1, clock_tick, old_divisor, divisor);
		write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
		self_refresh_ctl(1);
	} else if (old_divisor == 1 && divisor > 2) {
		/* Two legal steps: 1 --> 2, then 2 --> target. */
		us2e_transition(estar, ESTAR_MODE_DIV_2, clock_tick,
				1, 2);
		us2e_transition(estar, new_bits, clock_tick,
				2, divisor);
	} else if (old_divisor > 2 && divisor == 1) {
		/* Two legal steps: source --> 2, then 2 --> 1. */
		us2e_transition(estar, ESTAR_MODE_DIV_2, clock_tick,
				old_divisor, 2);
		us2e_transition(estar, new_bits, clock_tick,
				2, divisor);
	} else if (old_divisor < divisor) {
		frob_mem_refresh(0, clock_tick, old_divisor, divisor);
		write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
	} else if (old_divisor > divisor) {
		write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
		frob_mem_refresh(1, clock_tick, old_divisor, divisor);
	} else {
		BUG();	/* old_divisor == divisor: caller filtered this out. */
	}

	local_irq_restore(flags);
}
159
160static unsigned long index_to_estar_mode(unsigned int index)
161{
162 switch (index) {
163 case 0:
164 return ESTAR_MODE_DIV_1;
165
166 case 1:
167 return ESTAR_MODE_DIV_2;
168
169 case 2:
170 return ESTAR_MODE_DIV_4;
171
172 case 3:
173 return ESTAR_MODE_DIV_6;
174
175 case 4:
176 return ESTAR_MODE_DIV_8;
177
178 default:
179 BUG();
180 };
181}
182
/* Map a cpufreq table index (0-4) to the numeric clock divisor it
 * selects.  Any other index is a caller bug.
 */
static unsigned long index_to_divisor(unsigned int index)
{
	static const unsigned long divisors[] = { 1, 2, 4, 6, 8 };

	if (index >= ARRAY_SIZE(divisors))
		BUG();

	return divisors[index];
}
205
206static unsigned long estar_to_divisor(unsigned long estar)
207{
208 unsigned long ret;
209
210 switch (estar & ESTAR_MODE_DIV_MASK) {
211 case ESTAR_MODE_DIV_1:
212 ret = 1;
213 break;
214 case ESTAR_MODE_DIV_2:
215 ret = 2;
216 break;
217 case ESTAR_MODE_DIV_4:
218 ret = 4;
219 break;
220 case ESTAR_MODE_DIV_6:
221 ret = 6;
222 break;
223 case ESTAR_MODE_DIV_8:
224 ret = 8;
225 break;
226 default:
227 BUG();
228 };
229
230 return ret;
231}
232
/* Program the clock divider selected by 'index' on 'cpu' and emit the
 * cpufreq PRE/POSTCHANGE notifications.  The Hummingbird registers are
 * reached from the cpu itself, so we temporarily pin the current task
 * to the target cpu and restore the old affinity before returning.
 */
static void us2e_set_cpu_divider_index(unsigned int cpu, unsigned int index)
{
	unsigned long new_bits, new_freq;
	unsigned long clock_tick, divisor, old_divisor, estar;
	cpumask_t cpus_allowed;
	struct cpufreq_freqs freqs;

	if (!cpu_online(cpu))
		return;

	/* Bind to the target cpu so register accesses hit its E-Star unit. */
	cpus_allowed = current->cpus_allowed;
	set_cpus_allowed(current, cpumask_of_cpu(cpu));

	new_freq = clock_tick = sparc64_get_clock_tick(cpu);
	new_bits = index_to_estar_mode(index);
	divisor = index_to_divisor(index);
	new_freq /= divisor;

	estar = read_hbreg(HBIRD_ESTAR_MODE_ADDR);

	old_divisor = estar_to_divisor(estar);

	freqs.old = clock_tick / old_divisor;
	freqs.new = new_freq;
	freqs.cpu = cpu;
	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);

	/* Only touch the hardware if the divider actually changes. */
	if (old_divisor != divisor)
		us2e_transition(estar, new_bits, clock_tick, old_divisor, divisor);

	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);

	set_cpus_allowed(current, cpus_allowed);
}
267
/* cpufreq 'target' hook: resolve target_freq/relation to a frequency
 * table index for this cpu and program that divider.  Returns -EINVAL
 * if the cpufreq core cannot match the request to a table entry.
 */
static int us2e_freq_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation)
{
	unsigned int new_index = 0;

	if (cpufreq_frequency_table_target(policy,
					   &us2e_freq_table[policy->cpu].table[0],
					   target_freq,
					   relation,
					   &new_index))
		return -EINVAL;

	us2e_set_cpu_divider_index(policy->cpu, new_index);

	return 0;
}
285
/* cpufreq 'verify' hook: validate/clamp the policy's limits against
 * this cpu's frequency table.
 */
static int us2e_freq_verify(struct cpufreq_policy *policy)
{
	return cpufreq_frequency_table_verify(policy,
					      &us2e_freq_table[policy->cpu].table[0]);
}
291
292static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy)
293{
294 unsigned int cpu = policy->cpu;
295 unsigned long clock_tick = sparc64_get_clock_tick(cpu);
296 struct cpufreq_frequency_table *table =
297 &us2e_freq_table[cpu].table[0];
298
299 table[0].index = 0;
300 table[0].frequency = clock_tick / 1;
301 table[1].index = 1;
302 table[1].frequency = clock_tick / 2;
303 table[2].index = 2;
304 table[2].frequency = clock_tick / 4;
305 table[2].index = 3;
306 table[2].frequency = clock_tick / 6;
307 table[2].index = 4;
308 table[2].frequency = clock_tick / 8;
309 table[2].index = 5;
310 table[3].frequency = CPUFREQ_TABLE_END;
311
312 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
313 policy->cpuinfo.transition_latency = 0;
314 policy->cur = clock_tick;
315
316 return cpufreq_frequency_table_cpuinfo(policy, table);
317}
318
/* cpufreq per-CPU teardown: restore full speed (table index 0,
 * divider 1).  cpufreq_us2e_driver is only NULL if driver registration
 * never completed.
 */
static int us2e_freq_cpu_exit(struct cpufreq_policy *policy)
{
	if (cpufreq_us2e_driver)
		us2e_set_cpu_divider_index(policy->cpu, 0);

	return 0;
}
326
327static int __init us2e_freq_init(void)
328{
329 unsigned long manuf, impl, ver;
330 int ret;
331
332 __asm__("rdpr %%ver, %0" : "=r" (ver));
333 manuf = ((ver >> 48) & 0xffff);
334 impl = ((ver >> 32) & 0xffff);
335
336 if (manuf == 0x17 && impl == 0x13) {
337 struct cpufreq_driver *driver;
338
339 ret = -ENOMEM;
340 driver = kmalloc(sizeof(struct cpufreq_driver), GFP_KERNEL);
341 if (!driver)
342 goto err_out;
343 memset(driver, 0, sizeof(*driver));
344
345 us2e_freq_table = kmalloc(
346 (NR_CPUS * sizeof(struct us2e_freq_percpu_info)),
347 GFP_KERNEL);
348 if (!us2e_freq_table)
349 goto err_out;
350
351 memset(us2e_freq_table, 0,
352 (NR_CPUS * sizeof(struct us2e_freq_percpu_info)));
353
354 driver->verify = us2e_freq_verify;
355 driver->target = us2e_freq_target;
356 driver->init = us2e_freq_cpu_init;
357 driver->exit = us2e_freq_cpu_exit;
358 driver->owner = THIS_MODULE,
359 strcpy(driver->name, "UltraSPARC-IIe");
360
361 cpufreq_us2e_driver = driver;
362 ret = cpufreq_register_driver(driver);
363 if (ret)
364 goto err_out;
365
366 return 0;
367
368err_out:
369 if (driver) {
370 kfree(driver);
371 cpufreq_us2e_driver = NULL;
372 }
373 if (us2e_freq_table) {
374 kfree(us2e_freq_table);
375 us2e_freq_table = NULL;
376 }
377 return ret;
378 }
379
380 return -ENODEV;
381}
382
383static void __exit us2e_freq_exit(void)
384{
385 if (cpufreq_us2e_driver) {
386 cpufreq_unregister_driver(cpufreq_us2e_driver);
387
388 kfree(cpufreq_us2e_driver);
389 cpufreq_us2e_driver = NULL;
390 kfree(us2e_freq_table);
391 us2e_freq_table = NULL;
392 }
393}
394
395MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
396MODULE_DESCRIPTION("cpufreq driver for UltraSPARC-IIe");
397MODULE_LICENSE("GPL");
398
399module_init(us2e_freq_init);
400module_exit(us2e_freq_exit);
diff --git a/arch/sparc64/kernel/us3_cpufreq.c b/arch/sparc64/kernel/us3_cpufreq.c
new file mode 100644
index 000000000000..18fe54b8aa55
--- /dev/null
+++ b/arch/sparc64/kernel/us3_cpufreq.c
@@ -0,0 +1,255 @@
1/* us3_cpufreq.c: UltraSPARC-III cpu frequency support
2 *
3 * Copyright (C) 2003 David S. Miller (davem@redhat.com)
4 *
5 * Many thanks to Dominik Brodowski for fixing up the cpufreq
6 * infrastructure in order to make this driver easier to implement.
7 */
8
9#include <linux/kernel.h>
10#include <linux/module.h>
11#include <linux/sched.h>
12#include <linux/smp.h>
13#include <linux/cpufreq.h>
14#include <linux/threads.h>
15#include <linux/slab.h>
16#include <linux/init.h>
17
18#include <asm/head.h>
19#include <asm/timer.h>
20
21static struct cpufreq_driver *cpufreq_us3_driver;
22
23struct us3_freq_percpu_info {
24 struct cpufreq_frequency_table table[4];
25};
26
27/* Indexed by cpu number. */
28static struct us3_freq_percpu_info *us3_freq_table;
29
30/* UltraSPARC-III has three dividers: 1, 2, and 32. These are controlled
31 * in the Safari config register.
32 */
33#define SAFARI_CFG_DIV_1 0x0000000000000000UL
34#define SAFARI_CFG_DIV_2 0x0000000040000000UL
35#define SAFARI_CFG_DIV_32 0x0000000080000000UL
36#define SAFARI_CFG_DIV_MASK 0x00000000C0000000UL
37
/* Read this cpu's Safari configuration register via the
 * ASI_SAFARI_CONFIG alternate space (address %g0).
 */
static unsigned long read_safari_cfg(void)
{
	unsigned long ret;

	__asm__ __volatile__("ldxa	[%%g0] %1, %0"
			     : "=&r" (ret)
			     : "i" (ASI_SAFARI_CONFIG));
	return ret;
}
47
/* Write this cpu's Safari configuration register; membar #Sync orders
 * the store before anything that follows.
 */
static void write_safari_cfg(unsigned long val)
{
	__asm__ __volatile__("stxa	%0, [%%g0] %1\n\t"
			     "membar	#Sync"
			     : /* no outputs */
			     : "r" (val), "i" (ASI_SAFARI_CONFIG)
			     : "memory");
}
56
57static unsigned long get_current_freq(unsigned int cpu, unsigned long safari_cfg)
58{
59 unsigned long clock_tick = sparc64_get_clock_tick(cpu);
60 unsigned long ret;
61
62 switch (safari_cfg & SAFARI_CFG_DIV_MASK) {
63 case SAFARI_CFG_DIV_1:
64 ret = clock_tick / 1;
65 break;
66 case SAFARI_CFG_DIV_2:
67 ret = clock_tick / 2;
68 break;
69 case SAFARI_CFG_DIV_32:
70 ret = clock_tick / 32;
71 break;
72 default:
73 BUG();
74 };
75
76 return ret;
77}
78
/* Program the Safari clock divider selected by 'index' on 'cpu' and
 * emit cpufreq PRE/POSTCHANGE notifications.  The config register is
 * per-CPU, so we temporarily pin the current task to the target cpu
 * and restore the old affinity before returning.
 */
static void us3_set_cpu_divider_index(unsigned int cpu, unsigned int index)
{
	unsigned long new_bits, new_freq, reg;
	cpumask_t cpus_allowed;
	struct cpufreq_freqs freqs;

	if (!cpu_online(cpu))
		return;

	/* Bind to the target cpu so the ASI accesses hit its register. */
	cpus_allowed = current->cpus_allowed;
	set_cpus_allowed(current, cpumask_of_cpu(cpu));

	new_freq = sparc64_get_clock_tick(cpu);
	switch (index) {
	case 0:
		new_bits = SAFARI_CFG_DIV_1;
		new_freq /= 1;
		break;
	case 1:
		new_bits = SAFARI_CFG_DIV_2;
		new_freq /= 2;
		break;
	case 2:
		new_bits = SAFARI_CFG_DIV_32;
		new_freq /= 32;
		break;

	default:
		BUG();
	};

	reg = read_safari_cfg();

	freqs.old = get_current_freq(cpu, reg);
	freqs.new = new_freq;
	freqs.cpu = cpu;
	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);

	/* Swap in the new divider field, leaving everything else intact. */
	reg &= ~SAFARI_CFG_DIV_MASK;
	reg |= new_bits;
	write_safari_cfg(reg);

	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);

	set_cpus_allowed(current, cpus_allowed);
}
125
/* cpufreq 'target' hook: resolve target_freq/relation to a frequency
 * table index for this cpu and program that divider.  Returns -EINVAL
 * if the cpufreq core cannot match the request to a table entry.
 */
static int us3_freq_target(struct cpufreq_policy *policy,
			  unsigned int target_freq,
			  unsigned int relation)
{
	unsigned int new_index = 0;

	if (cpufreq_frequency_table_target(policy,
					   &us3_freq_table[policy->cpu].table[0],
					   target_freq,
					   relation,
					   &new_index))
		return -EINVAL;

	us3_set_cpu_divider_index(policy->cpu, new_index);

	return 0;
}
143
/* cpufreq 'verify' hook: validate/clamp the policy's limits against
 * this cpu's frequency table.
 */
static int us3_freq_verify(struct cpufreq_policy *policy)
{
	return cpufreq_frequency_table_verify(policy,
					      &us3_freq_table[policy->cpu].table[0]);
}
149
/* cpufreq per-CPU init: fill this cpu's table with the three supported
 * speeds (full, half, and 1/32 clock) plus the terminator, and hand it
 * to the cpufreq core.
 */
static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
{
	unsigned int cpu = policy->cpu;
	unsigned long clock_tick = sparc64_get_clock_tick(cpu);
	struct cpufreq_frequency_table *table =
		&us3_freq_table[cpu].table[0];

	table[0].index = 0;
	table[0].frequency = clock_tick / 1;
	table[1].index = 1;
	table[1].frequency = clock_tick / 2;
	table[2].index = 2;
	table[2].frequency = clock_tick / 32;
	table[3].index = 0;	/* index unused in the terminator entry */
	table[3].frequency = CPUFREQ_TABLE_END;

	policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
	policy->cpuinfo.transition_latency = 0;
	policy->cur = clock_tick;

	return cpufreq_frequency_table_cpuinfo(policy, table);
}
172
/* cpufreq per-CPU teardown: restore full speed (table index 0,
 * divider 1).  cpufreq_us3_driver is only NULL if driver registration
 * never completed.
 */
static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
{
	if (cpufreq_us3_driver)
		us3_set_cpu_divider_index(policy->cpu, 0);

	return 0;
}
180
181static int __init us3_freq_init(void)
182{
183 unsigned long manuf, impl, ver;
184 int ret;
185
186 __asm__("rdpr %%ver, %0" : "=r" (ver));
187 manuf = ((ver >> 48) & 0xffff);
188 impl = ((ver >> 32) & 0xffff);
189
190 if (manuf == CHEETAH_MANUF &&
191 (impl == CHEETAH_IMPL || impl == CHEETAH_PLUS_IMPL)) {
192 struct cpufreq_driver *driver;
193
194 ret = -ENOMEM;
195 driver = kmalloc(sizeof(struct cpufreq_driver), GFP_KERNEL);
196 if (!driver)
197 goto err_out;
198 memset(driver, 0, sizeof(*driver));
199
200 us3_freq_table = kmalloc(
201 (NR_CPUS * sizeof(struct us3_freq_percpu_info)),
202 GFP_KERNEL);
203 if (!us3_freq_table)
204 goto err_out;
205
206 memset(us3_freq_table, 0,
207 (NR_CPUS * sizeof(struct us3_freq_percpu_info)));
208
209 driver->verify = us3_freq_verify;
210 driver->target = us3_freq_target;
211 driver->init = us3_freq_cpu_init;
212 driver->exit = us3_freq_cpu_exit;
213 driver->owner = THIS_MODULE,
214 strcpy(driver->name, "UltraSPARC-III");
215
216 cpufreq_us3_driver = driver;
217 ret = cpufreq_register_driver(driver);
218 if (ret)
219 goto err_out;
220
221 return 0;
222
223err_out:
224 if (driver) {
225 kfree(driver);
226 cpufreq_us3_driver = NULL;
227 }
228 if (us3_freq_table) {
229 kfree(us3_freq_table);
230 us3_freq_table = NULL;
231 }
232 return ret;
233 }
234
235 return -ENODEV;
236}
237
238static void __exit us3_freq_exit(void)
239{
240 if (cpufreq_us3_driver) {
241 cpufreq_unregister_driver(cpufreq_us3_driver);
242
243 kfree(cpufreq_us3_driver);
244 cpufreq_us3_driver = NULL;
245 kfree(us3_freq_table);
246 us3_freq_table = NULL;
247 }
248}
249
250MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
251MODULE_DESCRIPTION("cpufreq driver for UltraSPARC-III");
252MODULE_LICENSE("GPL");
253
254module_init(us3_freq_init);
255module_exit(us3_freq_exit);
diff --git a/arch/sparc64/kernel/vmlinux.lds.S b/arch/sparc64/kernel/vmlinux.lds.S
new file mode 100644
index 000000000000..382fd6798bb9
--- /dev/null
+++ b/arch/sparc64/kernel/vmlinux.lds.S
@@ -0,0 +1,106 @@
/* ld script to make UltraLinux kernel */

#include <asm-generic/vmlinux.lds.h>

OUTPUT_FORMAT("elf64-sparc", "elf64-sparc", "elf64-sparc")
OUTPUT_ARCH(sparc:v9a)
ENTRY(_start)

jiffies = jiffies_64;
SECTIONS
{
  /* Fixed low-memory page-table addresses referenced from assembly. */
  swapper_pmd_dir = 0x0000000000402000;
  empty_pg_dir = 0x0000000000403000;
  . = 0x4000;
  /* Kernel text is linked at this fixed virtual address. */
  .text 0x0000000000404000 :
  {
    *(.text)
    SCHED_TEXT
    LOCK_TEXT
    *(.gnu.warning)
  } =0
  _etext = .;
  PROVIDE (etext = .);

  RODATA

  .data :
  {
    *(.data)
    CONSTRUCTORS
  }
  .data1 : { *(.data1) }
  . = ALIGN(64);
  .data.cacheline_aligned : { *(.data.cacheline_aligned) }
  _edata = .;
  PROVIDE (edata = .);
  .fixup : { *(.fixup) }

  /* Exception table used for user-access fault fixups. */
  . = ALIGN(16);
  __start___ex_table = .;
  __ex_table : { *(__ex_table) }
  __stop___ex_table = .;

  /* Init sections: everything between __init_begin and __init_end is
   * discarded after boot.
   */
  . = ALIGN(8192);
  __init_begin = .;
  .init.text : {
	_sinittext = .;
	*(.init.text)
	_einittext = .;
  }
  .init.data : { *(.init.data) }
  . = ALIGN(16);
  __setup_start = .;
  .init.setup : { *(.init.setup) }
  __setup_end = .;
  __initcall_start = .;
  .initcall.init : {
	*(.initcall1.init)
	*(.initcall2.init)
	*(.initcall3.init)
	*(.initcall4.init)
	*(.initcall5.init)
	*(.initcall6.init)
	*(.initcall7.init)
  }
  __initcall_end = .;
  __con_initcall_start = .;
  .con_initcall.init : { *(.con_initcall.init) }
  __con_initcall_end = .;
  SECURITY_INIT
  . = ALIGN(8192);
  __initramfs_start = .;
  .init.ramfs : { *(.init.ramfs) }
  __initramfs_end = .;
  . = ALIGN(8192);
  __per_cpu_start = .;
  .data.percpu : { *(.data.percpu) }
  __per_cpu_end = .;
  . = ALIGN(8192);
  __init_end = .;
  __bss_start = .;
  .sbss : { *(.sbss) *(.scommon) }
  .bss :
  {
   *(.dynbss)
   *(.bss)
   *(COMMON)
  }
  _end = . ;
  PROVIDE (end = .);
  /* Stabs debugging sections. */
  .stab 0 : { *(.stab) }
  .stabstr 0 : { *(.stabstr) }
  .stab.excl 0 : { *(.stab.excl) }
  .stab.exclstr 0 : { *(.stab.exclstr) }
  .stab.index 0 : { *(.stab.index) }
  .stab.indexstr 0 : { *(.stab.indexstr) }
  .comment 0 : { *(.comment) }
  .debug 0 : { *(.debug) }
  .debug_srcinfo 0 : { *(.debug_srcinfo) }
  .debug_aranges 0 : { *(.debug_aranges) }
  .debug_pubnames 0 : { *(.debug_pubnames) }
  .debug_sfnames 0 : { *(.debug_sfnames) }
  .line 0 : { *(.line) }
  /* Exit-only code/data is discarded: the kernel image never unloads. */
  /DISCARD/ : { *(.exit.text) *(.exit.data) *(.exitcall.exit) }
}
diff --git a/arch/sparc64/kernel/winfixup.S b/arch/sparc64/kernel/winfixup.S
new file mode 100644
index 000000000000..dfbc7e0dcf70
--- /dev/null
+++ b/arch/sparc64/kernel/winfixup.S
@@ -0,0 +1,417 @@
1/* $Id: winfixup.S,v 1.30 2002/02/09 19:49:30 davem Exp $
2 *
3 * winfixup.S: Handle cases where user stack pointer is found to be bogus.
4 *
5 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
6 */
7
8#include <asm/asi.h>
9#include <asm/head.h>
10#include <asm/page.h>
11#include <asm/ptrace.h>
12#include <asm/processor.h>
13#include <asm/spitfire.h>
14#include <asm/thread_info.h>
15
16 .text
17
/* Load the kernel's primary MMU context into PRIMARY_CONTEXT.  The two
 * sethi instructions at cplus_winfixup_insn_1/2 are patch sites: on
 * Cheetah-Plus they are overwritten at boot (by
 * cheetah_plus_patch_winfixup, below) with the cplus_wfinsn_1/2
 * instruction templates so the Cheetah-Plus context values are used.
 */
set_pcontext:
cplus_winfixup_insn_1:
	sethi	%hi(0), %l1
	mov	PRIMARY_CONTEXT, %g1
	sllx	%l1, 32, %l1
cplus_winfixup_insn_2:
	sethi	%hi(0), %g2
	or	%l1, %g2, %l1
	stxa	%l1, [%g1] ASI_DMMU
	flush	%g6
	retl
	 nop

	/* Replacement instruction templates copied over the patch sites
	 * above on Cheetah-Plus.
	 */
cplus_wfinsn_1:
	sethi	%uhi(CTX_CHEETAH_PLUS_NUC), %l1
cplus_wfinsn_2:
	sethi	%hi(CTX_CHEETAH_PLUS_CTX0), %g2
35
36 .align 32
37
38 /* Here are the rules, pay attention.
39 *
40 * The kernel is disallowed from touching user space while
41 * the trap level is greater than zero, except for from within
42 * the window spill/fill handlers. This must be followed
43 * so that we can easily detect the case where we tried to
44 * spill/fill with a bogus (or unmapped) user stack pointer.
45 *
46 * These are layed out in a special way for cache reasons,
47 * don't touch...
48 */
	/* Fill trap on a bogus/unmapped user stack.  If the trap came
	 * from the kernel we must unwind back to tl=0 by hand before
	 * calling do_sparc64_fault; from user we share the common path.
	 */
	.globl	fill_fixup, spill_fixup
fill_fixup:
	rdpr	%tstate, %g1
	andcc	%g1, TSTATE_PRIV, %g0
	or	%g4, FAULT_CODE_WINFIXUP, %g4
	be,pt	%xcc, window_scheisse_from_user_common
	 and	%g1, TSTATE_CWP, %g1

	/* This is the extremely complex case, but it does happen from
	 * time to time if things are just right.  Essentially the restore
	 * done in rtrap right before going back to user mode, with tl=1
	 * and that levels trap stack registers all setup, took a fill trap,
	 * the user stack was not mapped in the tlb, and tlb miss occurred,
	 * the pte found was not valid, and a simple ref bit watch update
	 * could not satisfy the miss, so we got here.
	 *
	 * We must carefully unwind the state so we get back to tl=0, preserve
	 * all the register values we were going to give to the user.  Luckily
	 * most things are where they need to be, we also have the address
	 * which triggered the fault handy as well.
	 *
	 * Also note that we must preserve %l5 and %l6.  If the user was
	 * returning from a system call, we must make it look this way
	 * after we process the fill fault on the users stack.
	 *
	 * First, get into the window where the original restore was executed.
	 */

	rdpr	%wstate, %g2			! Grab user mode wstate.
	wrpr	%g1, %cwp			! Get into the right window.
	sll	%g2, 3, %g2			! NORMAL-->OTHER

	wrpr	%g0, 0x0, %canrestore		! Standard etrap stuff.
	wrpr	%g2, 0x0, %wstate		! This must be consistent.
	wrpr	%g0, 0x0, %otherwin		! We know this.
	call	set_pcontext			! Change contexts...
	 nop
	rdpr	%pstate, %l1			! Prepare to change globals.
	mov	%g6, %o7			! Get current.

	andn	%l1, PSTATE_MM, %l1		! We want to be in RMO
	stb	%g4, [%g6 + TI_FAULT_CODE]
	stx	%g5, [%g6 + TI_FAULT_ADDR]
	wrpr	%g0, 0x0, %tl			! Out of trap levels.
	wrpr	%l1, (PSTATE_IE | PSTATE_AG | PSTATE_RMO), %pstate
	mov	%o7, %g6
	ldx	[%g6 + TI_TASK], %g4
#ifdef CONFIG_SMP
	mov	TSB_REG, %g1
	ldxa	[%g1] ASI_IMMU, %g5
#endif

	/* This is the same as below, except we handle this a bit special
	 * since we must preserve %l5 and %l6, see comment above.
	 */
	call	do_sparc64_fault
	 add	%sp, PTREGS_OFF, %o0
	ba,pt	%xcc, rtrap
	 nop					! yes, nop is correct
108
109 /* Be very careful about usage of the alternate globals here.
110 * You cannot touch %g4/%g5 as that has the fault information
111 * should this be from usermode. Also be careful for the case
112 * where we get here from the save instruction in etrap.S when
113 * coming from either user or kernel (does not matter which, it
114 * is the same problem in both cases). Essentially this means
115 * do not touch %g7 or %g2 so we handle the two cases fine.
116 */
	/* Spill trap to a bad user stack: stash the would-be-spilled
	 * window into the thread_info save area (64-bit or 32-bit form
	 * depending on _TIF_32BIT), bump TI_WSAVED, then either retry
	 * (kernel) or take the common user fault path.
	 */
spill_fixup:
	ldx	[%g6 + TI_FLAGS], %g1
	andcc	%g1, _TIF_32BIT, %g0
	ldub	[%g6 + TI_WSAVED], %g1

	sll	%g1, 3, %g3
	add	%g6, %g3, %g3
	stx	%sp, [%g3 + TI_RWIN_SPTRS]
	sll	%g1, 7, %g3
	bne,pt	%xcc, 1f
	 add	%g6, %g3, %g3
	stx	%l0, [%g3 + TI_REG_WINDOW + 0x00]
	stx	%l1, [%g3 + TI_REG_WINDOW + 0x08]

	stx	%l2, [%g3 + TI_REG_WINDOW + 0x10]
	stx	%l3, [%g3 + TI_REG_WINDOW + 0x18]
	stx	%l4, [%g3 + TI_REG_WINDOW + 0x20]
	stx	%l5, [%g3 + TI_REG_WINDOW + 0x28]
	stx	%l6, [%g3 + TI_REG_WINDOW + 0x30]
	stx	%l7, [%g3 + TI_REG_WINDOW + 0x38]
	stx	%i0, [%g3 + TI_REG_WINDOW + 0x40]
	stx	%i1, [%g3 + TI_REG_WINDOW + 0x48]

	stx	%i2, [%g3 + TI_REG_WINDOW + 0x50]
	stx	%i3, [%g3 + TI_REG_WINDOW + 0x58]
	stx	%i4, [%g3 + TI_REG_WINDOW + 0x60]
	stx	%i5, [%g3 + TI_REG_WINDOW + 0x68]
	stx	%i6, [%g3 + TI_REG_WINDOW + 0x70]
	b,pt	%xcc, 2f
	 stx	%i7, [%g3 + TI_REG_WINDOW + 0x78]
1:	stw	%l0, [%g3 + TI_REG_WINDOW + 0x00]

	stw	%l1, [%g3 + TI_REG_WINDOW + 0x04]
	stw	%l2, [%g3 + TI_REG_WINDOW + 0x08]
	stw	%l3, [%g3 + TI_REG_WINDOW + 0x0c]
	stw	%l4, [%g3 + TI_REG_WINDOW + 0x10]
	stw	%l5, [%g3 + TI_REG_WINDOW + 0x14]
	stw	%l6, [%g3 + TI_REG_WINDOW + 0x18]
	stw	%l7, [%g3 + TI_REG_WINDOW + 0x1c]
	stw	%i0, [%g3 + TI_REG_WINDOW + 0x20]

	stw	%i1, [%g3 + TI_REG_WINDOW + 0x24]
	stw	%i2, [%g3 + TI_REG_WINDOW + 0x28]
	stw	%i3, [%g3 + TI_REG_WINDOW + 0x2c]
	stw	%i4, [%g3 + TI_REG_WINDOW + 0x30]
	stw	%i5, [%g3 + TI_REG_WINDOW + 0x34]
	stw	%i6, [%g3 + TI_REG_WINDOW + 0x38]
	stw	%i7, [%g3 + TI_REG_WINDOW + 0x3c]
2:	add	%g1, 1, %g1

	stb	%g1, [%g6 + TI_WSAVED]
	rdpr	%tstate, %g1
	andcc	%g1, TSTATE_PRIV, %g0
	saved
	and	%g1, TSTATE_CWP, %g1
	be,pn	%xcc, window_scheisse_from_user_common
	 mov	FAULT_CODE_WRITE | FAULT_CODE_DTLB | FAULT_CODE_WINFIXUP, %g4
	retry
175
	/* Common user-mode path for spill/fill fixups: record fault code
	 * and address in thread_info, restore the trap-time window, then
	 * etrap and hand off to do_sparc64_fault.
	 */
window_scheisse_from_user_common:
	stb	%g4, [%g6 + TI_FAULT_CODE]
	stx	%g5, [%g6 + TI_FAULT_ADDR]
	wrpr	%g1, %cwp
	ba,pt	%xcc, etrap
	 rd	%pc, %g7
	call	do_sparc64_fault
	 add	%sp, PTREGS_OFF, %o0
	ba,a,pt	%xcc, rtrap_clr_l6
185
	/* Mem-address-not-aligned during a window spill/fill: redirect
	 * %tnpc to offset 0x78 of the current 0x80-byte trap vector entry
	 * (the fixup branch slot) and retry from there.
	 */
	.globl	winfix_mna, fill_fixup_mna, spill_fixup_mna
winfix_mna:
	andn	%g3, 0x7f, %g3
	add	%g3, 0x78, %g3
	wrpr	%g3, %tnpc
	done
	/* Fill fault that was a misaligned access: like fill_fixup but
	 * ends in mem_address_unaligned(regs, sfar, sfsr) instead of
	 * do_sparc64_fault.
	 */
fill_fixup_mna:
	rdpr	%tstate, %g1
	andcc	%g1, TSTATE_PRIV, %g0
	be,pt	%xcc, window_mna_from_user_common
	 and	%g1, TSTATE_CWP, %g1

	/* Please, see fill_fixup commentary about why we must preserve
	 * %l5 and %l6 to preserve absolute correct semantics.
	 */
	rdpr	%wstate, %g2			! Grab user mode wstate.
	wrpr	%g1, %cwp			! Get into the right window.
	sll	%g2, 3, %g2			! NORMAL-->OTHER
	wrpr	%g0, 0x0, %canrestore		! Standard etrap stuff.

	wrpr	%g2, 0x0, %wstate		! This must be consistent.
	wrpr	%g0, 0x0, %otherwin		! We know this.
	call	set_pcontext			! Change contexts...
	 nop
	rdpr	%pstate, %l1			! Prepare to change globals.
	mov	%g4, %o2			! Setup args for
	mov	%g5, %o1			! final call to mem_address_unaligned.
	andn	%l1, PSTATE_MM, %l1		! We want to be in RMO

	mov	%g6, %o7			! Stash away current.
	wrpr	%g0, 0x0, %tl			! Out of trap levels.
	wrpr	%l1, (PSTATE_IE | PSTATE_AG | PSTATE_RMO), %pstate
	mov	%o7, %g6			! Get current back.
	ldx	[%g6 + TI_TASK], %g4		! Finish it.
#ifdef CONFIG_SMP
	mov	TSB_REG, %g1
	ldxa	[%g1] ASI_IMMU, %g5
#endif
	call	mem_address_unaligned
	 add	%sp, PTREGS_OFF, %o0

	b,pt	%xcc, rtrap
	 nop					! yes, the nop is correct
	/* Spill fault that was a misaligned access: save the window to
	 * thread_info (std pairs for the 32-bit layout), bump TI_WSAVED,
	 * then retry (kernel) or take the mna user common path.
	 */
spill_fixup_mna:
	ldx	[%g6 + TI_FLAGS], %g1
	andcc	%g1, _TIF_32BIT, %g0
	ldub	[%g6 + TI_WSAVED], %g1
	sll	%g1, 3, %g3
	add	%g6, %g3, %g3
	stx	%sp, [%g3 + TI_RWIN_SPTRS]

	sll	%g1, 7, %g3
	bne,pt	%xcc, 1f
	 add	%g6, %g3, %g3
	stx	%l0, [%g3 + TI_REG_WINDOW + 0x00]
	stx	%l1, [%g3 + TI_REG_WINDOW + 0x08]
	stx	%l2, [%g3 + TI_REG_WINDOW + 0x10]
	stx	%l3, [%g3 + TI_REG_WINDOW + 0x18]
	stx	%l4, [%g3 + TI_REG_WINDOW + 0x20]

	stx	%l5, [%g3 + TI_REG_WINDOW + 0x28]
	stx	%l6, [%g3 + TI_REG_WINDOW + 0x30]
	stx	%l7, [%g3 + TI_REG_WINDOW + 0x38]
	stx	%i0, [%g3 + TI_REG_WINDOW + 0x40]
	stx	%i1, [%g3 + TI_REG_WINDOW + 0x48]
	stx	%i2, [%g3 + TI_REG_WINDOW + 0x50]
	stx	%i3, [%g3 + TI_REG_WINDOW + 0x58]
	stx	%i4, [%g3 + TI_REG_WINDOW + 0x60]

	stx	%i5, [%g3 + TI_REG_WINDOW + 0x68]
	stx	%i6, [%g3 + TI_REG_WINDOW + 0x70]
	stx	%i7, [%g3 + TI_REG_WINDOW + 0x78]
	b,pt	%xcc, 2f
	 add	%g1, 1, %g1
1:	std	%l0, [%g3 + TI_REG_WINDOW + 0x00]
	std	%l2, [%g3 + TI_REG_WINDOW + 0x08]
	std	%l4, [%g3 + TI_REG_WINDOW + 0x10]

	std	%l6, [%g3 + TI_REG_WINDOW + 0x18]
	std	%i0, [%g3 + TI_REG_WINDOW + 0x20]
	std	%i2, [%g3 + TI_REG_WINDOW + 0x28]
	std	%i4, [%g3 + TI_REG_WINDOW + 0x30]
	std	%i6, [%g3 + TI_REG_WINDOW + 0x38]
	add	%g1, 1, %g1
2:	stb	%g1, [%g6 + TI_WSAVED]
	rdpr	%tstate, %g1

	andcc	%g1, TSTATE_PRIV, %g0
	saved
	be,pn	%xcc, window_mna_from_user_common
	 and	%g1, TSTATE_CWP, %g1
	retry
	/* Common user-mode path for misaligned spill/fill: restore the
	 * trap-time window, etrap, and call mem_address_unaligned with
	 * the saved sfar/sfsr (%l5/%l4 after etrap).
	 */
window_mna_from_user_common:
	wrpr	%g1, %cwp
	sethi	%hi(109f), %g7
	ba,pt	%xcc, etrap
109:	 or	%g7, %lo(109b), %g7
	mov	%l4, %o2
	mov	%l5, %o1
	call	mem_address_unaligned
	 add	%sp, PTREGS_OFF, %o0
	ba,pt	%xcc, rtrap
	 clr	%l6
289
290 /* These are only needed for 64-bit mode processes which
291 * put their stack pointer into the VPTE area and there
292 * happens to be a VPTE tlb entry mapped there during
293 * a spill/fill trap to that stack frame.
294 */
	/* Data access exception during a window spill/fill: redirect
	 * %tnpc to offset 0x74 of the current 0x80-byte trap vector entry
	 * (the dax fixup branch slot) and retry from there.
	 */
	.globl	winfix_dax, fill_fixup_dax, spill_fixup_dax
winfix_dax:
	andn	%g3, 0x7f, %g3
	add	%g3, 0x74, %g3
	wrpr	%g3, %tnpc
	done
	/* Fill fault that was a data access exception: like fill_fixup
	 * but ends in data_access_exception(regs, sfsr, sfar) instead of
	 * do_sparc64_fault.
	 */
fill_fixup_dax:
	rdpr	%tstate, %g1
	andcc	%g1, TSTATE_PRIV, %g0
	be,pt	%xcc, window_dax_from_user_common
	 and	%g1, TSTATE_CWP, %g1

	/* Please, see fill_fixup commentary about why we must preserve
	 * %l5 and %l6 to preserve absolute correct semantics.
	 */
	rdpr	%wstate, %g2			! Grab user mode wstate.
	wrpr	%g1, %cwp			! Get into the right window.
	sll	%g2, 3, %g2			! NORMAL-->OTHER
	wrpr	%g0, 0x0, %canrestore		! Standard etrap stuff.

	wrpr	%g2, 0x0, %wstate		! This must be consistent.
	wrpr	%g0, 0x0, %otherwin		! We know this.
	call	set_pcontext			! Change contexts...
	 nop
	rdpr	%pstate, %l1			! Prepare to change globals.
	mov	%g4, %o1			! Setup args for
	mov	%g5, %o2			! final call to data_access_exception.
	andn	%l1, PSTATE_MM, %l1		! We want to be in RMO

	mov	%g6, %o7			! Stash away current.
	wrpr	%g0, 0x0, %tl			! Out of trap levels.
	wrpr	%l1, (PSTATE_IE | PSTATE_AG | PSTATE_RMO), %pstate
	mov	%o7, %g6			! Get current back.
	ldx	[%g6 + TI_TASK], %g4		! Finish it.
#ifdef CONFIG_SMP
	mov	TSB_REG, %g1
	ldxa	[%g1] ASI_IMMU, %g5
#endif
	call	data_access_exception
	 add	%sp, PTREGS_OFF, %o0

	b,pt	%xcc, rtrap
	 nop					! yes, the nop is correct
	/* Spill fault that was a data access exception: save the window
	 * to thread_info, bump TI_WSAVED, then retry (kernel) or take
	 * the dax user common path.
	 */
spill_fixup_dax:
	ldx	[%g6 + TI_FLAGS], %g1
	andcc	%g1, _TIF_32BIT, %g0
	ldub	[%g6 + TI_WSAVED], %g1
	sll	%g1, 3, %g3
	add	%g6, %g3, %g3
	stx	%sp, [%g3 + TI_RWIN_SPTRS]

	sll	%g1, 7, %g3
	bne,pt	%xcc, 1f
	 add	%g6, %g3, %g3
	stx	%l0, [%g3 + TI_REG_WINDOW + 0x00]
	stx	%l1, [%g3 + TI_REG_WINDOW + 0x08]
	stx	%l2, [%g3 + TI_REG_WINDOW + 0x10]
	stx	%l3, [%g3 + TI_REG_WINDOW + 0x18]
	stx	%l4, [%g3 + TI_REG_WINDOW + 0x20]

	stx	%l5, [%g3 + TI_REG_WINDOW + 0x28]
	stx	%l6, [%g3 + TI_REG_WINDOW + 0x30]
	stx	%l7, [%g3 + TI_REG_WINDOW + 0x38]
	stx	%i0, [%g3 + TI_REG_WINDOW + 0x40]
	stx	%i1, [%g3 + TI_REG_WINDOW + 0x48]
	stx	%i2, [%g3 + TI_REG_WINDOW + 0x50]
	stx	%i3, [%g3 + TI_REG_WINDOW + 0x58]
	stx	%i4, [%g3 + TI_REG_WINDOW + 0x60]

	stx	%i5, [%g3 + TI_REG_WINDOW + 0x68]
	stx	%i6, [%g3 + TI_REG_WINDOW + 0x70]
	stx	%i7, [%g3 + TI_REG_WINDOW + 0x78]
	b,pt	%xcc, 2f
	 add	%g1, 1, %g1
1:	std	%l0, [%g3 + TI_REG_WINDOW + 0x00]
	std	%l2, [%g3 + TI_REG_WINDOW + 0x08]
	std	%l4, [%g3 + TI_REG_WINDOW + 0x10]

	std	%l6, [%g3 + TI_REG_WINDOW + 0x18]
	std	%i0, [%g3 + TI_REG_WINDOW + 0x20]
	std	%i2, [%g3 + TI_REG_WINDOW + 0x28]
	std	%i4, [%g3 + TI_REG_WINDOW + 0x30]
	std	%i6, [%g3 + TI_REG_WINDOW + 0x38]
	add	%g1, 1, %g1
2:	stb	%g1, [%g6 + TI_WSAVED]
	rdpr	%tstate, %g1

	andcc	%g1, TSTATE_PRIV, %g0
	saved
	be,pn	%xcc, window_dax_from_user_common
	 and	%g1, TSTATE_CWP, %g1
	retry
	/* Common user-mode path for spill/fill data access exceptions:
	 * restore the trap-time window, etrap, and call
	 * data_access_exception with the saved values (%l4/%l5 after
	 * etrap).
	 */
window_dax_from_user_common:
	wrpr	%g1, %cwp
	sethi	%hi(109f), %g7
	ba,pt	%xcc, etrap
109:	 or	%g7, %lo(109b), %g7
	mov	%l4, %o1
	mov	%l5, %o2
	call	data_access_exception
	 add	%sp, PTREGS_OFF, %o0
	ba,pt	%xcc, rtrap
	 clr	%l6
398
399
	/* Boot-time patch for Cheetah-Plus: copy the two cplus_wfinsn_*
	 * instruction words over the cplus_winfixup_insn_* patch sites
	 * inside set_pcontext, flushing the I-cache line after each
	 * store so the new instructions are visible.
	 */
	.globl	cheetah_plus_patch_winfixup
cheetah_plus_patch_winfixup:
	sethi	%hi(cplus_wfinsn_1), %o0
	sethi	%hi(cplus_winfixup_insn_1), %o2
	lduw	[%o0 + %lo(cplus_wfinsn_1)], %o1
	or	%o2, %lo(cplus_winfixup_insn_1), %o2
	stw	%o1, [%o2]
	flush	%o2

	sethi	%hi(cplus_wfinsn_2), %o0
	sethi	%hi(cplus_winfixup_insn_2), %o2
	lduw	[%o0 + %lo(cplus_wfinsn_2)], %o1
	or	%o2, %lo(cplus_winfixup_insn_2), %o2
	stw	%o1, [%o2]
	flush	%o2

	retl
	 nop