author    Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
committer Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
commit    1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree      0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/sparc/kernel
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'arch/sparc/kernel')
-rw-r--r--  arch/sparc/kernel/Makefile       |   27
-rw-r--r--  arch/sparc/kernel/apc.c          |  186
-rw-r--r--  arch/sparc/kernel/asm-offsets.c  |   45
-rw-r--r--  arch/sparc/kernel/auxio.c        |  138
-rw-r--r--  arch/sparc/kernel/cpu.c          |  168
-rw-r--r--  arch/sparc/kernel/devices.c      |  160
-rw-r--r--  arch/sparc/kernel/ebus.c         |  361
-rw-r--r--  arch/sparc/kernel/entry.S        | 1956
-rw-r--r--  arch/sparc/kernel/errtbls.c      |  276
-rw-r--r--  arch/sparc/kernel/etrap.S        |  321
-rw-r--r--  arch/sparc/kernel/head.S         | 1326
-rw-r--r--  arch/sparc/kernel/idprom.c       |  108
-rw-r--r--  arch/sparc/kernel/init_task.c    |   28
-rw-r--r--  arch/sparc/kernel/ioport.c       |  731
-rw-r--r--  arch/sparc/kernel/irq.c          |  614
-rw-r--r--  arch/sparc/kernel/module.c       |  159
-rw-r--r--  arch/sparc/kernel/muldiv.c       |  240
-rw-r--r--  arch/sparc/kernel/pcic.c         | 1041
-rw-r--r--  arch/sparc/kernel/pmc.c          |   99
-rw-r--r--  arch/sparc/kernel/process.c      |  746
-rw-r--r--  arch/sparc/kernel/ptrace.c       |  632
-rw-r--r--  arch/sparc/kernel/rtrap.S        |  319
-rw-r--r--  arch/sparc/kernel/sclow.S        |   86
-rw-r--r--  arch/sparc/kernel/semaphore.c    |  155
-rw-r--r--  arch/sparc/kernel/setup.c        |  476
-rw-r--r--  arch/sparc/kernel/signal.c       | 1181
-rw-r--r--  arch/sparc/kernel/smp.c          |  295
-rw-r--r--  arch/sparc/kernel/sparc-stub.c   |  724
-rw-r--r--  arch/sparc/kernel/sparc_ksyms.c  |  334
-rw-r--r--  arch/sparc/kernel/sun4c_irq.c    |  250
-rw-r--r--  arch/sparc/kernel/sun4d_irq.c    |  594
-rw-r--r--  arch/sparc/kernel/sun4d_smp.c    |  486
-rw-r--r--  arch/sparc/kernel/sun4m_irq.c    |  399
-rw-r--r--  arch/sparc/kernel/sun4m_smp.c    |  451
-rw-r--r--  arch/sparc/kernel/sun4setup.c    |   75
-rw-r--r--  arch/sparc/kernel/sunos_asm.S    |   67
-rw-r--r--  arch/sparc/kernel/sunos_ioctl.c  |  231
-rw-r--r--  arch/sparc/kernel/sys_solaris.c  |   37
-rw-r--r--  arch/sparc/kernel/sys_sparc.c    |  485
-rw-r--r--  arch/sparc/kernel/sys_sunos.c    | 1194
-rw-r--r--  arch/sparc/kernel/systbls.S      |  186
-rw-r--r--  arch/sparc/kernel/tadpole.c      |  126
-rw-r--r--  arch/sparc/kernel/tick14.c       |   85
-rw-r--r--  arch/sparc/kernel/time.c         |  641
-rw-r--r--  arch/sparc/kernel/trampoline.S   |  162
-rw-r--r--  arch/sparc/kernel/traps.c        |  515
-rw-r--r--  arch/sparc/kernel/unaligned.c    |  548
-rw-r--r--  arch/sparc/kernel/vmlinux.lds.S  |  103
-rw-r--r--  arch/sparc/kernel/windows.c      |  127
-rw-r--r--  arch/sparc/kernel/wof.S          |  428
-rw-r--r--  arch/sparc/kernel/wuf.S          |  360
51 files changed, 20482 insertions, 0 deletions
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
new file mode 100644
index 000000000000..3d22ba2af01c
--- /dev/null
+++ b/arch/sparc/kernel/Makefile
@@ -0,0 +1,27 @@
1# $Id: Makefile,v 1.62 2000/12/15 00:41:17 davem Exp $
2# Makefile for the linux kernel.
3#
4
5extra-y := head.o init_task.o vmlinux.lds
6
7EXTRA_AFLAGS := -ansi
8
9IRQ_OBJS := irq.o sun4m_irq.o sun4c_irq.o sun4d_irq.o
10obj-y := entry.o wof.o wuf.o etrap.o rtrap.o traps.o $(IRQ_OBJS) \
11 process.o signal.o ioport.o setup.o idprom.o \
12 sys_sparc.o sunos_asm.o systbls.o \
13 time.o windows.o cpu.o devices.o sclow.o \
14 tadpole.o tick14.o ptrace.o sys_solaris.o \
15 unaligned.o muldiv.o semaphore.o
16
17obj-$(CONFIG_PCI) += pcic.o
18obj-$(CONFIG_SUN4) += sun4setup.o
19obj-$(CONFIG_SMP) += trampoline.o smp.o sun4m_smp.o sun4d_smp.o
20obj-$(CONFIG_SUN_AUXIO) += auxio.o
21obj-$(CONFIG_PCI) += ebus.o
22obj-$(CONFIG_SUN_PM) += apc.o pmc.o
23obj-$(CONFIG_MODULES) += module.o sparc_ksyms.o
24
25ifdef CONFIG_SUNOS_EMUL
26obj-y += sys_sunos.o sunos_ioctl.o
27endif
diff --git a/arch/sparc/kernel/apc.c b/arch/sparc/kernel/apc.c
new file mode 100644
index 000000000000..406dd94afb45
--- /dev/null
+++ b/arch/sparc/kernel/apc.c
@@ -0,0 +1,186 @@
1/* apc - Driver implementation for power management functions
2 * of Aurora Personality Chip (APC) on SPARCstation-4/5 and
3 * derivatives.
4 *
5 * Copyright (c) 2002 Eric Brower (ebrower@usa.net)
6 */
7
8#include <linux/kernel.h>
9#include <linux/fs.h>
10#include <linux/errno.h>
11#include <linux/init.h>
12#include <linux/miscdevice.h>
13#include <linux/pm.h>
14
15#include <asm/io.h>
16#include <asm/sbus.h>
17#include <asm/oplib.h>
18#include <asm/uaccess.h>
19#include <asm/auxio.h>
20#include <asm/apc.h>
21
22/* Debugging
23 *
24 * #define APC_DEBUG_LED
25 */
26
27#define APC_MINOR MISC_DYNAMIC_MINOR
28#define APC_OBPNAME "power-management"
29#define APC_DEVNAME "apc"
30
31volatile static u8 __iomem *regs;
32static int apc_regsize;
33static int apc_no_idle __initdata = 0;
34
35#define apc_readb(offs) (sbus_readb(regs+offs))
36#define apc_writeb(val, offs) (sbus_writeb(val, regs+offs))
37
38/* Specify "apc=noidle" on the kernel command line to
39 * disable APC CPU standby support. Certain prototype
40 * systems (SPARCstation-Fox) do not play well with APC
41 * CPU idle, so disable this if your system has APC and
42 * crashes randomly.
43 */
44static int __init apc_setup(char *str)
45{
46 if(!strncmp(str, "noidle", strlen("noidle"))) {
47 apc_no_idle = 1;
48 return 1;
49 }
50 return 0;
51}
52__setup("apc=", apc_setup);
53
54/*
55 * CPU idle callback function
56 * See .../arch/sparc/kernel/process.c
57 */
58void apc_swift_idle(void)
59{
60#ifdef APC_DEBUG_LED
61 set_auxio(0x00, AUXIO_LED);
62#endif
63
64 apc_writeb(apc_readb(APC_IDLE_REG) | APC_IDLE_ON, APC_IDLE_REG);
65
66#ifdef APC_DEBUG_LED
67 set_auxio(AUXIO_LED, 0x00);
68#endif
69}
70
71static inline void apc_free(void)
72{
73 sbus_iounmap(regs, apc_regsize);
74}
75
76static int apc_open(struct inode *inode, struct file *f)
77{
78 return 0;
79}
80
81static int apc_release(struct inode *inode, struct file *f)
82{
83 return 0;
84}
85
86static int apc_ioctl(struct inode *inode, struct file *f,
87 unsigned int cmd, unsigned long __arg)
88{
89 __u8 inarg, __user *arg;
90
91 arg = (__u8 __user *) __arg;
92 switch (cmd) {
93 case APCIOCGFANCTL:
94 if (put_user(apc_readb(APC_FANCTL_REG) & APC_REGMASK, arg))
95 return -EFAULT;
96 break;
97
98 case APCIOCGCPWR:
99 if (put_user(apc_readb(APC_CPOWER_REG) & APC_REGMASK, arg))
100 return -EFAULT;
101 break;
102
103 case APCIOCGBPORT:
104 if (put_user(apc_readb(APC_BPORT_REG) & APC_BPMASK, arg))
105 return -EFAULT;
106 break;
107
108 case APCIOCSFANCTL:
109 if (get_user(inarg, arg))
110 return -EFAULT;
111 apc_writeb(inarg & APC_REGMASK, APC_FANCTL_REG);
112 break;
113 case APCIOCSCPWR:
114 if (get_user(inarg, arg))
115 return -EFAULT;
116 apc_writeb(inarg & APC_REGMASK, APC_CPOWER_REG);
117 break;
118 case APCIOCSBPORT:
119 if (get_user(inarg, arg))
120 return -EFAULT;
121 apc_writeb(inarg & APC_BPMASK, APC_BPORT_REG);
122 break;
123 default:
124 return -EINVAL;
125 };
126
127 return 0;
128}
129
130static struct file_operations apc_fops = {
131 .ioctl = apc_ioctl,
132 .open = apc_open,
133 .release = apc_release,
134};
135
136static struct miscdevice apc_miscdev = { APC_MINOR, APC_DEVNAME, &apc_fops };
137
138static int __init apc_probe(void)
139{
140 struct sbus_bus *sbus = NULL;
141 struct sbus_dev *sdev = NULL;
142 int iTmp = 0;
143
144 for_each_sbus(sbus) {
145 for_each_sbusdev(sdev, sbus) {
146 if (!strcmp(sdev->prom_name, APC_OBPNAME)) {
147 goto sbus_done;
148 }
149 }
150 }
151
152sbus_done:
153 if (!sdev) {
154 return -ENODEV;
155 }
156
157 apc_regsize = sdev->reg_addrs[0].reg_size;
158 regs = sbus_ioremap(&sdev->resource[0], 0,
159 apc_regsize, APC_OBPNAME);
160 if(!regs) {
161 printk(KERN_ERR "%s: unable to map registers\n", APC_DEVNAME);
162 return -ENODEV;
163 }
164
165 iTmp = misc_register(&apc_miscdev);
166 if (iTmp != 0) {
167 printk(KERN_ERR "%s: unable to register device\n", APC_DEVNAME);
168 apc_free();
169 return -ENODEV;
170 }
171
172 /* Assign power management IDLE handler */
173 if(!apc_no_idle)
174 pm_idle = apc_swift_idle;
175
176 printk(KERN_INFO "%s: power management initialized%s\n",
177 APC_DEVNAME, apc_no_idle ? " (CPU idle disabled)" : "");
178 return 0;
179}
180
181/* This driver is not critical to the boot process
182 * and is easiest to ioremap when SBus is already
183 * initialized, so we install ourselves thusly:
184 */
185__initcall(apc_probe);
186
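The ioctl switch above is the whole userspace interface: each APCIOCG*/APCIOCS* request reads or writes one byte-wide APC register through the misc device. A minimal sketch of a userspace caller follows; it assumes the node appears as /dev/apc (the name and minor are dynamic, so the path depends on devfs/udev setup) and that the APCIOC* request codes from <asm/apc.h> are visible to userspace.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/apc.h>	/* APCIOCGFANCTL and friends; assumed exported to userspace */

int main(void)
{
	unsigned char fan;
	int fd = open("/dev/apc", O_RDWR);	/* device node name is an assumption */

	if (fd < 0)
		return 1;
	if (ioctl(fd, APCIOCGFANCTL, &fan) == 0)	/* read the fan control register */
		printf("fan control: 0x%02x\n", fan);
	close(fd);
	return 0;
}

The "apc=noidle" option described in the driver comment is independent of this interface; it is passed on the kernel command line at boot and only disables the pm_idle hook.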
diff --git a/arch/sparc/kernel/asm-offsets.c b/arch/sparc/kernel/asm-offsets.c
new file mode 100644
index 000000000000..1f55231f07de
--- /dev/null
+++ b/arch/sparc/kernel/asm-offsets.c
@@ -0,0 +1,45 @@
1/*
2 * This program is used to generate definitions needed by
3 * assembly language modules.
4 *
5 * We use the technique used in the OSF Mach kernel code:
6 * generate asm statements containing #defines,
7 * compile this file to assembler, and then extract the
8 * #defines from the assembly-language output.
9 *
10 * On sparc, thread_info data is static and TI_XXX offsets are computed by hand.
11 */
12
13#include <linux/config.h>
14#include <linux/sched.h>
15// #include <linux/mm.h>
16
17#define DEFINE(sym, val) \
18 asm volatile("\n->" #sym " %0 " #val : : "i" (val))
19
20#define BLANK() asm volatile("\n->" : : )
21
22int foo(void)
23{
24 DEFINE(AOFF_task_thread, offsetof(struct task_struct, thread));
25 BLANK();
26 /* XXX This is the stuff for sclow.S, kill it. */
27 DEFINE(AOFF_task_pid, offsetof(struct task_struct, pid));
28 DEFINE(AOFF_task_uid, offsetof(struct task_struct, uid));
29 DEFINE(AOFF_task_gid, offsetof(struct task_struct, gid));
30 DEFINE(AOFF_task_euid, offsetof(struct task_struct, euid));
31 DEFINE(AOFF_task_egid, offsetof(struct task_struct, egid));
32 /* DEFINE(THREAD_INFO, offsetof(struct task_struct, thread_info)); */
33 DEFINE(ASIZ_task_uid, sizeof(current->uid));
34 DEFINE(ASIZ_task_gid, sizeof(current->gid));
35 DEFINE(ASIZ_task_euid, sizeof(current->euid));
36 DEFINE(ASIZ_task_egid, sizeof(current->egid));
37 BLANK();
38 DEFINE(AOFF_thread_fork_kpsr,
39 offsetof(struct thread_struct, fork_kpsr));
40 BLANK();
41 DEFINE(AOFF_mm_context, offsetof(struct mm_struct, context));
42
43 /* DEFINE(NUM_USER_SEGMENTS, TASK_SIZE>>28); */
44 return 0;
45}
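The DEFINE()/BLANK() trick works because the "i" constraint forces the compiler to emit the constant into the generated assembly, where a build-time script can pick it up. A standalone sketch of the same technique is below; the struct, symbol names, and the sample output line are illustrative, not taken from the kernel build.

/* Compile with "gcc -S offsets_demo.c" and look at offsets_demo.s:
 * each DEFINE() leaves behind a line such as
 *     ->DEMO_pid 4 offsetof(struct demo, pid)
 * (the exact immediate syntax depends on the target assembler).
 * A sed/awk pass in the architecture or Kbuild makefiles then turns
 * such lines into "#define DEMO_pid 4"; the exact script varies by release.
 */
#include <stddef.h>

struct demo {
	long state;
	int pid;
};

#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

int foo(void)
{
	DEFINE(DEMO_pid, offsetof(struct demo, pid));
	return 0;
}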
diff --git a/arch/sparc/kernel/auxio.c b/arch/sparc/kernel/auxio.c
new file mode 100644
index 000000000000..d3b3648362c0
--- /dev/null
+++ b/arch/sparc/kernel/auxio.c
@@ -0,0 +1,138 @@
1/* auxio.c: Probing for the Sparc AUXIO register at boot time.
2 *
3 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
4 */
5
6#include <linux/stddef.h>
7#include <linux/init.h>
8#include <linux/config.h>
9#include <linux/spinlock.h>
10#include <asm/oplib.h>
11#include <asm/io.h>
12#include <asm/auxio.h>
13#include <asm/string.h> /* memset(), Linux has no bzero() */
14
15/* Probe and map in the Auxiliary I/O register */
16
17/* auxio_register is not static because it is referenced
18 * in entry.S::floppy_tdone
19 */
20void __iomem *auxio_register = NULL;
21static DEFINE_SPINLOCK(auxio_lock);
22
23void __init auxio_probe(void)
24{
25 int node, auxio_nd;
26 struct linux_prom_registers auxregs[1];
27 struct resource r;
28
29 switch (sparc_cpu_model) {
30 case sun4d:
31 case sun4:
32 return;
33 default:
34 break;
35 }
36 node = prom_getchild(prom_root_node);
37 auxio_nd = prom_searchsiblings(node, "auxiliary-io");
38 if(!auxio_nd) {
39 node = prom_searchsiblings(node, "obio");
40 node = prom_getchild(node);
41 auxio_nd = prom_searchsiblings(node, "auxio");
42 if(!auxio_nd) {
43#ifdef CONFIG_PCI
44 /* There may be auxio on Ebus */
45 return;
46#else
47 if(prom_searchsiblings(node, "leds")) {
48 /* VME chassis sun4m machine, no auxio exists. */
49 return;
50 }
51 prom_printf("Cannot find auxio node, cannot continue...\n");
52 prom_halt();
53#endif
54 }
55 }
56 if(prom_getproperty(auxio_nd, "reg", (char *) auxregs, sizeof(auxregs)) <= 0)
57 return;
58 prom_apply_obio_ranges(auxregs, 0x1);
59 /* Map the register both read and write */
60 r.flags = auxregs[0].which_io & 0xF;
61 r.start = auxregs[0].phys_addr;
62 r.end = auxregs[0].phys_addr + auxregs[0].reg_size - 1;
63 auxio_register = sbus_ioremap(&r, 0, auxregs[0].reg_size, "auxio");
64 /* Fix the address on sun4m and sun4c. */
65 if((((unsigned long) auxregs[0].phys_addr) & 3) == 3 ||
66 sparc_cpu_model == sun4c)
67 auxio_register += (3 - ((unsigned long)auxio_register & 3));
68
69 set_auxio(AUXIO_LED, 0);
70}
71
72unsigned char get_auxio(void)
73{
74 if(auxio_register)
75 return sbus_readb(auxio_register);
76 return 0;
77}
78
79void set_auxio(unsigned char bits_on, unsigned char bits_off)
80{
81 unsigned char regval;
82 unsigned long flags;
83 spin_lock_irqsave(&auxio_lock, flags);
84 switch(sparc_cpu_model) {
85 case sun4c:
86 regval = sbus_readb(auxio_register);
87 sbus_writeb(((regval | bits_on) & ~bits_off) | AUXIO_ORMEIN,
88 auxio_register);
89 break;
90 case sun4m:
91 if(!auxio_register)
92			break;	/* VME chassis sun4m, no auxio. */
93 regval = sbus_readb(auxio_register);
94 sbus_writeb(((regval | bits_on) & ~bits_off) | AUXIO_ORMEIN4M,
95 auxio_register);
96 break;
97 case sun4d:
98 break;
99 default:
100 panic("Can't set AUXIO register on this machine.");
101 };
102 spin_unlock_irqrestore(&auxio_lock, flags);
103}
104
105
106/* sun4m power control register (AUXIO2) */
107
108volatile unsigned char * auxio_power_register = NULL;
109
110void __init auxio_power_probe(void)
111{
112 struct linux_prom_registers regs;
113 int node;
114 struct resource r;
115
116 /* Attempt to find the sun4m power control node. */
117 node = prom_getchild(prom_root_node);
118 node = prom_searchsiblings(node, "obio");
119 node = prom_getchild(node);
120 node = prom_searchsiblings(node, "power");
121 if (node == 0 || node == -1)
122 return;
123
124 /* Map the power control register. */
125 if (prom_getproperty(node, "reg", (char *)&regs, sizeof(regs)) <= 0)
126 return;
127 prom_apply_obio_ranges(&regs, 1);
128 memset(&r, 0, sizeof(r));
129 r.flags = regs.which_io & 0xF;
130 r.start = regs.phys_addr;
131 r.end = regs.phys_addr + regs.reg_size - 1;
132 auxio_power_register = (unsigned char *) sbus_ioremap(&r, 0,
133 regs.reg_size, "auxpower");
134
135 /* Display a quick message on the console. */
136 if (auxio_power_register)
137 printk(KERN_INFO "Power off control detected.\n");
138}
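set_auxio() takes a bits-to-set and a bits-to-clear mask and rewrites the register under auxio_lock, which is why callers such as the APC driver above can toggle the front-panel LED with two calls. A minimal in-kernel sketch, assuming <asm/auxio.h> is in the include path and using only AUXIO_LED as shown in apc.c (whether a set bit means the LED is lit depends on the hardware):

#include <asm/auxio.h>

/* Illustrative only: clear, then set, the LED bit, leaving the
 * remaining AUXIO bits untouched (set_auxio() preserves them and
 * serializes on auxio_lock).
 */
static void toggle_led_once(void)
{
	set_auxio(0x00, AUXIO_LED);
	set_auxio(AUXIO_LED, 0x00);
}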
diff --git a/arch/sparc/kernel/cpu.c b/arch/sparc/kernel/cpu.c
new file mode 100644
index 000000000000..6a4ebc62193e
--- /dev/null
+++ b/arch/sparc/kernel/cpu.c
@@ -0,0 +1,168 @@
1/* cpu.c: Dinky routines to look for the kind of Sparc cpu
2 * we are on.
3 *
4 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
5 */
6
7#include <linux/config.h>
8#include <linux/kernel.h>
9#include <linux/init.h>
10#include <linux/smp.h>
11#include <linux/threads.h>
12#include <asm/oplib.h>
13#include <asm/page.h>
14#include <asm/head.h>
15#include <asm/psr.h>
16#include <asm/mbus.h>
17#include <asm/cpudata.h>
18
19DEFINE_PER_CPU(cpuinfo_sparc, __cpu_data) = { 0 };
20
21struct cpu_iu_info {
22 int psr_impl;
23 int psr_vers;
24 char* cpu_name; /* should be enough I hope... */
25};
26
27struct cpu_fp_info {
28 int psr_impl;
29 int fp_vers;
30 char* fp_name;
31};
32
33/* In order to get the fpu type correct, you need to take the IDPROM's
34 * machine type value into consideration too. I will fix this.
35 */
36struct cpu_fp_info linux_sparc_fpu[] = {
37 { 0, 0, "Fujitsu MB86910 or Weitek WTL1164/5"},
38 { 0, 1, "Fujitsu MB86911 or Weitek WTL1164/5 or LSI L64831"},
39 { 0, 2, "LSI Logic L64802 or Texas Instruments ACT8847"},
40 /* SparcStation SLC, SparcStation1 */
41 { 0, 3, "Weitek WTL3170/2"},
42 /* SPARCstation-5 */
43 { 0, 4, "Lsi Logic/Meiko L64804 or compatible"},
44 { 0, 5, "reserved"},
45 { 0, 6, "reserved"},
46 { 0, 7, "No FPU"},
47 { 1, 0, "ROSS HyperSparc combined IU/FPU"},
48 { 1, 1, "Lsi Logic L64814"},
49 { 1, 2, "Texas Instruments TMS390-C602A"},
50 { 1, 3, "Cypress CY7C602 FPU"},
51 { 1, 4, "reserved"},
52 { 1, 5, "reserved"},
53 { 1, 6, "reserved"},
54 { 1, 7, "No FPU"},
55 { 2, 0, "BIT B5010 or B5110/20 or B5210"},
56 { 2, 1, "reserved"},
57 { 2, 2, "reserved"},
58 { 2, 3, "reserved"},
59 { 2, 4, "reserved"},
60 { 2, 5, "reserved"},
61 { 2, 6, "reserved"},
62 { 2, 7, "No FPU"},
63 /* SuperSparc 50 module */
64 { 4, 0, "SuperSparc on-chip FPU"},
65 /* SparcClassic */
66 { 4, 4, "TI MicroSparc on chip FPU"},
67 { 5, 0, "Matsushita MN10501"},
68 { 5, 1, "reserved"},
69 { 5, 2, "reserved"},
70 { 5, 3, "reserved"},
71 { 5, 4, "reserved"},
72 { 5, 5, "reserved"},
73 { 5, 6, "reserved"},
74 { 5, 7, "No FPU"},
75 { 9, 3, "Fujitsu or Weitek on-chip FPU"},
76};
77
78#define NSPARCFPU (sizeof(linux_sparc_fpu)/sizeof(struct cpu_fp_info))
79
80struct cpu_iu_info linux_sparc_chips[] = {
81 /* Sun4/100, 4/200, SLC */
82 { 0, 0, "Fujitsu MB86900/1A or LSI L64831 SparcKIT-40"},
83	/* born as STP1012PGA */
84 { 0, 4, "Fujitsu MB86904"},
85 { 0, 5, "Fujitsu TurboSparc MB86907"},
86 /* SparcStation2, SparcServer 490 & 690 */
87 { 1, 0, "LSI Logic Corporation - L64811"},
88 /* SparcStation2 */
89 { 1, 1, "Cypress/ROSS CY7C601"},
90 /* Embedded controller */
91 { 1, 3, "Cypress/ROSS CY7C611"},
92 /* Ross Technologies HyperSparc */
93 { 1, 0xf, "ROSS HyperSparc RT620"},
94 { 1, 0xe, "ROSS HyperSparc RT625 or RT626"},
95 /* ECL Implementation, CRAY S-MP Supercomputer... AIEEE! */
96 /* Someone please write the code to support this beast! ;) */
97 { 2, 0, "Bipolar Integrated Technology - B5010"},
98 { 3, 0, "LSI Logic Corporation - unknown-type"},
99 { 4, 0, "Texas Instruments, Inc. - SuperSparc-(II)"},
100	/* SparcClassic -- born as STP1010TAB-50 */
101 { 4, 1, "Texas Instruments, Inc. - MicroSparc"},
102 { 4, 2, "Texas Instruments, Inc. - MicroSparc II"},
103 { 4, 3, "Texas Instruments, Inc. - SuperSparc 51"},
104 { 4, 4, "Texas Instruments, Inc. - SuperSparc 61"},
105 { 4, 5, "Texas Instruments, Inc. - unknown"},
106 { 5, 0, "Matsushita - MN10501"},
107 { 6, 0, "Philips Corporation - unknown"},
108 { 7, 0, "Harvest VLSI Design Center, Inc. - unknown"},
109 /* Gallium arsenide 200MHz, BOOOOGOOOOMIPS!!! */
110 { 8, 0, "Systems and Processes Engineering Corporation (SPEC)"},
111 { 9, 0, "Fujitsu or Weitek Power-UP"},
112 { 9, 1, "Fujitsu or Weitek Power-UP"},
113 { 9, 2, "Fujitsu or Weitek Power-UP"},
114 { 9, 3, "Fujitsu or Weitek Power-UP"},
115 { 0xa, 0, "UNKNOWN CPU-VENDOR/TYPE"},
116 { 0xb, 0, "UNKNOWN CPU-VENDOR/TYPE"},
117 { 0xc, 0, "UNKNOWN CPU-VENDOR/TYPE"},
118 { 0xd, 0, "UNKNOWN CPU-VENDOR/TYPE"},
119 { 0xe, 0, "UNKNOWN CPU-VENDOR/TYPE"},
120 { 0xf, 0, "UNKNOWN CPU-VENDOR/TYPE"},
121};
122
123#define NSPARCCHIPS (sizeof(linux_sparc_chips)/sizeof(struct cpu_iu_info))
124
125char *sparc_cpu_type;
126char *sparc_fpu_type;
127
128unsigned int fsr_storage;
129
130void __init cpu_probe(void)
131{
132 int psr_impl, psr_vers, fpu_vers;
133 int i, psr;
134
135 psr_impl = ((get_psr()>>28)&0xf);
136 psr_vers = ((get_psr()>>24)&0xf);
137
138 psr = get_psr();
139 put_psr(psr | PSR_EF);
140 fpu_vers = ((get_fsr()>>17)&0x7);
141 put_psr(psr);
142
143 for(i = 0; i<NSPARCCHIPS; i++) {
144 if(linux_sparc_chips[i].psr_impl == psr_impl)
145 if(linux_sparc_chips[i].psr_vers == psr_vers) {
146 sparc_cpu_type = linux_sparc_chips[i].cpu_name;
147 break;
148 }
149 }
150
151 if(i==NSPARCCHIPS)
152 printk("DEBUG: psr.impl = 0x%x psr.vers = 0x%x\n", psr_impl,
153 psr_vers);
154
155 for(i = 0; i<NSPARCFPU; i++) {
156 if(linux_sparc_fpu[i].psr_impl == psr_impl)
157 if(linux_sparc_fpu[i].fp_vers == fpu_vers) {
158 sparc_fpu_type = linux_sparc_fpu[i].fp_name;
159 break;
160 }
161 }
162
163 if(i == NSPARCFPU) {
164 printk("DEBUG: psr.impl = 0x%x fsr.vers = 0x%x\n", psr_impl,
165 fpu_vers);
166 sparc_fpu_type = linux_sparc_fpu[31].fp_name;
167 }
168}
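cpu_probe() identifies the chip from nothing more than the four-bit impl and vers fields packed into the top byte of %psr, plus the fver field of %fsr for the FPU. As a worked example with a made-up %psr value: 0x40000080 gives impl 4, vers 0, which the table above maps to "Texas Instruments, Inc. - SuperSparc-(II)". A small sketch of the same decode:

#include <stdio.h>

/* Decode the impl/vers fields the same way cpu_probe() does.
 * The sample value is hypothetical; on real hardware it comes from get_psr().
 */
int main(void)
{
	unsigned int psr = 0x40000080;
	unsigned int impl = (psr >> 28) & 0xf;
	unsigned int vers = (psr >> 24) & 0xf;

	printf("psr.impl=%u psr.vers=%u\n", impl, vers);	/* 4 / 0: SuperSparc */
	return 0;
}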
diff --git a/arch/sparc/kernel/devices.c b/arch/sparc/kernel/devices.c
new file mode 100644
index 000000000000..fcb0c049c3fe
--- /dev/null
+++ b/arch/sparc/kernel/devices.c
@@ -0,0 +1,160 @@
1/* devices.c: Initial scan of the prom device tree for important
2 * Sparc device nodes which we need to find.
3 *
4 * This is based on the sparc64 version, but sun4m doesn't always use
5 * the hardware MIDs, so be careful.
6 *
7 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
8 */
9
10#include <linux/config.h>
11#include <linux/kernel.h>
12#include <linux/threads.h>
13#include <linux/string.h>
14#include <linux/init.h>
15#include <linux/errno.h>
16
17#include <asm/page.h>
18#include <asm/oplib.h>
19#include <asm/smp.h>
20#include <asm/system.h>
21#include <asm/cpudata.h>
22
23extern void cpu_probe(void);
24extern void clock_stop_probe(void); /* tadpole.c */
25extern void sun4c_probe_memerr_reg(void);
26
27static char *cpu_mid_prop(void)
28{
29 if (sparc_cpu_model == sun4d)
30 return "cpu-id";
31 return "mid";
32}
33
34static int check_cpu_node(int nd, int *cur_inst,
35 int (*compare)(int, int, void *), void *compare_arg,
36 int *prom_node, int *mid)
37{
38 char node_str[128];
39
40 prom_getstring(nd, "device_type", node_str, sizeof(node_str));
41 if (strcmp(node_str, "cpu"))
42 return -ENODEV;
43
44 if (!compare(nd, *cur_inst, compare_arg)) {
45 if (prom_node)
46 *prom_node = nd;
47 if (mid) {
48 *mid = prom_getintdefault(nd, cpu_mid_prop(), 0);
49 if (sparc_cpu_model == sun4m)
50 *mid &= 3;
51 }
52 return 0;
53 }
54
55 (*cur_inst)++;
56
57 return -ENODEV;
58}
59
60static int __cpu_find_by(int (*compare)(int, int, void *), void *compare_arg,
61 int *prom_node, int *mid)
62{
63 int nd, cur_inst, err;
64
65 nd = prom_root_node;
66 cur_inst = 0;
67
68 err = check_cpu_node(nd, &cur_inst, compare, compare_arg,
69 prom_node, mid);
70 if (!err)
71 return 0;
72
73 nd = prom_getchild(nd);
74 while ((nd = prom_getsibling(nd)) != 0) {
75 err = check_cpu_node(nd, &cur_inst, compare, compare_arg,
76 prom_node, mid);
77 if (!err)
78 return 0;
79 }
80
81 return -ENODEV;
82}
83
84static int cpu_instance_compare(int nd, int instance, void *_arg)
85{
86 int desired_instance = (int) _arg;
87
88 if (instance == desired_instance)
89 return 0;
90 return -ENODEV;
91}
92
93int cpu_find_by_instance(int instance, int *prom_node, int *mid)
94{
95 return __cpu_find_by(cpu_instance_compare, (void *)instance,
96 prom_node, mid);
97}
98
99static int cpu_mid_compare(int nd, int instance, void *_arg)
100{
101 int desired_mid = (int) _arg;
102 int this_mid;
103
104 this_mid = prom_getintdefault(nd, cpu_mid_prop(), 0);
105 if (this_mid == desired_mid
106 || (sparc_cpu_model == sun4m && (this_mid & 3) == desired_mid))
107 return 0;
108 return -ENODEV;
109}
110
111int cpu_find_by_mid(int mid, int *prom_node)
112{
113 return __cpu_find_by(cpu_mid_compare, (void *)mid,
114 prom_node, NULL);
115}
116
117/* sun4m uses truncated mids since we base the cpuid on the ttable/irqset
118 * address (0-3). This gives us the true hardware mid, which might have
119 * some other bits set. On 4d hardware and software mids are the same.
120 */
121int cpu_get_hwmid(int prom_node)
122{
123 return prom_getintdefault(prom_node, cpu_mid_prop(), -ENODEV);
124}
125
126void __init device_scan(void)
127{
128 prom_printf("Booting Linux...\n");
129
130#ifndef CONFIG_SMP
131 {
132 int err, cpu_node;
133 err = cpu_find_by_instance(0, &cpu_node, NULL);
134 if (err) {
135 /* Probably a sun4e, Sun is trying to trick us ;-) */
136 prom_printf("No cpu nodes, cannot continue\n");
137 prom_halt();
138 }
139 cpu_data(0).clock_tick = prom_getintdefault(cpu_node,
140 "clock-frequency",
141 0);
142 }
143#endif /* !CONFIG_SMP */
144
145 cpu_probe();
146#ifdef CONFIG_SUN_AUXIO
147 {
148 extern void auxio_probe(void);
149 extern void auxio_power_probe(void);
150 auxio_probe();
151 auxio_power_probe();
152 }
153#endif
154 clock_stop_probe();
155
156 if (ARCH_SUN4C_SUN4)
157 sun4c_probe_memerr_reg();
158
159 return;
160}
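cpu_find_by_instance() and cpu_find_by_mid() are the two lookups the rest of the port builds on: device_scan() above uses the former to read the boot CPU's clock-frequency, and other parts of the port, such as the SMP startup code, do similar translations between logical CPU numbers, PROM nodes, and MIDs. A hedged sketch of a caller walking every present CPU in instance order (error handling trimmed; the extern matches the definition in this file):

#include <linux/threads.h>
#include <asm/oplib.h>

extern int cpu_find_by_instance(int instance, int *prom_node, int *mid);

/* Illustrative walk over the PROM cpu nodes; stops at the first
 * missing instance, mirroring the -ENODEV convention above.
 */
static void list_cpu_nodes(void)
{
	int i, node, mid;

	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_find_by_instance(i, &node, &mid))
			break;
		prom_printf("cpu%d: prom node %08x, mid %d\n", i, node, mid);
	}
}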
diff --git a/arch/sparc/kernel/ebus.c b/arch/sparc/kernel/ebus.c
new file mode 100644
index 000000000000..1754192c69d0
--- /dev/null
+++ b/arch/sparc/kernel/ebus.c
@@ -0,0 +1,361 @@
1/* $Id: ebus.c,v 1.20 2002/01/05 01:13:43 davem Exp $
2 * ebus.c: PCI to EBus bridge device.
3 *
4 * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
5 *
6 * Adopted for sparc by V. Roganov and G. Raiko.
7 * Fixes for different platforms by Pete Zaitcev.
8 */
9
10#include <linux/config.h>
11#include <linux/kernel.h>
12#include <linux/types.h>
13#include <linux/init.h>
14#include <linux/slab.h>
15#include <linux/string.h>
16
17#include <asm/system.h>
18#include <asm/page.h>
19#include <asm/pbm.h>
20#include <asm/ebus.h>
21#include <asm/io.h>
22#include <asm/oplib.h>
23#include <asm/bpp.h>
24
25struct linux_ebus *ebus_chain = 0;
26
27/* We are together with pcic.c under CONFIG_PCI. */
28extern unsigned int pcic_pin_to_irq(unsigned int, char *name);
29
30/*
31 * IRQ Blacklist
32 * Here we list PROMs and systems that are known to supply crap as IRQ numbers.
33 */
34struct ebus_device_irq {
35 char *name;
36 unsigned int pin;
37};
38
39struct ebus_system_entry {
40 char *esname;
41 struct ebus_device_irq *ipt;
42};
43
44static struct ebus_device_irq je1_1[] = {
45 { "8042", 3 },
46 { "SUNW,CS4231", 0 },
47 { "parallel", 0 },
48 { "se", 2 },
49 { 0, 0 }
50};
51
52/*
53 * Gleb's JE1 supplied reasonable pin numbers, but mine did not (OBP 2.32).
54 * Blacklist the sucker... Note that Gleb's system will work.
55 */
56static struct ebus_system_entry ebus_blacklist[] = {
57 { "SUNW,JavaEngine1", je1_1 },
58 { 0, 0 }
59};
60
61static struct ebus_device_irq *ebus_blackp = NULL;
62
63/*
64 */
65static inline unsigned long ebus_alloc(size_t size)
66{
67 return (unsigned long)kmalloc(size, GFP_ATOMIC);
68}
69
70/*
71 */
72int __init ebus_blacklist_irq(char *name)
73{
74 struct ebus_device_irq *dp;
75
76 if ((dp = ebus_blackp) != NULL) {
77 for (; dp->name != NULL; dp++) {
78 if (strcmp(name, dp->name) == 0) {
79 return pcic_pin_to_irq(dp->pin, name);
80 }
81 }
82 }
83 return 0;
84}
85
86void __init fill_ebus_child(int node, struct linux_prom_registers *preg,
87 struct linux_ebus_child *dev)
88{
89 int regs[PROMREG_MAX];
90 int irqs[PROMREG_MAX];
91 char lbuf[128];
92 int i, len;
93
94 dev->prom_node = node;
95 prom_getstring(node, "name", lbuf, sizeof(lbuf));
96 strcpy(dev->prom_name, lbuf);
97
98 len = prom_getproperty(node, "reg", (void *)regs, sizeof(regs));
99 if (len == -1) len = 0;
100 dev->num_addrs = len / sizeof(regs[0]);
101
102 for (i = 0; i < dev->num_addrs; i++) {
103 if (regs[i] >= dev->parent->num_addrs) {
104 prom_printf("UGH: property for %s was %d, need < %d\n",
105 dev->prom_name, len, dev->parent->num_addrs);
106 panic(__FUNCTION__);
107 }
108 dev->resource[i].start = dev->parent->resource[regs[i]].start; /* XXX resource */
109 }
110
111 for (i = 0; i < PROMINTR_MAX; i++)
112 dev->irqs[i] = PCI_IRQ_NONE;
113
114 if ((dev->irqs[0] = ebus_blacklist_irq(dev->prom_name)) != 0) {
115 dev->num_irqs = 1;
116 } else if ((len = prom_getproperty(node, "interrupts",
117 (char *)&irqs, sizeof(irqs))) == -1 || len == 0) {
118 dev->num_irqs = 0;
119 dev->irqs[0] = 0;
120 if (dev->parent->num_irqs != 0) {
121 dev->num_irqs = 1;
122 dev->irqs[0] = dev->parent->irqs[0];
123/* P3 */ /* printk("EBUS: dev %s irq %d from parent\n", dev->prom_name, dev->irqs[0]); */
124 }
125 } else {
126 dev->num_irqs = len / sizeof(irqs[0]);
127 if (irqs[0] == 0 || irqs[0] >= 8) {
128 /*
129 * XXX Zero is a valid pin number...
130 * This works as long as Ebus is not wired to INTA#.
131 */
132 printk("EBUS: %s got bad irq %d from PROM\n",
133 dev->prom_name, irqs[0]);
134 dev->num_irqs = 0;
135 dev->irqs[0] = 0;
136 } else {
137 dev->irqs[0] = pcic_pin_to_irq(irqs[0], dev->prom_name);
138 }
139 }
140}
141
142void __init fill_ebus_device(int node, struct linux_ebus_device *dev)
143{
144 struct linux_prom_registers regs[PROMREG_MAX];
145 struct linux_ebus_child *child;
146 int irqs[PROMINTR_MAX];
147 char lbuf[128];
148 int i, n, len;
149 unsigned long baseaddr;
150
151 dev->prom_node = node;
152 prom_getstring(node, "name", lbuf, sizeof(lbuf));
153 strcpy(dev->prom_name, lbuf);
154
155 len = prom_getproperty(node, "reg", (void *)regs, sizeof(regs));
156 if (len % sizeof(struct linux_prom_registers)) {
157 prom_printf("UGH: proplen for %s was %d, need multiple of %d\n",
158 dev->prom_name, len,
159 (int)sizeof(struct linux_prom_registers));
160 panic(__FUNCTION__);
161 }
162 dev->num_addrs = len / sizeof(struct linux_prom_registers);
163
164 for (i = 0; i < dev->num_addrs; i++) {
165 /*
166 * XXX Collect JE-1 PROM
167 *
168 * Example - JS-E with 3.11:
169 * /ebus
170 * regs
171 * 0x00000000, 0x0, 0x00000000, 0x0, 0x00000000,
172 * 0x82000010, 0x0, 0xf0000000, 0x0, 0x01000000,
173 * 0x82000014, 0x0, 0x38800000, 0x0, 0x00800000,
174 * ranges
175 * 0x00, 0x00000000, 0x02000010, 0x0, 0x0, 0x01000000,
176 * 0x01, 0x01000000, 0x02000014, 0x0, 0x0, 0x00800000,
177 * /ebus/8042
178 * regs
179 * 0x00000001, 0x00300060, 0x00000008,
180 * 0x00000001, 0x00300060, 0x00000008,
181 */
182 n = regs[i].which_io;
183 if (n >= 4) {
184 /* XXX This is copied from old JE-1 by Gleb. */
185 n = (regs[i].which_io - 0x10) >> 2;
186 } else {
187 ;
188 }
189
190/*
191 * XXX Now as we have regions, why don't we make an on-demand allocation...
192 */
193 dev->resource[i].start = 0;
194 if ((baseaddr = dev->bus->self->resource[n].start +
195 regs[i].phys_addr) != 0) {
196 /* dev->resource[i].name = dev->prom_name; */
197 if ((baseaddr = (unsigned long) ioremap(baseaddr,
198 regs[i].reg_size)) == 0) {
199 panic("ebus: unable to remap dev %s",
200 dev->prom_name);
201 }
202 }
203 dev->resource[i].start = baseaddr; /* XXX Unaligned */
204 }
205
206 for (i = 0; i < PROMINTR_MAX; i++)
207 dev->irqs[i] = PCI_IRQ_NONE;
208
209 if ((dev->irqs[0] = ebus_blacklist_irq(dev->prom_name)) != 0) {
210 dev->num_irqs = 1;
211 } else if ((len = prom_getproperty(node, "interrupts",
212 (char *)&irqs, sizeof(irqs))) == -1 || len == 0) {
213 dev->num_irqs = 0;
214 if ((dev->irqs[0] = dev->bus->self->irq) != 0) {
215 dev->num_irqs = 1;
216/* P3 */ /* printk("EBUS: child %s irq %d from parent\n", dev->prom_name, dev->irqs[0]); */
217 }
218 } else {
219 dev->num_irqs = 1; /* dev->num_irqs = len / sizeof(irqs[0]); */
220 if (irqs[0] == 0 || irqs[0] >= 8) {
221 /* See above for the parent. XXX */
222 printk("EBUS: %s got bad irq %d from PROM\n",
223 dev->prom_name, irqs[0]);
224 dev->num_irqs = 0;
225 dev->irqs[0] = 0;
226 } else {
227 dev->irqs[0] = pcic_pin_to_irq(irqs[0], dev->prom_name);
228 }
229 }
230
231 if ((node = prom_getchild(node))) {
232 dev->children = (struct linux_ebus_child *)
233 ebus_alloc(sizeof(struct linux_ebus_child));
234
235 child = dev->children;
236 child->next = 0;
237 child->parent = dev;
238 child->bus = dev->bus;
239 fill_ebus_child(node, &regs[0], child);
240
241 while ((node = prom_getsibling(node)) != 0) {
242 child->next = (struct linux_ebus_child *)
243 ebus_alloc(sizeof(struct linux_ebus_child));
244
245 child = child->next;
246 child->next = 0;
247 child->parent = dev;
248 child->bus = dev->bus;
249 fill_ebus_child(node, &regs[0], child);
250 }
251 }
252}
253
254void __init ebus_init(void)
255{
256 struct linux_prom_pci_registers regs[PROMREG_MAX];
257 struct linux_pbm_info *pbm;
258 struct linux_ebus_device *dev;
259 struct linux_ebus *ebus;
260 struct ebus_system_entry *sp;
261 struct pci_dev *pdev;
262 struct pcidev_cookie *cookie;
263 char lbuf[128];
264 unsigned long addr, *base;
265 unsigned short pci_command;
266 int nd, len, ebusnd;
267 int reg, nreg;
268 int num_ebus = 0;
269
270 prom_getstring(prom_root_node, "name", lbuf, sizeof(lbuf));
271 for (sp = ebus_blacklist; sp->esname != NULL; sp++) {
272 if (strcmp(lbuf, sp->esname) == 0) {
273 ebus_blackp = sp->ipt;
274 break;
275 }
276 }
277
278 pdev = pci_get_device(PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_EBUS, 0);
279 if (!pdev) {
280 return;
281 }
282 cookie = pdev->sysdata;
283 ebusnd = cookie->prom_node;
284
285 ebus_chain = ebus = (struct linux_ebus *)
286 ebus_alloc(sizeof(struct linux_ebus));
287 ebus->next = 0;
288
289 while (ebusnd) {
290
291 prom_getstring(ebusnd, "name", lbuf, sizeof(lbuf));
292 ebus->prom_node = ebusnd;
293 strcpy(ebus->prom_name, lbuf);
294 ebus->self = pdev;
295 ebus->parent = pbm = cookie->pbm;
296
297 /* Enable BUS Master. */
298 pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
299 pci_command |= PCI_COMMAND_MASTER;
300 pci_write_config_word(pdev, PCI_COMMAND, pci_command);
301
302 len = prom_getproperty(ebusnd, "reg", (void *)regs,
303 sizeof(regs));
304 if (len == 0 || len == -1) {
305 prom_printf("%s: can't find reg property\n",
306 __FUNCTION__);
307 prom_halt();
308 }
309 nreg = len / sizeof(struct linux_prom_pci_registers);
310
311 base = &ebus->self->resource[0].start;
312 for (reg = 0; reg < nreg; reg++) {
313 if (!(regs[reg].which_io & 0x03000000))
314 continue;
315
316 addr = regs[reg].phys_lo;
317 *base++ = addr;
318 }
319
320 nd = prom_getchild(ebusnd);
321 if (!nd)
322 goto next_ebus;
323
324 ebus->devices = (struct linux_ebus_device *)
325 ebus_alloc(sizeof(struct linux_ebus_device));
326
327 dev = ebus->devices;
328 dev->next = 0;
329 dev->children = 0;
330 dev->bus = ebus;
331 fill_ebus_device(nd, dev);
332
333 while ((nd = prom_getsibling(nd)) != 0) {
334 dev->next = (struct linux_ebus_device *)
335 ebus_alloc(sizeof(struct linux_ebus_device));
336
337 dev = dev->next;
338 dev->next = 0;
339 dev->children = 0;
340 dev->bus = ebus;
341 fill_ebus_device(nd, dev);
342 }
343
344 next_ebus:
345 pdev = pci_get_device(PCI_VENDOR_ID_SUN,
346 PCI_DEVICE_ID_SUN_EBUS, pdev);
347 if (!pdev)
348 break;
349
350 cookie = pdev->sysdata;
351 ebusnd = cookie->prom_node;
352
353 ebus->next = (struct linux_ebus *)
354 ebus_alloc(sizeof(struct linux_ebus));
355 ebus = ebus->next;
356 ebus->next = 0;
357 ++num_ebus;
358 }
359 if (pdev)
360 pci_dev_put(pdev);
361}
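ebus_init() leaves behind a singly linked ebus_chain, with each bridge carrying a list of devices and each device optionally a list of children; drivers then look at prom_name, resource[] and irqs[] to find their hardware. Real drivers typically go through helper macros in <asm/ebus.h>; the sketch below instead walks the chain by hand using only the fields this file populates, and the device name "parallel" is just an example taken from the blacklist table above.

#include <linux/string.h>
#include <asm/ebus.h>

extern struct linux_ebus *ebus_chain;	/* defined in ebus.c above */

/* Illustrative scan for an EBus node named "parallel"; returns the
 * first match or NULL. The caller would then use dev->resource[0].start
 * and dev->irqs[0] to drive the hardware.
 */
static struct linux_ebus_device *find_parallel_port(void)
{
	struct linux_ebus *ebus;
	struct linux_ebus_device *dev;

	for (ebus = ebus_chain; ebus; ebus = ebus->next)
		for (dev = ebus->devices; dev; dev = dev->next)
			if (!strcmp(dev->prom_name, "parallel"))
				return dev;
	return NULL;
}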
diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S
new file mode 100644
index 000000000000..b448166f5da9
--- /dev/null
+++ b/arch/sparc/kernel/entry.S
@@ -0,0 +1,1956 @@
1/* $Id: entry.S,v 1.170 2001/11/13 00:57:05 davem Exp $
2 * arch/sparc/kernel/entry.S: Sparc trap low-level entry points.
3 *
4 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
6 * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
7 * Copyright (C) 1996-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
8 * Copyright (C) 1997 Anton Blanchard (anton@progsoc.uts.edu.au)
9 */
10
11#include <linux/config.h>
12#include <linux/errno.h>
13
14#include <asm/head.h>
15#include <asm/asi.h>
16#include <asm/smp.h>
17#include <asm/kgdb.h>
18#include <asm/contregs.h>
19#include <asm/ptrace.h>
20#include <asm/asm_offsets.h>
21#include <asm/psr.h>
22#include <asm/vaddrs.h>
23#include <asm/memreg.h>
24#include <asm/page.h>
25#ifdef CONFIG_SUN4
26#include <asm/pgtsun4.h>
27#else
28#include <asm/pgtsun4c.h>
29#endif
30#include <asm/winmacro.h>
31#include <asm/signal.h>
32#include <asm/obio.h>
33#include <asm/mxcc.h>
34#include <asm/thread_info.h>
35#include <asm/param.h>
36
37#include <asm/asmmacro.h>
38
39#define curptr g6
40
41#define NR_SYSCALLS 284 /* Each OS is different... */
42
43/* These are just handy. */
44#define _SV save %sp, -STACKFRAME_SZ, %sp
45#define _RS restore
46
47#define FLUSH_ALL_KERNEL_WINDOWS \
48 _SV; _SV; _SV; _SV; _SV; _SV; _SV; \
49 _RS; _RS; _RS; _RS; _RS; _RS; _RS;
50
51/* First, KGDB low level things. This is a rewrite
52 * of the routines found in the sparc-stub.c asm() statement
53 * from the gdb distribution. This is also dual-purpose
54 * as a software trap for userlevel programs.
55 */
56 .data
57 .align 4
58
59in_trap_handler:
60 .word 0
61
62 .text
63 .align 4
64
65#if 0 /* kgdb is dropped from 2.5.33 */
66! This function is called when any SPARC trap (except window overflow or
67! underflow) occurs. It makes sure that the invalid register window is still
68! available before jumping into C code. It will also restore the world if you
69! return from handle_exception.
70
71 .globl trap_low
72trap_low:
73 rd %wim, %l3
74 SAVE_ALL
75
76 sethi %hi(in_trap_handler), %l4
77 ld [%lo(in_trap_handler) + %l4], %l5
78 inc %l5
79 st %l5, [%lo(in_trap_handler) + %l4]
80
81 /* Make sure kgdb sees the same state we just saved. */
82 LOAD_PT_GLOBALS(sp)
83 LOAD_PT_INS(sp)
84 ld [%sp + STACKFRAME_SZ + PT_Y], %l4
85 ld [%sp + STACKFRAME_SZ + PT_WIM], %l3
86 ld [%sp + STACKFRAME_SZ + PT_PSR], %l0
87 ld [%sp + STACKFRAME_SZ + PT_PC], %l1
88 ld [%sp + STACKFRAME_SZ + PT_NPC], %l2
89 rd %tbr, %l5 /* Never changes... */
90
91 /* Make kgdb exception frame. */
92 sub %sp,(16+1+6+1+72)*4,%sp ! Make room for input & locals
93 ! + hidden arg + arg spill
94 ! + doubleword alignment
95 ! + registers[72] local var
96 SAVE_KGDB_GLOBALS(sp)
97 SAVE_KGDB_INS(sp)
98 SAVE_KGDB_SREGS(sp, l4, l0, l3, l5, l1, l2)
99
100 /* We are increasing PIL, so two writes. */
101 or %l0, PSR_PIL, %l0
102 wr %l0, 0, %psr
103 WRITE_PAUSE
104 wr %l0, PSR_ET, %psr
105 WRITE_PAUSE
106
107 call handle_exception
108 add %sp, STACKFRAME_SZ, %o0 ! Pass address of registers
109
110 /* Load new kgdb register set. */
111 LOAD_KGDB_GLOBALS(sp)
112 LOAD_KGDB_INS(sp)
113 LOAD_KGDB_SREGS(sp, l4, l0, l3, l5, l1, l2)
114 wr %l4, 0x0, %y
115
116 sethi %hi(in_trap_handler), %l4
117 ld [%lo(in_trap_handler) + %l4], %l5
118 dec %l5
119 st %l5, [%lo(in_trap_handler) + %l4]
120
121 add %sp,(16+1+6+1+72)*4,%sp ! Undo the kgdb trap frame.
122
123 /* Now take what kgdb did and place it into the pt_regs
124 * frame which SparcLinux RESTORE_ALL understands.,
125 */
126 STORE_PT_INS(sp)
127 STORE_PT_GLOBALS(sp)
128 STORE_PT_YREG(sp, g2)
129 STORE_PT_PRIV(sp, l0, l1, l2)
130
131 RESTORE_ALL
132#endif
133
134#ifdef CONFIG_BLK_DEV_FD
135 .text
136 .align 4
137 .globl floppy_hardint
138floppy_hardint:
139 /*
140 * This code cannot touch registers %l0 %l1 and %l2
141 * because SAVE_ALL depends on their values. It depends
142 * on %l3 also, but we regenerate it before a call.
143 * Other registers are:
144 * %l3 -- base address of fdc registers
145 * %l4 -- pdma_vaddr
146 * %l5 -- scratch for ld/st address
147 * %l6 -- pdma_size
148 * %l7 -- scratch [floppy byte, ld/st address, aux. data]
149 */
150
151 /* Do we have work to do? */
152 sethi %hi(doing_pdma), %l7
153 ld [%l7 + %lo(doing_pdma)], %l7
154 cmp %l7, 0
155 be floppy_dosoftint
156 nop
157
158 /* Load fdc register base */
159 sethi %hi(fdc_status), %l3
160 ld [%l3 + %lo(fdc_status)], %l3
161
162 /* Setup register addresses */
163 sethi %hi(pdma_vaddr), %l5 ! transfer buffer
164 ld [%l5 + %lo(pdma_vaddr)], %l4
165 sethi %hi(pdma_size), %l5 ! bytes to go
166 ld [%l5 + %lo(pdma_size)], %l6
167next_byte:
168 ldub [%l3], %l7
169
170 andcc %l7, 0x80, %g0 ! Does fifo still have data
171 bz floppy_fifo_emptied ! fifo has been emptied...
172 andcc %l7, 0x20, %g0 ! in non-dma mode still?
173 bz floppy_overrun ! nope, overrun
174 andcc %l7, 0x40, %g0 ! 0=write 1=read
175 bz floppy_write
176 sub %l6, 0x1, %l6
177
178 /* Ok, actually read this byte */
179 ldub [%l3 + 1], %l7
180 orcc %g0, %l6, %g0
181 stb %l7, [%l4]
182 bne next_byte
183 add %l4, 0x1, %l4
184
185 b floppy_tdone
186 nop
187
188floppy_write:
189 /* Ok, actually write this byte */
190 ldub [%l4], %l7
191 orcc %g0, %l6, %g0
192 stb %l7, [%l3 + 1]
193 bne next_byte
194 add %l4, 0x1, %l4
195
196 /* fall through... */
197floppy_tdone:
198 sethi %hi(pdma_vaddr), %l5
199 st %l4, [%l5 + %lo(pdma_vaddr)]
200 sethi %hi(pdma_size), %l5
201 st %l6, [%l5 + %lo(pdma_size)]
202 /* Flip terminal count pin */
203 set auxio_register, %l7
204 ld [%l7], %l7
205
206 set sparc_cpu_model, %l5
207 ld [%l5], %l5
208 subcc %l5, 1, %g0 /* enum { sun4c = 1 }; */
209 be 1f
210 ldub [%l7], %l5
211
212 or %l5, 0xc2, %l5
213 stb %l5, [%l7]
214 andn %l5, 0x02, %l5
215 b 2f
216 nop
217
2181:
219 or %l5, 0xf4, %l5
220 stb %l5, [%l7]
221 andn %l5, 0x04, %l5
222
2232:
224 /* Kill some time so the bits set */
225 WRITE_PAUSE
226 WRITE_PAUSE
227
228 stb %l5, [%l7]
229
230 /* Prevent recursion */
231 sethi %hi(doing_pdma), %l7
232 b floppy_dosoftint
233 st %g0, [%l7 + %lo(doing_pdma)]
234
235 /* We emptied the FIFO, but we haven't read everything
236 * as of yet. Store the current transfer address and
237 * bytes left to read so we can continue when the next
238 * fast IRQ comes in.
239 */
240floppy_fifo_emptied:
241 sethi %hi(pdma_vaddr), %l5
242 st %l4, [%l5 + %lo(pdma_vaddr)]
243 sethi %hi(pdma_size), %l7
244 st %l6, [%l7 + %lo(pdma_size)]
245
246 /* Restore condition codes */
247 wr %l0, 0x0, %psr
248 WRITE_PAUSE
249
250 jmp %l1
251 rett %l2
252
253floppy_overrun:
254 sethi %hi(pdma_vaddr), %l5
255 st %l4, [%l5 + %lo(pdma_vaddr)]
256 sethi %hi(pdma_size), %l5
257 st %l6, [%l5 + %lo(pdma_size)]
258 /* Prevent recursion */
259 sethi %hi(doing_pdma), %l7
260 st %g0, [%l7 + %lo(doing_pdma)]
261
262 /* fall through... */
263floppy_dosoftint:
264 rd %wim, %l3
265 SAVE_ALL
266
267 /* Set all IRQs off. */
268 or %l0, PSR_PIL, %l4
269 wr %l4, 0x0, %psr
270 WRITE_PAUSE
271 wr %l4, PSR_ET, %psr
272 WRITE_PAUSE
273
274 mov 11, %o0 ! floppy irq level (unused anyway)
275 mov %g0, %o1 ! devid is not used in fast interrupts
276 call sparc_floppy_irq
277 add %sp, STACKFRAME_SZ, %o2 ! struct pt_regs *regs
278
279 RESTORE_ALL
280
281#endif /* (CONFIG_BLK_DEV_FD) */
282
283 /* Bad trap handler */
284 .globl bad_trap_handler
285bad_trap_handler:
286 SAVE_ALL
287
288 wr %l0, PSR_ET, %psr
289 WRITE_PAUSE
290
291 add %sp, STACKFRAME_SZ, %o0 ! pt_regs
292 call do_hw_interrupt
293 mov %l7, %o1 ! trap number
294
295 RESTORE_ALL
296
297/* For now all IRQ's not registered get sent here. handler_irq() will
298 * see if a routine is registered to handle this interrupt and if not
299 * it will say so on the console.
300 */
301
302 .align 4
303 .globl real_irq_entry, patch_handler_irq
304real_irq_entry:
305 SAVE_ALL
306
307#ifdef CONFIG_SMP
308 .globl patchme_maybe_smp_msg
309
310 cmp %l7, 12
311patchme_maybe_smp_msg:
312 bgu maybe_smp4m_msg
313 nop
314#endif
315
316real_irq_continue:
317 or %l0, PSR_PIL, %g2
318 wr %g2, 0x0, %psr
319 WRITE_PAUSE
320 wr %g2, PSR_ET, %psr
321 WRITE_PAUSE
322 mov %l7, %o0 ! irq level
323patch_handler_irq:
324 call handler_irq
325 add %sp, STACKFRAME_SZ, %o1 ! pt_regs ptr
326 or %l0, PSR_PIL, %g2 ! restore PIL after handler_irq
327 wr %g2, PSR_ET, %psr ! keep ET up
328 WRITE_PAUSE
329
330 RESTORE_ALL
331
332#ifdef CONFIG_SMP
333 /* SMP per-cpu ticker interrupts are handled specially. */
334smp4m_ticker:
335 bne real_irq_continue+4
336 or %l0, PSR_PIL, %g2
337 wr %g2, 0x0, %psr
338 WRITE_PAUSE
339 wr %g2, PSR_ET, %psr
340 WRITE_PAUSE
341 call smp4m_percpu_timer_interrupt
342 add %sp, STACKFRAME_SZ, %o0
343 wr %l0, PSR_ET, %psr
344 WRITE_PAUSE
345 RESTORE_ALL
346
347 /* Here is where we check for possible SMP IPI passed to us
348 * on some level other than 15 which is the NMI and only used
349 * for cross calls. That has a separate entry point below.
350 */
351maybe_smp4m_msg:
352 GET_PROCESSOR4M_ID(o3)
353 set sun4m_interrupts, %l5
354 ld [%l5], %o5
355 sethi %hi(0x40000000), %o2
356 sll %o3, 12, %o3
357 ld [%o5 + %o3], %o1
358 andcc %o1, %o2, %g0
359 be,a smp4m_ticker
360 cmp %l7, 14
361 st %o2, [%o5 + 0x4]
362 WRITE_PAUSE
363 ld [%o5], %g0
364 WRITE_PAUSE
365 or %l0, PSR_PIL, %l4
366 wr %l4, 0x0, %psr
367 WRITE_PAUSE
368 wr %l4, PSR_ET, %psr
369 WRITE_PAUSE
370 call smp_reschedule_irq
371 nop
372
373 RESTORE_ALL
374
375 .align 4
376 .globl linux_trap_ipi15_sun4m
377linux_trap_ipi15_sun4m:
378 SAVE_ALL
379 sethi %hi(0x80000000), %o2
380 GET_PROCESSOR4M_ID(o0)
381 set sun4m_interrupts, %l5
382 ld [%l5], %o5
383 sll %o0, 12, %o0
384 add %o5, %o0, %o5
385 ld [%o5], %o3
386 andcc %o3, %o2, %g0
387 be 1f ! Must be an NMI async memory error
388 st %o2, [%o5 + 4]
389 WRITE_PAUSE
390 ld [%o5], %g0
391 WRITE_PAUSE
392 or %l0, PSR_PIL, %l4
393 wr %l4, 0x0, %psr
394 WRITE_PAUSE
395 wr %l4, PSR_ET, %psr
396 WRITE_PAUSE
397 call smp4m_cross_call_irq
398 nop
399 b ret_trap_lockless_ipi
400 clr %l6
4011:
402 /* NMI async memory error handling. */
403 sethi %hi(0x80000000), %l4
404 sethi %hi(0x4000), %o3
405 sub %o5, %o0, %o5
406 add %o5, %o3, %l5
407 st %l4, [%l5 + 0xc]
408 WRITE_PAUSE
409 ld [%l5], %g0
410 WRITE_PAUSE
411 or %l0, PSR_PIL, %l4
412 wr %l4, 0x0, %psr
413 WRITE_PAUSE
414 wr %l4, PSR_ET, %psr
415 WRITE_PAUSE
416 call sun4m_nmi
417 nop
418 st %l4, [%l5 + 0x8]
419 WRITE_PAUSE
420 ld [%l5], %g0
421 WRITE_PAUSE
422 RESTORE_ALL
423
424 .globl smp4d_ticker
425 /* SMP per-cpu ticker interrupts are handled specially. */
426smp4d_ticker:
427 SAVE_ALL
428 or %l0, PSR_PIL, %g2
429 sethi %hi(CC_ICLR), %o0
430 sethi %hi(1 << 14), %o1
431 or %o0, %lo(CC_ICLR), %o0
432 stha %o1, [%o0] ASI_M_MXCC /* Clear PIL 14 in MXCC's ICLR */
433 wr %g2, 0x0, %psr
434 WRITE_PAUSE
435 wr %g2, PSR_ET, %psr
436 WRITE_PAUSE
437 call smp4d_percpu_timer_interrupt
438 add %sp, STACKFRAME_SZ, %o0
439 wr %l0, PSR_ET, %psr
440 WRITE_PAUSE
441 RESTORE_ALL
442
443 .align 4
444 .globl linux_trap_ipi15_sun4d
445linux_trap_ipi15_sun4d:
446 SAVE_ALL
447 sethi %hi(CC_BASE), %o4
448 sethi %hi(MXCC_ERR_ME|MXCC_ERR_PEW|MXCC_ERR_ASE|MXCC_ERR_PEE), %o2
449 or %o4, (CC_EREG - CC_BASE), %o0
450 ldda [%o0] ASI_M_MXCC, %o0
451 andcc %o0, %o2, %g0
452 bne 1f
453 sethi %hi(BB_STAT2), %o2
454 lduba [%o2] ASI_M_CTL, %o2
455 andcc %o2, BB_STAT2_MASK, %g0
456 bne 2f
457 or %o4, (CC_ICLR - CC_BASE), %o0
458 sethi %hi(1 << 15), %o1
459 stha %o1, [%o0] ASI_M_MXCC /* Clear PIL 15 in MXCC's ICLR */
460 or %l0, PSR_PIL, %l4
461 wr %l4, 0x0, %psr
462 WRITE_PAUSE
463 wr %l4, PSR_ET, %psr
464 WRITE_PAUSE
465 call smp4d_cross_call_irq
466 nop
467 b ret_trap_lockless_ipi
468 clr %l6
469
4701: /* MXCC error */
4712: /* BB error */
472 /* Disable PIL 15 */
473 set CC_IMSK, %l4
474 lduha [%l4] ASI_M_MXCC, %l5
475 sethi %hi(1 << 15), %l7
476 or %l5, %l7, %l5
477 stha %l5, [%l4] ASI_M_MXCC
478 /* FIXME */
4791: b,a 1b
480
481#endif /* CONFIG_SMP */
482
483 /* This routine handles illegal instructions and privileged
484 * instruction attempts from user code.
485 */
486 .align 4
487 .globl bad_instruction
488bad_instruction:
489 sethi %hi(0xc1f80000), %l4
490 ld [%l1], %l5
491 sethi %hi(0x81d80000), %l7
492 and %l5, %l4, %l5
493 cmp %l5, %l7
494 be 1f
495 SAVE_ALL
496
497 wr %l0, PSR_ET, %psr ! re-enable traps
498 WRITE_PAUSE
499
500 add %sp, STACKFRAME_SZ, %o0
501 mov %l1, %o1
502 mov %l2, %o2
503 call do_illegal_instruction
504 mov %l0, %o3
505
506 RESTORE_ALL
507
5081: /* unimplemented flush - just skip */
509 jmpl %l2, %g0
510 rett %l2 + 4
511
512 .align 4
513 .globl priv_instruction
514priv_instruction:
515 SAVE_ALL
516
517 wr %l0, PSR_ET, %psr
518 WRITE_PAUSE
519
520 add %sp, STACKFRAME_SZ, %o0
521 mov %l1, %o1
522 mov %l2, %o2
523 call do_priv_instruction
524 mov %l0, %o3
525
526 RESTORE_ALL
527
528 /* This routine handles unaligned data accesses. */
529 .align 4
530 .globl mna_handler
531mna_handler:
532 andcc %l0, PSR_PS, %g0
533 be mna_fromuser
534 nop
535
536 SAVE_ALL
537
538 wr %l0, PSR_ET, %psr
539 WRITE_PAUSE
540
541 ld [%l1], %o1
542 call kernel_unaligned_trap
543 add %sp, STACKFRAME_SZ, %o0
544
545 RESTORE_ALL
546
547mna_fromuser:
548 SAVE_ALL
549
550 wr %l0, PSR_ET, %psr ! re-enable traps
551 WRITE_PAUSE
552
553 ld [%l1], %o1
554 call user_unaligned_trap
555 add %sp, STACKFRAME_SZ, %o0
556
557 RESTORE_ALL
558
559 /* This routine handles floating point disabled traps. */
560 .align 4
561 .globl fpd_trap_handler
562fpd_trap_handler:
563 SAVE_ALL
564
565 wr %l0, PSR_ET, %psr ! re-enable traps
566 WRITE_PAUSE
567
568 add %sp, STACKFRAME_SZ, %o0
569 mov %l1, %o1
570 mov %l2, %o2
571 call do_fpd_trap
572 mov %l0, %o3
573
574 RESTORE_ALL
575
576 /* This routine handles Floating Point Exceptions. */
577 .align 4
578 .globl fpe_trap_handler
579fpe_trap_handler:
580 set fpsave_magic, %l5
581 cmp %l1, %l5
582 be 1f
583 sethi %hi(fpsave), %l5
584 or %l5, %lo(fpsave), %l5
585 cmp %l1, %l5
586 bne 2f
587 sethi %hi(fpsave_catch2), %l5
588 or %l5, %lo(fpsave_catch2), %l5
589 wr %l0, 0x0, %psr
590 WRITE_PAUSE
591 jmp %l5
592 rett %l5 + 4
5931:
594 sethi %hi(fpsave_catch), %l5
595 or %l5, %lo(fpsave_catch), %l5
596 wr %l0, 0x0, %psr
597 WRITE_PAUSE
598 jmp %l5
599 rett %l5 + 4
600
6012:
602 SAVE_ALL
603
604 wr %l0, PSR_ET, %psr ! re-enable traps
605 WRITE_PAUSE
606
607 add %sp, STACKFRAME_SZ, %o0
608 mov %l1, %o1
609 mov %l2, %o2
610 call do_fpe_trap
611 mov %l0, %o3
612
613 RESTORE_ALL
614
615 /* This routine handles Tag Overflow Exceptions. */
616 .align 4
617 .globl do_tag_overflow
618do_tag_overflow:
619 SAVE_ALL
620
621 wr %l0, PSR_ET, %psr ! re-enable traps
622 WRITE_PAUSE
623
624 add %sp, STACKFRAME_SZ, %o0
625 mov %l1, %o1
626 mov %l2, %o2
627 call handle_tag_overflow
628 mov %l0, %o3
629
630 RESTORE_ALL
631
632 /* This routine handles Watchpoint Exceptions. */
633 .align 4
634 .globl do_watchpoint
635do_watchpoint:
636 SAVE_ALL
637
638 wr %l0, PSR_ET, %psr ! re-enable traps
639 WRITE_PAUSE
640
641 add %sp, STACKFRAME_SZ, %o0
642 mov %l1, %o1
643 mov %l2, %o2
644 call handle_watchpoint
645 mov %l0, %o3
646
647 RESTORE_ALL
648
649 /* This routine handles Register Access Exceptions. */
650 .align 4
651 .globl do_reg_access
652do_reg_access:
653 SAVE_ALL
654
655 wr %l0, PSR_ET, %psr ! re-enable traps
656 WRITE_PAUSE
657
658 add %sp, STACKFRAME_SZ, %o0
659 mov %l1, %o1
660 mov %l2, %o2
661 call handle_reg_access
662 mov %l0, %o3
663
664 RESTORE_ALL
665
666 /* This routine handles Co-Processor Disabled Exceptions. */
667 .align 4
668 .globl do_cp_disabled
669do_cp_disabled:
670 SAVE_ALL
671
672 wr %l0, PSR_ET, %psr ! re-enable traps
673 WRITE_PAUSE
674
675 add %sp, STACKFRAME_SZ, %o0
676 mov %l1, %o1
677 mov %l2, %o2
678 call handle_cp_disabled
679 mov %l0, %o3
680
681 RESTORE_ALL
682
683 /* This routine handles Co-Processor Exceptions. */
684 .align 4
685 .globl do_cp_exception
686do_cp_exception:
687 SAVE_ALL
688
689 wr %l0, PSR_ET, %psr ! re-enable traps
690 WRITE_PAUSE
691
692 add %sp, STACKFRAME_SZ, %o0
693 mov %l1, %o1
694 mov %l2, %o2
695 call handle_cp_exception
696 mov %l0, %o3
697
698 RESTORE_ALL
699
700 /* This routine handles Hardware Divide By Zero Exceptions. */
701 .align 4
702 .globl do_hw_divzero
703do_hw_divzero:
704 SAVE_ALL
705
706 wr %l0, PSR_ET, %psr ! re-enable traps
707 WRITE_PAUSE
708
709 add %sp, STACKFRAME_SZ, %o0
710 mov %l1, %o1
711 mov %l2, %o2
712 call handle_hw_divzero
713 mov %l0, %o3
714
715 RESTORE_ALL
716
717 .align 4
718 .globl do_flush_windows
719do_flush_windows:
720 SAVE_ALL
721
722 wr %l0, PSR_ET, %psr
723 WRITE_PAUSE
724
725 andcc %l0, PSR_PS, %g0
726 bne dfw_kernel
727 nop
728
729 call flush_user_windows
730 nop
731
732 /* Advance over the trap instruction. */
733 ld [%sp + STACKFRAME_SZ + PT_NPC], %l1
734 add %l1, 0x4, %l2
735 st %l1, [%sp + STACKFRAME_SZ + PT_PC]
736 st %l2, [%sp + STACKFRAME_SZ + PT_NPC]
737
738 RESTORE_ALL
739
740 .globl flush_patch_one
741
742 /* We get these for debugging routines using __builtin_return_address() */
743dfw_kernel:
744flush_patch_one:
745 FLUSH_ALL_KERNEL_WINDOWS
746
747 /* Advance over the trap instruction. */
748 ld [%sp + STACKFRAME_SZ + PT_NPC], %l1
749 add %l1, 0x4, %l2
750 st %l1, [%sp + STACKFRAME_SZ + PT_PC]
751 st %l2, [%sp + STACKFRAME_SZ + PT_NPC]
752
753 RESTORE_ALL
754
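do_flush_windows services the flush-windows software trap: code that wants every outstanding register window spilled to its stack (stack walkers, setjmp-style code, the __builtin_return_address() debugging case noted in the kernel path below) issues the trap, and the handler flushes and then steps the PC past the trap instruction. A sketch of the userspace side, under the assumption that the trap table in head.S wires SPARC ABI software trap 3 (ta 3, the conventional ST_FLUSHWIN number) to this handler:

/* Force all register windows out to the stack before inspecting it.
 * "ta 3" is the SPARC ABI flush-windows trap number; that it lands in
 * do_flush_windows depends on the trap table in head.S (not shown here).
 */
static inline void flush_register_windows(void)
{
	__asm__ __volatile__("ta 3" : : : "memory");
}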
755 /* The getcc software trap. The user wants the condition codes from
756 * the %psr in register %g1.
757 */
758
759 .align 4
760 .globl getcc_trap_handler
761getcc_trap_handler:
762 srl %l0, 20, %g1 ! give user
763 and %g1, 0xf, %g1 ! only ICC bits in %psr
764 jmp %l2 ! advance over trap instruction
765 rett %l2 + 0x4 ! like this...
766
767 /* The setcc software trap. The user has condition codes in %g1
768 * that it would like placed in the %psr. Be careful not to flip
769 * any unintentional bits!
770 */
771
772 .align 4
773 .globl setcc_trap_handler
774setcc_trap_handler:
775 sll %g1, 0x14, %l4
776 set PSR_ICC, %l5
777 andn %l0, %l5, %l0 ! clear ICC bits in %psr
778 and %l4, %l5, %l4 ! clear non-ICC bits in user value
779 or %l4, %l0, %l4 ! or them in... mix mix mix
780
781 wr %l4, 0x0, %psr ! set new %psr
782 WRITE_PAUSE ! TI scumbags...
783
784 jmp %l2 ! advance over trap instruction
785 rett %l2 + 0x4 ! like this...
786
787 .align 4
788 .globl linux_trap_nmi_sun4c
789linux_trap_nmi_sun4c:
790 SAVE_ALL
791
792 /* Ugh, we need to clear the IRQ line. This is now
793 * a very sun4c specific trap handler...
794 */
795 sethi %hi(interrupt_enable), %l5
796 ld [%l5 + %lo(interrupt_enable)], %l5
797 ldub [%l5], %l6
798 andn %l6, INTS_ENAB, %l6
799 stb %l6, [%l5]
800
801 /* Now it is safe to re-enable traps without recursion. */
802 or %l0, PSR_PIL, %l0
803 wr %l0, PSR_ET, %psr
804 WRITE_PAUSE
805
806 /* Now call the c-code with the pt_regs frame ptr and the
807 * memory error registers as arguments. The ordering chosen
808 * here is due to unlatching semantics.
809 */
810 sethi %hi(AC_SYNC_ERR), %o0
811 add %o0, 0x4, %o0
812 lda [%o0] ASI_CONTROL, %o2 ! sync vaddr
813 sub %o0, 0x4, %o0
814 lda [%o0] ASI_CONTROL, %o1 ! sync error
815 add %o0, 0xc, %o0
816 lda [%o0] ASI_CONTROL, %o4 ! async vaddr
817 sub %o0, 0x4, %o0
818 lda [%o0] ASI_CONTROL, %o3 ! async error
819 call sparc_lvl15_nmi
820 add %sp, STACKFRAME_SZ, %o0
821
822 RESTORE_ALL
823
824 .align 4
825 .globl invalid_segment_patch1_ff
826 .globl invalid_segment_patch2_ff
827invalid_segment_patch1_ff: cmp %l4, 0xff
828invalid_segment_patch2_ff: mov 0xff, %l3
829
830 .align 4
831 .globl invalid_segment_patch1_1ff
832 .globl invalid_segment_patch2_1ff
833invalid_segment_patch1_1ff: cmp %l4, 0x1ff
834invalid_segment_patch2_1ff: mov 0x1ff, %l3
835
836 .align 4
837 .globl num_context_patch1_16, num_context_patch2_16
838num_context_patch1_16: mov 0x10, %l7
839num_context_patch2_16: mov 0x10, %l7
840
841 .align 4
842 .globl vac_linesize_patch_32
843vac_linesize_patch_32: subcc %l7, 32, %l7
844
845 .align 4
846 .globl vac_hwflush_patch1_on, vac_hwflush_patch2_on
847
848/*
849 * Ugly, but we can't use hardware flushing on the sun4 and we'd require
850 * two instructions (Anton)
851 */
852#ifdef CONFIG_SUN4
853vac_hwflush_patch1_on: nop
854#else
855vac_hwflush_patch1_on: addcc %l7, -PAGE_SIZE, %l7
856#endif
857
858vac_hwflush_patch2_on: sta %g0, [%l3 + %l7] ASI_HWFLUSHSEG
859
860 .globl invalid_segment_patch1, invalid_segment_patch2
861 .globl num_context_patch1
862 .globl vac_linesize_patch, vac_hwflush_patch1
863 .globl vac_hwflush_patch2
864
865 .align 4
866 .globl sun4c_fault
867
868! %l0 = %psr
869! %l1 = %pc
870! %l2 = %npc
871! %l3 = %wim
872! %l7 = 1 for textfault
873! We want error in %l5, vaddr in %l6
874sun4c_fault:
875#ifdef CONFIG_SUN4
876 sethi %hi(sun4c_memerr_reg), %l4
877 ld [%l4+%lo(sun4c_memerr_reg)], %l4 ! memerr ctrl reg addr
878 ld [%l4], %l6 ! memerr ctrl reg
879 ld [%l4 + 4], %l5 ! memerr vaddr reg
880 andcc %l6, 0x80, %g0 ! check for error type
881 st %g0, [%l4 + 4] ! clear the error
882 be 0f ! normal error
883 sethi %hi(AC_BUS_ERROR), %l4 ! bus err reg addr
884
885 call prom_halt ! something weird happened
886 ! what exactly did happen?
887 ! what should we do here?
888
8890: or %l4, %lo(AC_BUS_ERROR), %l4 ! bus err reg addr
890 lduba [%l4] ASI_CONTROL, %l6 ! bus err reg
891
892 cmp %l7, 1 ! text fault?
893 be 1f ! yes
894 nop
895
896 ld [%l1], %l4 ! load instruction that caused fault
897 srl %l4, 21, %l4
898 andcc %l4, 1, %g0 ! store instruction?
899
900 be 1f ! no
901 sethi %hi(SUN4C_SYNC_BADWRITE), %l4 ! yep
902 ! %lo(SUN4C_SYNC_BADWRITE) = 0
903 or %l4, %l6, %l6 ! set write bit to emulate sun4c
9041:
905#else
906 sethi %hi(AC_SYNC_ERR), %l4
907 add %l4, 0x4, %l6 ! AC_SYNC_VA in %l6
908 lda [%l6] ASI_CONTROL, %l5 ! Address
909 lda [%l4] ASI_CONTROL, %l6 ! Error, retained for a bit
910#endif
911
912 andn %l5, 0xfff, %l5 ! Encode all info into l7
913 srl %l6, 14, %l4
914
915 and %l4, 2, %l4
916 or %l5, %l4, %l4
917
918 or %l4, %l7, %l7 ! l7 = [addr,write,txtfault]
919
920 andcc %l0, PSR_PS, %g0
921 be sun4c_fault_fromuser
922 andcc %l7, 1, %g0 ! Text fault?
923
924 be 1f
925 sethi %hi(KERNBASE), %l4
926
927 mov %l1, %l5 ! PC
928
9291:
930 cmp %l5, %l4
931 blu sun4c_fault_fromuser
932 sethi %hi(~((1 << SUN4C_REAL_PGDIR_SHIFT) - 1)), %l4
933
934 /* If the kernel references a bum kernel pointer, or a pte which
935	 * points to a non-existent page in RAM, we will run this code
936 * _forever_ and lock up the machine!!!!! So we must check for
937 * this condition, the AC_SYNC_ERR bits are what we must examine.
938 * Also a parity error would make this happen as well. So we just
939 * check that we are in fact servicing a tlb miss and not some
940 * other type of fault for the kernel.
941 */
942 andcc %l6, 0x80, %g0
943 be sun4c_fault_fromuser
944 and %l5, %l4, %l5
945
946 /* Test for NULL pte_t * in vmalloc area. */
947 sethi %hi(VMALLOC_START), %l4
948 cmp %l5, %l4
949 blu,a invalid_segment_patch1
950 lduXa [%l5] ASI_SEGMAP, %l4
951
952 sethi %hi(swapper_pg_dir), %l4
953 srl %l5, SUN4C_PGDIR_SHIFT, %l6
954 or %l4, %lo(swapper_pg_dir), %l4
955 sll %l6, 2, %l6
956 ld [%l4 + %l6], %l4
957#ifdef CONFIG_SUN4
958 sethi %hi(PAGE_MASK), %l6
959 andcc %l4, %l6, %g0
960#else
961 andcc %l4, PAGE_MASK, %g0
962#endif
963 be sun4c_fault_fromuser
964 lduXa [%l5] ASI_SEGMAP, %l4
965
966invalid_segment_patch1:
967 cmp %l4, 0x7f
968 bne 1f
969 sethi %hi(sun4c_kfree_ring), %l4
970 or %l4, %lo(sun4c_kfree_ring), %l4
971 ld [%l4 + 0x18], %l3
972 deccc %l3 ! do we have a free entry?
973 bcs,a 2f ! no, unmap one.
974 sethi %hi(sun4c_kernel_ring), %l4
975
976 st %l3, [%l4 + 0x18] ! sun4c_kfree_ring.num_entries--
977
978 ld [%l4 + 0x00], %l6 ! entry = sun4c_kfree_ring.ringhd.next
979 st %l5, [%l6 + 0x08] ! entry->vaddr = address
980
981 ld [%l6 + 0x00], %l3 ! next = entry->next
982 ld [%l6 + 0x04], %l7 ! entry->prev
983
984 st %l7, [%l3 + 0x04] ! next->prev = entry->prev
985 st %l3, [%l7 + 0x00] ! entry->prev->next = next
986
987 sethi %hi(sun4c_kernel_ring), %l4
988 or %l4, %lo(sun4c_kernel_ring), %l4
989 ! head = &sun4c_kernel_ring.ringhd
990
991 ld [%l4 + 0x00], %l7 ! head->next
992
993 st %l4, [%l6 + 0x04] ! entry->prev = head
994 st %l7, [%l6 + 0x00] ! entry->next = head->next
995 st %l6, [%l7 + 0x04] ! head->next->prev = entry
996
997 st %l6, [%l4 + 0x00] ! head->next = entry
998
999 ld [%l4 + 0x18], %l3
1000 inc %l3 ! sun4c_kernel_ring.num_entries++
1001 st %l3, [%l4 + 0x18]
1002 b 4f
1003 ld [%l6 + 0x08], %l5
1004
10052:
1006 or %l4, %lo(sun4c_kernel_ring), %l4
1007 ! head = &sun4c_kernel_ring.ringhd
1008
1009 ld [%l4 + 0x04], %l6 ! entry = head->prev
1010
1011 ld [%l6 + 0x08], %l3 ! tmp = entry->vaddr
1012
1013 ! Flush segment from the cache.
1014#ifdef CONFIG_SUN4
1015 sethi %hi((128 * 1024)), %l7
1016#else
1017 sethi %hi((64 * 1024)), %l7
1018#endif
10199:
1020vac_hwflush_patch1:
1021vac_linesize_patch:
1022 subcc %l7, 16, %l7
1023 bne 9b
1024vac_hwflush_patch2:
1025 sta %g0, [%l3 + %l7] ASI_FLUSHSEG
1026
1027 st %l5, [%l6 + 0x08] ! entry->vaddr = address
1028
1029 ld [%l6 + 0x00], %l5 ! next = entry->next
1030 ld [%l6 + 0x04], %l7 ! entry->prev
1031
1032 st %l7, [%l5 + 0x04] ! next->prev = entry->prev
1033 st %l5, [%l7 + 0x00] ! entry->prev->next = next
1034 st %l4, [%l6 + 0x04] ! entry->prev = head
1035
1036 ld [%l4 + 0x00], %l7 ! head->next
1037
1038 st %l7, [%l6 + 0x00] ! entry->next = head->next
1039 st %l6, [%l7 + 0x04] ! head->next->prev = entry
1040 st %l6, [%l4 + 0x00] ! head->next = entry
1041
1042 mov %l3, %l5 ! address = tmp
1043
10444:
1045num_context_patch1:
1046 mov 0x08, %l7
1047
1048 ld [%l6 + 0x08], %l4
1049 ldub [%l6 + 0x0c], %l3
1050 or %l4, %l3, %l4 ! encode new vaddr/pseg into l4
1051
1052 sethi %hi(AC_CONTEXT), %l3
1053 lduba [%l3] ASI_CONTROL, %l6
1054
1055 /* Invalidate old mapping, instantiate new mapping,
1056 * for each context. Registers l6/l7 are live across
1057 * this loop.
1058 */
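	/* A rough C rendering of the loop below (sketch only; set_context(),
	 * segmap[] and the *_vaddr/*_pseg names are descriptive placeholders,
	 * not kernel symbols):
	 *
	 *	for (ctx = num_contexts - 1; ctx >= 0; ctx--) {
	 *		set_context(ctx);                  AC_CONTEXT via ASI_CONTROL
	 *		segmap[old_vaddr] = invalid_pseg;  0x7f, or 0x1ff when patched
	 *		segmap[new_vaddr] = new_pseg;
	 *	}
	 *	set_context(saved_ctx);
	 */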
10593: deccc %l7
1060 sethi %hi(AC_CONTEXT), %l3
1061 stba %l7, [%l3] ASI_CONTROL
1062invalid_segment_patch2:
1063 mov 0x7f, %l3
1064 stXa %l3, [%l5] ASI_SEGMAP
1065 andn %l4, 0x1ff, %l3
1066 bne 3b
1067 stXa %l4, [%l3] ASI_SEGMAP
1068
1069 sethi %hi(AC_CONTEXT), %l3
1070 stba %l6, [%l3] ASI_CONTROL
1071
1072 andn %l4, 0x1ff, %l5
1073
10741:
1075 sethi %hi(VMALLOC_START), %l4
1076 cmp %l5, %l4
1077
1078 bgeu 1f
1079 mov 1 << (SUN4C_REAL_PGDIR_SHIFT - PAGE_SHIFT), %l7
1080
1081 sethi %hi(KERNBASE), %l6
1082
1083 sub %l5, %l6, %l4
1084 srl %l4, PAGE_SHIFT, %l4
1085 sethi %hi((SUN4C_PAGE_KERNEL & 0xf4000000)), %l3
1086 or %l3, %l4, %l3
1087
1088 sethi %hi(PAGE_SIZE), %l4
1089
10902:
1091 sta %l3, [%l5] ASI_PTE
1092 deccc %l7
1093 inc %l3
1094 bne 2b
1095 add %l5, %l4, %l5
1096
1097 b 7f
1098 sethi %hi(sun4c_kernel_faults), %l4
1099
11001:
1101 srl %l5, SUN4C_PGDIR_SHIFT, %l3
1102 sethi %hi(swapper_pg_dir), %l4
1103 or %l4, %lo(swapper_pg_dir), %l4
1104 sll %l3, 2, %l3
1105 ld [%l4 + %l3], %l4
1106#ifndef CONFIG_SUN4
1107 and %l4, PAGE_MASK, %l4
1108#else
1109 sethi %hi(PAGE_MASK), %l6
1110 and %l4, %l6, %l4
1111#endif
1112
1113 srl %l5, (PAGE_SHIFT - 2), %l6
1114 and %l6, ((SUN4C_PTRS_PER_PTE - 1) << 2), %l6
1115 add %l6, %l4, %l6
1116
1117 sethi %hi(PAGE_SIZE), %l4
1118
11192:
1120 ld [%l6], %l3
1121 deccc %l7
1122 sta %l3, [%l5] ASI_PTE
1123 add %l6, 0x4, %l6
1124 bne 2b
1125 add %l5, %l4, %l5
1126
1127 sethi %hi(sun4c_kernel_faults), %l4
11287:
1129 ld [%l4 + %lo(sun4c_kernel_faults)], %l3
1130 inc %l3
1131 st %l3, [%l4 + %lo(sun4c_kernel_faults)]
1132
1133 /* Restore condition codes */
1134 wr %l0, 0x0, %psr
1135 WRITE_PAUSE
1136 jmp %l1
1137 rett %l2
1138
1139sun4c_fault_fromuser:
1140 SAVE_ALL
1141 nop
1142
1143 mov %l7, %o1 ! Decode the info from %l7
1144 mov %l7, %o2
1145 and %o1, 1, %o1 ! arg2 = text_faultp
1146 mov %l7, %o3
1147 and %o2, 2, %o2 ! arg3 = writep
1148 andn %o3, 0xfff, %o3 ! arg4 = faulting address
1149
1150 wr %l0, PSR_ET, %psr
1151 WRITE_PAUSE
1152
1153 call do_sun4c_fault
1154 add %sp, STACKFRAME_SZ, %o0 ! arg1 = pt_regs ptr
1155
1156 RESTORE_ALL
1157
1158 .align 4
1159 .globl srmmu_fault
1160srmmu_fault:
1161 mov 0x400, %l5
1162 mov 0x300, %l4
1163
1164 lda [%l5] ASI_M_MMUREGS, %l6 ! read sfar first
1165 lda [%l4] ASI_M_MMUREGS, %l5 ! read sfsr last
1166
1167 andn %l6, 0xfff, %l6
1168 srl %l5, 6, %l5 ! and encode all info into l7
1169
1170 and %l5, 2, %l5
1171 or %l5, %l6, %l6
1172
1173 or %l6, %l7, %l7 ! l7 = [addr,write,txtfault]
1174
1175 SAVE_ALL
1176
1177 mov %l7, %o1
1178 mov %l7, %o2
1179 and %o1, 1, %o1 ! arg2 = text_faultp
1180 mov %l7, %o3
1181 and %o2, 2, %o2 ! arg3 = writep
1182 andn %o3, 0xfff, %o3 ! arg4 = faulting address
1183
1184 wr %l0, PSR_ET, %psr
1185 WRITE_PAUSE
1186
1187 call do_sparc_fault
1188 add %sp, STACKFRAME_SZ, %o0 ! arg1 = pt_regs ptr
1189
1190 RESTORE_ALL
1191
1192#ifdef CONFIG_SUNOS_EMUL
1193 /* SunOS uses syscall zero as the 'indirect syscall' it looks
1194 * like indir_syscall(scall_num, arg0, arg1, arg2...); etc.
1195 * This is complete brain damage.
1196 */
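	/* Roughly, in C (sketch only; the argument count shown is illustrative):
	 *
	 *	long sunos_indir(int num, long a0, long a1, long a2, long a3, long a4)
	 *	{
	 *		if ((unsigned) num >= NR_SYSCALLS)
	 *			return sunos_nosys();
	 *		return sunos_sys_table[num](a0, a1, a2, a3, a4);
	 *	}
	 */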
1197 .globl sunos_indir
1198sunos_indir:
1199 mov %o7, %l4
1200 cmp %o0, NR_SYSCALLS
1201 blu,a 1f
1202 sll %o0, 0x2, %o0
1203
1204 sethi %hi(sunos_nosys), %l6
1205 b 2f
1206 or %l6, %lo(sunos_nosys), %l6
1207
12081:
1209 set sunos_sys_table, %l7
1210 ld [%l7 + %o0], %l6
1211
12122:
1213 mov %o1, %o0
1214 mov %o2, %o1
1215 mov %o3, %o2
1216 mov %o4, %o3
1217 mov %o5, %o4
1218 call %l6
1219 mov %l4, %o7
1220#endif
1221
1222 .align 4
1223 .globl sys_nis_syscall
1224sys_nis_syscall:
1225 mov %o7, %l5
1226 add %sp, STACKFRAME_SZ, %o0 ! pt_regs *regs arg
1227 call c_sys_nis_syscall
1228 mov %l5, %o7
1229
1230 .align 4
1231 .globl sys_ptrace
1232sys_ptrace:
1233 call do_ptrace
1234 add %sp, STACKFRAME_SZ, %o0
1235
1236 ld [%curptr + TI_FLAGS], %l5
1237 andcc %l5, _TIF_SYSCALL_TRACE, %g0
1238 be 1f
1239 nop
1240
1241 call syscall_trace
1242 nop
1243
12441:
1245 RESTORE_ALL
1246
1247 .align 4
1248 .globl sys_execve
1249sys_execve:
1250 mov %o7, %l5
1251 add %sp, STACKFRAME_SZ, %o0 ! pt_regs *regs arg
1252 call sparc_execve
1253 mov %l5, %o7
1254
1255 .align 4
1256 .globl sys_pipe
1257sys_pipe:
1258 mov %o7, %l5
1259 add %sp, STACKFRAME_SZ, %o0 ! pt_regs *regs arg
1260 call sparc_pipe
1261 mov %l5, %o7
1262
1263 .align 4
1264 .globl sys_sigaltstack
1265sys_sigaltstack:
1266 mov %o7, %l5
1267 mov %fp, %o2
1268 call do_sigaltstack
1269 mov %l5, %o7
1270
1271 .align 4
1272 .globl sys_sigstack
1273sys_sigstack:
1274 mov %o7, %l5
1275 mov %fp, %o2
1276 call do_sys_sigstack
1277 mov %l5, %o7
1278
1279 .align 4
1280 .globl sys_sigpause
1281sys_sigpause:
1282 /* Note: %o0 already has correct value... */
1283 call do_sigpause
1284 add %sp, STACKFRAME_SZ, %o1
1285
1286 ld [%curptr + TI_FLAGS], %l5
1287 andcc %l5, _TIF_SYSCALL_TRACE, %g0
1288 be 1f
1289 nop
1290
1291 call syscall_trace
1292 nop
1293
12941:
1295 /* We are returning to a signal handler. */
1296 RESTORE_ALL
1297
1298 .align 4
1299 .globl sys_sigsuspend
1300sys_sigsuspend:
1301 call do_sigsuspend
1302 add %sp, STACKFRAME_SZ, %o0
1303
1304 ld [%curptr + TI_FLAGS], %l5
1305 andcc %l5, _TIF_SYSCALL_TRACE, %g0
1306 be 1f
1307 nop
1308
1309 call syscall_trace
1310 nop
1311
13121:
1313 /* We are returning to a signal handler. */
1314 RESTORE_ALL
1315
1316 .align 4
1317 .globl sys_rt_sigsuspend
1318sys_rt_sigsuspend:
1319 /* Note: %o0, %o1 already have correct value... */
1320 call do_rt_sigsuspend
1321 add %sp, STACKFRAME_SZ, %o2
1322
1323 ld [%curptr + TI_FLAGS], %l5
1324 andcc %l5, _TIF_SYSCALL_TRACE, %g0
1325 be 1f
1326 nop
1327
1328 call syscall_trace
1329 nop
1330
13311:
1332 /* We are returning to a signal handler. */
1333 RESTORE_ALL
1334
1335 .align 4
1336 .globl sys_sigreturn
1337sys_sigreturn:
1338 call do_sigreturn
1339 add %sp, STACKFRAME_SZ, %o0
1340
1341 ld [%curptr + TI_FLAGS], %l5
1342 andcc %l5, _TIF_SYSCALL_TRACE, %g0
1343 be 1f
1344 nop
1345
1346 call syscall_trace
1347 nop
1348
13491:
1350 /* We don't want to muck with user registers like a
1351 * normal syscall, just return.
1352 */
1353 RESTORE_ALL
1354
1355 .align 4
1356 .globl sys_rt_sigreturn
1357sys_rt_sigreturn:
1358 call do_rt_sigreturn
1359 add %sp, STACKFRAME_SZ, %o0
1360
1361 ld [%curptr + TI_FLAGS], %l5
1362 andcc %l5, _TIF_SYSCALL_TRACE, %g0
1363 be 1f
1364 nop
1365
1366 call syscall_trace
1367 nop
1368
13691:
1370 /* We are returning to a signal handler. */
1371 RESTORE_ALL
1372
1373 /* Now that we have a real sys_clone, sys_fork() is
1374 * implemented in terms of it. Our _real_ implementation
1375 * of SunOS vfork() will use sys_vfork().
1376 *
1377 * XXX These three should be consolidated into mostly shared
1378 * XXX code just like on sparc64... -DaveM
1379 */
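	/* Viewed from C, the three entry points differ only in the clone flags
	 * they hand to sparc_do_fork() (sketch, not the actual kernel source):
	 *
	 *	sys_fork():  sparc_do_fork(SIGCHLD, parent_fp, regs, 0);
	 *	sys_clone(): sparc_do_fork(flags, usp ? usp & ~7UL : parent_fp, regs, 0);
	 *	sys_vfork(): sparc_do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, parent_fp, regs, 0);
	 *
	 * The 0x4000 and 0x0100 constants in sys_vfork below are the numeric
	 * values of CLONE_VFORK and CLONE_VM.
	 */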
1380 .align 4
1381 .globl sys_fork, flush_patch_two
1382sys_fork:
1383 mov %o7, %l5
1384flush_patch_two:
1385 FLUSH_ALL_KERNEL_WINDOWS;
1386 ld [%curptr + TI_TASK], %o4
1387 rd %psr, %g4
1388 WRITE_PAUSE
1389 mov SIGCHLD, %o0 ! arg0: clone flags
1390 rd %wim, %g5
1391 WRITE_PAUSE
1392 mov %fp, %o1 ! arg1: usp
1393 std %g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr]
1394 add %sp, STACKFRAME_SZ, %o2 ! arg2: pt_regs ptr
1395 mov 0, %o3
1396 call sparc_do_fork
1397 mov %l5, %o7
1398
1399 /* Whee, kernel threads! */
1400 .globl sys_clone, flush_patch_three
1401sys_clone:
1402 mov %o7, %l5
1403flush_patch_three:
1404 FLUSH_ALL_KERNEL_WINDOWS;
1405 ld [%curptr + TI_TASK], %o4
1406 rd %psr, %g4
1407 WRITE_PAUSE
1408
1409 /* arg0,1: flags,usp -- loaded already */
1410 cmp %o1, 0x0 ! Is new_usp NULL?
1411 rd %wim, %g5
1412 WRITE_PAUSE
1413 be,a 1f
1414 mov %fp, %o1 ! yes, use caller's usp
1415 andn %o1, 7, %o1 ! no, align to 8 bytes
14161:
1417 std %g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr]
1418 add %sp, STACKFRAME_SZ, %o2 ! arg2: pt_regs ptr
1419 mov 0, %o3
1420 call sparc_do_fork
1421 mov %l5, %o7
1422
1423 /* Whee, real vfork! */
1424 .globl sys_vfork, flush_patch_four
1425sys_vfork:
1426flush_patch_four:
1427 FLUSH_ALL_KERNEL_WINDOWS;
1428 ld [%curptr + TI_TASK], %o4
1429 rd %psr, %g4
1430 WRITE_PAUSE
1431 rd %wim, %g5
1432 WRITE_PAUSE
1433 std %g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr]
1434 sethi %hi(0x4000 | 0x0100 | SIGCHLD), %o0
1435 mov %fp, %o1
1436 or %o0, %lo(0x4000 | 0x0100 | SIGCHLD), %o0
1437 sethi %hi(sparc_do_fork), %l1
1438 mov 0, %o3
1439 jmpl %l1 + %lo(sparc_do_fork), %g0
1440 add %sp, STACKFRAME_SZ, %o2
1441
1442 .align 4
1443linux_sparc_ni_syscall:
1444 sethi %hi(sys_ni_syscall), %l7
1445 b syscall_is_too_hard
1446 or %l7, %lo(sys_ni_syscall), %l7
1447
1448linux_fast_syscall:
1449 andn %l7, 3, %l7
1450 mov %i0, %o0
1451 mov %i1, %o1
1452 mov %i2, %o2
1453 jmpl %l7 + %g0, %g0
1454 mov %i3, %o3
1455
1456linux_syscall_trace:
1457 call syscall_trace
1458 nop
1459 mov %i0, %o0
1460 mov %i1, %o1
1461 mov %i2, %o2
1462 mov %i3, %o3
1463 b 2f
1464 mov %i4, %o4
1465
1466 .globl ret_from_fork
1467ret_from_fork:
1468 call schedule_tail
1469 mov %g3, %o0
1470 b ret_sys_call
1471 ld [%sp + STACKFRAME_SZ + PT_I0], %o0
1472
1473 /* Linux native and SunOS system calls enter here... */
1474 .align 4
1475 .globl linux_sparc_syscall
1476linux_sparc_syscall:
1477 /* Direct access to user regs, much faster. */
1478 cmp %g1, NR_SYSCALLS
1479 bgeu linux_sparc_ni_syscall
1480 sll %g1, 2, %l4
1481 ld [%l7 + %l4], %l7
1482 andcc %l7, 1, %g0
1483 bne linux_fast_syscall
1484 /* Just do first insn from SAVE_ALL in the delay slot */
1485
1486 .globl syscall_is_too_hard
1487syscall_is_too_hard:
1488 SAVE_ALL_HEAD
1489 rd %wim, %l3
1490
1491 wr %l0, PSR_ET, %psr
1492 mov %i0, %o0
1493 mov %i1, %o1
1494 mov %i2, %o2
1495
1496 ld [%curptr + TI_FLAGS], %l5
1497 mov %i3, %o3
1498 andcc %l5, _TIF_SYSCALL_TRACE, %g0
1499 mov %i4, %o4
1500 bne linux_syscall_trace
1501 mov %i0, %l5
15022:
1503 call %l7
1504 mov %i5, %o5
1505
1506 st %o0, [%sp + STACKFRAME_SZ + PT_I0]
1507
1508 .globl ret_sys_call
1509ret_sys_call:
1510 ld [%curptr + TI_FLAGS], %l6
1511 cmp %o0, -ERESTART_RESTARTBLOCK
1512 ld [%sp + STACKFRAME_SZ + PT_PSR], %g3
1513 set PSR_C, %g2
1514 bgeu 1f
1515 andcc %l6, _TIF_SYSCALL_TRACE, %g0
1516
1517 /* System call success, clear Carry condition code. */
1518 andn %g3, %g2, %g3
1519 clr %l6
1520 st %g3, [%sp + STACKFRAME_SZ + PT_PSR]
1521 bne linux_syscall_trace2
1522 ld [%sp + STACKFRAME_SZ + PT_NPC], %l1 /* pc = npc */
1523 add %l1, 0x4, %l2 /* npc = npc+4 */
1524 st %l1, [%sp + STACKFRAME_SZ + PT_PC]
1525 b ret_trap_entry
1526 st %l2, [%sp + STACKFRAME_SZ + PT_NPC]
15271:
1528 /* System call failure, set Carry condition code.
1529 * Also, get abs(errno) to return to the process.
1530 */
1531 sub %g0, %o0, %o0
1532 or %g3, %g2, %g3
1533 st %o0, [%sp + STACKFRAME_SZ + PT_I0]
1534 mov 1, %l6
1535 st %g3, [%sp + STACKFRAME_SZ + PT_PSR]
1536 bne linux_syscall_trace2
1537 ld [%sp + STACKFRAME_SZ + PT_NPC], %l1 /* pc = npc */
1538 add %l1, 0x4, %l2 /* npc = npc+4 */
1539 st %l1, [%sp + STACKFRAME_SZ + PT_PC]
1540 b ret_trap_entry
1541 st %l2, [%sp + STACKFRAME_SZ + PT_NPC]
1542
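	/* The success/failure paths above implement the usual SPARC syscall
	 * return convention; roughly, in C (sketch only):
	 *
	 *	if ((unsigned long) ret >= (unsigned long) -ERESTART_RESTARTBLOCK) {
	 *		regs->psr |= PSR_C;             error: carry set
	 *		regs->u_regs[UREG_I0] = -ret;   positive errno back to user
	 *	} else {
	 *		regs->psr &= ~PSR_C;            success: carry clear
	 *	}
	 *	regs->pc  = regs->npc;              step over the trap instruction
	 *	regs->npc = regs->pc + 4;
	 *
	 * with a detour through syscall_trace() when _TIF_SYSCALL_TRACE is set.
	 */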
1543linux_syscall_trace2:
1544 call syscall_trace
1545 add %l1, 0x4, %l2 /* npc = npc+4 */
1546 st %l1, [%sp + STACKFRAME_SZ + PT_PC]
1547 b ret_trap_entry
1548 st %l2, [%sp + STACKFRAME_SZ + PT_NPC]
1549
1550
1551 /*
1552 * Solaris system calls and indirect system calls enter here.
1553 *
1554 * I have named the solaris indirect syscalls like that because
1555 * it seems like Solaris has some fast path syscalls that can
1556 * be handled as indirect system calls. - mig
1557 */
1558
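	/* The dispatch below, in C terms (sketch only): a few Solaris syscall
	 * numbers are routed straight onto the Linux syscall table, everything
	 * else goes through do_solaris_syscall():
	 *
	 *	if (num == 59 || num == 2 || num == 42)
	 *		dispatch via sys_call_table[num];
	 *	else if (num == 119)
	 *		dispatch via sys_call_table[2];
	 *	else
	 *		do_solaris_syscall(regs);
	 */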
1559linux_syscall_for_solaris:
1560 sethi %hi(sys_call_table), %l7
1561 b linux_sparc_syscall
1562 or %l7, %lo(sys_call_table), %l7
1563
1564 .align 4
1565 .globl solaris_syscall
1566solaris_syscall:
1567 cmp %g1,59
1568 be linux_syscall_for_solaris
1569 cmp %g1,2
1570 be linux_syscall_for_solaris
1571 cmp %g1,42
1572 be linux_syscall_for_solaris
1573 cmp %g1,119
1574 be,a linux_syscall_for_solaris
1575 mov 2, %g1
15761:
1577 SAVE_ALL_HEAD
1578 rd %wim, %l3
1579
1580 wr %l0, PSR_ET, %psr
1581 nop
1582 nop
1583 mov %i0, %l5
1584
1585 call do_solaris_syscall
1586 add %sp, STACKFRAME_SZ, %o0
1587
1588 st %o0, [%sp + STACKFRAME_SZ + PT_I0]
1589 set PSR_C, %g2
1590 cmp %o0, -ERESTART_RESTARTBLOCK
1591 bgeu 1f
1592 ld [%sp + STACKFRAME_SZ + PT_PSR], %g3
1593
1594 /* System call success, clear Carry condition code. */
1595 andn %g3, %g2, %g3
1596 clr %l6
1597 b 2f
1598 st %g3, [%sp + STACKFRAME_SZ + PT_PSR]
1599
16001:
1601 /* System call failure, set Carry condition code.
1602 * Also, get abs(errno) to return to the process.
1603 */
1604 sub %g0, %o0, %o0
1605 mov 1, %l6
1606 st %o0, [%sp + STACKFRAME_SZ + PT_I0]
1607 or %g3, %g2, %g3
1608 st %g3, [%sp + STACKFRAME_SZ + PT_PSR]
1609
1610 /* Advance the pc and npc over the trap instruction.
1611 * If the npc is unaligned (has a 1 in its low bit), it means
1612 * the kernel does not want us to play magic (i.e., skipping over
1613 * traps). This happens mainly when the Solaris code wants to set
1614 * both the PC and nPC itself (setcontext).
1615 */
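	/* In C terms (sketch only):
	 *
	 *	npc = regs->npc;
	 *	if (npc & 1) {
	 *		regs->npc = npc - 1;        setcontext already set pc/npc
	 *	} else {
	 *		regs->pc  = npc;
	 *		regs->npc = npc + 4;
	 *	}
	 */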
16162:
1617 ld [%sp + STACKFRAME_SZ + PT_NPC], %l1 /* pc = npc */
1618 andcc %l1, 1, %g0
1619 bne 1f
1620 add %l1, 0x4, %l2 /* npc = npc+4 */
1621 st %l1, [%sp + STACKFRAME_SZ + PT_PC]
1622 b ret_trap_entry
1623 st %l2, [%sp + STACKFRAME_SZ + PT_NPC]
1624
1625 /* kernel knows what it is doing, fixup npc and continue */
16261:
1627 sub %l1, 1, %l1
1628 b ret_trap_entry
1629 st %l1, [%sp + STACKFRAME_SZ + PT_NPC]
1630
1631#ifndef CONFIG_SUNOS_EMUL
1632 .align 4
1633 .globl sunos_syscall
1634sunos_syscall:
1635 SAVE_ALL_HEAD
1636 rd %wim, %l3
1637 wr %l0, PSR_ET, %psr
1638 nop
1639 nop
1640 mov %i0, %l5
1641 call do_sunos_syscall
1642 add %sp, STACKFRAME_SZ, %o0
1643#endif
1644
1645 /* {net, open}bsd system calls enter here... */
1646 .align 4
1647 .globl bsd_syscall
1648bsd_syscall:
1649 /* Direct access to user regs, much faster. */
1650 cmp %g1, NR_SYSCALLS
1651 blu,a 1f
1652 sll %g1, 2, %l4
1653
1654 set sys_ni_syscall, %l7
1655 b bsd_is_too_hard
1656 nop
1657
16581:
1659 ld [%l7 + %l4], %l7
1660
1661 .globl bsd_is_too_hard
1662bsd_is_too_hard:
1663 rd %wim, %l3
1664 SAVE_ALL
1665
1666 wr %l0, PSR_ET, %psr
1667 WRITE_PAUSE
1668
16692:
1670 mov %i0, %o0
1671 mov %i1, %o1
1672 mov %i2, %o2
1673 mov %i0, %l5
1674 mov %i3, %o3
1675 mov %i4, %o4
1676 call %l7
1677 mov %i5, %o5
1678
1679 st %o0, [%sp + STACKFRAME_SZ + PT_I0]
1680 set PSR_C, %g2
1681 cmp %o0, -ERESTART_RESTARTBLOCK
1682 bgeu 1f
1683 ld [%sp + STACKFRAME_SZ + PT_PSR], %g3
1684
1685 /* System call success, clear Carry condition code. */
1686 andn %g3, %g2, %g3
1687 clr %l6
1688 b 2f
1689 st %g3, [%sp + STACKFRAME_SZ + PT_PSR]
1690
16911:
1692 /* System call failure, set Carry condition code.
1693 * Also, get abs(errno) to return to the process.
1694 */
1695 sub %g0, %o0, %o0
1696#if 0 /* XXX todo XXX */
1697 sethi %hi(bsd_xlatb_rorl), %o3
1698 or %o3, %lo(bsd_xlatb_rorl), %o3
1699 sll %o0, 2, %o0
1700 ld [%o3 + %o0], %o0
1701#endif
1702 mov 1, %l6
1703 st %o0, [%sp + STACKFRAME_SZ + PT_I0]
1704 or %g3, %g2, %g3
1705 st %g3, [%sp + STACKFRAME_SZ + PT_PSR]
1706
1707 /* Advance the pc and npc over the trap instruction. */
17082:
1709 ld [%sp + STACKFRAME_SZ + PT_NPC], %l1 /* pc = npc */
1710 add %l1, 0x4, %l2 /* npc = npc+4 */
1711 st %l1, [%sp + STACKFRAME_SZ + PT_PC]
1712 b ret_trap_entry
1713 st %l2, [%sp + STACKFRAME_SZ + PT_NPC]
1714
1715/* Saving and restoring the FPU state is best done from lowlevel code.
1716 *
1717 * void fpsave(unsigned long *fpregs, unsigned long *fsr,
1718 * void *fpqueue, unsigned long *fpqdepth)
1719 */
1720
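/* The buffers handed to fpsave()/fpload() are laid out roughly as follows
 * (illustrative sketch; these are not kernel type or field names):
 *
 *	unsigned long fpregs[32];	%f0 .. %f31, stored as 16 doublewords
 *	unsigned long fsr;		one word, the %fsr snapshot
 *	struct { unsigned long addr, insn; } fpqueue[];
 *	unsigned long fpqdepth;		number of queue entries drained
 *
 * Each "std %fq" below pops one (address, instruction) pair from the FPU
 * deferred-trap queue into fpqueue[], and fpqdepth counts how many pairs
 * were saved before the queue-not-empty bit (0x2000 in %fsr) cleared.
 */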
1721 .globl fpsave
1722fpsave:
1723 st %fsr, [%o1] ! this can trap on us if fpu is in bogon state
1724 ld [%o1], %g1
1725 set 0x2000, %g4
1726 andcc %g1, %g4, %g0
1727 be 2f
1728 mov 0, %g2
1729
1730 /* We have an fpqueue to save. */
17311:
1732 std %fq, [%o2]
1733fpsave_magic:
1734 st %fsr, [%o1]
1735 ld [%o1], %g3
1736 andcc %g3, %g4, %g0
1737 add %g2, 1, %g2
1738 bne 1b
1739 add %o2, 8, %o2
1740
17412:
1742 st %g2, [%o3]
1743
1744 std %f0, [%o0 + 0x00]
1745 std %f2, [%o0 + 0x08]
1746 std %f4, [%o0 + 0x10]
1747 std %f6, [%o0 + 0x18]
1748 std %f8, [%o0 + 0x20]
1749 std %f10, [%o0 + 0x28]
1750 std %f12, [%o0 + 0x30]
1751 std %f14, [%o0 + 0x38]
1752 std %f16, [%o0 + 0x40]
1753 std %f18, [%o0 + 0x48]
1754 std %f20, [%o0 + 0x50]
1755 std %f22, [%o0 + 0x58]
1756 std %f24, [%o0 + 0x60]
1757 std %f26, [%o0 + 0x68]
1758 std %f28, [%o0 + 0x70]
1759 retl
1760 std %f30, [%o0 + 0x78]
1761
1762 /* Thanks to Theo de Raadt and the authors of the Sprite/netbsd/openbsd
1763 * code for pointing out this possible deadlock: while we save state
1764 * above we could trap on the fsr store, so our low-level fpu trap
1765 * code has to know how to deal with this.
1766 */
1767fpsave_catch:
1768 b fpsave_magic + 4
1769 st %fsr, [%o1]
1770
1771fpsave_catch2:
1772 b fpsave + 4
1773 st %fsr, [%o1]
1774
1775 /* void fpload(unsigned long *fpregs, unsigned long *fsr); */
1776
1777 .globl fpload
1778fpload:
1779 ldd [%o0 + 0x00], %f0
1780 ldd [%o0 + 0x08], %f2
1781 ldd [%o0 + 0x10], %f4
1782 ldd [%o0 + 0x18], %f6
1783 ldd [%o0 + 0x20], %f8
1784 ldd [%o0 + 0x28], %f10
1785 ldd [%o0 + 0x30], %f12
1786 ldd [%o0 + 0x38], %f14
1787 ldd [%o0 + 0x40], %f16
1788 ldd [%o0 + 0x48], %f18
1789 ldd [%o0 + 0x50], %f20
1790 ldd [%o0 + 0x58], %f22
1791 ldd [%o0 + 0x60], %f24
1792 ldd [%o0 + 0x68], %f26
1793 ldd [%o0 + 0x70], %f28
1794 ldd [%o0 + 0x78], %f30
1795 ld [%o1], %fsr
1796 retl
1797 nop
1798
1799 /* __ndelay and __udelay take two arguments:
1800 * 0 - nsecs or usecs to delay
1801 * 1 - per_cpu udelay_val (loops per jiffy)
1802 *
1803 * Note that ndelay gives HZ times higher resolution but has a 10ms
1804 * limit. udelay can handle up to 1s.
1805 */
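	/* The fixed-point arithmetic below works out to, roughly (assuming
	 * HZ=100, which is what these constants were chosen for):
	 *
	 *	ndelay loops = ((nsecs * 0x1ad) * udelay_val) >> 32
	 *	             ~= nsecs * udelay_val * HZ / 1,000,000,000
	 *	udelay loops = (((usecs * 0x10c6) * udelay_val) >> 32) * HZ
	 *	             ~= usecs * udelay_val * HZ / 1,000,000
	 *
	 * since 0x1ad ~= 2^32 / (1e9 / HZ), 0x10c6 ~= 2^32 / 1e6, and
	 * udelay_val is loops per jiffy.  The >>32 is simply taking the high
	 * word of the 64-bit .umul result.
	 */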
1806 .globl __ndelay
1807__ndelay:
1808 save %sp, -STACKFRAME_SZ, %sp
1809 mov %i0, %o0
1810 call .umul
1811 mov 0x1ad, %o1 ! 2**32 / (1 000 000 000 / HZ)
1812 call .umul
1813 mov %i1, %o1 ! udelay_val
1814 ba delay_continue
1815 mov %o1, %o0 ! >>32 later for better resolution
1816
1817 .globl __udelay
1818__udelay:
1819 save %sp, -STACKFRAME_SZ, %sp
1820 mov %i0, %o0
1821 sethi %hi(0x10c6), %o1
1822 call .umul
1823 or %o1, %lo(0x10c6), %o1 ! 2**32 / 1 000 000
1824 call .umul
1825 mov %i1, %o1 ! udelay_val
1826 call .umul
1827 mov HZ, %o0 ! >>32 earlier for wider range
1828
1829delay_continue:
1830 cmp %o0, 0x0
18311:
1832 bne 1b
1833 subcc %o0, 1, %o0
1834
1835 ret
1836 restore
1837
1838 /* Handle a software breakpoint */
1839 /* We have to inform parent that child has stopped */
1840 .align 4
1841 .globl breakpoint_trap
1842breakpoint_trap:
1843 rd %wim,%l3
1844 SAVE_ALL
1845 wr %l0, PSR_ET, %psr
1846 WRITE_PAUSE
1847
1848 st %i0, [%sp + STACKFRAME_SZ + PT_G0] ! for restarting syscalls
1849 call sparc_breakpoint
1850 add %sp, STACKFRAME_SZ, %o0
1851
1852 RESTORE_ALL
1853
1854 .align 4
1855 .globl __handle_exception, flush_patch_exception
1856__handle_exception:
1857flush_patch_exception:
1858 FLUSH_ALL_KERNEL_WINDOWS;
1859 ldd [%o0], %o6
1860 jmpl %o7 + 0xc, %g0 ! see asm-sparc/processor.h
1861 mov 1, %g1 ! signal EFAULT condition
1862
1863 .align 4
1864 .globl kill_user_windows, kuw_patch1_7win
1865 .globl kuw_patch1
1866kuw_patch1_7win: sll %o3, 6, %o3
1867
1868 /* No matter how much overhead this routine has in the worst
1869 * case scenario, it is several times better than taking the
1870 * traps with the old method of just doing flush_user_windows().
1871 */
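/* Each pass of the loop below simulates one register-window "save": the new
 * %wim is the old one rotated right by one window, and the corresponding bit
 * is cleared from the user-window mask.  In C (sketch only; NW is 8 here,
 * 7 on machines that get the kuw_patch1_7win variant):
 *
 *	do {
 *		wim = (wim >> 1) | (wim << (NW - 1));
 *		uwinmask &= ~wim;
 *	} while (uwinmask);
 */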
1872kill_user_windows:
1873 ld [%g6 + TI_UWINMASK], %o0 ! get current umask
1874 orcc %g0, %o0, %g0 ! if no bits set, we are done
1875 be 3f ! nothing to do
1876 rd %psr, %o5 ! must clear interrupts
1877 or %o5, PSR_PIL, %o4 ! or else that could change
1878 wr %o4, 0x0, %psr ! the uwinmask state
1879 WRITE_PAUSE ! burn them cycles
18801:
1881 ld [%g6 + TI_UWINMASK], %o0 ! get consistent state
1882 orcc %g0, %o0, %g0 ! did an interrupt come in?
1883 be 4f ! yep, we are done
1884 rd %wim, %o3 ! get current wim
1885 srl %o3, 1, %o4 ! simulate a save
1886kuw_patch1:
1887 sll %o3, 7, %o3 ! compute next wim
1888 or %o4, %o3, %o3 ! result
1889 andncc %o0, %o3, %o0 ! clean this bit in umask
1890 bne kuw_patch1 ! not done yet
1891 srl %o3, 1, %o4 ! begin another save simulation
1892 wr %o3, 0x0, %wim ! set the new wim
1893 st %g0, [%g6 + TI_UWINMASK] ! clear uwinmask
18944:
1895 wr %o5, 0x0, %psr ! re-enable interrupts
1896 WRITE_PAUSE ! burn baby burn
18973:
1898 retl ! return
1899 st %g0, [%g6 + TI_W_SAVED] ! no windows saved
1900
1901 .align 4
1902 .globl restore_current
1903restore_current:
1904 LOAD_CURRENT(g6, o0)
1905 retl
1906 nop
1907
1908#ifdef CONFIG_PCI
1909#include <asm/pcic.h>
1910
1911 .align 4
1912 .globl linux_trap_ipi15_pcic
1913linux_trap_ipi15_pcic:
1914 rd %wim, %l3
1915 SAVE_ALL
1916
1917 /*
1918 * First deactivate NMI
1919 * or we cannot drop ET, cannot get window spill traps.
1920 * The busy loop is necessary because the PIO error
1921 * sometimes does not go away quickly and we trap again.
1922 */
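	/*
	 * In C terms (sketch only; the readl/writeb-style access is
	 * illustrative, the PCI_SYS_INT_* constants are the ones used below):
	 *
	 *	pending = readl(pcic_regs + PCI_SYS_INT_PENDING);
	 *	writeb(PCI_SYS_INT_PENDING_CLEAR_ALL,
	 *	       pcic_regs + PCI_SYS_INT_PENDING_CLEAR);
	 *	while (readl(pcic_regs + PCI_SYS_INT_PENDING) &
	 *	       ((PCI_SYS_INT_PENDING_PIO | PCI_SYS_INT_PENDING_PCI) >> 24))
	 *		;	busy-wait until the PIO/PCI error really clears
	 */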
1923 sethi %hi(pcic_regs), %o1
1924 ld [%o1 + %lo(pcic_regs)], %o2
1925
1926 ! Get pending status for printouts later.
1927 ld [%o2 + PCI_SYS_INT_PENDING], %o0
1928
1929 mov PCI_SYS_INT_PENDING_CLEAR_ALL, %o1
1930 stb %o1, [%o2 + PCI_SYS_INT_PENDING_CLEAR]
19311:
1932 ld [%o2 + PCI_SYS_INT_PENDING], %o1
1933 andcc %o1, ((PCI_SYS_INT_PENDING_PIO|PCI_SYS_INT_PENDING_PCI)>>24), %g0
1934 bne 1b
1935 nop
1936
1937 or %l0, PSR_PIL, %l4
1938 wr %l4, 0x0, %psr
1939 WRITE_PAUSE
1940 wr %l4, PSR_ET, %psr
1941 WRITE_PAUSE
1942
1943 call pcic_nmi
1944 add %sp, STACKFRAME_SZ, %o1 ! struct pt_regs *regs
1945 RESTORE_ALL
1946
1947 .globl pcic_nmi_trap_patch
1948pcic_nmi_trap_patch:
1949 sethi %hi(linux_trap_ipi15_pcic), %l3
1950 jmpl %l3 + %lo(linux_trap_ipi15_pcic), %g0
1951 rd %psr, %l0
1952 .word 0
1953
1954#endif /* CONFIG_PCI */
1955
1956/* End of entry.S */
diff --git a/arch/sparc/kernel/errtbls.c b/arch/sparc/kernel/errtbls.c
new file mode 100644
index 000000000000..bb36f6eadfee
--- /dev/null
+++ b/arch/sparc/kernel/errtbls.c
@@ -0,0 +1,276 @@
1/* $Id: errtbls.c,v 1.2 1995/11/25 00:57:55 davem Exp $
2 * errtbls.c: Error number conversion tables between the syscall error
3 * semantics of various OSs.
4 *
5 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
6 *
7 * Based upon preliminary work which is:
8 *
9 * Copyright (C) 1995 Adrian M. Rodriguez (adrian@remus.rutgers.edu)
10 */
11
12#include <asm/bsderrno.h> /* NetBSD (bsd4.4) errnos */
13#include <asm/solerrno.h> /* Solaris errnos */
14
15/* Here are tables which convert Linux/SunOS error number
16 * values to the equivalent in other OSs. Note that since the Linux
17 * ones have been set up to match exactly those of SunOS, no
18 * translation table is needed for that OS.
19 */
20
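/* These tables are indexed by the (positive) Linux errno value, presumably
 * along these lines (sketch only; translate_errno() is a hypothetical helper,
 * not something defined in this file):
 *
 *	int translate_errno(int *tbl, int nr_entries, int linux_err)
 *	{
 *		if (linux_err > 0 && linux_err < nr_entries)
 *			return tbl[linux_err];
 *		return linux_err;
 *	}
 *
 * e.g. solaris_errno[ENOENT] is SOL_ENOENT for the Solaris emulation layer.
 */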
21int solaris_errno[] = {
22 0,
23 SOL_EPERM,
24 SOL_ENOENT,
25 SOL_ESRCH,
26 SOL_EINTR,
27 SOL_EIO,
28 SOL_ENXIO,
29 SOL_E2BIG,
30 SOL_ENOEXEC,
31 SOL_EBADF,
32 SOL_ECHILD,
33 SOL_EAGAIN,
34 SOL_ENOMEM,
35 SOL_EACCES,
36 SOL_EFAULT,
37 SOL_NOTBLK,
38 SOL_EBUSY,
39 SOL_EEXIST,
40 SOL_EXDEV,
41 SOL_ENODEV,
42 SOL_ENOTDIR,
43 SOL_EISDIR,
44 SOL_EINVAL,
45 SOL_ENFILE,
46 SOL_EMFILE,
47 SOL_ENOTTY,
48 SOL_ETXTBSY,
49 SOL_EFBIG,
50 SOL_ENOSPC,
51 SOL_ESPIPE,
52 SOL_EROFS,
53 SOL_EMLINK,
54 SOL_EPIPE,
55 SOL_EDOM,
56 SOL_ERANGE,
57 SOL_EWOULDBLOCK,
58 SOL_EINPROGRESS,
59 SOL_EALREADY,
60 SOL_ENOTSOCK,
61 SOL_EDESTADDRREQ,
62 SOL_EMSGSIZE,
63 SOL_EPROTOTYPE,
64 SOL_ENOPROTOOPT,
65 SOL_EPROTONOSUPPORT,
66 SOL_ESOCKTNOSUPPORT,
67 SOL_EOPNOTSUPP,
68 SOL_EPFNOSUPPORT,
69 SOL_EAFNOSUPPORT,
70 SOL_EADDRINUSE,
71 SOL_EADDRNOTAVAIL,
72 SOL_ENETDOWN,
73 SOL_ENETUNREACH,
74 SOL_ENETRESET,
75 SOL_ECONNABORTED,
76 SOL_ECONNRESET,
77 SOL_ENOBUFS,
78 SOL_EISCONN,
79 SOL_ENOTONN,
80 SOL_ESHUTDOWN,
81 SOL_ETOOMANYREFS,
82 SOL_ETIMEDOUT,
83 SOL_ECONNREFUSED,
84 SOL_ELOOP,
85 SOL_ENAMETOOLONG,
86 SOL_EHOSTDOWN,
87 SOL_EHOSTUNREACH,
88 SOL_ENOTEMPTY,
89 SOL_EPROCLIM,
90 SOL_EUSERS,
91 SOL_EDQUOT,
92 SOL_ESTALE,
93 SOL_EREMOTE,
94 SOL_ENOSTR,
95 SOL_ETIME,
96 SOL_ENOSR,
97 SOL_ENOMSG,
98 SOL_EBADMSG,
99 SOL_IDRM,
100 SOL_EDEADLK,
101 SOL_ENOLCK,
102 SOL_ENONET,
103 SOL_ERREMOTE,
104 SOL_ENOLINK,
105 SOL_EADV,
106 SOL_ESRMNT,
107 SOL_ECOMM,
108 SOL_EPROTO,
109 SOL_EMULTIHOP,
110 SOL_EINVAL, /* EDOTDOT XXX??? */
111 SOL_REMCHG,
112 SOL_NOSYS,
113 SOL_STRPIPE,
114 SOL_EOVERFLOW,
115 SOL_EBADFD,
116 SOL_ECHRNG,
117 SOL_EL2NSYNC,
118 SOL_EL3HLT,
119 SOL_EL3RST,
120 SOL_NRNG,
121 SOL_EUNATCH,
122 SOL_ENOCSI,
123 SOL_EL2HLT,
124 SOL_EBADE,
125 SOL_EBADR,
126 SOL_EXFULL,
127 SOL_ENOANO,
128 SOL_EBADRQC,
129 SOL_EBADSLT,
130 SOL_EDEADLOCK,
131 SOL_EBFONT,
132 SOL_ELIBEXEC,
133 SOL_ENODATA,
134 SOL_ELIBBAD,
135 SOL_ENOPKG,
136 SOL_ELIBACC,
137 SOL_ENOTUNIQ,
138 SOL_ERESTART,
139 SOL_EUCLEAN,
140 SOL_ENOTNAM,
141 SOL_ENAVAIL,
142 SOL_EISNAM,
143 SOL_EREMOTEIO,
144 SOL_EILSEQ,
145 SOL_ELIBMAX,
146 SOL_ELIBSCN,
147};
148
149int netbsd_errno[] = {
150 0,
151 BSD_EPERM,
152 BSD_ENOENT,
153 BSD_ESRCH,
154 BSD_EINTR,
155 BSD_EIO,
156 BSD_ENXIO,
157 BSD_E2BIG,
158 BSD_ENOEXEC,
159 BSD_EBADF,
160 BSD_ECHILD,
161 BSD_EAGAIN,
162 BSD_ENOMEM,
163 BSD_EACCES,
164 BSD_EFAULT,
165 BSD_NOTBLK,
166 BSD_EBUSY,
167 BSD_EEXIST,
168 BSD_EXDEV,
169 BSD_ENODEV,
170 BSD_ENOTDIR,
171 BSD_EISDIR,
172 BSD_EINVAL,
173 BSD_ENFILE,
174 BSD_EMFILE,
175 BSD_ENOTTY,
176 BSD_ETXTBSY,
177 BSD_EFBIG,
178 BSD_ENOSPC,
179 BSD_ESPIPE,
180 BSD_EROFS,
181 BSD_EMLINK,
182 BSD_EPIPE,
183 BSD_EDOM,
184 BSD_ERANGE,
185 BSD_EWOULDBLOCK,
186 BSD_EINPROGRESS,
187 BSD_EALREADY,
188 BSD_ENOTSOCK,
189 BSD_EDESTADDRREQ,
190 BSD_EMSGSIZE,
191 BSD_EPROTOTYPE,
192 BSD_ENOPROTOOPT,
193 BSD_EPROTONOSUPPORT,
194 BSD_ESOCKTNOSUPPORT,
195 BSD_EOPNOTSUPP,
196 BSD_EPFNOSUPPORT,
197 BSD_EAFNOSUPPORT,
198 BSD_EADDRINUSE,
199 BSD_EADDRNOTAVAIL,
200 BSD_ENETDOWN,
201 BSD_ENETUNREACH,
202 BSD_ENETRESET,
203 BSD_ECONNABORTED,
204 BSD_ECONNRESET,
205 BSD_ENOBUFS,
206 BSD_EISCONN,
207 BSD_ENOTONN,
208 BSD_ESHUTDOWN,
209 BSD_ETOOMANYREFS,
210 BSD_ETIMEDOUT,
211 BSD_ECONNREFUSED,
212 BSD_ELOOP,
213 BSD_ENAMETOOLONG,
214 BSD_EHOSTDOWN,
215 BSD_EHOSTUNREACH,
216 BSD_ENOTEMPTY,
217 BSD_EPROCLIM,
218 BSD_EUSERS,
219 BSD_EDQUOT,
220 BSD_ESTALE,
221 BSD_EREMOTE,
222 BSD_ENOSTR,
223 BSD_ETIME,
224 BSD_ENOSR,
225 BSD_ENOMSG,
226 BSD_EBADMSG,
227 BSD_IDRM,
228 BSD_EDEADLK,
229 BSD_ENOLCK,
230 BSD_ENONET,
231 BSD_ERREMOTE,
232 BSD_ENOLINK,
233 BSD_EADV,
234 BSD_ESRMNT,
235 BSD_ECOMM,
236 BSD_EPROTO,
237 BSD_EMULTIHOP,
238 BSD_EINVAL, /* EDOTDOT XXX??? */
239 BSD_REMCHG,
240 BSD_NOSYS,
241 BSD_STRPIPE,
242 BSD_EOVERFLOW,
243 BSD_EBADFD,
244 BSD_ECHRNG,
245 BSD_EL2NSYNC,
246 BSD_EL3HLT,
247 BSD_EL3RST,
248 BSD_NRNG,
249 BSD_EUNATCH,
250 BSD_ENOCSI,
251 BSD_EL2HLT,
252 BSD_EBADE,
253 BSD_EBADR,
254 BSD_EXFULL,
255 BSD_ENOANO,
256 BSD_EBADRQC,
257 BSD_EBADSLT,
258 BSD_EDEADLOCK,
259 BSD_EBFONT,
260 BSD_ELIBEXEC,
261 BSD_ENODATA,
262 BSD_ELIBBAD,
263 BSD_ENOPKG,
264 BSD_ELIBACC,
265 BSD_ENOTUNIQ,
266 BSD_ERESTART,
267 BSD_EUCLEAN,
268 BSD_ENOTNAM,
269 BSD_ENAVAIL,
270 BSD_EISNAM,
271 BSD_EREMOTEIO,
272 BSD_EILSEQ,
273 BSD_ELIBMAX,
274 BSD_ELIBSCN,
275};
276
diff --git a/arch/sparc/kernel/etrap.S b/arch/sparc/kernel/etrap.S
new file mode 100644
index 000000000000..a8b35bed12a2
--- /dev/null
+++ b/arch/sparc/kernel/etrap.S
@@ -0,0 +1,321 @@
1/* $Id: etrap.S,v 1.31 2000/01/08 16:38:18 anton Exp $
2 * etrap.S: Sparc trap window preparation for entry into the
3 * Linux kernel.
4 *
5 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
6 */
7
8#include <asm/head.h>
9#include <asm/asi.h>
10#include <asm/contregs.h>
11#include <asm/page.h>
12#include <asm/psr.h>
13#include <asm/ptrace.h>
14#include <asm/winmacro.h>
15#include <asm/asmmacro.h>
16#include <asm/thread_info.h>
17
18/* Registers to not touch at all. */
19#define t_psr l0 /* Set by caller */
20#define t_pc l1 /* Set by caller */
21#define t_npc l2 /* Set by caller */
22#define t_wim l3 /* Set by caller */
23#define t_twinmask l4 /* Set at beginning of this entry routine. */
24#define t_kstack l5 /* Set right before pt_regs frame is built */
25#define t_retpc l6 /* If you change this, change winmacro.h header file */
26#define t_systable l7 /* Never touch this, could be the syscall table ptr. */
27#define curptr g6 /* Set after pt_regs frame is built */
28
29 .text
30 .align 4
31
32 /* SEVEN WINDOW PATCH INSTRUCTIONS */
33 .globl tsetup_7win_patch1, tsetup_7win_patch2
34 .globl tsetup_7win_patch3, tsetup_7win_patch4
35 .globl tsetup_7win_patch5, tsetup_7win_patch6
36tsetup_7win_patch1: sll %t_wim, 0x6, %t_wim
37tsetup_7win_patch2: and %g2, 0x7f, %g2
38tsetup_7win_patch3: and %g2, 0x7f, %g2
39tsetup_7win_patch4: and %g1, 0x7f, %g1
40tsetup_7win_patch5: sll %t_wim, 0x6, %t_wim
41tsetup_7win_patch6: and %g2, 0x7f, %g2
42 /* END OF PATCH INSTRUCTIONS */
43
44 /* At trap time, interrupts and all generic traps do the
45 * following:
46 *
47 * rd %psr, %l0
48 * b some_handler
49 * rd %wim, %l3
50 * nop
51 *
52 * Then 'some_handler' if it needs a trap frame (ie. it has
53 * to call c-code and the trap cannot be handled in-window)
54 * then it does the SAVE_ALL macro in entry.S which does
55 *
56 * sethi %hi(trap_setup), %l4
57 * jmpl %l4 + %lo(trap_setup), %l6
58 * nop
59 */
60
61 /* 2 3 4 window number
62 * -----
63 * O T S mnemonic
64 *
65 * O == Current window before trap
66 * T == Window entered when trap occurred
67 * S == Window we will need to save if (1<<T) == %wim
68 *
69 * Before execution gets here, it must be guaranteed that
70 * %l0 contains trap time %psr, %l1 and %l2 contain the
71 * trap pc and npc, and %l3 contains the trap time %wim.
72 */
73
74 .globl trap_setup, tsetup_patch1, tsetup_patch2
75 .globl tsetup_patch3, tsetup_patch4
76 .globl tsetup_patch5, tsetup_patch6
77trap_setup:
78 /* Calculate mask of trap window. See if from user
79 * or kernel and branch conditionally.
80 */
81 mov 1, %t_twinmask
82 andcc %t_psr, PSR_PS, %g0 ! fromsupv_p = (psr & PSR_PS)
83 be trap_setup_from_user ! nope, from user mode
84 sll %t_twinmask, %t_psr, %t_twinmask ! t_twinmask = (1 << psr)
85
86 /* From kernel, allocate more kernel stack and
87 * build a pt_regs trap frame.
88 */
89 sub %fp, (STACKFRAME_SZ + TRACEREG_SZ), %t_kstack
90 STORE_PT_ALL(t_kstack, t_psr, t_pc, t_npc, g2)
91
92 /* See if we are in the trap window. */
93 andcc %t_twinmask, %t_wim, %g0
94 bne trap_setup_kernel_spill ! in trap window, clean up
95 nop
96
97 /* Trap from kernel with a window available.
98 * Just do it...
99 */
100 jmpl %t_retpc + 0x8, %g0 ! return to caller
101 mov %t_kstack, %sp ! jump onto new stack
102
103trap_setup_kernel_spill:
104 ld [%curptr + TI_UWINMASK], %g1
105 orcc %g0, %g1, %g0
106 bne trap_setup_user_spill ! there are some user windows, yuck
107 /* Spill from kernel, but only kernel windows, adjust
108 * %wim and go.
109 */
110 srl %t_wim, 0x1, %g2 ! begin computation of new %wim
111tsetup_patch1:
112 sll %t_wim, 0x7, %t_wim ! patched on 7 window Sparcs
113 or %t_wim, %g2, %g2
114tsetup_patch2:
115 and %g2, 0xff, %g2 ! patched on 7 window Sparcs
116
117 save %g0, %g0, %g0
118
119 /* Set new %wim value */
120 wr %g2, 0x0, %wim
121
122 /* Save the kernel window onto the corresponding stack. */
123 STORE_WINDOW(sp)
124
125 restore %g0, %g0, %g0
126
127 jmpl %t_retpc + 0x8, %g0 ! return to caller
128 mov %t_kstack, %sp ! and onto new kernel stack
129
130#define STACK_OFFSET (THREAD_SIZE - TRACEREG_SZ - STACKFRAME_SZ)
131
132trap_setup_from_user:
133 /* We can't use %curptr yet. */
134 LOAD_CURRENT(t_kstack, t_twinmask)
135
136 sethi %hi(STACK_OFFSET), %t_twinmask
137 or %t_twinmask, %lo(STACK_OFFSET), %t_twinmask
138 add %t_kstack, %t_twinmask, %t_kstack
139
140 mov 1, %t_twinmask
141 sll %t_twinmask, %t_psr, %t_twinmask ! t_twinmask = (1 << psr)
142
143 /* Build pt_regs frame. */
144 STORE_PT_ALL(t_kstack, t_psr, t_pc, t_npc, g2)
145
146#if 0
147 /* If we're sure every task_struct is THREAD_SIZE aligned,
148 we can speed this up. */
149 sethi %hi(STACK_OFFSET), %curptr
150 or %curptr, %lo(STACK_OFFSET), %curptr
151 sub %t_kstack, %curptr, %curptr
152#else
153 sethi %hi(~(THREAD_SIZE - 1)), %curptr
154 and %t_kstack, %curptr, %curptr
155#endif
156
157 /* Clear current_thread_info->w_saved */
158 st %g0, [%curptr + TI_W_SAVED]
159
160 /* See if we are in the trap window. */
161 andcc %t_twinmask, %t_wim, %g0
162 bne trap_setup_user_spill ! yep we are
163 orn %g0, %t_twinmask, %g1 ! negate trap win mask into %g1
164
165 /* Trap from user, but not into the invalid window.
166 * Calculate new umask. The way this works is,
167 * any window from the %wim at trap time until
168 * the window right before the one we are in now,
169 * is a user window. A diagram:
170 *
171 * 7 6 5 4 3 2 1 0 window number
172 * ---------------
173 * I L T mnemonic
174 *
175 * Window 'I' is the invalid window in our example,
176 * window 'L' is the window the user was in when
177 * the trap occurred, window T is the trap window
178 * we are in now. So therefore, windows 5, 4 and
179 * 3 are user windows. The following sequence
180 * computes the user winmask to represent this.
181 */
182 subcc %t_wim, %t_twinmask, %g2
183 bneg,a 1f
184 sub %g2, 0x1, %g2
1851:
186 andn %g2, %t_twinmask, %g2
187tsetup_patch3:
188 and %g2, 0xff, %g2 ! patched on 7win Sparcs
189 st %g2, [%curptr + TI_UWINMASK] ! store new umask
190
191 jmpl %t_retpc + 0x8, %g0 ! return to caller
192 mov %t_kstack, %sp ! and onto kernel stack
193
194trap_setup_user_spill:
195 /* A spill occurred from either kernel or user mode
196 * and there exist some user windows to deal with.
197 * A mask of the currently valid user windows
198 * is in %g1 upon entry to here.
199 */
200
201tsetup_patch4:
202 and %g1, 0xff, %g1 ! patched on 7win Sparcs, mask
203 srl %t_wim, 0x1, %g2 ! compute new %wim
204tsetup_patch5:
205 sll %t_wim, 0x7, %t_wim ! patched on 7win Sparcs
206 or %t_wim, %g2, %g2 ! %g2 is new %wim
207tsetup_patch6:
208 and %g2, 0xff, %g2 ! patched on 7win Sparcs
209 andn %g1, %g2, %g1 ! clear this bit in %g1
210 st %g1, [%curptr + TI_UWINMASK]
211
212 save %g0, %g0, %g0
213
214 wr %g2, 0x0, %wim
215
216 /* Call MMU-architecture dependent stack checking
217 * routine.
218 */
219 .globl tsetup_mmu_patchme
220tsetup_mmu_patchme:
221 b tsetup_sun4c_stackchk
222 andcc %sp, 0x7, %g0
223
224 /* Architecture specific stack checking routines. When either
225 * of these routines are called, the globals are free to use
226 * as they have been safely stashed on the new kernel stack
227 * pointer. Thus the definition below for simplicity.
228 */
229#define glob_tmp g1
230
231 .globl tsetup_sun4c_stackchk
232tsetup_sun4c_stackchk:
233 /* Done by caller: andcc %sp, 0x7, %g0 */
234 bne trap_setup_user_stack_is_bolixed
235 sra %sp, 29, %glob_tmp
236
237 add %glob_tmp, 0x1, %glob_tmp
238 andncc %glob_tmp, 0x1, %g0
239 bne trap_setup_user_stack_is_bolixed
240 and %sp, 0xfff, %glob_tmp ! delay slot
241
242 /* See if our dump area will be on more than one
243 * page.
244 */
245 add %glob_tmp, 0x38, %glob_tmp
246 andncc %glob_tmp, 0xff8, %g0
247 be tsetup_sun4c_onepage ! only one page to check
248 lda [%sp] ASI_PTE, %glob_tmp ! have to check first page anyways
249
250tsetup_sun4c_twopages:
251 /* Is first page ok permission wise? */
252 srl %glob_tmp, 29, %glob_tmp
253 cmp %glob_tmp, 0x6
254 bne trap_setup_user_stack_is_bolixed
255 add %sp, 0x38, %glob_tmp /* Is second page in vma hole? */
256
257 sra %glob_tmp, 29, %glob_tmp
258 add %glob_tmp, 0x1, %glob_tmp
259 andncc %glob_tmp, 0x1, %g0
260 bne trap_setup_user_stack_is_bolixed
261 add %sp, 0x38, %glob_tmp
262
263 lda [%glob_tmp] ASI_PTE, %glob_tmp
264
265tsetup_sun4c_onepage:
266 srl %glob_tmp, 29, %glob_tmp
267 cmp %glob_tmp, 0x6 ! can user write to it?
268 bne trap_setup_user_stack_is_bolixed ! failure
269 nop
270
271 STORE_WINDOW(sp)
272
273 restore %g0, %g0, %g0
274
275 jmpl %t_retpc + 0x8, %g0
276 mov %t_kstack, %sp
277
278 .globl tsetup_srmmu_stackchk
279tsetup_srmmu_stackchk:
280 /* Check results of caller's andcc %sp, 0x7, %g0 */
281 bne trap_setup_user_stack_is_bolixed
282 sethi %hi(PAGE_OFFSET), %glob_tmp
283
284 cmp %glob_tmp, %sp
285 bleu,a 1f
286 lda [%g0] ASI_M_MMUREGS, %glob_tmp ! read MMU control
287
288trap_setup_user_stack_is_bolixed:
289 /* From user/kernel into invalid window w/bad user
290 * stack. Save bad user stack, and return to caller.
291 */
292 SAVE_BOLIXED_USER_STACK(curptr, g3)
293 restore %g0, %g0, %g0
294
295 jmpl %t_retpc + 0x8, %g0
296 mov %t_kstack, %sp
297
2981:
299 /* Clear the fault status and turn on the no_fault bit. */
300 or %glob_tmp, 0x2, %glob_tmp ! or in no_fault bit
301 sta %glob_tmp, [%g0] ASI_M_MMUREGS ! set it
302
303 /* Dump the registers and cross fingers. */
304 STORE_WINDOW(sp)
305
306 /* Clear the no_fault bit and check the status. */
307 andn %glob_tmp, 0x2, %glob_tmp
308 sta %glob_tmp, [%g0] ASI_M_MMUREGS
309 mov AC_M_SFAR, %glob_tmp
310 lda [%glob_tmp] ASI_M_MMUREGS, %g0
311 mov AC_M_SFSR, %glob_tmp
312 lda [%glob_tmp] ASI_M_MMUREGS, %glob_tmp ! save away status of winstore
313 andcc %glob_tmp, 0x2, %g0 ! did we fault?
314 bne trap_setup_user_stack_is_bolixed ! failure
315 nop
316
317 restore %g0, %g0, %g0
318
319 jmpl %t_retpc + 0x8, %g0
320 mov %t_kstack, %sp
321
diff --git a/arch/sparc/kernel/head.S b/arch/sparc/kernel/head.S
new file mode 100644
index 000000000000..42d3de59d19b
--- /dev/null
+++ b/arch/sparc/kernel/head.S
@@ -0,0 +1,1326 @@
1/* $Id: head.S,v 1.105 2001/08/12 09:08:56 davem Exp $
2 * head.S: The initial boot code for the Sparc port of Linux.
3 *
4 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1995,1999 Pete Zaitcev (zaitcev@yahoo.com)
6 * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
7 * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
8 * Copyright (C) 1997 Michael A. Griffith (grif@acm.org)
9 *
10 * CompactPCI platform by Eric Brower, 1999.
11 */
12
13#include <linux/version.h>
14#include <linux/config.h>
15#include <linux/init.h>
16
17#include <asm/head.h>
18#include <asm/asi.h>
19#include <asm/contregs.h>
20#include <asm/ptrace.h>
21#include <asm/psr.h>
22#include <asm/page.h>
23#include <asm/kdebug.h>
24#include <asm/winmacro.h>
25#include <asm/thread_info.h> /* TI_UWINMASK */
26#include <asm/errno.h>
27#include <asm/pgtsrmmu.h> /* SRMMU_PGDIR_SHIFT */
28
29 .data
30/*
31 * The following are used with the prom_vector node-ops to figure out
32 * the cpu-type
33 */
34
35 .align 4
36 .globl cputyp
37cputyp:
38 .word 1
39
40 .align 4
41 .globl cputypval
42cputypval:
43 .asciz "sun4c"
44 .ascii " "
45
46cputypvalend:
47cputypvallen = cputypvar - cputypval
48
49 .align 4
50/*
51 * Sun people can't spell worth a damn. "compatability" indeed.
52 * At least we *know* we can't spell, and use a spell-checker.
53 */
54
55/* Uh, actually Linus it is I who cannot spell. Too much murky
56 * Sparc assembly will do this to ya.
57 */
58cputypvar:
59 .asciz "compatability"
60
61/* Tested on SS-5, SS-10. Probably someone at Sun applied a spell-checker. */
62 .align 4
63cputypvar_sun4m:
64 .asciz "compatible"
65
66 .align 4
67
68#ifndef CONFIG_SUN4
69sun4_notsup:
70 .asciz "Sparc-Linux sun4 needs a specially compiled kernel, turn CONFIG_SUN4 on.\n\n"
71 .align 4
72#else
73sun4cdm_notsup:
74 .asciz "Kernel compiled with CONFIG_SUN4 cannot run on SUN4C/SUN4M/SUN4D\nTurn CONFIG_SUN4 off.\n\n"
75 .align 4
76#endif
77
78sun4e_notsup:
79 .asciz "Sparc-Linux sun4e support does not exist\n\n"
80 .align 4
81
82#ifndef CONFIG_SUNOS_EMUL
83#undef SUNOS_SYSCALL_TRAP
84#define SUNOS_SYSCALL_TRAP SUNOS_NO_SYSCALL_TRAP
85#endif
86
87 /* The Sparc trap table, bootloader gives us control at _start. */
88 .text
89 .globl start, _stext, _start, __stext
90 .globl trapbase
91_start: /* danger danger */
92__stext:
93_stext:
94start:
95trapbase:
96#ifdef CONFIG_SMP
97trapbase_cpu0:
98#endif
99/* We get control passed to us here at t_zero. */
100t_zero: b gokernel; nop; nop; nop;
101t_tflt: SPARC_TFAULT /* Inst. Access Exception */
102t_bins: TRAP_ENTRY(0x2, bad_instruction) /* Illegal Instruction */
103t_pins: TRAP_ENTRY(0x3, priv_instruction) /* Privileged Instruction */
104t_fpd: TRAP_ENTRY(0x4, fpd_trap_handler) /* Floating Point Disabled */
105t_wovf: WINDOW_SPILL /* Window Overflow */
106t_wunf: WINDOW_FILL /* Window Underflow */
107t_mna: TRAP_ENTRY(0x7, mna_handler) /* Memory Address Not Aligned */
108t_fpe: TRAP_ENTRY(0x8, fpe_trap_handler) /* Floating Point Exception */
109t_dflt: SPARC_DFAULT /* Data Miss Exception */
110t_tio: TRAP_ENTRY(0xa, do_tag_overflow) /* Tagged Instruction Ovrflw */
111t_wpt: TRAP_ENTRY(0xb, do_watchpoint) /* Watchpoint Detected */
112t_badc: BAD_TRAP(0xc) BAD_TRAP(0xd) BAD_TRAP(0xe) BAD_TRAP(0xf) BAD_TRAP(0x10)
113t_irq1: TRAP_ENTRY_INTERRUPT(1) /* IRQ Software/SBUS Level 1 */
114t_irq2: TRAP_ENTRY_INTERRUPT(2) /* IRQ SBUS Level 2 */
115t_irq3: TRAP_ENTRY_INTERRUPT(3) /* IRQ SCSI/DMA/SBUS Level 3 */
116t_irq4: TRAP_ENTRY_INTERRUPT(4) /* IRQ Software Level 4 */
117t_irq5: TRAP_ENTRY_INTERRUPT(5) /* IRQ SBUS/Ethernet Level 5 */
118t_irq6: TRAP_ENTRY_INTERRUPT(6) /* IRQ Software Level 6 */
119t_irq7: TRAP_ENTRY_INTERRUPT(7) /* IRQ Video/SBUS Level 5 */
120t_irq8: TRAP_ENTRY_INTERRUPT(8) /* IRQ SBUS Level 6 */
121t_irq9: TRAP_ENTRY_INTERRUPT(9) /* IRQ SBUS Level 7 */
122t_irq10:TRAP_ENTRY_INTERRUPT(10) /* IRQ Timer #1 (one we use) */
123t_irq11:TRAP_ENTRY_INTERRUPT(11) /* IRQ Floppy Intr. */
124t_irq12:TRAP_ENTRY_INTERRUPT(12) /* IRQ Zilog serial chip */
125t_irq13:TRAP_ENTRY_INTERRUPT(13) /* IRQ Audio Intr. */
126t_irq14:TRAP_ENTRY_INTERRUPT(14) /* IRQ Timer #2 */
127 .globl t_nmi
128#ifndef CONFIG_SMP
129t_nmi: NMI_TRAP /* Level 15 (NMI) */
130#else
131t_nmi: TRAP_ENTRY(0x1f, linux_trap_ipi15_sun4m)
132#endif
133t_racc: TRAP_ENTRY(0x20, do_reg_access) /* General Register Access Error */
134t_iacce:BAD_TRAP(0x21) /* Instr Access Error */
135t_bad22:BAD_TRAP(0x22) BAD_TRAP(0x23)
136t_cpdis:TRAP_ENTRY(0x24, do_cp_disabled) /* Co-Processor Disabled */
137t_uflsh:SKIP_TRAP(0x25, unimp_flush) /* Unimplemented FLUSH inst. */
138t_bad26:BAD_TRAP(0x26) BAD_TRAP(0x27)
139t_cpexc:TRAP_ENTRY(0x28, do_cp_exception) /* Co-Processor Exception */
140t_dacce:SPARC_DFAULT /* Data Access Error */
141t_hwdz: TRAP_ENTRY(0x2a, do_hw_divzero) /* Division by zero, you lose... */
142t_dserr:BAD_TRAP(0x2b) /* Data Store Error */
143t_daccm:BAD_TRAP(0x2c) /* Data Access MMU-Miss */
144t_bad2d:BAD_TRAP(0x2d) BAD_TRAP(0x2e) BAD_TRAP(0x2f) BAD_TRAP(0x30) BAD_TRAP(0x31)
145t_bad32:BAD_TRAP(0x32) BAD_TRAP(0x33) BAD_TRAP(0x34) BAD_TRAP(0x35) BAD_TRAP(0x36)
146t_bad37:BAD_TRAP(0x37) BAD_TRAP(0x38) BAD_TRAP(0x39) BAD_TRAP(0x3a) BAD_TRAP(0x3b)
147t_iaccm:BAD_TRAP(0x3c) /* Instr Access MMU-Miss */
148t_bad3d:BAD_TRAP(0x3d) BAD_TRAP(0x3e) BAD_TRAP(0x3f) BAD_TRAP(0x40) BAD_TRAP(0x41)
149t_bad42:BAD_TRAP(0x42) BAD_TRAP(0x43) BAD_TRAP(0x44) BAD_TRAP(0x45) BAD_TRAP(0x46)
150t_bad47:BAD_TRAP(0x47) BAD_TRAP(0x48) BAD_TRAP(0x49) BAD_TRAP(0x4a) BAD_TRAP(0x4b)
151t_bad4c:BAD_TRAP(0x4c) BAD_TRAP(0x4d) BAD_TRAP(0x4e) BAD_TRAP(0x4f) BAD_TRAP(0x50)
152t_bad51:BAD_TRAP(0x51) BAD_TRAP(0x52) BAD_TRAP(0x53) BAD_TRAP(0x54) BAD_TRAP(0x55)
153t_bad56:BAD_TRAP(0x56) BAD_TRAP(0x57) BAD_TRAP(0x58) BAD_TRAP(0x59) BAD_TRAP(0x5a)
154t_bad5b:BAD_TRAP(0x5b) BAD_TRAP(0x5c) BAD_TRAP(0x5d) BAD_TRAP(0x5e) BAD_TRAP(0x5f)
155t_bad60:BAD_TRAP(0x60) BAD_TRAP(0x61) BAD_TRAP(0x62) BAD_TRAP(0x63) BAD_TRAP(0x64)
156t_bad65:BAD_TRAP(0x65) BAD_TRAP(0x66) BAD_TRAP(0x67) BAD_TRAP(0x68) BAD_TRAP(0x69)
157t_bad6a:BAD_TRAP(0x6a) BAD_TRAP(0x6b) BAD_TRAP(0x6c) BAD_TRAP(0x6d) BAD_TRAP(0x6e)
158t_bad6f:BAD_TRAP(0x6f) BAD_TRAP(0x70) BAD_TRAP(0x71) BAD_TRAP(0x72) BAD_TRAP(0x73)
159t_bad74:BAD_TRAP(0x74) BAD_TRAP(0x75) BAD_TRAP(0x76) BAD_TRAP(0x77) BAD_TRAP(0x78)
160t_bad79:BAD_TRAP(0x79) BAD_TRAP(0x7a) BAD_TRAP(0x7b) BAD_TRAP(0x7c) BAD_TRAP(0x7d)
161t_bad7e:BAD_TRAP(0x7e) BAD_TRAP(0x7f)
162t_sunos:SUNOS_SYSCALL_TRAP /* SunOS System Call */
163t_sbkpt:BREAKPOINT_TRAP /* Software Breakpoint/KGDB */
164t_divz: TRAP_ENTRY(0x82, do_hw_divzero) /* Divide by zero trap */
165t_flwin:TRAP_ENTRY(0x83, do_flush_windows) /* Flush Windows Trap */
166t_clwin:BAD_TRAP(0x84) /* Clean Windows Trap */
167t_rchk: BAD_TRAP(0x85) /* Range Check */
168t_funal:BAD_TRAP(0x86) /* Fix Unaligned Access Trap */
169t_iovf: BAD_TRAP(0x87) /* Integer Overflow Trap */
170t_slowl:SOLARIS_SYSCALL_TRAP /* Slowaris System Call */
171t_netbs:NETBSD_SYSCALL_TRAP /* Net-B.S. System Call */
172t_bad8a:BAD_TRAP(0x8a) BAD_TRAP(0x8b) BAD_TRAP(0x8c) BAD_TRAP(0x8d) BAD_TRAP(0x8e)
173t_bad8f:BAD_TRAP(0x8f)
174t_linux:LINUX_SYSCALL_TRAP /* Linux System Call */
175t_bad91:BAD_TRAP(0x91) BAD_TRAP(0x92) BAD_TRAP(0x93) BAD_TRAP(0x94) BAD_TRAP(0x95)
176t_bad96:BAD_TRAP(0x96) BAD_TRAP(0x97) BAD_TRAP(0x98) BAD_TRAP(0x99) BAD_TRAP(0x9a)
177t_bad9b:BAD_TRAP(0x9b) BAD_TRAP(0x9c) BAD_TRAP(0x9d) BAD_TRAP(0x9e) BAD_TRAP(0x9f)
178t_getcc:GETCC_TRAP /* Get Condition Codes */
179t_setcc:SETCC_TRAP /* Set Condition Codes */
180t_getpsr:GETPSR_TRAP /* Get PSR Register */
181t_bada3:BAD_TRAP(0xa3) BAD_TRAP(0xa4) BAD_TRAP(0xa5) BAD_TRAP(0xa6)
182t_slowi:INDIRECT_SOLARIS_SYSCALL(156)
183t_bada8:BAD_TRAP(0xa8) BAD_TRAP(0xa9) BAD_TRAP(0xaa) BAD_TRAP(0xab)
184t_badac:BAD_TRAP(0xac) BAD_TRAP(0xad) BAD_TRAP(0xae) BAD_TRAP(0xaf) BAD_TRAP(0xb0)
185t_badb1:BAD_TRAP(0xb1) BAD_TRAP(0xb2) BAD_TRAP(0xb3) BAD_TRAP(0xb4) BAD_TRAP(0xb5)
186t_badb6:BAD_TRAP(0xb6) BAD_TRAP(0xb7) BAD_TRAP(0xb8) BAD_TRAP(0xb9) BAD_TRAP(0xba)
187t_badbb:BAD_TRAP(0xbb) BAD_TRAP(0xbc) BAD_TRAP(0xbd) BAD_TRAP(0xbe) BAD_TRAP(0xbf)
188t_badc0:BAD_TRAP(0xc0) BAD_TRAP(0xc1) BAD_TRAP(0xc2) BAD_TRAP(0xc3) BAD_TRAP(0xc4)
189t_badc5:BAD_TRAP(0xc5) BAD_TRAP(0xc6) BAD_TRAP(0xc7) BAD_TRAP(0xc8) BAD_TRAP(0xc9)
190t_badca:BAD_TRAP(0xca) BAD_TRAP(0xcb) BAD_TRAP(0xcc) BAD_TRAP(0xcd) BAD_TRAP(0xce)
191t_badcf:BAD_TRAP(0xcf) BAD_TRAP(0xd0) BAD_TRAP(0xd1) BAD_TRAP(0xd2) BAD_TRAP(0xd3)
192t_badd4:BAD_TRAP(0xd4) BAD_TRAP(0xd5) BAD_TRAP(0xd6) BAD_TRAP(0xd7) BAD_TRAP(0xd8)
193t_badd9:BAD_TRAP(0xd9) BAD_TRAP(0xda) BAD_TRAP(0xdb) BAD_TRAP(0xdc) BAD_TRAP(0xdd)
194t_badde:BAD_TRAP(0xde) BAD_TRAP(0xdf) BAD_TRAP(0xe0) BAD_TRAP(0xe1) BAD_TRAP(0xe2)
195t_bade3:BAD_TRAP(0xe3) BAD_TRAP(0xe4) BAD_TRAP(0xe5) BAD_TRAP(0xe6) BAD_TRAP(0xe7)
196t_bade8:BAD_TRAP(0xe8) BAD_TRAP(0xe9) BAD_TRAP(0xea) BAD_TRAP(0xeb) BAD_TRAP(0xec)
197t_baded:BAD_TRAP(0xed) BAD_TRAP(0xee) BAD_TRAP(0xef) BAD_TRAP(0xf0) BAD_TRAP(0xf1)
198t_badf2:BAD_TRAP(0xf2) BAD_TRAP(0xf3) BAD_TRAP(0xf4) BAD_TRAP(0xf5) BAD_TRAP(0xf6)
199t_badf7:BAD_TRAP(0xf7) BAD_TRAP(0xf8) BAD_TRAP(0xf9) BAD_TRAP(0xfa) BAD_TRAP(0xfb)
200t_badfc:BAD_TRAP(0xfc) BAD_TRAP(0xfd)
201dbtrap: BAD_TRAP(0xfe) /* Debugger/PROM breakpoint #1 */
202dbtrap2:BAD_TRAP(0xff) /* Debugger/PROM breakpoint #2 */
203
204 .globl end_traptable
205end_traptable:
206
207#ifdef CONFIG_SMP
208 /* Trap tables for the other cpus. */
209 .globl trapbase_cpu1, trapbase_cpu2, trapbase_cpu3
210trapbase_cpu1:
211 BAD_TRAP(0x0) SRMMU_TFAULT TRAP_ENTRY(0x2, bad_instruction)
212 TRAP_ENTRY(0x3, priv_instruction) TRAP_ENTRY(0x4, fpd_trap_handler)
213 WINDOW_SPILL WINDOW_FILL TRAP_ENTRY(0x7, mna_handler)
214 TRAP_ENTRY(0x8, fpe_trap_handler) SRMMU_DFAULT
215 TRAP_ENTRY(0xa, do_tag_overflow) TRAP_ENTRY(0xb, do_watchpoint)
216 BAD_TRAP(0xc) BAD_TRAP(0xd) BAD_TRAP(0xe) BAD_TRAP(0xf) BAD_TRAP(0x10)
217 TRAP_ENTRY_INTERRUPT(1) TRAP_ENTRY_INTERRUPT(2)
218 TRAP_ENTRY_INTERRUPT(3) TRAP_ENTRY_INTERRUPT(4)
219 TRAP_ENTRY_INTERRUPT(5) TRAP_ENTRY_INTERRUPT(6)
220 TRAP_ENTRY_INTERRUPT(7) TRAP_ENTRY_INTERRUPT(8)
221 TRAP_ENTRY_INTERRUPT(9) TRAP_ENTRY_INTERRUPT(10)
222 TRAP_ENTRY_INTERRUPT(11) TRAP_ENTRY_INTERRUPT(12)
223 TRAP_ENTRY_INTERRUPT(13) TRAP_ENTRY_INTERRUPT(14)
224 TRAP_ENTRY(0x1f, linux_trap_ipi15_sun4m)
225 TRAP_ENTRY(0x20, do_reg_access) BAD_TRAP(0x21) BAD_TRAP(0x22)
226 BAD_TRAP(0x23) TRAP_ENTRY(0x24, do_cp_disabled) SKIP_TRAP(0x25, unimp_flush)
227 BAD_TRAP(0x26) BAD_TRAP(0x27) TRAP_ENTRY(0x28, do_cp_exception)
228 SRMMU_DFAULT TRAP_ENTRY(0x2a, do_hw_divzero) BAD_TRAP(0x2b) BAD_TRAP(0x2c)
229 BAD_TRAP(0x2d) BAD_TRAP(0x2e) BAD_TRAP(0x2f) BAD_TRAP(0x30) BAD_TRAP(0x31)
230 BAD_TRAP(0x32) BAD_TRAP(0x33) BAD_TRAP(0x34) BAD_TRAP(0x35) BAD_TRAP(0x36)
231 BAD_TRAP(0x37) BAD_TRAP(0x38) BAD_TRAP(0x39) BAD_TRAP(0x3a) BAD_TRAP(0x3b)
232 BAD_TRAP(0x3c) BAD_TRAP(0x3d) BAD_TRAP(0x3e) BAD_TRAP(0x3f) BAD_TRAP(0x40)
233 BAD_TRAP(0x41) BAD_TRAP(0x42) BAD_TRAP(0x43) BAD_TRAP(0x44) BAD_TRAP(0x45)
234 BAD_TRAP(0x46) BAD_TRAP(0x47) BAD_TRAP(0x48) BAD_TRAP(0x49) BAD_TRAP(0x4a)
235 BAD_TRAP(0x4b) BAD_TRAP(0x4c) BAD_TRAP(0x4d) BAD_TRAP(0x4e) BAD_TRAP(0x4f)
236 BAD_TRAP(0x50)
237 BAD_TRAP(0x51) BAD_TRAP(0x52) BAD_TRAP(0x53) BAD_TRAP(0x54) BAD_TRAP(0x55)
238 BAD_TRAP(0x56) BAD_TRAP(0x57) BAD_TRAP(0x58) BAD_TRAP(0x59) BAD_TRAP(0x5a)
239 BAD_TRAP(0x5b) BAD_TRAP(0x5c) BAD_TRAP(0x5d) BAD_TRAP(0x5e) BAD_TRAP(0x5f)
240 BAD_TRAP(0x60) BAD_TRAP(0x61) BAD_TRAP(0x62) BAD_TRAP(0x63) BAD_TRAP(0x64)
241 BAD_TRAP(0x65) BAD_TRAP(0x66) BAD_TRAP(0x67) BAD_TRAP(0x68) BAD_TRAP(0x69)
242 BAD_TRAP(0x6a) BAD_TRAP(0x6b) BAD_TRAP(0x6c) BAD_TRAP(0x6d) BAD_TRAP(0x6e)
243 BAD_TRAP(0x6f) BAD_TRAP(0x70) BAD_TRAP(0x71) BAD_TRAP(0x72) BAD_TRAP(0x73)
244 BAD_TRAP(0x74) BAD_TRAP(0x75) BAD_TRAP(0x76) BAD_TRAP(0x77) BAD_TRAP(0x78)
245 BAD_TRAP(0x79) BAD_TRAP(0x7a) BAD_TRAP(0x7b) BAD_TRAP(0x7c) BAD_TRAP(0x7d)
246 BAD_TRAP(0x7e) BAD_TRAP(0x7f)
247 SUNOS_SYSCALL_TRAP
248 BREAKPOINT_TRAP
249 TRAP_ENTRY(0x82, do_hw_divzero)
250 TRAP_ENTRY(0x83, do_flush_windows) BAD_TRAP(0x84) BAD_TRAP(0x85)
251 BAD_TRAP(0x86) BAD_TRAP(0x87) SOLARIS_SYSCALL_TRAP
252 NETBSD_SYSCALL_TRAP BAD_TRAP(0x8a) BAD_TRAP(0x8b) BAD_TRAP(0x8c)
253 BAD_TRAP(0x8d) BAD_TRAP(0x8e) BAD_TRAP(0x8f)
254 LINUX_SYSCALL_TRAP BAD_TRAP(0x91) BAD_TRAP(0x92) BAD_TRAP(0x93) BAD_TRAP(0x94)
255 BAD_TRAP(0x95) BAD_TRAP(0x96) BAD_TRAP(0x97) BAD_TRAP(0x98) BAD_TRAP(0x99)
256 BAD_TRAP(0x9a) BAD_TRAP(0x9b) BAD_TRAP(0x9c) BAD_TRAP(0x9d) BAD_TRAP(0x9e)
257 BAD_TRAP(0x9f) GETCC_TRAP SETCC_TRAP GETPSR_TRAP
258 BAD_TRAP(0xa3) BAD_TRAP(0xa4) BAD_TRAP(0xa5) BAD_TRAP(0xa6)
259 INDIRECT_SOLARIS_SYSCALL(156) BAD_TRAP(0xa8) BAD_TRAP(0xa9) BAD_TRAP(0xaa) BAD_TRAP(0xab)
260 BAD_TRAP(0xac) BAD_TRAP(0xad) BAD_TRAP(0xae) BAD_TRAP(0xaf) BAD_TRAP(0xb0)
261 BAD_TRAP(0xb1) BAD_TRAP(0xb2) BAD_TRAP(0xb3) BAD_TRAP(0xb4) BAD_TRAP(0xb5)
262 BAD_TRAP(0xb6) BAD_TRAP(0xb7) BAD_TRAP(0xb8) BAD_TRAP(0xb9) BAD_TRAP(0xba)
263 BAD_TRAP(0xbb) BAD_TRAP(0xbc) BAD_TRAP(0xbd) BAD_TRAP(0xbe) BAD_TRAP(0xbf)
264 BAD_TRAP(0xc0) BAD_TRAP(0xc1) BAD_TRAP(0xc2) BAD_TRAP(0xc3) BAD_TRAP(0xc4)
265 BAD_TRAP(0xc5) BAD_TRAP(0xc6) BAD_TRAP(0xc7) BAD_TRAP(0xc8) BAD_TRAP(0xc9)
266 BAD_TRAP(0xca) BAD_TRAP(0xcb) BAD_TRAP(0xcc) BAD_TRAP(0xcd) BAD_TRAP(0xce)
267 BAD_TRAP(0xcf) BAD_TRAP(0xd0) BAD_TRAP(0xd1) BAD_TRAP(0xd2) BAD_TRAP(0xd3)
268 BAD_TRAP(0xd4) BAD_TRAP(0xd5) BAD_TRAP(0xd6) BAD_TRAP(0xd7) BAD_TRAP(0xd8)
269 BAD_TRAP(0xd9) BAD_TRAP(0xda) BAD_TRAP(0xdb) BAD_TRAP(0xdc) BAD_TRAP(0xdd)
270 BAD_TRAP(0xde) BAD_TRAP(0xdf) BAD_TRAP(0xe0) BAD_TRAP(0xe1) BAD_TRAP(0xe2)
271 BAD_TRAP(0xe3) BAD_TRAP(0xe4) BAD_TRAP(0xe5) BAD_TRAP(0xe6) BAD_TRAP(0xe7)
272 BAD_TRAP(0xe8) BAD_TRAP(0xe9) BAD_TRAP(0xea) BAD_TRAP(0xeb) BAD_TRAP(0xec)
273 BAD_TRAP(0xed) BAD_TRAP(0xee) BAD_TRAP(0xef) BAD_TRAP(0xf0) BAD_TRAP(0xf1)
274 BAD_TRAP(0xf2) BAD_TRAP(0xf3) BAD_TRAP(0xf4) BAD_TRAP(0xf5) BAD_TRAP(0xf6)
275 BAD_TRAP(0xf7) BAD_TRAP(0xf8) BAD_TRAP(0xf9) BAD_TRAP(0xfa) BAD_TRAP(0xfb)
276 BAD_TRAP(0xfc) BAD_TRAP(0xfd) BAD_TRAP(0xfe) BAD_TRAP(0xff)
277
278trapbase_cpu2:
279 BAD_TRAP(0x0) SRMMU_TFAULT TRAP_ENTRY(0x2, bad_instruction)
280 TRAP_ENTRY(0x3, priv_instruction) TRAP_ENTRY(0x4, fpd_trap_handler)
281 WINDOW_SPILL WINDOW_FILL TRAP_ENTRY(0x7, mna_handler)
282 TRAP_ENTRY(0x8, fpe_trap_handler) SRMMU_DFAULT
283 TRAP_ENTRY(0xa, do_tag_overflow) TRAP_ENTRY(0xb, do_watchpoint)
284 BAD_TRAP(0xc) BAD_TRAP(0xd) BAD_TRAP(0xe) BAD_TRAP(0xf) BAD_TRAP(0x10)
285 TRAP_ENTRY_INTERRUPT(1) TRAP_ENTRY_INTERRUPT(2)
286 TRAP_ENTRY_INTERRUPT(3) TRAP_ENTRY_INTERRUPT(4)
287 TRAP_ENTRY_INTERRUPT(5) TRAP_ENTRY_INTERRUPT(6)
288 TRAP_ENTRY_INTERRUPT(7) TRAP_ENTRY_INTERRUPT(8)
289 TRAP_ENTRY_INTERRUPT(9) TRAP_ENTRY_INTERRUPT(10)
290 TRAP_ENTRY_INTERRUPT(11) TRAP_ENTRY_INTERRUPT(12)
291 TRAP_ENTRY_INTERRUPT(13) TRAP_ENTRY_INTERRUPT(14)
292 TRAP_ENTRY(0x1f, linux_trap_ipi15_sun4m)
293 TRAP_ENTRY(0x20, do_reg_access) BAD_TRAP(0x21) BAD_TRAP(0x22)
294 BAD_TRAP(0x23) TRAP_ENTRY(0x24, do_cp_disabled) SKIP_TRAP(0x25, unimp_flush)
295 BAD_TRAP(0x26) BAD_TRAP(0x27) TRAP_ENTRY(0x28, do_cp_exception)
296 SRMMU_DFAULT TRAP_ENTRY(0x2a, do_hw_divzero) BAD_TRAP(0x2b) BAD_TRAP(0x2c)
297 BAD_TRAP(0x2d) BAD_TRAP(0x2e) BAD_TRAP(0x2f) BAD_TRAP(0x30) BAD_TRAP(0x31)
298 BAD_TRAP(0x32) BAD_TRAP(0x33) BAD_TRAP(0x34) BAD_TRAP(0x35) BAD_TRAP(0x36)
299 BAD_TRAP(0x37) BAD_TRAP(0x38) BAD_TRAP(0x39) BAD_TRAP(0x3a) BAD_TRAP(0x3b)
300 BAD_TRAP(0x3c) BAD_TRAP(0x3d) BAD_TRAP(0x3e) BAD_TRAP(0x3f) BAD_TRAP(0x40)
301 BAD_TRAP(0x41) BAD_TRAP(0x42) BAD_TRAP(0x43) BAD_TRAP(0x44) BAD_TRAP(0x45)
302 BAD_TRAP(0x46) BAD_TRAP(0x47) BAD_TRAP(0x48) BAD_TRAP(0x49) BAD_TRAP(0x4a)
303 BAD_TRAP(0x4b) BAD_TRAP(0x4c) BAD_TRAP(0x4d) BAD_TRAP(0x4e) BAD_TRAP(0x4f)
304 BAD_TRAP(0x50)
305 BAD_TRAP(0x51) BAD_TRAP(0x52) BAD_TRAP(0x53) BAD_TRAP(0x54) BAD_TRAP(0x55)
306 BAD_TRAP(0x56) BAD_TRAP(0x57) BAD_TRAP(0x58) BAD_TRAP(0x59) BAD_TRAP(0x5a)
307 BAD_TRAP(0x5b) BAD_TRAP(0x5c) BAD_TRAP(0x5d) BAD_TRAP(0x5e) BAD_TRAP(0x5f)
308 BAD_TRAP(0x60) BAD_TRAP(0x61) BAD_TRAP(0x62) BAD_TRAP(0x63) BAD_TRAP(0x64)
309 BAD_TRAP(0x65) BAD_TRAP(0x66) BAD_TRAP(0x67) BAD_TRAP(0x68) BAD_TRAP(0x69)
310 BAD_TRAP(0x6a) BAD_TRAP(0x6b) BAD_TRAP(0x6c) BAD_TRAP(0x6d) BAD_TRAP(0x6e)
311 BAD_TRAP(0x6f) BAD_TRAP(0x70) BAD_TRAP(0x71) BAD_TRAP(0x72) BAD_TRAP(0x73)
312 BAD_TRAP(0x74) BAD_TRAP(0x75) BAD_TRAP(0x76) BAD_TRAP(0x77) BAD_TRAP(0x78)
313 BAD_TRAP(0x79) BAD_TRAP(0x7a) BAD_TRAP(0x7b) BAD_TRAP(0x7c) BAD_TRAP(0x7d)
314 BAD_TRAP(0x7e) BAD_TRAP(0x7f)
315 SUNOS_SYSCALL_TRAP
316 BREAKPOINT_TRAP
317 TRAP_ENTRY(0x82, do_hw_divzero)
318 TRAP_ENTRY(0x83, do_flush_windows) BAD_TRAP(0x84) BAD_TRAP(0x85)
319 BAD_TRAP(0x86) BAD_TRAP(0x87) SOLARIS_SYSCALL_TRAP
320 NETBSD_SYSCALL_TRAP BAD_TRAP(0x8a) BAD_TRAP(0x8b) BAD_TRAP(0x8c)
321 BAD_TRAP(0x8d) BAD_TRAP(0x8e) BAD_TRAP(0x8f)
322 LINUX_SYSCALL_TRAP BAD_TRAP(0x91) BAD_TRAP(0x92) BAD_TRAP(0x93) BAD_TRAP(0x94)
323 BAD_TRAP(0x95) BAD_TRAP(0x96) BAD_TRAP(0x97) BAD_TRAP(0x98) BAD_TRAP(0x99)
324 BAD_TRAP(0x9a) BAD_TRAP(0x9b) BAD_TRAP(0x9c) BAD_TRAP(0x9d) BAD_TRAP(0x9e)
325 BAD_TRAP(0x9f) GETCC_TRAP SETCC_TRAP GETPSR_TRAP
326 BAD_TRAP(0xa3) BAD_TRAP(0xa4) BAD_TRAP(0xa5) BAD_TRAP(0xa6)
327 INDIRECT_SOLARIS_SYSCALL(156) BAD_TRAP(0xa8) BAD_TRAP(0xa9) BAD_TRAP(0xaa) BAD_TRAP(0xab)
328 BAD_TRAP(0xac) BAD_TRAP(0xad) BAD_TRAP(0xae) BAD_TRAP(0xaf) BAD_TRAP(0xb0)
329 BAD_TRAP(0xb1) BAD_TRAP(0xb2) BAD_TRAP(0xb3) BAD_TRAP(0xb4) BAD_TRAP(0xb5)
330 BAD_TRAP(0xb6) BAD_TRAP(0xb7) BAD_TRAP(0xb8) BAD_TRAP(0xb9) BAD_TRAP(0xba)
331 BAD_TRAP(0xbb) BAD_TRAP(0xbc) BAD_TRAP(0xbd) BAD_TRAP(0xbe) BAD_TRAP(0xbf)
332 BAD_TRAP(0xc0) BAD_TRAP(0xc1) BAD_TRAP(0xc2) BAD_TRAP(0xc3) BAD_TRAP(0xc4)
333 BAD_TRAP(0xc5) BAD_TRAP(0xc6) BAD_TRAP(0xc7) BAD_TRAP(0xc8) BAD_TRAP(0xc9)
334 BAD_TRAP(0xca) BAD_TRAP(0xcb) BAD_TRAP(0xcc) BAD_TRAP(0xcd) BAD_TRAP(0xce)
335 BAD_TRAP(0xcf) BAD_TRAP(0xd0) BAD_TRAP(0xd1) BAD_TRAP(0xd2) BAD_TRAP(0xd3)
336 BAD_TRAP(0xd4) BAD_TRAP(0xd5) BAD_TRAP(0xd6) BAD_TRAP(0xd7) BAD_TRAP(0xd8)
337 BAD_TRAP(0xd9) BAD_TRAP(0xda) BAD_TRAP(0xdb) BAD_TRAP(0xdc) BAD_TRAP(0xdd)
338 BAD_TRAP(0xde) BAD_TRAP(0xdf) BAD_TRAP(0xe0) BAD_TRAP(0xe1) BAD_TRAP(0xe2)
339 BAD_TRAP(0xe3) BAD_TRAP(0xe4) BAD_TRAP(0xe5) BAD_TRAP(0xe6) BAD_TRAP(0xe7)
340 BAD_TRAP(0xe8) BAD_TRAP(0xe9) BAD_TRAP(0xea) BAD_TRAP(0xeb) BAD_TRAP(0xec)
341 BAD_TRAP(0xed) BAD_TRAP(0xee) BAD_TRAP(0xef) BAD_TRAP(0xf0) BAD_TRAP(0xf1)
342 BAD_TRAP(0xf2) BAD_TRAP(0xf3) BAD_TRAP(0xf4) BAD_TRAP(0xf5) BAD_TRAP(0xf6)
343 BAD_TRAP(0xf7) BAD_TRAP(0xf8) BAD_TRAP(0xf9) BAD_TRAP(0xfa) BAD_TRAP(0xfb)
344 BAD_TRAP(0xfc) BAD_TRAP(0xfd) BAD_TRAP(0xfe) BAD_TRAP(0xff)
345
346trapbase_cpu3:
347 BAD_TRAP(0x0) SRMMU_TFAULT TRAP_ENTRY(0x2, bad_instruction)
348 TRAP_ENTRY(0x3, priv_instruction) TRAP_ENTRY(0x4, fpd_trap_handler)
349 WINDOW_SPILL WINDOW_FILL TRAP_ENTRY(0x7, mna_handler)
350 TRAP_ENTRY(0x8, fpe_trap_handler) SRMMU_DFAULT
351 TRAP_ENTRY(0xa, do_tag_overflow) TRAP_ENTRY(0xb, do_watchpoint)
352 BAD_TRAP(0xc) BAD_TRAP(0xd) BAD_TRAP(0xe) BAD_TRAP(0xf) BAD_TRAP(0x10)
353 TRAP_ENTRY_INTERRUPT(1) TRAP_ENTRY_INTERRUPT(2)
354 TRAP_ENTRY_INTERRUPT(3) TRAP_ENTRY_INTERRUPT(4)
355 TRAP_ENTRY_INTERRUPT(5) TRAP_ENTRY_INTERRUPT(6)
356 TRAP_ENTRY_INTERRUPT(7) TRAP_ENTRY_INTERRUPT(8)
357 TRAP_ENTRY_INTERRUPT(9) TRAP_ENTRY_INTERRUPT(10)
358 TRAP_ENTRY_INTERRUPT(11) TRAP_ENTRY_INTERRUPT(12)
359 TRAP_ENTRY_INTERRUPT(13) TRAP_ENTRY_INTERRUPT(14)
360 TRAP_ENTRY(0x1f, linux_trap_ipi15_sun4m)
361 TRAP_ENTRY(0x20, do_reg_access) BAD_TRAP(0x21) BAD_TRAP(0x22)
362 BAD_TRAP(0x23) TRAP_ENTRY(0x24, do_cp_disabled) SKIP_TRAP(0x25, unimp_flush)
363 BAD_TRAP(0x26) BAD_TRAP(0x27) TRAP_ENTRY(0x28, do_cp_exception)
364 SRMMU_DFAULT TRAP_ENTRY(0x2a, do_hw_divzero) BAD_TRAP(0x2b) BAD_TRAP(0x2c)
365 BAD_TRAP(0x2d) BAD_TRAP(0x2e) BAD_TRAP(0x2f) BAD_TRAP(0x30) BAD_TRAP(0x31)
366 BAD_TRAP(0x32) BAD_TRAP(0x33) BAD_TRAP(0x34) BAD_TRAP(0x35) BAD_TRAP(0x36)
367 BAD_TRAP(0x37) BAD_TRAP(0x38) BAD_TRAP(0x39) BAD_TRAP(0x3a) BAD_TRAP(0x3b)
368 BAD_TRAP(0x3c) BAD_TRAP(0x3d) BAD_TRAP(0x3e) BAD_TRAP(0x3f) BAD_TRAP(0x40)
369 BAD_TRAP(0x41) BAD_TRAP(0x42) BAD_TRAP(0x43) BAD_TRAP(0x44) BAD_TRAP(0x45)
370 BAD_TRAP(0x46) BAD_TRAP(0x47) BAD_TRAP(0x48) BAD_TRAP(0x49) BAD_TRAP(0x4a)
371 BAD_TRAP(0x4b) BAD_TRAP(0x4c) BAD_TRAP(0x4d) BAD_TRAP(0x4e) BAD_TRAP(0x4f)
372 BAD_TRAP(0x50)
373 BAD_TRAP(0x51) BAD_TRAP(0x52) BAD_TRAP(0x53) BAD_TRAP(0x54) BAD_TRAP(0x55)
374 BAD_TRAP(0x56) BAD_TRAP(0x57) BAD_TRAP(0x58) BAD_TRAP(0x59) BAD_TRAP(0x5a)
375 BAD_TRAP(0x5b) BAD_TRAP(0x5c) BAD_TRAP(0x5d) BAD_TRAP(0x5e) BAD_TRAP(0x5f)
376 BAD_TRAP(0x60) BAD_TRAP(0x61) BAD_TRAP(0x62) BAD_TRAP(0x63) BAD_TRAP(0x64)
377 BAD_TRAP(0x65) BAD_TRAP(0x66) BAD_TRAP(0x67) BAD_TRAP(0x68) BAD_TRAP(0x69)
378 BAD_TRAP(0x6a) BAD_TRAP(0x6b) BAD_TRAP(0x6c) BAD_TRAP(0x6d) BAD_TRAP(0x6e)
379 BAD_TRAP(0x6f) BAD_TRAP(0x70) BAD_TRAP(0x71) BAD_TRAP(0x72) BAD_TRAP(0x73)
380 BAD_TRAP(0x74) BAD_TRAP(0x75) BAD_TRAP(0x76) BAD_TRAP(0x77) BAD_TRAP(0x78)
381 BAD_TRAP(0x79) BAD_TRAP(0x7a) BAD_TRAP(0x7b) BAD_TRAP(0x7c) BAD_TRAP(0x7d)
382 BAD_TRAP(0x7e) BAD_TRAP(0x7f)
383 SUNOS_SYSCALL_TRAP
384 BREAKPOINT_TRAP
385 TRAP_ENTRY(0x82, do_hw_divzero)
386 TRAP_ENTRY(0x83, do_flush_windows) BAD_TRAP(0x84) BAD_TRAP(0x85)
387 BAD_TRAP(0x86) BAD_TRAP(0x87) SOLARIS_SYSCALL_TRAP
388 NETBSD_SYSCALL_TRAP BAD_TRAP(0x8a) BAD_TRAP(0x8b) BAD_TRAP(0x8c)
389 BAD_TRAP(0x8d) BAD_TRAP(0x8e) BAD_TRAP(0x8f)
390 LINUX_SYSCALL_TRAP BAD_TRAP(0x91) BAD_TRAP(0x92) BAD_TRAP(0x93) BAD_TRAP(0x94)
391 BAD_TRAP(0x95) BAD_TRAP(0x96) BAD_TRAP(0x97) BAD_TRAP(0x98) BAD_TRAP(0x99)
392 BAD_TRAP(0x9a) BAD_TRAP(0x9b) BAD_TRAP(0x9c) BAD_TRAP(0x9d) BAD_TRAP(0x9e)
393 BAD_TRAP(0x9f) GETCC_TRAP SETCC_TRAP GETPSR_TRAP
394 BAD_TRAP(0xa3) BAD_TRAP(0xa4) BAD_TRAP(0xa5) BAD_TRAP(0xa6)
395 INDIRECT_SOLARIS_SYSCALL(156) BAD_TRAP(0xa8) BAD_TRAP(0xa9) BAD_TRAP(0xaa) BAD_TRAP(0xab)
396 BAD_TRAP(0xac) BAD_TRAP(0xad) BAD_TRAP(0xae) BAD_TRAP(0xaf) BAD_TRAP(0xb0)
397 BAD_TRAP(0xb1) BAD_TRAP(0xb2) BAD_TRAP(0xb3) BAD_TRAP(0xb4) BAD_TRAP(0xb5)
398 BAD_TRAP(0xb6) BAD_TRAP(0xb7) BAD_TRAP(0xb8) BAD_TRAP(0xb9) BAD_TRAP(0xba)
399 BAD_TRAP(0xbb) BAD_TRAP(0xbc) BAD_TRAP(0xbd) BAD_TRAP(0xbe) BAD_TRAP(0xbf)
400 BAD_TRAP(0xc0) BAD_TRAP(0xc1) BAD_TRAP(0xc2) BAD_TRAP(0xc3) BAD_TRAP(0xc4)
401 BAD_TRAP(0xc5) BAD_TRAP(0xc6) BAD_TRAP(0xc7) BAD_TRAP(0xc8) BAD_TRAP(0xc9)
402 BAD_TRAP(0xca) BAD_TRAP(0xcb) BAD_TRAP(0xcc) BAD_TRAP(0xcd) BAD_TRAP(0xce)
403 BAD_TRAP(0xcf) BAD_TRAP(0xd0) BAD_TRAP(0xd1) BAD_TRAP(0xd2) BAD_TRAP(0xd3)
404 BAD_TRAP(0xd4) BAD_TRAP(0xd5) BAD_TRAP(0xd6) BAD_TRAP(0xd7) BAD_TRAP(0xd8)
405 BAD_TRAP(0xd9) BAD_TRAP(0xda) BAD_TRAP(0xdb) BAD_TRAP(0xdc) BAD_TRAP(0xdd)
406 BAD_TRAP(0xde) BAD_TRAP(0xdf) BAD_TRAP(0xe0) BAD_TRAP(0xe1) BAD_TRAP(0xe2)
407 BAD_TRAP(0xe3) BAD_TRAP(0xe4) BAD_TRAP(0xe5) BAD_TRAP(0xe6) BAD_TRAP(0xe7)
408 BAD_TRAP(0xe8) BAD_TRAP(0xe9) BAD_TRAP(0xea) BAD_TRAP(0xeb) BAD_TRAP(0xec)
409 BAD_TRAP(0xed) BAD_TRAP(0xee) BAD_TRAP(0xef) BAD_TRAP(0xf0) BAD_TRAP(0xf1)
410 BAD_TRAP(0xf2) BAD_TRAP(0xf3) BAD_TRAP(0xf4) BAD_TRAP(0xf5) BAD_TRAP(0xf6)
411 BAD_TRAP(0xf7) BAD_TRAP(0xf8) BAD_TRAP(0xf9) BAD_TRAP(0xfa) BAD_TRAP(0xfb)
412 BAD_TRAP(0xfc) BAD_TRAP(0xfd) BAD_TRAP(0xfe) BAD_TRAP(0xff)
413
414#endif
415 .align PAGE_SIZE
416
417/* This was the only reasonable way I could think of to properly align
418 * these page-table data structures.
419 */
420 .globl pg0, pg1, pg2, pg3
421 .globl empty_bad_page
422 .globl empty_bad_page_table
423 .globl empty_zero_page
424 .globl swapper_pg_dir
425swapper_pg_dir: .skip PAGE_SIZE
426pg0: .skip PAGE_SIZE
427pg1: .skip PAGE_SIZE
428pg2: .skip PAGE_SIZE
429pg3: .skip PAGE_SIZE
430empty_bad_page: .skip PAGE_SIZE
431empty_bad_page_table: .skip PAGE_SIZE
432empty_zero_page: .skip PAGE_SIZE
433
434 .global root_flags
435 .global ram_flags
436 .global root_dev
437 .global sparc_ramdisk_image
438 .global sparc_ramdisk_size
439
440/* This stuff has to be in sync with SILO and other potential boot loaders
441 * Fields should be kept upward compatible and whenever any change is made,
442 * HdrS version should be incremented.
443 */
444 .ascii "HdrS"
445 .word LINUX_VERSION_CODE
446 .half 0x0203 /* HdrS version */
447root_flags:
448 .half 1
449root_dev:
450 .half 0
451ram_flags:
452 .half 0
453sparc_ramdisk_image:
454 .word 0
455sparc_ramdisk_size:
456 .word 0
457 .word reboot_command
458 .word 0, 0, 0
459 .word _end
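/* Illustrative sketch only, not part of the original file: a boot loader
 * reading the header above might describe it with a packed C struct along
 * these lines (field names are hypothetical; order and sizes mirror the
 * directives above, and all values are big-endian on SPARC):
 *
 *	struct sparc_hdrs {
 *		char		magic[4];	// "HdrS"
 *		uint32_t	linux_version;	// LINUX_VERSION_CODE
 *		uint16_t	hdrs_version;	// 0x0203
 *		uint16_t	root_flags;
 *		uint16_t	root_dev;
 *		uint16_t	ram_flags;
 *		uint32_t	ramdisk_image;	// sparc_ramdisk_image
 *		uint32_t	ramdisk_size;	// sparc_ramdisk_size
 *		uint32_t	reboot_command;
 *		uint32_t	reserved[3];
 *		uint32_t	end;		// _end
 *	} __attribute__((packed));
 */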
460
461/* Cool, here we go. Pick up the romvec pointer in %o0 and stash it in
462 * %g7 and at prom_vector_p. And also quickly check whether we are on
463 * a v0, v2, or v3 prom.
464 */
465gokernel:
466 /* Ok, it's nice to know, as early as possible, if we
467 * are already mapped where we expect to be in virtual
468 * memory. The Solaris /boot elf format bootloader
469 * will peek into our elf header and load us where
470 * we want to be, otherwise we have to re-map.
471 *
472 * Some boot loaders don't place the jmp'rs address
473 * in %o7, so we do a pc-relative call to a local
474 * label, then see what %o7 has.
475 */
476
477 mov %o7, %g4 ! Save %o7
478
479 /* Jump to it, and pray... */
480current_pc:
481 call 1f
482 nop
483
4841:
485 mov %o7, %g3
486
487 tst %o0
488 be no_sun4u_here
489 mov %g4, %o7 /* Previous %o7. */
490
491 mov %o0, %l0 ! stash away romvec
492 mov %o0, %g7 ! put it here too
493 mov %o1, %l1 ! stash away debug_vec too
494
495 /* Ok, let's check out our run time program counter. */
496 set current_pc, %g5
497 cmp %g3, %g5
498 be already_mapped
499 nop
500
501 /* %l6 will hold the offset we have to subtract
502 * from absolute symbols in order to access areas
503 * in our own image. If already mapped this is
504 * just plain zero, else it is KERNBASE.
505 */
506 set KERNBASE, %l6
507 b copy_prom_lvl14
508 nop
509
510already_mapped:
511 mov 0, %l6
512
513 /* Copy over the Prom's level 14 clock handler. */
514copy_prom_lvl14:
515#if 1
516 /* DJHR
517 * preserve our linked/calculated instructions
518 */
519 set lvl14_save, %g1
520 set t_irq14, %g3
521 sub %g1, %l6, %g1 ! translate to physical
522 sub %g3, %l6, %g3 ! translate to physical
523 ldd [%g3], %g4
524 std %g4, [%g1]
525 ldd [%g3+8], %g4
526 std %g4, [%g1+8]
527#endif
528 rd %tbr, %g1
529 andn %g1, 0xfff, %g1 ! proms trap table base
530 or %g0, (0x1e<<4), %g2 ! offset to lvl14 intr
531 or %g1, %g2, %g2
532 set t_irq14, %g3
533 sub %g3, %l6, %g3
534 ldd [%g2], %g4
535 std %g4, [%g3]
536 ldd [%g2 + 0x8], %g4
537 std %g4, [%g3 + 0x8] ! Copy proms handler
538
539/* Must determine whether we are on a sun4c MMU, SRMMU, or SUN4/400 MUTANT
540 * MMU so we can remap ourselves properly. DON'T TOUCH %l0 thru %l5 in these
541 * remapping routines, we need their values afterwards!
542 */
543 /* Now check whether we are already mapped, if we
544 * are we can skip all this garbage coming up.
545 */
546copy_prom_done:
547 cmp %l6, 0
548 be go_to_highmem ! this will be a nop then
549 nop
550
551 set LOAD_ADDR, %g6
552 cmp %g7, %g6
553 bne remap_not_a_sun4 ! This is not a Sun4
554 nop
555
556 or %g0, 0x1, %g1
557 lduba [%g1] ASI_CONTROL, %g1 ! Only safe to try on Sun4.
558 subcc %g1, 0x24, %g0 ! Is this a mutant Sun4/400???
559 be sun4_mutant_remap ! Ugh, it is...
560 nop
561
562 b sun4_normal_remap ! regular sun4, 2 level mmu
563 nop
564
565remap_not_a_sun4:
566 lda [%g0] ASI_M_MMUREGS, %g1 ! same as ASI_PTE on sun4c
567 and %g1, 0x1, %g1 ! Test SRMMU Enable bit ;-)
568 cmp %g1, 0x0
569 be sun4c_remap ! A sun4c MMU or normal Sun4
570 nop
571srmmu_remap:
572 /* First, check for a viking (TI) module. */
573 set 0x40000000, %g2
574 rd %psr, %g3
575 and %g2, %g3, %g3
576 subcc %g3, 0x0, %g0
577 bz srmmu_nviking
578 nop
579
580 /* Figure out what kind of viking we are on.
581 * We need to know if we have to play with the
582 * AC bit and disable traps or not.
583 */
584
585 /* I've only seen MicroSparc's on SparcClassics with this
586 * bit set.
587 */
588 set 0x800, %g2
589 lda [%g0] ASI_M_MMUREGS, %g3 ! peek in the control reg
590 and %g2, %g3, %g3
591 subcc %g3, 0x0, %g0
592 bnz srmmu_nviking ! is in mbus mode
593 nop
594
595 rd %psr, %g3 ! DO NOT TOUCH %g3
596 andn %g3, PSR_ET, %g2
597 wr %g2, 0x0, %psr
598 WRITE_PAUSE
599
600 /* Get context table pointer, then convert to
601 * a physical address, which is 36 bits.
602 */
603 set AC_M_CTPR, %g4
604 lda [%g4] ASI_M_MMUREGS, %g4
605 sll %g4, 0x4, %g4 ! We use this below
606 ! DO NOT TOUCH %g4
607
608 /* Set the AC bit in the Viking's MMU control reg. */
609 lda [%g0] ASI_M_MMUREGS, %g5 ! DO NOT TOUCH %g5
610 set 0x8000, %g6 ! AC bit mask
611 or %g5, %g6, %g6 ! Or it in...
612 sta %g6, [%g0] ASI_M_MMUREGS ! Close your eyes...
613
614 /* Grrr, why does it seem like every other load/store
615 * on the sun4m is in some ASI space...
616 * Fine with me, let's get the pointer to the level 1
617 * page table directory and fetch its entry.
618 */
619 lda [%g4] ASI_M_BYPASS, %o1 ! This is a level 1 ptr
620 srl %o1, 0x4, %o1 ! Clear low 4 bits
621 sll %o1, 0x8, %o1 ! Make physical
622
623 /* Ok, pull in the PTD. */
624 lda [%o1] ASI_M_BYPASS, %o2 ! This is the 0x0 16MB pgd
625
626 /* Calculate to KERNBASE entry. */
627 add %o1, KERNBASE >> (SRMMU_PGDIR_SHIFT - 2), %o3
628
629 /* Poke the entry into the calculated address. */
630 sta %o2, [%o3] ASI_M_BYPASS
631
632 /* I don't get it Sun, if you engineered all these
633 * boot loaders and the PROM (thank you for the debugging
634 * features btw) why did you not have them load kernel
635 * images up in high address space, since this is necessary
636	 * for ABI compliance anyway? Does this low-mapping provide
637 * enhanced interoperability?
638 *
639 * "The PROM is the computer."
640 */
641
642 /* Ok, restore the MMU control register we saved in %g5 */
643 sta %g5, [%g0] ASI_M_MMUREGS ! POW... ouch
644
645 /* Turn traps back on. We saved it in %g3 earlier. */
646 wr %g3, 0x0, %psr ! tick tock, tick tock
647
648 /* Now we burn precious CPU cycles due to bad engineering. */
649 WRITE_PAUSE
650
651 /* Wow, all that just to move a 32-bit value from one
652 * place to another... Jump to high memory.
653 */
654 b go_to_highmem
655 nop
656
657 /* This works on viking's in Mbus mode and all
658 * other MBUS modules. It is virtually the same as
659 * the above madness sans turning traps off and flipping
660 * the AC bit.
661 */
662srmmu_nviking:
663 set AC_M_CTPR, %g1
664 lda [%g1] ASI_M_MMUREGS, %g1 ! get ctx table ptr
665 sll %g1, 0x4, %g1 ! make physical addr
666 lda [%g1] ASI_M_BYPASS, %g1 ! ptr to level 1 pg_table
667 srl %g1, 0x4, %g1
668 sll %g1, 0x8, %g1 ! make phys addr for l1 tbl
669
670 lda [%g1] ASI_M_BYPASS, %g2 ! get level1 entry for 0x0
671 add %g1, KERNBASE >> (SRMMU_PGDIR_SHIFT - 2), %g3
672 sta %g2, [%g3] ASI_M_BYPASS ! place at KERNBASE entry
673 b go_to_highmem
674 nop ! wheee....
675
676 /* This remaps the kernel on Sun4/4xx machines
677 * that have the Sun Mutant Three Level MMU.
678	 * It's like a platypus: the SRMMU had not been
679	 * conceived yet, so Sun probably kludged the
680	 * three-level logic into the regular Sun4 MMU.
681 *
682 * Basically, you take each entry in the top level
683 * directory that maps the low 3MB starting at
684 * address zero and put the mapping in the KERNBASE
685 * slots. These top level pgd's are called regmaps.
686 */
687sun4_mutant_remap:
688 or %g0, %g0, %g3 ! source base
689 sethi %hi(KERNBASE), %g4 ! destination base
690 or %g4, %lo(KERNBASE), %g4
691 sethi %hi(0x300000), %g5
692 or %g5, %lo(0x300000), %g5 ! upper bound 3MB
693 or %g0, 0x1, %l6
694 sll %l6, 24, %l6 ! Regmap mapping size
695 add %g3, 0x2, %g3 ! Base magic
696 add %g4, 0x2, %g4 ! Base magic
697
698 /* Main remapping loop on Sun4-Mutant-MMU.
699 * "I am not an animal..." -Famous Mutant Person
700 */
701sun4_mutant_loop:
702 lduha [%g3] ASI_REGMAP, %g2 ! Get lower entry
703 stha %g2, [%g4] ASI_REGMAP ! Store in high entry
704 add %g4, %l6, %g4 ! Move up high memory ptr
705 subcc %g3, %g5, %g0 ! Reached our limit?
706 blu sun4_mutant_loop ! Nope, loop again
707 add %g3, %l6, %g3 ! delay, Move up low ptr
708 b go_to_highmem ! Jump to high memory.
709 nop
710
711 /* The following is for non-4/4xx sun4 MMU's. */
712sun4_normal_remap:
713 mov 0, %g3 ! source base
714 set KERNBASE, %g4 ! destination base
715 set 0x300000, %g5 ! upper bound 3MB
716 mov 1, %l6
717 sll %l6, 18, %l6 ! sun4 mmu segmap size
718sun4_normal_loop:
719 lduha [%g3] ASI_SEGMAP, %g6 ! load phys_seg
720	stha %g6, [%g4] ASI_SEGMAP ! store new virt mapping
721 add %g3, %l6, %g3 ! increment source pointer
722 subcc %g3, %g5, %g0 ! reached limit?
723 blu sun4_normal_loop ! nope, loop again
724 add %g4, %l6, %g4 ! delay, increment dest ptr
725 b go_to_highmem
726 nop
727
728 /* The following works for Sun4c MMU's */
729sun4c_remap:
730 mov 0, %g3 ! source base
731 set KERNBASE, %g4 ! destination base
732 set 0x300000, %g5 ! upper bound 3MB
733 mov 1, %l6
734 sll %l6, 18, %l6 ! sun4c mmu segmap size
735sun4c_remap_loop:
736 lda [%g3] ASI_SEGMAP, %g6 ! load phys_seg
737 sta %g6, [%g4] ASI_SEGMAP ! store new virt mapping
738 add %g3, %l6, %g3 ! Increment source ptr
739 subcc %g3, %g5, %g0 ! Reached limit?
740 bl sun4c_remap_loop ! Nope, loop again
741 add %g4, %l6, %g4 ! delay, Increment dest ptr
742
743/* Now do a non-relative jump so that PC is in high-memory */
744go_to_highmem:
745 set execute_in_high_mem, %g1
746 jmpl %g1, %g0
747 nop
748
749/* The code above must stay at the beginning of the image, and we have to be
750 * careful about short branches, since branching from .text into the .text.init
751 * section is usually out of range. */
752 __INIT
753/* Acquire boot time privileged register values; this will help debugging.
754 * I figure out and store nwindows and nwindowsm1 later on.
755 */
756execute_in_high_mem:
757 mov %l0, %o0 ! put back romvec
758 mov %l1, %o1 ! and debug_vec
759
760 sethi %hi(prom_vector_p), %g1
761 st %o0, [%g1 + %lo(prom_vector_p)]
762
763 sethi %hi(linux_dbvec), %g1
764 st %o1, [%g1 + %lo(linux_dbvec)]
765
766 ld [%o0 + 0x4], %o3
767 and %o3, 0x3, %o5 ! get the version
768
769 cmp %o3, 0x2 ! a v2 prom?
770 be found_version
771 nop
772
773 /* paul@sfe.com.au */
774 cmp %o3, 0x3 ! a v3 prom?
775 be found_version
776 nop
777
778/* Old sun4's pass our load address into %o0 instead of the prom
779 * pointer. On sun4's you have to hard code the romvec pointer into
780 * your code. Sun probably still does that because they don't even
781 * trust their own "OpenBoot" specifications.
782 */
783 set LOAD_ADDR, %g6
784 cmp %o0, %g6 ! an old sun4?
785 be sun4_init
786 nop
787
788found_version:
789#ifdef CONFIG_SUN4
790/* For people who try sun4 kernels, even though Configure.help advises them not to. */
791 ld [%g7 + 0x68], %o1
792 set sun4cdm_notsup, %o0
793 call %o1
794 nop
795 b halt_me
796 nop
797#endif
798/* Get the machine type via the mysterious romvec node operations. */
799
800 add %g7, 0x1c, %l1
801 ld [%l1], %l0
802 ld [%l0], %l0
803 call %l0
804 or %g0, %g0, %o0 ! next_node(0) = first_node
805 or %o0, %g0, %g6
806
807 sethi %hi(cputypvar), %o1 ! First node has cpu-arch
808 or %o1, %lo(cputypvar), %o1
809 sethi %hi(cputypval), %o2 ! information, the string
810 or %o2, %lo(cputypval), %o2
811 ld [%l1], %l0 ! 'compatibility' tells
812 ld [%l0 + 0xc], %l0 ! that we want 'sun4x' where
813 call %l0 ! x is one of '', 'c', 'm',
814 nop ! 'd' or 'e'. %o2 holds pointer
815 ! to a buf where above string
816 ! will get stored by the prom.
817
818 subcc %o0, %g0, %g0
819 bpos got_prop ! Got the property
820 nop
821
822 or %g6, %g0, %o0
823 sethi %hi(cputypvar_sun4m), %o1
824 or %o1, %lo(cputypvar_sun4m), %o1
825 sethi %hi(cputypval), %o2
826 or %o2, %lo(cputypval), %o2
827 ld [%l1], %l0
828 ld [%l0 + 0xc], %l0
829 call %l0
830 nop
831
832got_prop:
833 set cputypval, %o2
834 ldub [%o2 + 0x4], %l1
835
836 cmp %l1, ' '
837 be 1f
838 cmp %l1, 'c'
839 be 1f
840 cmp %l1, 'm'
841 be 1f
842 cmp %l1, 's'
843 be 1f
844 cmp %l1, 'd'
845 be 1f
846 cmp %l1, 'e'
847 be no_sun4e_here ! Could be a sun4e.
848 nop
849 b no_sun4u_here ! AIEEE, a V9 sun4u... Get our BIG BROTHER kernel :))
850 nop
851
8521: set cputypval, %l1
853 ldub [%l1 + 0x4], %l1
854 cmp %l1, 'm' ! Test for sun4d, sun4e ?
855 be sun4m_init
856 cmp %l1, 's' ! Treat sun4s as sun4m
857 be sun4m_init
858 cmp %l1, 'd' ! Let us see how the beast will die
859 be sun4d_init
860 nop
861
862 /* Jump into mmu context zero. */
863 set AC_CONTEXT, %g1
864 stba %g0, [%g1] ASI_CONTROL
865
866 b sun4c_continue_boot
867 nop
868
869/* CPUID in bootbus can be found at PA 0xff0140000 */
870#define SUN4D_BOOTBUS_CPUID 0xf0140000
871
872sun4d_init:
873 /* Need to patch call to handler_irq */
874 set patch_handler_irq, %g4
875 set sun4d_handler_irq, %g5
876 sethi %hi(0x40000000), %g3 ! call
877 sub %g5, %g4, %g5
878 srl %g5, 2, %g5
879 or %g5, %g3, %g5
880 st %g5, [%g4]
881
882#ifdef CONFIG_SMP
883 /* Get our CPU id out of bootbus */
884 set SUN4D_BOOTBUS_CPUID, %g3
885 lduba [%g3] ASI_M_CTL, %g3
886 and %g3, 0xf8, %g3
887 srl %g3, 3, %g4
888 sta %g4, [%g0] ASI_M_VIKING_TMP1
889 sethi %hi(boot_cpu_id), %g5
890 stb %g4, [%g5 + %lo(boot_cpu_id)]
891 sll %g4, 2, %g4
892 sethi %hi(boot_cpu_id4), %g5
893 stb %g4, [%g5 + %lo(boot_cpu_id4)]
894#endif
895
896 /* Fall through to sun4m_init */
897
898sun4m_init:
899 /* XXX Fucking Cypress... */
900 lda [%g0] ASI_M_MMUREGS, %g5
901 srl %g5, 28, %g4
902
903 cmp %g4, 1
904 bne 1f
905 srl %g5, 24, %g4
906
907 and %g4, 0xf, %g4
908 cmp %g4, 7 /* This would be a HyperSparc. */
909
910 bne 2f
911 nop
912
9131:
914
915#define PATCH_IT(dst, src) \
916 set (dst), %g5; \
917 set (src), %g4; \
918 ld [%g4], %g3; \
919 st %g3, [%g5]; \
920 ld [%g4+0x4], %g3; \
921 st %g3, [%g5+0x4];
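	/* PATCH_IT copies two 32-bit instruction words (8 bytes) from the
	 * patch template at 'src' over the live routine at 'dst'.  The
	 * calls below use it to overwrite the generic multiply/divide
	 * helpers, presumably with variants that exploit the hardware
	 * multiply/divide instructions on these CPUs.
	 */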
922
923 /* Signed multiply. */
924 PATCH_IT(.mul, .mul_patch)
925 PATCH_IT(.mul+0x08, .mul_patch+0x08)
926
927 /* Signed remainder. */
928 PATCH_IT(.rem, .rem_patch)
929 PATCH_IT(.rem+0x08, .rem_patch+0x08)
930 PATCH_IT(.rem+0x10, .rem_patch+0x10)
931 PATCH_IT(.rem+0x18, .rem_patch+0x18)
932 PATCH_IT(.rem+0x20, .rem_patch+0x20)
933 PATCH_IT(.rem+0x28, .rem_patch+0x28)
934
935 /* Signed division. */
936 PATCH_IT(.div, .div_patch)
937 PATCH_IT(.div+0x08, .div_patch+0x08)
938 PATCH_IT(.div+0x10, .div_patch+0x10)
939 PATCH_IT(.div+0x18, .div_patch+0x18)
940 PATCH_IT(.div+0x20, .div_patch+0x20)
941
942 /* Unsigned multiply. */
943 PATCH_IT(.umul, .umul_patch)
944 PATCH_IT(.umul+0x08, .umul_patch+0x08)
945
946 /* Unsigned remainder. */
947 PATCH_IT(.urem, .urem_patch)
948 PATCH_IT(.urem+0x08, .urem_patch+0x08)
949 PATCH_IT(.urem+0x10, .urem_patch+0x10)
950 PATCH_IT(.urem+0x18, .urem_patch+0x18)
951
952 /* Unsigned division. */
953 PATCH_IT(.udiv, .udiv_patch)
954 PATCH_IT(.udiv+0x08, .udiv_patch+0x08)
955 PATCH_IT(.udiv+0x10, .udiv_patch+0x10)
956
957#undef PATCH_IT
958
959/* Ok, the PROM could have done funny things and apple cider could still
960 * be sitting in the fault status/address registers. Read them all to
961 * clear them so we don't get magic faults later on.
962 */
963/* This sucks, apparently this makes Vikings call prom panic, will fix later */
9642:
965 rd %psr, %o1
966 srl %o1, 28, %o1 ! Get a type of the CPU
967
968 subcc %o1, 4, %g0 ! TI: Viking or MicroSPARC
969 be sun4c_continue_boot
970 nop
971
972 set AC_M_SFSR, %o0
973 lda [%o0] ASI_M_MMUREGS, %g0
974 set AC_M_SFAR, %o0
975 lda [%o0] ASI_M_MMUREGS, %g0
976
977 /* Fujitsu MicroSPARC-II has no asynchronous flavors of FARs */
978 subcc %o1, 0, %g0
979 be sun4c_continue_boot
980 nop
981
982 set AC_M_AFSR, %o0
983 lda [%o0] ASI_M_MMUREGS, %g0
984 set AC_M_AFAR, %o0
985 lda [%o0] ASI_M_MMUREGS, %g0
986 nop
987
988
989sun4c_continue_boot:
990
991
992/* Aieee, now set PC and nPC, enable traps, give ourselves a stack and it's
993 * show-time!
994 */
995
996 sethi %hi(cputyp), %o0
997 st %g4, [%o0 + %lo(cputyp)]
998
999 /* Turn on Supervisor, EnableFloating, and all the PIL bits.
1000 * Also puts us in register window zero with traps off.
1001 */
1002 set (PSR_PS | PSR_S | PSR_PIL | PSR_EF), %g2
1003 wr %g2, 0x0, %psr
1004 WRITE_PAUSE
1005
1006 /* I want a kernel stack NOW! */
1007 set init_thread_union, %g1
1008 set (THREAD_SIZE - STACKFRAME_SZ), %g2
1009 add %g1, %g2, %sp
1010 mov 0, %fp /* And for good luck */
1011
1012 /* Zero out our BSS section. */
1013 set __bss_start , %o0 ! First address of BSS
1014 set end , %o1 ! Last address of BSS
1015 add %o0, 0x1, %o0
10161:
1017 stb %g0, [%o0]
1018 subcc %o0, %o1, %g0
1019 bl 1b
1020 add %o0, 0x1, %o0
1021
1022 /* Initialize the uwinmask value for init task just in case.
1023 * But first make current_set[boot_cpu_id] point to something useful.
1024 */
1025 set init_thread_union, %g6
1026 set current_set, %g2
1027#ifdef CONFIG_SMP
1028 sethi %hi(boot_cpu_id4), %g3
1029 ldub [%g3 + %lo(boot_cpu_id4)], %g3
1030 st %g6, [%g2]
1031 add %g2, %g3, %g2
1032#endif
1033 st %g6, [%g2]
1034
1035 st %g0, [%g6 + TI_UWINMASK]
1036
1037/* Compute NWINDOWS and stash it away. Now uses %wim trick explained
1038 * in the V8 manual. Ok, this method seems to work, Sparc is cool...
1039 * No, it doesn't work, have to play the save/readCWP/restore trick.
1040 */
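/* In other words: clear %wim so the save below cannot trap, do a save
 * (which decrements CWP modulo NWINDOWS), read the new CWP field out of
 * %psr, then restore.  Since we started in window zero, the CWP we read
 * back is NWINDOWS - 1, and adding one gives the window count.
 */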
1041
1042 wr %g0, 0x0, %wim ! so we do not get a trap
1043 WRITE_PAUSE
1044
1045 save
1046
1047 rd %psr, %g3
1048
1049 restore
1050
1051 and %g3, 0x1f, %g3
1052 add %g3, 0x1, %g3
1053
1054 mov 2, %g1
1055 wr %g1, 0x0, %wim ! make window 1 invalid
1056 WRITE_PAUSE
1057
1058 cmp %g3, 0x7
1059 bne 2f
1060 nop
1061
1062 /* Adjust our window handling routines to
1063 * do things correctly on 7 window Sparcs.
1064 */
1065
1066#define PATCH_INSN(src, dest) \
1067 set src, %g5; \
1068 set dest, %g2; \
1069 ld [%g5], %g4; \
1070 st %g4, [%g2];
1071
1072 /* Patch for window spills... */
1073 PATCH_INSN(spnwin_patch1_7win, spnwin_patch1)
1074 PATCH_INSN(spnwin_patch2_7win, spnwin_patch2)
1075 PATCH_INSN(spnwin_patch3_7win, spnwin_patch3)
1076
1077 /* Patch for window fills... */
1078 PATCH_INSN(fnwin_patch1_7win, fnwin_patch1)
1079 PATCH_INSN(fnwin_patch2_7win, fnwin_patch2)
1080
1081 /* Patch for trap entry setup... */
1082 PATCH_INSN(tsetup_7win_patch1, tsetup_patch1)
1083 PATCH_INSN(tsetup_7win_patch2, tsetup_patch2)
1084 PATCH_INSN(tsetup_7win_patch3, tsetup_patch3)
1085 PATCH_INSN(tsetup_7win_patch4, tsetup_patch4)
1086 PATCH_INSN(tsetup_7win_patch5, tsetup_patch5)
1087 PATCH_INSN(tsetup_7win_patch6, tsetup_patch6)
1088
1089 /* Patch for returning from traps... */
1090 PATCH_INSN(rtrap_7win_patch1, rtrap_patch1)
1091 PATCH_INSN(rtrap_7win_patch2, rtrap_patch2)
1092 PATCH_INSN(rtrap_7win_patch3, rtrap_patch3)
1093 PATCH_INSN(rtrap_7win_patch4, rtrap_patch4)
1094 PATCH_INSN(rtrap_7win_patch5, rtrap_patch5)
1095
1096 /* Patch for killing user windows from the register file. */
1097 PATCH_INSN(kuw_patch1_7win, kuw_patch1)
1098
1099 /* Now patch the kernel window flush sequences.
1100 * This saves 2 traps on every switch and fork.
1101 */
1102 set 0x01000000, %g4
1103 set flush_patch_one, %g5
1104 st %g4, [%g5 + 0x18]
1105 st %g4, [%g5 + 0x1c]
1106 set flush_patch_two, %g5
1107 st %g4, [%g5 + 0x18]
1108 st %g4, [%g5 + 0x1c]
1109 set flush_patch_three, %g5
1110 st %g4, [%g5 + 0x18]
1111 st %g4, [%g5 + 0x1c]
1112 set flush_patch_four, %g5
1113 st %g4, [%g5 + 0x18]
1114 st %g4, [%g5 + 0x1c]
1115 set flush_patch_exception, %g5
1116 st %g4, [%g5 + 0x18]
1117 st %g4, [%g5 + 0x1c]
1118 set flush_patch_switch, %g5
1119 st %g4, [%g5 + 0x18]
1120 st %g4, [%g5 + 0x1c]
1121
11222:
1123 sethi %hi(nwindows), %g4
1124 st %g3, [%g4 + %lo(nwindows)] ! store final value
1125 sub %g3, 0x1, %g3
1126 sethi %hi(nwindowsm1), %g4
1127 st %g3, [%g4 + %lo(nwindowsm1)]
1128
1129 /* Here we go, start using Linux's trap table... */
1130 set trapbase, %g3
1131 wr %g3, 0x0, %tbr
1132 WRITE_PAUSE
1133
1134 /* Finally, turn on traps so that we can call c-code. */
1135 rd %psr, %g3
1136 wr %g3, 0x0, %psr
1137 WRITE_PAUSE
1138
1139 wr %g3, PSR_ET, %psr
1140 WRITE_PAUSE
1141
1142 /* First we call prom_init() to set up PROMLIB, then
1143 * off to start_kernel().
1144 */
1145
1146 sethi %hi(prom_vector_p), %g5
1147 ld [%g5 + %lo(prom_vector_p)], %o0
1148 call prom_init
1149 nop
1150
1151 call start_kernel
1152 nop
1153
1154 /* We should not get here. */
1155 call halt_me
1156 nop
1157
1158sun4_init:
1159#ifdef CONFIG_SUN4
1160/* There, happy now Adrian? */
1161 set cputypval, %o2 ! Let everyone know we
1162 set ' ', %o0 ! are a "sun4 " architecture
1163 stb %o0, [%o2 + 0x4]
1164
1165 b got_prop
1166 nop
1167#else
1168 sethi %hi(SUN4_PROM_VECTOR+0x84), %o1
1169 ld [%o1 + %lo(SUN4_PROM_VECTOR+0x84)], %o1
1170 set sun4_notsup, %o0
1171 call %o1 /* printf */
1172 nop
1173 sethi %hi(SUN4_PROM_VECTOR+0xc4), %o1
1174 ld [%o1 + %lo(SUN4_PROM_VECTOR+0xc4)], %o1
1175 call %o1 /* exittomon */
1176 nop
11771: ba 1b ! Cannot exit into KMON
1178 nop
1179#endif
1180no_sun4e_here:
1181 ld [%g7 + 0x68], %o1
1182 set sun4e_notsup, %o0
1183 call %o1
1184 nop
1185 b halt_me
1186 nop
1187
1188 __INITDATA
1189
1190sun4u_1:
1191 .asciz "finddevice"
1192 .align 4
1193sun4u_2:
1194 .asciz "/chosen"
1195 .align 4
1196sun4u_3:
1197 .asciz "getprop"
1198 .align 4
1199sun4u_4:
1200 .asciz "stdout"
1201 .align 4
1202sun4u_5:
1203 .asciz "write"
1204 .align 4
1205sun4u_6:
1206 .asciz "\n\rOn sun4u you have to use UltraLinux (64bit) kernel\n\rand not a 32bit sun4[cdem] version\n\r\n\r"
1207sun4u_6e:
1208 .align 4
1209sun4u_7:
1210 .asciz "exit"
1211 .align 8
1212sun4u_a1:
1213 .word 0, sun4u_1, 0, 1, 0, 1, 0, sun4u_2, 0
1214sun4u_r1:
1215 .word 0
1216sun4u_a2:
1217 .word 0, sun4u_3, 0, 4, 0, 1, 0
1218sun4u_i2:
1219 .word 0, 0, sun4u_4, 0, sun4u_1, 0, 8, 0
1220sun4u_r2:
1221 .word 0
1222sun4u_a3:
1223 .word 0, sun4u_5, 0, 3, 0, 1, 0
1224sun4u_i3:
1225 .word 0, 0, sun4u_6, 0, sun4u_6e - sun4u_6 - 1, 0
1226sun4u_r3:
1227 .word 0
1228sun4u_a4:
1229 .word 0, sun4u_7, 0, 0, 0, 0
1230sun4u_r4:
1231
1232 __INIT
1233no_sun4u_here:
1234 set sun4u_a1, %o0
1235 set current_pc, %l2
1236 cmp %l2, %g3
1237 be 1f
1238 mov %o4, %l0
1239 sub %g3, %l2, %l6
1240 add %o0, %l6, %o0
1241 mov %o0, %l4
1242 mov sun4u_r4 - sun4u_a1, %l3
1243 ld [%l4], %l5
12442:
1245 add %l4, 4, %l4
1246 cmp %l5, %l2
1247 add %l5, %l6, %l5
1248 bgeu,a 3f
1249 st %l5, [%l4 - 4]
12503:
1251 subcc %l3, 4, %l3
1252 bne 2b
1253 ld [%l4], %l5
12541:
1255 call %l0
1256 mov %o0, %l1
1257
1258 ld [%l1 + (sun4u_r1 - sun4u_a1)], %o1
1259 add %l1, (sun4u_a2 - sun4u_a1), %o0
1260 call %l0
1261 st %o1, [%o0 + (sun4u_i2 - sun4u_a2)]
1262
1263 ld [%l1 + (sun4u_1 - sun4u_a1)], %o1
1264 add %l1, (sun4u_a3 - sun4u_a1), %o0
1265 call %l0
1266 st %o1, [%o0 + (sun4u_i3 - sun4u_a3)]
1267
1268 call %l0
1269 add %l1, (sun4u_a4 - sun4u_a1), %o0
1270
1271 /* Not reached */
1272halt_me:
1273 ld [%g7 + 0x74], %o0
1274 call %o0 ! Get us out of here...
1275 nop ! Apparently Solaris is better.
1276
1277/* Ok, now we continue in the .data/.text sections */
1278
1279 .data
1280 .align 4
1281
1282/*
1283 * Fill up the prom vector; note in particular the kind of the first element,
1284 * no joke. I don't need all of them in here as the entire prom vector
1285 * gets initialized in c-code so all routines can use it.
1286 */
1287
1288 .globl prom_vector_p
1289prom_vector_p:
1290 .word 0
1291
1292/* We calculate the following at boot time; the window fill/spill and trap entry
1293 * code uses these to keep track of the register windows.
1294 */
1295
1296 .align 4
1297 .globl nwindows
1298 .globl nwindowsm1
1299nwindows:
1300 .word 8
1301nwindowsm1:
1302 .word 7
1303
1304/* Boot time debugger vector value. We need this later on. */
1305
1306 .align 4
1307 .globl linux_dbvec
1308linux_dbvec:
1309 .word 0
1310 .word 0
1311
1312 .align 8
1313
1314 .globl lvl14_save
1315lvl14_save:
1316 .word 0
1317 .word 0
1318 .word 0
1319 .word 0
1320 .word t_irq14
1321
1322 .section ".fixup",#alloc,#execinstr
1323 .globl __ret_efault
1324__ret_efault:
1325 ret
1326 restore %g0, -EFAULT, %o0
diff --git a/arch/sparc/kernel/idprom.c b/arch/sparc/kernel/idprom.c
new file mode 100644
index 000000000000..2e1b0f6e99d4
--- /dev/null
+++ b/arch/sparc/kernel/idprom.c
@@ -0,0 +1,108 @@
1/* $Id: idprom.c,v 1.24 1999/08/31 06:54:20 davem Exp $
2 * idprom.c: Routines to load the idprom into kernel addresses and
3 * interpret the data contained within.
4 *
5 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
6 */
7
8#include <linux/config.h>
9#include <linux/kernel.h>
10#include <linux/types.h>
11#include <linux/init.h>
12
13#include <asm/oplib.h>
14#include <asm/idprom.h>
15#include <asm/machines.h> /* Fun with Sun released architectures. */
16#ifdef CONFIG_SUN4
17#include <asm/sun4paddr.h>
18extern void sun4setup(void);
19#endif
20
21struct idprom *idprom;
22static struct idprom idprom_buffer;
23
24/* Here is the master table of Sun machines which use some implementation
25 * of the Sparc CPU and have a meaningful IDPROM machtype value that we
26 * know about. See asm-sparc/machines.h for empirical constants.
27 */
28struct Sun_Machine_Models Sun_Machines[NUM_SUN_MACHINES] = {
29/* First, Sun4's */
30{ "Sun 4/100 Series", (SM_SUN4 | SM_4_110) },
31{ "Sun 4/200 Series", (SM_SUN4 | SM_4_260) },
32{ "Sun 4/300 Series", (SM_SUN4 | SM_4_330) },
33{ "Sun 4/400 Series", (SM_SUN4 | SM_4_470) },
34/* Now, Sun4c's */
35{ "Sun4c SparcStation 1", (SM_SUN4C | SM_4C_SS1) },
36{ "Sun4c SparcStation IPC", (SM_SUN4C | SM_4C_IPC) },
37{ "Sun4c SparcStation 1+", (SM_SUN4C | SM_4C_SS1PLUS) },
38{ "Sun4c SparcStation SLC", (SM_SUN4C | SM_4C_SLC) },
39{ "Sun4c SparcStation 2", (SM_SUN4C | SM_4C_SS2) },
40{ "Sun4c SparcStation ELC", (SM_SUN4C | SM_4C_ELC) },
41{ "Sun4c SparcStation IPX", (SM_SUN4C | SM_4C_IPX) },
42/* Finally, early Sun4m's */
43{ "Sun4m SparcSystem600", (SM_SUN4M | SM_4M_SS60) },
44{ "Sun4m SparcStation10/20", (SM_SUN4M | SM_4M_SS50) },
45{ "Sun4m SparcStation5", (SM_SUN4M | SM_4M_SS40) },
46/* One entry for the OBP arch's which are sun4d, sun4e, and newer sun4m's */
47{ "Sun4M OBP based system", (SM_SUN4M_OBP | 0x0) } };
48
49static void __init display_system_type(unsigned char machtype)
50{
51 char sysname[128];
52 register int i;
53
54 for (i = 0; i < NUM_SUN_MACHINES; i++) {
55		if (Sun_Machines[i].id_machtype == machtype) {
56 if (machtype != (SM_SUN4M_OBP | 0x00) ||
57 prom_getproperty(prom_root_node, "banner-name",
58 sysname, sizeof(sysname)) <= 0)
59 printk("TYPE: %s\n", Sun_Machines[i].name);
60 else
61 printk("TYPE: %s\n", sysname);
62 return;
63 }
64 }
65
66 prom_printf("IDPROM: Bogus id_machtype value, 0x%x\n", machtype);
67 prom_halt();
68}
69
70/* Calculate the IDPROM checksum (xor of the data bytes). */
71static unsigned char __init calc_idprom_cksum(struct idprom *idprom)
72{
73 unsigned char cksum, i, *ptr = (unsigned char *)idprom;
74
75 for (i = cksum = 0; i <= 0x0E; i++)
76 cksum ^= *ptr++;
77
78 return cksum;
79}
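/* Worked example with hypothetical bytes (not from real hardware): if the
 * first fifteen IDPROM bytes were 0x01, 0x55, 0x08, ..., the checksum is
 * just 0x01 ^ 0x55 ^ 0x08 ^ ... over offsets 0x00-0x0E, and idprom_init()
 * below halts unless it matches the stored id_cksum byte.
 */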
80
81/* Create a local IDPROM copy, verify integrity, and display information. */
82void __init idprom_init(void)
83{
84 prom_get_idprom((char *) &idprom_buffer, sizeof(idprom_buffer));
85
86 idprom = &idprom_buffer;
87
88 if (idprom->id_format != 0x01) {
89 prom_printf("IDPROM: Unknown format type!\n");
90 prom_halt();
91 }
92
93 if (idprom->id_cksum != calc_idprom_cksum(idprom)) {
94 prom_printf("IDPROM: Checksum failure (nvram=%x, calc=%x)!\n",
95 idprom->id_cksum, calc_idprom_cksum(idprom));
96 prom_halt();
97 }
98
99 display_system_type(idprom->id_machtype);
100
101 printk("Ethernet address: %x:%x:%x:%x:%x:%x\n",
102 idprom->id_ethaddr[0], idprom->id_ethaddr[1],
103 idprom->id_ethaddr[2], idprom->id_ethaddr[3],
104 idprom->id_ethaddr[4], idprom->id_ethaddr[5]);
105#ifdef CONFIG_SUN4
106 sun4setup();
107#endif
108}
diff --git a/arch/sparc/kernel/init_task.c b/arch/sparc/kernel/init_task.c
new file mode 100644
index 000000000000..fc31de66b1c2
--- /dev/null
+++ b/arch/sparc/kernel/init_task.c
@@ -0,0 +1,28 @@
1#include <linux/mm.h>
2#include <linux/module.h>
3#include <linux/sched.h>
4#include <linux/init_task.h>
5#include <linux/mqueue.h>
6
7#include <asm/pgtable.h>
8#include <asm/uaccess.h>
9
10static struct fs_struct init_fs = INIT_FS;
11static struct files_struct init_files = INIT_FILES;
12static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
13static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
14struct mm_struct init_mm = INIT_MM(init_mm);
15struct task_struct init_task = INIT_TASK(init_task);
16
17EXPORT_SYMBOL(init_mm);
18EXPORT_SYMBOL(init_task);
19
20/* .text section in head.S is aligned at an 8k boundary and this gets linked
21 * right after that so that the init_thread_union is aligned properly as well.
22 * If this is not aligned on an 8k boundary, then you should change the code
23 * in etrap.S which assumes it.
24 */
25union thread_union init_thread_union
26 __attribute__((section (".text\"\n\t#")))
27 __attribute__((aligned (THREAD_SIZE)))
28 = { INIT_THREAD_INFO(init_task) };
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
new file mode 100644
index 000000000000..d0f2bd227c4c
--- /dev/null
+++ b/arch/sparc/kernel/ioport.c
@@ -0,0 +1,731 @@
1/* $Id: ioport.c,v 1.45 2001/10/30 04:54:21 davem Exp $
2 * ioport.c: Simple io mapping allocator.
3 *
4 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
6 *
7 * 1996: sparc_free_io, 1999: ioremap()/iounmap() by Pete Zaitcev.
8 *
9 * 2000/01/29
10 * <rth> zait: as long as pci_alloc_consistent produces something addressable,
11 * things are ok.
12 * <zaitcev> rth: no, it is relevant, because get_free_pages returns you a
13 * pointer into the big page mapping
14 * <rth> zait: so what?
15 * <rth> zait: remap_it_my_way(virt_to_phys(get_free_page()))
16 * <zaitcev> Hmm
17 * <zaitcev> Suppose I did this remap_it_my_way(virt_to_phys(get_free_page())).
18 * So far so good.
19 * <zaitcev> Now, driver calls pci_free_consistent(with result of
20 * remap_it_my_way()).
21 * <zaitcev> How do you find the address to pass to free_pages()?
22 * <rth> zait: walk the page tables? It's only two or three level after all.
23 * <rth> zait: you have to walk them anyway to remove the mapping.
24 * <zaitcev> Hmm
25 * <zaitcev> Sounds reasonable
26 */
27
28#include <linux/config.h>
29#include <linux/sched.h>
30#include <linux/kernel.h>
31#include <linux/errno.h>
32#include <linux/types.h>
33#include <linux/ioport.h>
34#include <linux/mm.h>
35#include <linux/slab.h>
36#include <linux/pci.h> /* struct pci_dev */
37#include <linux/proc_fs.h>
38
39#include <asm/io.h>
40#include <asm/vaddrs.h>
41#include <asm/oplib.h>
42#include <asm/page.h>
43#include <asm/pgalloc.h>
44#include <asm/dma.h>
45
46#define mmu_inval_dma_area(p, l) /* Anton pulled it out for 2.4.0-xx */
47
48struct resource *_sparc_find_resource(struct resource *r, unsigned long);
49
50static void __iomem *_sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz);
51static void __iomem *_sparc_alloc_io(unsigned int busno, unsigned long phys,
52 unsigned long size, char *name);
53static void _sparc_free_io(struct resource *res);
54
55/* This points to the next virtual memory address to use for DVMA mappings */
56static struct resource _sparc_dvma = {
57 .name = "sparc_dvma", .start = DVMA_VADDR, .end = DVMA_END - 1
58};
59/* This points to the start of I/O mappings, visible from outside. */
60/*ext*/ struct resource sparc_iomap = {
61 .name = "sparc_iomap", .start = IOBASE_VADDR, .end = IOBASE_END - 1
62};
63
64/*
65 * Our mini-allocator...
66 * Boy this is gross! We need it because we must map I/O for
67 * timers and the interrupt controller before kmalloc() is available.
68 */
69
70#define XNMLN 15
71#define XNRES 10 /* SS-10 uses 8 */
72
73struct xresource {
74 struct resource xres; /* Must be first */
75 int xflag; /* 1 == used */
76 char xname[XNMLN+1];
77};
78
79static struct xresource xresv[XNRES];
80
81static struct xresource *xres_alloc(void) {
82 struct xresource *xrp;
83 int n;
84
85 xrp = xresv;
86 for (n = 0; n < XNRES; n++) {
87 if (xrp->xflag == 0) {
88 xrp->xflag = 1;
89 return xrp;
90 }
91 xrp++;
92 }
93 return NULL;
94}
95
96static void xres_free(struct xresource *xrp) {
97 xrp->xflag = 0;
98}
99
100/*
101 * These are typically used in PCI drivers
102 * which are trying to be cross-platform.
103 *
104 * Bus type is always zero on IIep.
105 */
106void __iomem *ioremap(unsigned long offset, unsigned long size)
107{
108 char name[14];
109
110 sprintf(name, "phys_%08x", (u32)offset);
111 return _sparc_alloc_io(0, offset, size, name);
112}
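/* Hedged usage sketch (hypothetical driver code, not part of this file):
 *
 *	void __iomem *regs = ioremap(phys_base, 0x100);
 *	if (regs) {
 *		u8 status = readb(regs + REG_STATUS);
 *		...
 *		iounmap(regs);
 *	}
 *
 * 'phys_base' and 'REG_STATUS' are made-up names; the point is only that
 * every successful ioremap() should eventually be paired with iounmap().
 */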
113
114/*
115 * Complementary to ioremap().
116 */
117void iounmap(volatile void __iomem *virtual)
118{
119 unsigned long vaddr = (unsigned long) virtual & PAGE_MASK;
120 struct resource *res;
121
122 if ((res = _sparc_find_resource(&sparc_iomap, vaddr)) == NULL) {
123 printk("free_io/iounmap: cannot free %lx\n", vaddr);
124 return;
125 }
126 _sparc_free_io(res);
127
128 if ((char *)res >= (char*)xresv && (char *)res < (char *)&xresv[XNRES]) {
129 xres_free((struct xresource *)res);
130 } else {
131 kfree(res);
132 }
133}
134
135/*
136 */
137void __iomem *sbus_ioremap(struct resource *phyres, unsigned long offset,
138 unsigned long size, char *name)
139{
140 return _sparc_alloc_io(phyres->flags & 0xF,
141 phyres->start + offset, size, name);
142}
143
144/*
145 */
146void sbus_iounmap(volatile void __iomem *addr, unsigned long size)
147{
148 iounmap(addr);
149}
150
151/*
152 * Meat of mapping
153 */
154static void __iomem *_sparc_alloc_io(unsigned int busno, unsigned long phys,
155 unsigned long size, char *name)
156{
157 static int printed_full;
158 struct xresource *xres;
159 struct resource *res;
160 char *tack;
161 int tlen;
162 void __iomem *va; /* P3 diag */
163
164 if (name == NULL) name = "???";
165
166 if ((xres = xres_alloc()) != 0) {
167 tack = xres->xname;
168 res = &xres->xres;
169 } else {
170 if (!printed_full) {
171 printk("ioremap: done with statics, switching to malloc\n");
172 printed_full = 1;
173 }
174 tlen = strlen(name);
175 tack = kmalloc(sizeof (struct resource) + tlen + 1, GFP_KERNEL);
176 if (tack == NULL) return NULL;
177 memset(tack, 0, sizeof(struct resource));
178 res = (struct resource *) tack;
179 tack += sizeof (struct resource);
180 }
181
182 strlcpy(tack, name, XNMLN+1);
183 res->name = tack;
184
185 va = _sparc_ioremap(res, busno, phys, size);
186 /* printk("ioremap(0x%x:%08lx[0x%lx])=%p\n", busno, phys, size, va); */ /* P3 diag */
187 return va;
188}
189
190/*
191 */
192static void __iomem *
193_sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz)
194{
195 unsigned long offset = ((unsigned long) pa) & (~PAGE_MASK);
196
197 if (allocate_resource(&sparc_iomap, res,
198 (offset + sz + PAGE_SIZE-1) & PAGE_MASK,
199 sparc_iomap.start, sparc_iomap.end, PAGE_SIZE, NULL, NULL) != 0) {
200 /* Usually we cannot see printks in this case. */
201 prom_printf("alloc_io_res(%s): cannot occupy\n",
202 (res->name != NULL)? res->name: "???");
203 prom_halt();
204 }
205
206 pa &= PAGE_MASK;
207 sparc_mapiorange(bus, pa, res->start, res->end - res->start + 1);
208
209 return (void __iomem *) (res->start + offset);
210}
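/* Worked example with hypothetical numbers: for pa = 0x10000004 and
 * sz = 0x100, 'offset' is 0x4, one page of the sparc_iomap window is
 * allocated, the page at 0x10000000 is mapped into it, and the caller
 * gets back res->start + 0x4.
 */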
211
212/*
213 * Complementary to _sparc_ioremap().
214 */
215static void _sparc_free_io(struct resource *res)
216{
217 unsigned long plen;
218
219 plen = res->end - res->start + 1;
220 if ((plen & (PAGE_SIZE-1)) != 0) BUG();
221 sparc_unmapiorange(res->start, plen);
222 release_resource(res);
223}
224
225#ifdef CONFIG_SBUS
226
227void sbus_set_sbus64(struct sbus_dev *sdev, int x) {
228 printk("sbus_set_sbus64: unsupported\n");
229}
230
231/*
232 * Allocate a chunk of memory suitable for DMA.
233 * Typically devices use them for control blocks.
234 * CPU may access them without any explicit flushing.
235 *
236 * XXX Some clever people know that sdev is not used and supply NULL. Watch.
237 */
238void *sbus_alloc_consistent(struct sbus_dev *sdev, long len, u32 *dma_addrp)
239{
240 unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
241 unsigned long va;
242 struct resource *res;
243 int order;
244
245	/* XXX why are some lengths signed, others unsigned? */
246 if (len <= 0) {
247 return NULL;
248 }
249 /* XXX So what is maxphys for us and how do drivers know it? */
250 if (len > 256*1024) { /* __get_free_pages() limit */
251 return NULL;
252 }
253
254 order = get_order(len_total);
255 if ((va = __get_free_pages(GFP_KERNEL, order)) == 0)
256 goto err_nopages;
257
258 if ((res = kmalloc(sizeof(struct resource), GFP_KERNEL)) == NULL)
259 goto err_nomem;
260 memset((char*)res, 0, sizeof(struct resource));
261
262 if (allocate_resource(&_sparc_dvma, res, len_total,
263 _sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
264 printk("sbus_alloc_consistent: cannot occupy 0x%lx", len_total);
265 goto err_nova;
266 }
267 mmu_inval_dma_area(va, len_total);
268 // XXX The mmu_map_dma_area does this for us below, see comments.
269 // sparc_mapiorange(0, virt_to_phys(va), res->start, len_total);
270 /*
271 * XXX That's where sdev would be used. Currently we load
272 * all iommu tables with the same translations.
273 */
274 if (mmu_map_dma_area(dma_addrp, va, res->start, len_total) != 0)
275 goto err_noiommu;
276
277 return (void *)res->start;
278
279err_noiommu:
280 release_resource(res);
281err_nova:
282 free_pages(va, order);
283err_nomem:
284 kfree(res);
285err_nopages:
286 return NULL;
287}
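/* Hedged usage sketch (hypothetical driver code): a driver would typically
 * allocate a descriptor ring with
 *
 *	u32 dvma;
 *	void *ring = sbus_alloc_consistent(sdev, RING_BYTES, &dvma);
 *
 * hand 'dvma' to the device, touch 'ring' from the CPU without explicit
 * flushing, and later release it with
 *
 *	sbus_free_consistent(sdev, RING_BYTES, ring, dvma);
 *
 * where 'RING_BYTES' stands in for the caller's own size.
 */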
288
289void sbus_free_consistent(struct sbus_dev *sdev, long n, void *p, u32 ba)
290{
291 struct resource *res;
292 struct page *pgv;
293
294 if ((res = _sparc_find_resource(&_sparc_dvma,
295 (unsigned long)p)) == NULL) {
296 printk("sbus_free_consistent: cannot free %p\n", p);
297 return;
298 }
299
300 if (((unsigned long)p & (PAGE_SIZE-1)) != 0) {
301 printk("sbus_free_consistent: unaligned va %p\n", p);
302 return;
303 }
304
305 n = (n + PAGE_SIZE-1) & PAGE_MASK;
306 if ((res->end-res->start)+1 != n) {
307 printk("sbus_free_consistent: region 0x%lx asked 0x%lx\n",
308 (long)((res->end-res->start)+1), n);
309 return;
310 }
311
312 release_resource(res);
313 kfree(res);
314
315 /* mmu_inval_dma_area(va, n); */ /* it's consistent, isn't it */
316 pgv = mmu_translate_dvma(ba);
317 mmu_unmap_dma_area(ba, n);
318
319 __free_pages(pgv, get_order(n));
320}
321
322/*
323 * Map a chunk of memory so that devices can see it.
324 * CPU view of this memory may be inconsistent with
325 * a device view and explicit flushing is necessary.
326 */
327dma_addr_t sbus_map_single(struct sbus_dev *sdev, void *va, size_t len, int direction)
328{
329	/* XXX why are some lengths signed, others unsigned? */
330 if (len <= 0) {
331 return 0;
332 }
333 /* XXX So what is maxphys for us and how do drivers know it? */
334 if (len > 256*1024) { /* __get_free_pages() limit */
335 return 0;
336 }
337 return mmu_get_scsi_one(va, len, sdev->bus);
338}
339
340void sbus_unmap_single(struct sbus_dev *sdev, dma_addr_t ba, size_t n, int direction)
341{
342 mmu_release_scsi_one(ba, n, sdev->bus);
343}
344
345int sbus_map_sg(struct sbus_dev *sdev, struct scatterlist *sg, int n, int direction)
346{
347 mmu_get_scsi_sgl(sg, n, sdev->bus);
348
349 /*
350 * XXX sparc64 can return a partial length here. sun4c should do this
351 * but it currently panics if it can't fulfill the request - Anton
352 */
353 return n;
354}
355
356void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sg, int n, int direction)
357{
358 mmu_release_scsi_sgl(sg, n, sdev->bus);
359}
360
361/*
362 */
363void sbus_dma_sync_single_for_cpu(struct sbus_dev *sdev, dma_addr_t ba, size_t size, int direction)
364{
365#if 0
366 unsigned long va;
367 struct resource *res;
368
369 /* We do not need the resource, just print a message if invalid. */
370 res = _sparc_find_resource(&_sparc_dvma, ba);
371 if (res == NULL)
372 panic("sbus_dma_sync_single: 0x%x\n", ba);
373
374	va = page_address(mmu_translate_dvma(ba)); /* XXX highmem */
375 /*
376 * XXX This bogosity will be fixed with the iommu rewrite coming soon
377 * to a kernel near you. - Anton
378 */
379 /* mmu_inval_dma_area(va, (size + PAGE_SIZE-1) & PAGE_MASK); */
380#endif
381}
382
383void sbus_dma_sync_single_for_device(struct sbus_dev *sdev, dma_addr_t ba, size_t size, int direction)
384{
385#if 0
386 unsigned long va;
387 struct resource *res;
388
389 /* We do not need the resource, just print a message if invalid. */
390 res = _sparc_find_resource(&_sparc_dvma, ba);
391 if (res == NULL)
392 panic("sbus_dma_sync_single: 0x%x\n", ba);
393
394	va = page_address(mmu_translate_dvma(ba)); /* XXX highmem */
395 /*
396 * XXX This bogosity will be fixed with the iommu rewrite coming soon
397 * to a kernel near you. - Anton
398 */
399 /* mmu_inval_dma_area(va, (size + PAGE_SIZE-1) & PAGE_MASK); */
400#endif
401}
402
403void sbus_dma_sync_sg_for_cpu(struct sbus_dev *sdev, struct scatterlist *sg, int n, int direction)
404{
405 printk("sbus_dma_sync_sg_for_cpu: not implemented yet\n");
406}
407
408void sbus_dma_sync_sg_for_device(struct sbus_dev *sdev, struct scatterlist *sg, int n, int direction)
409{
410 printk("sbus_dma_sync_sg_for_device: not implemented yet\n");
411}
412#endif /* CONFIG_SBUS */
413
414#ifdef CONFIG_PCI
415
416/* Allocate and map kernel buffer using consistent mode DMA for a device.
417 * hwdev should be valid struct pci_dev pointer for PCI devices.
418 */
419void *pci_alloc_consistent(struct pci_dev *pdev, size_t len, dma_addr_t *pba)
420{
421 unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
422 unsigned long va;
423 struct resource *res;
424 int order;
425
426 if (len == 0) {
427 return NULL;
428 }
429 if (len > 256*1024) { /* __get_free_pages() limit */
430 return NULL;
431 }
432
433 order = get_order(len_total);
434 va = __get_free_pages(GFP_KERNEL, order);
435 if (va == 0) {
436 printk("pci_alloc_consistent: no %ld pages\n", len_total>>PAGE_SHIFT);
437 return NULL;
438 }
439
440 if ((res = kmalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) {
441 free_pages(va, order);
442 printk("pci_alloc_consistent: no core\n");
443 return NULL;
444 }
445 memset((char*)res, 0, sizeof(struct resource));
446
447 if (allocate_resource(&_sparc_dvma, res, len_total,
448 _sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
449 printk("pci_alloc_consistent: cannot occupy 0x%lx", len_total);
450 free_pages(va, order);
451 kfree(res);
452 return NULL;
453 }
454 mmu_inval_dma_area(va, len_total);
455#if 0
456/* P3 */ printk("pci_alloc_consistent: kva %lx uncva %lx phys %lx size %lx\n",
457 (long)va, (long)res->start, (long)virt_to_phys(va), len_total);
458#endif
459 sparc_mapiorange(0, virt_to_phys(va), res->start, len_total);
460
461 *pba = virt_to_phys(va); /* equals virt_to_bus (R.I.P.) for us. */
462 return (void *) res->start;
463}
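/* Hedged usage sketch (hypothetical driver code), mirroring the sbus case:
 *
 *	dma_addr_t ba;
 *	void *desc = pci_alloc_consistent(pdev, DESC_BYTES, &ba);
 *	...
 *	pci_free_consistent(pdev, DESC_BYTES, desc, ba);
 *
 * 'DESC_BYTES' is a placeholder; as pci_free_consistent() below notes, the
 * size and dma address must match what the allocation returned.
 */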
464
465/* Free and unmap a consistent DMA buffer.
466 * cpu_addr is what was returned from pci_alloc_consistent,
467 * size must be the same as what was passed into pci_alloc_consistent,
468 * and likewise dma_addr must be the same as what *dma_addrp was set to.
469 *
470 * References to the memory and mappings associated with cpu_addr/dma_addr
471 * past this call are illegal.
472 */
473void pci_free_consistent(struct pci_dev *pdev, size_t n, void *p, dma_addr_t ba)
474{
475 struct resource *res;
476 unsigned long pgp;
477
478 if ((res = _sparc_find_resource(&_sparc_dvma,
479 (unsigned long)p)) == NULL) {
480 printk("pci_free_consistent: cannot free %p\n", p);
481 return;
482 }
483
484 if (((unsigned long)p & (PAGE_SIZE-1)) != 0) {
485 printk("pci_free_consistent: unaligned va %p\n", p);
486 return;
487 }
488
489 n = (n + PAGE_SIZE-1) & PAGE_MASK;
490 if ((res->end-res->start)+1 != n) {
491 printk("pci_free_consistent: region 0x%lx asked 0x%lx\n",
492 (long)((res->end-res->start)+1), (long)n);
493 return;
494 }
495
496 pgp = (unsigned long) phys_to_virt(ba); /* bus_to_virt actually */
497 mmu_inval_dma_area(pgp, n);
498 sparc_unmapiorange((unsigned long)p, n);
499
500 release_resource(res);
501 kfree(res);
502
503 free_pages(pgp, get_order(n));
504}
505
506/* Map a single buffer of the indicated size for DMA in streaming mode.
507 * The 32-bit bus address to use is returned.
508 *
509 * Once the device is given the dma address, the device owns this memory
510 * until either pci_unmap_single or pci_dma_sync_single_* is performed.
511 */
512dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size,
513 int direction)
514{
515 if (direction == PCI_DMA_NONE)
516 BUG();
517 /* IIep is write-through, not flushing. */
518 return virt_to_phys(ptr);
519}
520
521/* Unmap a single streaming mode DMA translation. The dma_addr and size
522 * must match what was provided for in a previous pci_map_single call. All
523 * other usages are undefined.
524 *
525 * After this call, reads by the cpu to the buffer are guaranteed to see
526 * whatever the device wrote there.
527 */
528void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t ba, size_t size,
529 int direction)
530{
531 if (direction == PCI_DMA_NONE)
532 BUG();
533 if (direction != PCI_DMA_TODEVICE) {
534 mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
535 (size + PAGE_SIZE-1) & PAGE_MASK);
536 }
537}
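/* Hedged usage sketch (hypothetical driver code) for the streaming API:
 *
 *	dma_addr_t ba = pci_map_single(pdev, buf, len, PCI_DMA_FROMDEVICE);
 *	// ... program the device with 'ba' and let it DMA ...
 *	pci_unmap_single(pdev, ba, len, PCI_DMA_FROMDEVICE);
 *	// CPU reads of 'buf' now see what the device wrote
 *
 * 'pdev', 'buf' and 'len' are placeholders for the caller's own values.
 */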
538
539/*
540 * Same as pci_map_single, but with pages.
541 */
542dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page,
543 unsigned long offset, size_t size, int direction)
544{
545 if (direction == PCI_DMA_NONE)
546 BUG();
547 /* IIep is write-through, not flushing. */
548 return page_to_phys(page) + offset;
549}
550
551void pci_unmap_page(struct pci_dev *hwdev,
552 dma_addr_t dma_address, size_t size, int direction)
553{
554 if (direction == PCI_DMA_NONE)
555 BUG();
556 /* mmu_inval_dma_area XXX */
557}
558
559/* Map a set of buffers described by scatterlist in streaming
560 * mode for DMA. This is the scatter-gather version of the
561 * above pci_map_single interface. Here the scatter gather list
562 * elements are each tagged with the appropriate dma address
563 * and length. They are obtained via sg_dma_{address,length}(SG).
564 *
565 * NOTE: An implementation may be able to use a smaller number of
566 * DMA address/length pairs than there are SG table elements.
567 * (for example via virtual mapping capabilities)
568 * The routine returns the number of addr/length pairs actually
569 * used, at most nents.
570 *
571 * Device ownership issues as mentioned above for pci_map_single are
572 * the same here.
573 */
574int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents,
575 int direction)
576{
577 int n;
578
579 if (direction == PCI_DMA_NONE)
580 BUG();
581 /* IIep is write-through, not flushing. */
582 for (n = 0; n < nents; n++) {
583 if (page_address(sg->page) == NULL) BUG();
584 sg->dvma_address = virt_to_phys(page_address(sg->page));
585 sg->dvma_length = sg->length;
586 sg++;
587 }
588 return nents;
589}
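/* Hedged usage sketch (hypothetical driver code): after filling the
 * scatterlist 'sg' with 'nents' entries,
 *
 *	int i, mapped = pci_map_sg(pdev, sg, nents, PCI_DMA_TODEVICE);
 *	for (i = 0; i < mapped; i++)
 *		program_device(sg_dma_address(&sg[i]), sg_dma_length(&sg[i]));
 *	pci_unmap_sg(pdev, sg, nents, PCI_DMA_TODEVICE);
 *
 * 'program_device' is a made-up helper.  Here 'mapped' always equals
 * 'nents', but portable drivers must not rely on that.
 */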
590
591/* Unmap a set of streaming mode DMA translations.
592 * Again, cpu read rules concerning calls here are the same as for
593 * pci_unmap_single() above.
594 */
595void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents,
596 int direction)
597{
598 int n;
599
600 if (direction == PCI_DMA_NONE)
601 BUG();
602 if (direction != PCI_DMA_TODEVICE) {
603 for (n = 0; n < nents; n++) {
604 if (page_address(sg->page) == NULL) BUG();
605 mmu_inval_dma_area(
606 (unsigned long) page_address(sg->page),
607 (sg->length + PAGE_SIZE-1) & PAGE_MASK);
608 sg++;
609 }
610 }
611}
612
613/* Make physical memory consistent for a single
614 * streaming mode DMA translation before or after a transfer.
615 *
616 * If you perform a pci_map_single() but wish to interrogate the
617 * buffer using the cpu, yet do not wish to tear down the PCI dma
618 * mapping, you must call this function before doing so. At the
619 * next point you give the PCI dma address back to the card, you
620 * must first perform a pci_dma_sync_for_device, and then the
621 * device again owns the buffer.
622 */
623void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction)
624{
625 if (direction == PCI_DMA_NONE)
626 BUG();
627 if (direction != PCI_DMA_TODEVICE) {
628 mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
629 (size + PAGE_SIZE-1) & PAGE_MASK);
630 }
631}
632
633void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction)
634{
635 if (direction == PCI_DMA_NONE)
636 BUG();
637 if (direction != PCI_DMA_TODEVICE) {
638 mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
639 (size + PAGE_SIZE-1) & PAGE_MASK);
640 }
641}
642
643/* Make physical memory consistent for a set of streaming
644 * mode DMA translations after a transfer.
645 *
646 * The same as pci_dma_sync_single_* but for a scatter-gather list,
647 * same rules and usage.
648 */
649void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction)
650{
651 int n;
652
653 if (direction == PCI_DMA_NONE)
654 BUG();
655 if (direction != PCI_DMA_TODEVICE) {
656 for (n = 0; n < nents; n++) {
657 if (page_address(sg->page) == NULL) BUG();
658 mmu_inval_dma_area(
659 (unsigned long) page_address(sg->page),
660 (sg->length + PAGE_SIZE-1) & PAGE_MASK);
661 sg++;
662 }
663 }
664}
665
666void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction)
667{
668 int n;
669
670 if (direction == PCI_DMA_NONE)
671 BUG();
672 if (direction != PCI_DMA_TODEVICE) {
673 for (n = 0; n < nents; n++) {
674 if (page_address(sg->page) == NULL) BUG();
675 mmu_inval_dma_area(
676 (unsigned long) page_address(sg->page),
677 (sg->length + PAGE_SIZE-1) & PAGE_MASK);
678 sg++;
679 }
680 }
681}
682#endif /* CONFIG_PCI */
683
684#ifdef CONFIG_PROC_FS
685
686static int
687_sparc_io_get_info(char *buf, char **start, off_t fpos, int length, int *eof,
688 void *data)
689{
690 char *p = buf, *e = buf + length;
691 struct resource *r;
692 const char *nm;
693
694 for (r = ((struct resource *)data)->child; r != NULL; r = r->sibling) {
695 if (p + 32 >= e) /* Better than nothing */
696 break;
697 if ((nm = r->name) == 0) nm = "???";
698 p += sprintf(p, "%08lx-%08lx: %s\n", r->start, r->end, nm);
699 }
700
701 return p-buf;
702}
703
704#endif /* CONFIG_PROC_FS */
705
706/*
707 * This is a version of find_resource and it belongs to kernel/resource.c.
708 * Until we have agreement with Linus and Martin, it lingers here.
709 *
710 * XXX Too slow. Can have 8192 DVMA pages on sun4m in the worst case.
711 * This probably warrants some sort of hashing.
712 */
713struct resource *
714_sparc_find_resource(struct resource *root, unsigned long hit)
715{
716 struct resource *tmp;
717
718 for (tmp = root->child; tmp != 0; tmp = tmp->sibling) {
719 if (tmp->start <= hit && tmp->end >= hit)
720 return tmp;
721 }
722 return NULL;
723}
724
725void register_proc_sparc_ioport(void)
726{
727#ifdef CONFIG_PROC_FS
728 create_proc_read_entry("io_map",0,NULL,_sparc_io_get_info,&sparc_iomap);
729 create_proc_read_entry("dvma_map",0,NULL,_sparc_io_get_info,&_sparc_dvma);
730#endif
731}
diff --git a/arch/sparc/kernel/irq.c b/arch/sparc/kernel/irq.c
new file mode 100644
index 000000000000..410b9a72aba9
--- /dev/null
+++ b/arch/sparc/kernel/irq.c
@@ -0,0 +1,614 @@
1/* $Id: irq.c,v 1.114 2001/12/11 04:55:51 davem Exp $
2 * arch/sparc/kernel/irq.c: Interrupt request handling routines. On the
3 * Sparc the IRQ's are basically 'cast in stone'
4 * and you are supposed to probe the prom's device
5 * node trees to find out who's got which IRQ.
6 *
7 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
8 * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
9 * Copyright (C) 1995,2002 Pete A. Zaitcev (zaitcev@yahoo.com)
10 * Copyright (C) 1996 Dave Redman (djhr@tadpole.co.uk)
11 * Copyright (C) 1998-2000 Anton Blanchard (anton@samba.org)
12 */
13
14#include <linux/config.h>
15#include <linux/module.h>
16#include <linux/sched.h>
17#include <linux/ptrace.h>
18#include <linux/errno.h>
19#include <linux/linkage.h>
20#include <linux/kernel_stat.h>
21#include <linux/signal.h>
22#include <linux/sched.h>
23#include <linux/interrupt.h>
24#include <linux/slab.h>
25#include <linux/random.h>
26#include <linux/init.h>
27#include <linux/smp.h>
28#include <linux/smp_lock.h>
29#include <linux/delay.h>
30#include <linux/threads.h>
31#include <linux/spinlock.h>
32#include <linux/seq_file.h>
33
34#include <asm/ptrace.h>
35#include <asm/processor.h>
36#include <asm/system.h>
37#include <asm/psr.h>
38#include <asm/smp.h>
39#include <asm/vaddrs.h>
40#include <asm/timer.h>
41#include <asm/openprom.h>
42#include <asm/oplib.h>
43#include <asm/traps.h>
44#include <asm/irq.h>
45#include <asm/io.h>
46#include <asm/pgalloc.h>
47#include <asm/pgtable.h>
48#include <asm/pcic.h>
49#include <asm/cacheflush.h>
50
51#ifdef CONFIG_SMP
52#define SMP_NOP2 "nop; nop;\n\t"
53#define SMP_NOP3 "nop; nop; nop;\n\t"
54#else
55#define SMP_NOP2
56#define SMP_NOP3
57#endif /* SMP */
58unsigned long __local_irq_save(void)
59{
60 unsigned long retval;
61 unsigned long tmp;
62
63 __asm__ __volatile__(
64 "rd %%psr, %0\n\t"
65 SMP_NOP3 /* Sun4m + Cypress + SMP bug */
66 "or %0, %2, %1\n\t"
67 "wr %1, 0, %%psr\n\t"
68 "nop; nop; nop\n"
69 : "=&r" (retval), "=r" (tmp)
70 : "i" (PSR_PIL)
71 : "memory");
72
73 return retval;
74}
75
76void local_irq_enable(void)
77{
78 unsigned long tmp;
79
80 __asm__ __volatile__(
81 "rd %%psr, %0\n\t"
82 SMP_NOP3 /* Sun4m + Cypress + SMP bug */
83 "andn %0, %1, %0\n\t"
84 "wr %0, 0, %%psr\n\t"
85 "nop; nop; nop\n"
86 : "=&r" (tmp)
87 : "i" (PSR_PIL)
88 : "memory");
89}
90
91void local_irq_restore(unsigned long old_psr)
92{
93 unsigned long tmp;
94
95 __asm__ __volatile__(
96 "rd %%psr, %0\n\t"
97 "and %2, %1, %2\n\t"
98 SMP_NOP2 /* Sun4m + Cypress + SMP bug */
99 "andn %0, %1, %0\n\t"
100 "wr %0, %2, %%psr\n\t"
101 "nop; nop; nop\n"
102 : "=&r" (tmp)
103 : "i" (PSR_PIL), "r" (old_psr)
104 : "memory");
105}
106
107EXPORT_SYMBOL(__local_irq_save);
108EXPORT_SYMBOL(local_irq_enable);
109EXPORT_SYMBOL(local_irq_restore);
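/*
 * Illustrative sketch (not part of the original file): the usual pattern
 * built directly on the two helpers exported just above.  The body of the
 * critical section is hypothetical.
 */
static void example_critical_section(void)
{
	unsigned long flags;

	flags = __local_irq_save();	/* raise PIL, remember the old %psr */
	/* ... touch per-cpu state that must not be interrupted ... */
	local_irq_restore(flags);	/* put the saved PIL bits back */
}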
110
111/*
112 * Dave Redman (djhr@tadpole.co.uk)
113 *
114 * IRQ numbers.. These are no longer restricted to 15..
115 *
116 * This is done to enable SBUS cards and onboard IO to be masked
117 * correctly; using the interrupt level isn't good enough.
118 *
119 * For example:
120 * A device interrupting at sbus level6 and the Floppy both come in
121 * at IRQ11, but enabling and disabling them requires writing to
122 * different bits in the SLAVIO/SEC.
123 *
124 * As a result of these changes sun4m machines could now support
125 * directed CPU interrupts using the existing enable/disable irq code
126 * with tweaks.
127 *
128 */
129
130static void irq_panic(void)
131{
132 extern char *cputypval;
133 prom_printf("machine: %s doesn't have irq handlers defined!\n",cputypval);
134 prom_halt();
135}
136
137void (*sparc_init_timers)(irqreturn_t (*)(int, void *,struct pt_regs *)) =
138 (void (*)(irqreturn_t (*)(int, void *,struct pt_regs *))) irq_panic;
139
140/*
141 * Dave Redman (djhr@tadpole.co.uk)
142 *
143 * There used to be extern calls and hard coded values here.. very sucky!
144 * Instead, because some of the devices attach very early, I do something
145 * equally sucky but at least we'll never try to free statically allocated
146 * space or call kmalloc before kmalloc_init :(.
147 *
148 * In fact it's the timer10 that attaches first.. then timer14
149 * then kmalloc_init is called.. then the tty interrupts attach.
150 * hmmm....
151 *
152 */
153#define MAX_STATIC_ALLOC 4
154struct irqaction static_irqaction[MAX_STATIC_ALLOC];
155int static_irq_count;
156
157struct irqaction *irq_action[NR_IRQS] = {
158 [0 ... (NR_IRQS-1)] = NULL
159};
160
161/* Used to protect the IRQ action lists */
162DEFINE_SPINLOCK(irq_action_lock);
163
164int show_interrupts(struct seq_file *p, void *v)
165{
166 int i = *(loff_t *) v;
167 struct irqaction * action;
168 unsigned long flags;
169#ifdef CONFIG_SMP
170 int j;
171#endif
172
173 if (sparc_cpu_model == sun4d) {
174 extern int show_sun4d_interrupts(struct seq_file *, void *);
175
176 return show_sun4d_interrupts(p, v);
177 }
178 spin_lock_irqsave(&irq_action_lock, flags);
179 if (i < NR_IRQS) {
180 action = *(i + irq_action);
181 if (!action)
182 goto out_unlock;
183 seq_printf(p, "%3d: ", i);
184#ifndef CONFIG_SMP
185 seq_printf(p, "%10u ", kstat_irqs(i));
186#else
187 for (j = 0; j < NR_CPUS; j++) {
188 if (cpu_online(j))
189 seq_printf(p, "%10u ",
190 kstat_cpu(cpu_logical_map(j)).irqs[i]);
191 }
192#endif
193 seq_printf(p, " %c %s",
194 (action->flags & SA_INTERRUPT) ? '+' : ' ',
195 action->name);
196 for (action=action->next; action; action = action->next) {
197 seq_printf(p, ",%s %s",
198 (action->flags & SA_INTERRUPT) ? " +" : "",
199 action->name);
200 }
201 seq_putc(p, '\n');
202 }
203out_unlock:
204 spin_unlock_irqrestore(&irq_action_lock, flags);
205 return 0;
206}
207
208void free_irq(unsigned int irq, void *dev_id)
209{
210 struct irqaction * action;
211 struct irqaction * tmp = NULL;
212 unsigned long flags;
213 unsigned int cpu_irq;
214
215 if (sparc_cpu_model == sun4d) {
216 extern void sun4d_free_irq(unsigned int, void *);
217
218 sun4d_free_irq(irq, dev_id);
219 return;
220 }
221 cpu_irq = irq & (NR_IRQS - 1);
222 if (cpu_irq > 14) { /* 14 irq levels on the sparc */
223 printk("Trying to free bogus IRQ %d\n", irq);
224 return;
225 }
226
227 spin_lock_irqsave(&irq_action_lock, flags);
228
229 action = *(cpu_irq + irq_action);
230
231 if (!action->handler) {
232 printk("Trying to free free IRQ%d\n",irq);
233 goto out_unlock;
234 }
235 if (dev_id) {
236 for (; action; action = action->next) {
237 if (action->dev_id == dev_id)
238 break;
239 tmp = action;
240 }
241 if (!action) {
242 printk("Trying to free free shared IRQ%d\n",irq);
243 goto out_unlock;
244 }
245 } else if (action->flags & SA_SHIRQ) {
246 printk("Trying to free shared IRQ%d with NULL device ID\n", irq);
247 goto out_unlock;
248 }
249 if (action->flags & SA_STATIC_ALLOC)
250 {
251 /* This interrupt is marked as specially allocated
252 * so it is a bad idea to free it.
253 */
254 printk("Attempt to free statically allocated IRQ%d (%s)\n",
255 irq, action->name);
256 goto out_unlock;
257 }
258
259 if (action && tmp)
260 tmp->next = action->next;
261 else
262 *(cpu_irq + irq_action) = action->next;
263
264 spin_unlock_irqrestore(&irq_action_lock, flags);
265
266 synchronize_irq(irq);
267
268 spin_lock_irqsave(&irq_action_lock, flags);
269
270 kfree(action);
271
272 if (!(*(cpu_irq + irq_action)))
273 disable_irq(irq);
274
275out_unlock:
276 spin_unlock_irqrestore(&irq_action_lock, flags);
277}
278
279EXPORT_SYMBOL(free_irq);
280
281/*
282 * This is called when we want to synchronize with
283 * interrupts. We may for example tell a device to
284 * stop sending interrupts: but to make sure there
285 * are no interrupts that are executing on another
286 * CPU we need to call this function.
287 */
288#ifdef CONFIG_SMP
289void synchronize_irq(unsigned int irq)
290{
291 printk("synchronize_irq says: implement me!\n");
292 BUG();
293}
294#endif /* SMP */
295
296void unexpected_irq(int irq, void *dev_id, struct pt_regs * regs)
297{
298 int i;
299 struct irqaction * action;
300 unsigned int cpu_irq;
301
302 cpu_irq = irq & (NR_IRQS - 1);
303 action = *(cpu_irq + irq_action);
304
305 printk("IO device interrupt, irq = %d\n", irq);
306 printk("PC = %08lx NPC = %08lx FP=%08lx\n", regs->pc,
307 regs->npc, regs->u_regs[14]);
308 if (action) {
309 printk("Expecting: ");
310 for (i = 0; i < 16; i++)
311 if (action->handler)
312 printk("[%s:%d:0x%x] ", action->name,
313 (int) i, (unsigned int) action->handler);
314 }
315 printk("AIEEE\n");
316 panic("bogus interrupt received");
317}
318
319void handler_irq(int irq, struct pt_regs * regs)
320{
321 struct irqaction * action;
322 int cpu = smp_processor_id();
323#ifdef CONFIG_SMP
324 extern void smp4m_irq_rotate(int cpu);
325#endif
326
327 irq_enter();
328 disable_pil_irq(irq);
329#ifdef CONFIG_SMP
330 /* Only rotate on lower priority IRQ's (scsi, ethernet, etc.). */
331 if(irq < 10)
332 smp4m_irq_rotate(cpu);
333#endif
334 action = *(irq + irq_action);
335 kstat_cpu(cpu).irqs[irq]++;
336 do {
337 if (!action || !action->handler)
338 unexpected_irq(irq, NULL, regs);
339 action->handler(irq, action->dev_id, regs);
340 action = action->next;
341 } while (action);
342 enable_pil_irq(irq);
343 irq_exit();
344}
345
346#ifdef CONFIG_BLK_DEV_FD
347extern void floppy_interrupt(int irq, void *dev_id, struct pt_regs *regs);
348
349void sparc_floppy_irq(int irq, void *dev_id, struct pt_regs *regs)
350{
351 int cpu = smp_processor_id();
352
353 disable_pil_irq(irq);
354 irq_enter();
355 kstat_cpu(cpu).irqs[irq]++;
356 floppy_interrupt(irq, dev_id, regs);
357 irq_exit();
358 enable_pil_irq(irq);
359 // XXX Eek, it's totally changed with preempt_count() and such
360 // if (softirq_pending(cpu))
361 // do_softirq();
362}
363#endif
364
365/* Fast IRQ's on the Sparc can only have one routine attached to them,
366 * thus no sharing is possible.
367 */
368int request_fast_irq(unsigned int irq,
369 irqreturn_t (*handler)(int, void *, struct pt_regs *),
370 unsigned long irqflags, const char *devname)
371{
372 struct irqaction *action;
373 unsigned long flags;
374 unsigned int cpu_irq;
375 int ret;
376#ifdef CONFIG_SMP
377 struct tt_entry *trap_table;
378 extern struct tt_entry trapbase_cpu1, trapbase_cpu2, trapbase_cpu3;
379#endif
380
381 cpu_irq = irq & (NR_IRQS - 1);
382 if(cpu_irq > 14) {
383 ret = -EINVAL;
384 goto out;
385 }
386 if(!handler) {
387 ret = -EINVAL;
388 goto out;
389 }
390
391 spin_lock_irqsave(&irq_action_lock, flags);
392
393 action = *(cpu_irq + irq_action);
394 if(action) {
395 if(action->flags & SA_SHIRQ)
396 panic("Trying to register fast irq when already shared.\n");
397 if(irqflags & SA_SHIRQ)
398 panic("Trying to register fast irq as shared.\n");
399
400 /* Anyway, someone already owns it so cannot be made fast. */
401 printk("request_fast_irq: Trying to register yet already owned.\n");
402 ret = -EBUSY;
403 goto out_unlock;
404 }
405
406 /* If this is flagged as statically allocated then we use our
407 * private struct which is never freed.
408 */
409 if (irqflags & SA_STATIC_ALLOC) {
410 if (static_irq_count < MAX_STATIC_ALLOC)
411 action = &static_irqaction[static_irq_count++];
412 else
413 printk("Fast IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n",
414 irq, devname);
415 }
416
417 if (action == NULL)
418 action = (struct irqaction *)kmalloc(sizeof(struct irqaction),
419 GFP_ATOMIC);
420
421 if (!action) {
422 ret = -ENOMEM;
423 goto out_unlock;
424 }
425
426 /* Dork with trap table if we get this far. */
427#define INSTANTIATE(table) \
428 table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_one = SPARC_RD_PSR_L0; \
429 table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_two = \
430 SPARC_BRANCH((unsigned long) handler, \
431 (unsigned long) &table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_two);\
432 table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_three = SPARC_RD_WIM_L3; \
433 table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_four = SPARC_NOP;
434
435 INSTANTIATE(sparc_ttable)
436#ifdef CONFIG_SMP
437 trap_table = &trapbase_cpu1; INSTANTIATE(trap_table)
438 trap_table = &trapbase_cpu2; INSTANTIATE(trap_table)
439 trap_table = &trapbase_cpu3; INSTANTIATE(trap_table)
440#endif
441#undef INSTANTIATE
442 /*
443 * XXX The correct thing would be to flush only the I- and D-cache lines
444 * which contain the handler in question. But as of the time of
445 * writing we have no CPU-neutral interface for fine-grained flushes.
446 */
447 flush_cache_all();
448
449 action->handler = handler;
450 action->flags = irqflags;
451 cpus_clear(action->mask);
452 action->name = devname;
453 action->dev_id = NULL;
454 action->next = NULL;
455
456 *(cpu_irq + irq_action) = action;
457
458 enable_irq(irq);
459
460 ret = 0;
461out_unlock:
462 spin_unlock_irqrestore(&irq_action_lock, flags);
463out:
464 return ret;
465}
466
467int request_irq(unsigned int irq,
468 irqreturn_t (*handler)(int, void *, struct pt_regs *),
469 unsigned long irqflags, const char * devname, void *dev_id)
470{
471 struct irqaction * action, *tmp = NULL;
472 unsigned long flags;
473 unsigned int cpu_irq;
474 int ret;
475
476 if (sparc_cpu_model == sun4d) {
477 extern int sun4d_request_irq(unsigned int,
478 irqreturn_t (*)(int, void *, struct pt_regs *),
479 unsigned long, const char *, void *);
480 return sun4d_request_irq(irq, handler, irqflags, devname, dev_id);
481 }
482 cpu_irq = irq & (NR_IRQS - 1);
483 if(cpu_irq > 14) {
484 ret = -EINVAL;
485 goto out;
486 }
487 if (!handler) {
488 ret = -EINVAL;
489 goto out;
490 }
491
492 spin_lock_irqsave(&irq_action_lock, flags);
493
494 action = *(cpu_irq + irq_action);
495 if (action) {
496 if ((action->flags & SA_SHIRQ) && (irqflags & SA_SHIRQ)) {
497 for (tmp = action; tmp->next; tmp = tmp->next);
498 } else {
499 ret = -EBUSY;
500 goto out_unlock;
501 }
502 if ((action->flags & SA_INTERRUPT) ^ (irqflags & SA_INTERRUPT)) {
503 printk("Attempt to mix fast and slow interrupts on IRQ%d denied\n", irq);
504 ret = -EBUSY;
505 goto out_unlock;
506 }
507 action = NULL; /* Or else! */
508 }
509
510 /* If this is flagged as statically allocated then we use our
511 * private struct which is never freed.
512 */
513 if (irqflags & SA_STATIC_ALLOC) {
514 if (static_irq_count < MAX_STATIC_ALLOC)
515 action = &static_irqaction[static_irq_count++];
516 else
517 printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n", irq, devname);
518 }
519
520 if (action == NULL)
521 action = (struct irqaction *)kmalloc(sizeof(struct irqaction),
522 GFP_ATOMIC);
523
524 if (!action) {
525 ret = -ENOMEM;
526 goto out_unlock;
527 }
528
529 action->handler = handler;
530 action->flags = irqflags;
531 cpus_clear(action->mask);
532 action->name = devname;
533 action->next = NULL;
534 action->dev_id = dev_id;
535
536 if (tmp)
537 tmp->next = action;
538 else
539 *(cpu_irq + irq_action) = action;
540
541 enable_irq(irq);
542
543 ret = 0;
544out_unlock:
545 spin_unlock_irqrestore(&irq_action_lock, flags);
546out:
547 return ret;
548}
549
550EXPORT_SYMBOL(request_irq);
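/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * caller of the request_irq()/free_irq() pair defined in this file, using
 * the handler signature and the SA_SHIRQ flag that appear above.  All
 * names and the cookie are made up.
 */
static irqreturn_t example_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	/* ... acknowledge the device here ... */
	return IRQ_HANDLED;
}

static int example_attach(unsigned int irq, void *cookie)
{
	int err = request_irq(irq, example_intr, SA_SHIRQ, "example", cookie);
	if (err)
		return err;	/* -EBUSY, -EINVAL or -ENOMEM, as above */

	/* ... use the device; on detach release with the same cookie ... */
	free_irq(irq, cookie);
	return 0;
}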
551
552/* We really don't need these at all on the Sparc. We only have
553 * stubs here because they are exported to modules.
554 */
555unsigned long probe_irq_on(void)
556{
557 return 0;
558}
559
560EXPORT_SYMBOL(probe_irq_on);
561
562int probe_irq_off(unsigned long mask)
563{
564 return 0;
565}
566
567EXPORT_SYMBOL(probe_irq_off);
568
569/* djhr
570 * This could probably be made indirect too and assigned in the CPU
571 * bits of the code. That would be much nicer I think and would also
572 * fit in with the idea of being able to tune your kernel for your machine
573 * by removing unrequired machine and device support.
574 *
575 */
576
577void __init init_IRQ(void)
578{
579 extern void sun4c_init_IRQ( void );
580 extern void sun4m_init_IRQ( void );
581 extern void sun4d_init_IRQ( void );
582
583 switch(sparc_cpu_model) {
584 case sun4c:
585 case sun4:
586 sun4c_init_IRQ();
587 break;
588
589 case sun4m:
590#ifdef CONFIG_PCI
591 pcic_probe();
592 if (pcic_present()) {
593 sun4m_pci_init_IRQ();
594 break;
595 }
596#endif
597 sun4m_init_IRQ();
598 break;
599
600 case sun4d:
601 sun4d_init_IRQ();
602 break;
603
604 default:
605 prom_printf("Cannot initialize IRQ's on this Sun machine...");
606 break;
607 }
608 btfixup();
609}
610
611void init_irq_proc(void)
612{
613 /* For now, nothing... */
614}
diff --git a/arch/sparc/kernel/module.c b/arch/sparc/kernel/module.c
new file mode 100644
index 000000000000..7931d6f92819
--- /dev/null
+++ b/arch/sparc/kernel/module.c
@@ -0,0 +1,159 @@
1/* Kernel module help for sparc32.
2 *
3 * Copyright (C) 2001 Rusty Russell.
4 * Copyright (C) 2002 David S. Miller.
5 */
6
7#include <linux/moduleloader.h>
8#include <linux/kernel.h>
9#include <linux/elf.h>
10#include <linux/vmalloc.h>
11#include <linux/fs.h>
12#include <linux/string.h>
13
14void *module_alloc(unsigned long size)
15{
16 void *ret;
17
18 /* We handle the zero case fine, unlike vmalloc */
19 if (size == 0)
20 return NULL;
21
22 ret = vmalloc(size);
23 if (!ret)
24 ret = ERR_PTR(-ENOMEM);
25 else
26 memset(ret, 0, size);
27
28 return ret;
29}
30
31/* Free memory returned from module_core_alloc/module_init_alloc */
32void module_free(struct module *mod, void *module_region)
33{
34 vfree(module_region);
35 /* FIXME: If module_region == mod->init_region, trim exception
36 table entries. */
37}
38
39/* Make generic code ignore STT_REGISTER dummy undefined symbols,
40 * and replace references to .func with func as in ppc64's dedotify.
41 */
42int module_frob_arch_sections(Elf_Ehdr *hdr,
43 Elf_Shdr *sechdrs,
44 char *secstrings,
45 struct module *mod)
46{
47 unsigned int symidx;
48 Elf32_Sym *sym;
49 char *strtab;
50 int i;
51
52 for (symidx = 0; sechdrs[symidx].sh_type != SHT_SYMTAB; symidx++) {
53 if (symidx == hdr->e_shnum-1) {
54 printk("%s: no symtab found.\n", mod->name);
55 return -ENOEXEC;
56 }
57 }
58 sym = (Elf32_Sym *)sechdrs[symidx].sh_addr;
59 strtab = (char *)sechdrs[sechdrs[symidx].sh_link].sh_addr;
60
61 for (i = 1; i < sechdrs[symidx].sh_size / sizeof(Elf_Sym); i++) {
62 if (sym[i].st_shndx == SHN_UNDEF) {
63 if (ELF32_ST_TYPE(sym[i].st_info) == STT_REGISTER)
64 sym[i].st_shndx = SHN_ABS;
65 else {
66 char *name = strtab + sym[i].st_name;
67 if (name[0] == '.')
68 memmove(name, name+1, strlen(name));
69 }
70 }
71 }
72 return 0;
73}
74
75int apply_relocate(Elf32_Shdr *sechdrs,
76 const char *strtab,
77 unsigned int symindex,
78 unsigned int relsec,
79 struct module *me)
80{
81 printk(KERN_ERR "module %s: non-ADD RELOCATION unsupported\n",
82 me->name);
83 return -ENOEXEC;
84}
85
86int apply_relocate_add(Elf32_Shdr *sechdrs,
87 const char *strtab,
88 unsigned int symindex,
89 unsigned int relsec,
90 struct module *me)
91{
92 unsigned int i;
93 Elf32_Rela *rel = (void *)sechdrs[relsec].sh_addr;
94 Elf32_Sym *sym;
95 u8 *location;
96 u32 *loc32;
97
98 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
99 Elf32_Addr v;
100
101 /* This is where to make the change */
102 location = (u8 *)sechdrs[sechdrs[relsec].sh_info].sh_addr
103 + rel[i].r_offset;
104 loc32 = (u32 *) location;
105 /* This is the symbol it is referring to. Note that all
106 undefined symbols have been resolved. */
107 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
108 + ELF32_R_SYM(rel[i].r_info);
109 v = sym->st_value + rel[i].r_addend;
110
111 switch (ELF32_R_TYPE(rel[i].r_info)) {
112 case R_SPARC_32:
113 location[0] = v >> 24;
114 location[1] = v >> 16;
115 location[2] = v >> 8;
116 location[3] = v >> 0;
117 break;
118
119 case R_SPARC_WDISP30:
120 v -= (Elf32_Addr) location;
121 *loc32 = (*loc32 & ~0x3fffffff) |
122 ((v >> 2) & 0x3fffffff);
123 break;
124
125 case R_SPARC_WDISP22:
126 v -= (Elf32_Addr) location;
127 *loc32 = (*loc32 & ~0x3fffff) |
128 ((v >> 2) & 0x3fffff);
129 break;
130
131 case R_SPARC_LO10:
132 *loc32 = (*loc32 & ~0x3ff) | (v & 0x3ff);
133 break;
134
135 case R_SPARC_HI22:
136 *loc32 = (*loc32 & ~0x3fffff) |
137 ((v >> 10) & 0x3fffff);
138 break;
139
140 default:
141 printk(KERN_ERR "module %s: Unknown relocation: %x\n",
142 me->name,
143 (int) (ELF32_R_TYPE(rel[i].r_info) & 0xff));
144 return -ENOEXEC;
145 };
146 }
147 return 0;
148}
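/*
 * Illustrative sketch (not part of the original file): the R_SPARC_HI22 /
 * R_SPARC_LO10 pair handled above is the usual sethi/or split of a 32-bit
 * value -- the upper 22 bits go into the sethi immediate, the low 10 bits
 * into the or immediate.  A tiny user-space check of that arithmetic:
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t v = 0x12345678;
	uint32_t hi22 = (v >> 10) & 0x3fffff;	/* what R_SPARC_HI22 patches in */
	uint32_t lo10 = v & 0x3ff;		/* what R_SPARC_LO10 patches in */

	/* sethi puts hi22 into bits 31..10; the following or adds lo10 */
	assert(((hi22 << 10) | lo10) == v);
	return 0;
}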
149
150int module_finalize(const Elf_Ehdr *hdr,
151 const Elf_Shdr *sechdrs,
152 struct module *me)
153{
154 return 0;
155}
156
157void module_arch_cleanup(struct module *mod)
158{
159}
diff --git a/arch/sparc/kernel/muldiv.c b/arch/sparc/kernel/muldiv.c
new file mode 100644
index 000000000000..37b9a4942232
--- /dev/null
+++ b/arch/sparc/kernel/muldiv.c
@@ -0,0 +1,240 @@
1/* $Id: muldiv.c,v 1.5 1997/12/15 20:07:20 ecd Exp $
2 * muldiv.c: Hardware multiply/division illegal instruction trap
3 * for sun4c/sun4 (which do not have those instructions)
4 *
5 * Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
6 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
7 *
8 * 2004-12-25 Krzysztof Helt (krzysztof.h1@wp.pl)
9 * - fixed register constraints in inline assembly declarations
10 */
11
12#include <linux/kernel.h>
13#include <linux/sched.h>
14#include <linux/mm.h>
15#include <asm/ptrace.h>
16#include <asm/processor.h>
17#include <asm/system.h>
18#include <asm/uaccess.h>
19
20/* #define DEBUG_MULDIV */
21
22static inline int has_imm13(int insn)
23{
24 return (insn & 0x2000);
25}
26
27static inline int is_foocc(int insn)
28{
29 return (insn & 0x800000);
30}
31
32static inline int sign_extend_imm13(int imm)
33{
34 return imm << 19 >> 19;
35}
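/*
 * Illustrative sketch (not part of the original file): the shift pair
 * above sign-extends the 13-bit immediate field of a SPARC arithmetic
 * instruction -- the left shift moves bit 12 into the sign bit and the
 * arithmetic right shift replicates it.  A stand-alone check, assuming
 * two's-complement int and arithmetic right shift, as the kernel does:
 */
#include <assert.h>

static int sign_extend_13(int imm)
{
	/* same idea as above, via unsigned to keep the left shift defined */
	return (int)((unsigned int)imm << 19) >> 19;
}

int main(void)
{
	assert(sign_extend_13(0x0aaa) == 0x0aaa);	/* bit 12 clear: unchanged */
	assert(sign_extend_13(0x1fff) == -1);		/* 13 one-bits */
	assert(sign_extend_13(0x1000) == -4096);	/* bit 12 set: negative */
	return 0;
}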
36
37static inline void advance(struct pt_regs *regs)
38{
39 regs->pc = regs->npc;
40 regs->npc += 4;
41}
42
43static inline void maybe_flush_windows(unsigned int rs1, unsigned int rs2,
44 unsigned int rd)
45{
46 if(rs2 >= 16 || rs1 >= 16 || rd >= 16) {
47 /* Wheee... */
48 __asm__ __volatile__("save %sp, -0x40, %sp\n\t"
49 "save %sp, -0x40, %sp\n\t"
50 "save %sp, -0x40, %sp\n\t"
51 "save %sp, -0x40, %sp\n\t"
52 "save %sp, -0x40, %sp\n\t"
53 "save %sp, -0x40, %sp\n\t"
54 "save %sp, -0x40, %sp\n\t"
55 "restore; restore; restore; restore;\n\t"
56 "restore; restore; restore;\n\t");
57 }
58}
59
60#define fetch_reg(reg, regs) ({ \
61 struct reg_window __user *win; \
62 register unsigned long ret; \
63 \
64 if (!(reg)) ret = 0; \
65 else if ((reg) < 16) { \
66 ret = regs->u_regs[(reg)]; \
67 } else { \
68 /* Ho hum, the slightly complicated case. */ \
69 win = (struct reg_window __user *)regs->u_regs[UREG_FP];\
70 if (get_user (ret, &win->locals[(reg) - 16])) return -1;\
71 } \
72 ret; \
73})
74
75static inline int
76store_reg(unsigned int result, unsigned int reg, struct pt_regs *regs)
77{
78 struct reg_window __user *win;
79
80 if (!reg)
81 return 0;
82 if (reg < 16) {
83 regs->u_regs[reg] = result;
84 return 0;
85 } else {
86 /* need to use put_user() in this case: */
87 win = (struct reg_window __user *) regs->u_regs[UREG_FP];
88 return (put_user(result, &win->locals[reg - 16]));
89 }
90}
91
92extern void handle_hw_divzero (struct pt_regs *regs, unsigned long pc,
93 unsigned long npc, unsigned long psr);
94
95/* Should return 0 if mul/div emulation succeeded and SIGILL should
96 * not be issued.
97 */
98int do_user_muldiv(struct pt_regs *regs, unsigned long pc)
99{
100 unsigned int insn;
101 int inst;
102 unsigned int rs1, rs2, rdv;
103
104 if (!pc)
105	return -1; /* This happens too often, I think */
106 if (get_user (insn, (unsigned int __user *)pc))
107 return -1;
108 if ((insn & 0xc1400000) != 0x80400000)
109 return -1;
110 inst = ((insn >> 19) & 0xf);
111 if ((inst & 0xe) != 10 && (inst & 0xe) != 14)
112 return -1;
113
114 /* Now we know we have to do something with umul, smul, udiv or sdiv */
115 rs1 = (insn >> 14) & 0x1f;
116 rs2 = insn & 0x1f;
117 rdv = (insn >> 25) & 0x1f;
118 if (has_imm13(insn)) {
119 maybe_flush_windows(rs1, 0, rdv);
120 rs2 = sign_extend_imm13(insn);
121 } else {
122 maybe_flush_windows(rs1, rs2, rdv);
123 rs2 = fetch_reg(rs2, regs);
124 }
125 rs1 = fetch_reg(rs1, regs);
126 switch (inst) {
127 case 10: /* umul */
128#ifdef DEBUG_MULDIV
129 printk ("unsigned muldiv: 0x%x * 0x%x = ", rs1, rs2);
130#endif
131 __asm__ __volatile__ ("\n\t"
132 "mov %0, %%o0\n\t"
133 "call .umul\n\t"
134 " mov %1, %%o1\n\t"
135 "mov %%o0, %0\n\t"
136 "mov %%o1, %1\n\t"
137 : "=r" (rs1), "=r" (rs2)
138 : "0" (rs1), "1" (rs2)
139 : "o0", "o1", "o2", "o3", "o4", "o5", "o7", "cc");
140#ifdef DEBUG_MULDIV
141 printk ("0x%x%08x\n", rs2, rs1);
142#endif
143 if (store_reg(rs1, rdv, regs))
144 return -1;
145 regs->y = rs2;
146 break;
147 case 11: /* smul */
148#ifdef DEBUG_MULDIV
149 printk ("signed muldiv: 0x%x * 0x%x = ", rs1, rs2);
150#endif
151 __asm__ __volatile__ ("\n\t"
152 "mov %0, %%o0\n\t"
153 "call .mul\n\t"
154 " mov %1, %%o1\n\t"
155 "mov %%o0, %0\n\t"
156 "mov %%o1, %1\n\t"
157 : "=r" (rs1), "=r" (rs2)
158 : "0" (rs1), "1" (rs2)
159 : "o0", "o1", "o2", "o3", "o4", "o5", "o7", "cc");
160#ifdef DEBUG_MULDIV
161 printk ("0x%x%08x\n", rs2, rs1);
162#endif
163 if (store_reg(rs1, rdv, regs))
164 return -1;
165 regs->y = rs2;
166 break;
167 case 14: /* udiv */
168#ifdef DEBUG_MULDIV
169 printk ("unsigned muldiv: 0x%x%08x / 0x%x = ", regs->y, rs1, rs2);
170#endif
171 if (!rs2) {
172#ifdef DEBUG_MULDIV
173 printk ("DIVISION BY ZERO\n");
174#endif
175 handle_hw_divzero (regs, pc, regs->npc, regs->psr);
176 return 0;
177 }
178 __asm__ __volatile__ ("\n\t"
179 "mov %2, %%o0\n\t"
180 "mov %0, %%o1\n\t"
181 "mov %%g0, %%o2\n\t"
182 "call __udivdi3\n\t"
183 " mov %1, %%o3\n\t"
184 "mov %%o1, %0\n\t"
185 "mov %%o0, %1\n\t"
186 : "=r" (rs1), "=r" (rs2)
187 : "r" (regs->y), "0" (rs1), "1" (rs2)
188 : "o0", "o1", "o2", "o3", "o4", "o5", "o7",
189 "g1", "g2", "g3", "cc");
190#ifdef DEBUG_MULDIV
191 printk ("0x%x\n", rs1);
192#endif
193 if (store_reg(rs1, rdv, regs))
194 return -1;
195 break;
196 case 15: /* sdiv */
197#ifdef DEBUG_MULDIV
198 printk ("signed muldiv: 0x%x%08x / 0x%x = ", regs->y, rs1, rs2);
199#endif
200 if (!rs2) {
201#ifdef DEBUG_MULDIV
202 printk ("DIVISION BY ZERO\n");
203#endif
204 handle_hw_divzero (regs, pc, regs->npc, regs->psr);
205 return 0;
206 }
207 __asm__ __volatile__ ("\n\t"
208 "mov %2, %%o0\n\t"
209 "mov %0, %%o1\n\t"
210 "mov %%g0, %%o2\n\t"
211 "call __divdi3\n\t"
212 " mov %1, %%o3\n\t"
213 "mov %%o1, %0\n\t"
214 "mov %%o0, %1\n\t"
215 : "=r" (rs1), "=r" (rs2)
216 : "r" (regs->y), "0" (rs1), "1" (rs2)
217 : "o0", "o1", "o2", "o3", "o4", "o5", "o7",
218 "g1", "g2", "g3", "cc");
219#ifdef DEBUG_MULDIV
220 printk ("0x%x\n", rs1);
221#endif
222 if (store_reg(rs1, rdv, regs))
223 return -1;
224 break;
225 }
226 if (is_foocc (insn)) {
227 regs->psr &= ~PSR_ICC;
228 if ((inst & 0xe) == 14) {
229 /* ?div */
230 if (rs2) regs->psr |= PSR_V;
231 }
232 if (!rs1) regs->psr |= PSR_Z;
233 if (((int)rs1) < 0) regs->psr |= PSR_N;
234#ifdef DEBUG_MULDIV
235 printk ("psr muldiv: %08x\n", regs->psr);
236#endif
237 }
238 advance(regs);
239 return 0;
240}
diff --git a/arch/sparc/kernel/pcic.c b/arch/sparc/kernel/pcic.c
new file mode 100644
index 000000000000..597d3ff6ad68
--- /dev/null
+++ b/arch/sparc/kernel/pcic.c
@@ -0,0 +1,1041 @@
1/*
2 * pcic.c: MicroSPARC-IIep PCI controller support
3 *
4 * Copyright (C) 1998 V. Roganov and G. Raiko
5 *
6 * Code is derived from Ultra/PCI PSYCHO controller support, see that
7 * for author info.
8 *
9 * Support for diverse IIep based platforms by Pete Zaitcev.
10 * CP-1200 by Eric Brower.
11 */
12
13#include <linux/config.h>
14#include <linux/kernel.h>
15#include <linux/types.h>
16#include <linux/init.h>
17#include <linux/mm.h>
18#include <linux/slab.h>
19#include <linux/jiffies.h>
20
21#include <asm/ebus.h>
22#include <asm/sbus.h> /* for sanity check... */
23#include <asm/swift.h> /* for cache flushing. */
24#include <asm/io.h>
25
26#include <linux/ctype.h>
27#include <linux/pci.h>
28#include <linux/time.h>
29#include <linux/timex.h>
30#include <linux/interrupt.h>
31
32#include <asm/irq.h>
33#include <asm/oplib.h>
34#include <asm/pcic.h>
35#include <asm/timer.h>
36#include <asm/uaccess.h>
37
38
39unsigned int pcic_pin_to_irq(unsigned int pin, char *name);
40
41/*
42 * I studied different documents and many live PROMs both from 2.30
43 * family and 3.xx versions. I came to the amazing conclusion: there is
44 * absolutely no way to route interrupts in IIep systems relying on
45 * information which PROM presents. We must hardcode interrupt routing
46 * schematics. And this actually sucks. -- zaitcev 1999/05/12
47 *
48 * To find irq for a device we determine which routing map
49 * is in effect or, in other words, on which machine we are running.
50 * We use PROM name for this although other techniques may be used
51 * in special cases (Gleb reports a PROMless IIep based system).
52 * Once we know the map we take device configuration address and
53 * find PCIC pin number where INT line goes. Then we may either program
54 * preferred irq into the PCIC or supply the preexisting irq to the device.
55 */
56struct pcic_ca2irq {
57 unsigned char busno; /* PCI bus number */
58 unsigned char devfn; /* Configuration address */
59 unsigned char pin; /* PCIC external interrupt pin */
60 unsigned char irq; /* Preferred IRQ (mappable in PCIC) */
61 unsigned int force; /* Enforce preferred IRQ */
62};
63
64struct pcic_sn2list {
65 char *sysname;
66 struct pcic_ca2irq *intmap;
67 int mapdim;
68};
69
70/*
71 * JavaEngine-1 apparently has different versions.
72 *
73 * According to communications with Sun folks, for P2 build 501-4628-03:
74 * pin 0 - parallel, audio;
75 * pin 1 - Ethernet;
76 * pin 2 - su;
77 * pin 3 - PS/2 kbd and mouse.
78 *
79 * OEM manual (805-1486):
80 * pin 0: Ethernet
81 * pin 1: All EBus
82 * pin 2: IGA (unused)
83 * pin 3: Not connected
84 * OEM manual says that 501-4628 & 501-4811 are the same thing,
85 * only the latter has NAND flash in place.
86 *
87 * So far unofficial Sun wins over the OEM manual. Poor OEMs...
88 */
89static struct pcic_ca2irq pcic_i_je1a[] = { /* 501-4811-03 */
90 { 0, 0x00, 2, 12, 0 }, /* EBus: hogs all */
91 { 0, 0x01, 1, 6, 1 }, /* Happy Meal */
92 { 0, 0x80, 0, 7, 0 }, /* IGA (unused) */
93};
94
95/* XXX JS-E entry is incomplete - PCI Slot 2 address (pin 7)? */
96static struct pcic_ca2irq pcic_i_jse[] = {
97 { 0, 0x00, 0, 13, 0 }, /* Ebus - serial and keyboard */
98 { 0, 0x01, 1, 6, 0 }, /* hme */
99 { 0, 0x08, 2, 9, 0 }, /* VGA - we hope not used :) */
100 { 0, 0x10, 6, 8, 0 }, /* PCI INTA# in Slot 1 */
101 { 0, 0x18, 7, 12, 0 }, /* PCI INTA# in Slot 2, shared w. RTC */
102 { 0, 0x38, 4, 9, 0 }, /* All ISA devices. Read 8259. */
103 { 0, 0x80, 5, 11, 0 }, /* EIDE */
104 /* {0,0x88, 0,0,0} - unknown device... PMU? Probably no interrupt. */
105 { 0, 0xA0, 4, 9, 0 }, /* USB */
106 /*
107 * Some pins belong to non-PCI devices, we hardcode them in drivers.
108 * sun4m timers - irq 10, 14
109 * PC style RTC - pin 7, irq 4 ?
110 * Smart card, Parallel - pin 4 shared with USB, ISA
111 * audio - pin 3, irq 5 ?
112 */
113};
114
115/* SPARCengine-6 was the original release name of CP1200.
116 * The documentation differs between the two versions
117 */
118static struct pcic_ca2irq pcic_i_se6[] = {
119 { 0, 0x08, 0, 2, 0 }, /* SCSI */
120 { 0, 0x01, 1, 6, 0 }, /* HME */
121 { 0, 0x00, 3, 13, 0 }, /* EBus */
122};
123
124/*
125 * Krups (courtesy of Varol Kaptan)
126 * No documentation available, but it was easy to guess
127 * because it was very similar to Espresso.
128 *
129 * pin 0 - kbd, mouse, serial;
130 * pin 1 - Ethernet;
131 * pin 2 - igs (we do not use it);
132 * pin 3 - audio;
133 * pin 4,5,6 - unused;
134 * pin 7 - RTC (from P2 onwards as David B. says).
135 */
136static struct pcic_ca2irq pcic_i_jk[] = {
137 { 0, 0x00, 0, 13, 0 }, /* Ebus - serial and keyboard */
138 { 0, 0x01, 1, 6, 0 }, /* hme */
139};
140
141/*
142 * Several entries in this list may point to the same routing map
143 * as several PROMs may be installed on the same physical board.
144 */
145#define SN2L_INIT(name, map) \
146 { name, map, sizeof(map)/sizeof(struct pcic_ca2irq) }
147
148static struct pcic_sn2list pcic_known_sysnames[] = {
149 SN2L_INIT("SUNW,JavaEngine1", pcic_i_je1a), /* JE1, PROM 2.32 */
150 SN2L_INIT("SUNW,JS-E", pcic_i_jse), /* PROLL JavaStation-E */
151 SN2L_INIT("SUNW,SPARCengine-6", pcic_i_se6), /* SPARCengine-6/CP-1200 */
152 SN2L_INIT("SUNW,JS-NC", pcic_i_jk), /* PROLL JavaStation-NC */
153 SN2L_INIT("SUNW,JSIIep", pcic_i_jk), /* OBP JavaStation-NC */
154 { NULL, NULL, 0 }
155};
156
157/*
158 * Only one PCIC per IIep,
159 * and since we have no SMP IIep, only one per system.
160 */
161static int pcic0_up;
162static struct linux_pcic pcic0;
163
164void * __iomem pcic_regs;
165volatile int pcic_speculative;
166volatile int pcic_trapped;
167
168static void pci_do_gettimeofday(struct timeval *tv);
169static int pci_do_settimeofday(struct timespec *tv);
170
171#define CONFIG_CMD(bus, device_fn, where) (0x80000000 | (((unsigned int)bus) << 16) | (((unsigned int)device_fn) << 8) | (where & ~3))
172
173static int pcic_read_config_dword(unsigned int busno, unsigned int devfn,
174 int where, u32 *value)
175{
176 struct linux_pcic *pcic;
177 unsigned long flags;
178
179 pcic = &pcic0;
180
181 local_irq_save(flags);
182#if 0 /* does not fail here */
183 pcic_speculative = 1;
184 pcic_trapped = 0;
185#endif
186 writel(CONFIG_CMD(busno, devfn, where), pcic->pcic_config_space_addr);
187#if 0 /* does not fail here */
188 nop();
189 if (pcic_trapped) {
190 local_irq_restore(flags);
191 *value = ~0;
192 return 0;
193 }
194#endif
195 pcic_speculative = 2;
196 pcic_trapped = 0;
197 *value = readl(pcic->pcic_config_space_data + (where&4));
198 nop();
199 if (pcic_trapped) {
200 pcic_speculative = 0;
201 local_irq_restore(flags);
202 *value = ~0;
203 return 0;
204 }
205 pcic_speculative = 0;
206 local_irq_restore(flags);
207 return 0;
208}
209
210static int pcic_read_config(struct pci_bus *bus, unsigned int devfn,
211 int where, int size, u32 *val)
212{
213 unsigned int v;
214
215 if (bus->number != 0) return -EINVAL;
216 switch (size) {
217 case 1:
218 pcic_read_config_dword(bus->number, devfn, where&~3, &v);
219 *val = 0xff & (v >> (8*(where & 3)));
220 return 0;
221 case 2:
222 if (where&1) return -EINVAL;
223 pcic_read_config_dword(bus->number, devfn, where&~3, &v);
224 *val = 0xffff & (v >> (8*(where & 3)));
225 return 0;
226 case 4:
227 if (where&3) return -EINVAL;
228 pcic_read_config_dword(bus->number, devfn, where&~3, val);
229 return 0;
230 }
231 return -EINVAL;
232}
233
234static int pcic_write_config_dword(unsigned int busno, unsigned int devfn,
235 int where, u32 value)
236{
237 struct linux_pcic *pcic;
238 unsigned long flags;
239
240 pcic = &pcic0;
241
242 local_irq_save(flags);
243 writel(CONFIG_CMD(busno, devfn, where), pcic->pcic_config_space_addr);
244 writel(value, pcic->pcic_config_space_data + (where&4));
245 local_irq_restore(flags);
246 return 0;
247}
248
249static int pcic_write_config(struct pci_bus *bus, unsigned int devfn,
250 int where, int size, u32 val)
251{
252 unsigned int v;
253
254 if (bus->number != 0) return -EINVAL;
255 switch (size) {
256 case 1:
257 pcic_read_config_dword(bus->number, devfn, where&~3, &v);
258 v = (v & ~(0xff << (8*(where&3)))) |
259 ((0xff&val) << (8*(where&3)));
260 return pcic_write_config_dword(bus->number, devfn, where&~3, v);
261 case 2:
262 if (where&1) return -EINVAL;
263 pcic_read_config_dword(bus->number, devfn, where&~3, &v);
264 v = (v & ~(0xffff << (8*(where&3)))) |
265 ((0xffff&val) << (8*(where&3)));
266 return pcic_write_config_dword(bus->number, devfn, where&~3, v);
267 case 4:
268 if (where&3) return -EINVAL;
269 return pcic_write_config_dword(bus->number, devfn, where, val);
270 }
271 return -EINVAL;
272}
273
274static struct pci_ops pcic_ops = {
275 .read = pcic_read_config,
276 .write = pcic_write_config,
277};
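/*
 * Illustrative sketch (not part of the original file): CONFIG_CMD and the
 * shifts in pcic_read_config() above follow the familiar PCI config-address
 * layout -- enable bit 31, bus in bits 23..16, devfn in 15..8, the
 * dword-aligned register offset below that -- and sub-dword reads pick
 * their byte out of the 32-bit value with 8*(where & 3).  A user-space
 * check of just that arithmetic (nothing here touches hardware):
 */
#include <assert.h>
#include <stdint.h>

#define EX_CONFIG_CMD(bus, devfn, where) \
	(0x80000000u | ((uint32_t)(bus) << 16) | \
	 ((uint32_t)(devfn) << 8) | ((where) & ~3))

int main(void)
{
	uint32_t dword = 0xddccbbaa;	/* pretend readl() result */
	uint8_t byte;

	/* bus 0, devfn 0x01 (the hme entry in the maps above), register 0x0d */
	assert(EX_CONFIG_CMD(0, 0x01, 0x0d) == 0x8000010c);

	/* sub-dword access: byte 0x0d lives in the dword read at offset 0x0c */
	byte = 0xff & (dword >> (8 * (0x0d & 3)));
	assert(byte == 0xbb);
	return 0;
}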
278
279/*
280 * On sparc64 pcibios_init() calls pci_controller_probe().
282 * We want the PCIC probed a little ahead of that so that the interrupt
283 * controller is operational.
283 */
284int __init pcic_probe(void)
285{
286 struct linux_pcic *pcic;
287 struct linux_prom_registers regs[PROMREG_MAX];
288 struct linux_pbm_info* pbm;
289 char namebuf[64];
290 int node;
291 int err;
292
293 if (pcic0_up) {
294 prom_printf("PCIC: called twice!\n");
295 prom_halt();
296 }
297 pcic = &pcic0;
298
299 node = prom_getchild (prom_root_node);
300 node = prom_searchsiblings (node, "pci");
301 if (node == 0)
302 return -ENODEV;
303 /*
304 * Map in PCIC register set, config space, and IO base
305 */
306 err = prom_getproperty(node, "reg", (char*)regs, sizeof(regs));
307 if (err == 0 || err == -1) {
308 prom_printf("PCIC: Error, cannot get PCIC registers "
309 "from PROM.\n");
310 prom_halt();
311 }
312
313 pcic0_up = 1;
314
315 pcic->pcic_res_regs.name = "pcic_registers";
316 pcic->pcic_regs = ioremap(regs[0].phys_addr, regs[0].reg_size);
317 if (!pcic->pcic_regs) {
318 prom_printf("PCIC: Error, cannot map PCIC registers.\n");
319 prom_halt();
320 }
321
322 pcic->pcic_res_io.name = "pcic_io";
323 if ((pcic->pcic_io = (unsigned long)
324 ioremap(regs[1].phys_addr, 0x10000)) == 0) {
325 prom_printf("PCIC: Error, cannot map PCIC IO Base.\n");
326 prom_halt();
327 }
328
329 pcic->pcic_res_cfg_addr.name = "pcic_cfg_addr";
330 if ((pcic->pcic_config_space_addr =
331 ioremap(regs[2].phys_addr, regs[2].reg_size * 2)) == 0) {
333		prom_printf("PCIC: Error, cannot map "
333 "PCI Configuration Space Address.\n");
334 prom_halt();
335 }
336
337 /*
338 * Docs say three least significant bits in address and data
339	 * must be the same. Thus, we need to adjust the size of the data.
340 */
341 pcic->pcic_res_cfg_data.name = "pcic_cfg_data";
342 if ((pcic->pcic_config_space_data =
343 ioremap(regs[3].phys_addr, regs[3].reg_size * 2)) == 0) {
344		prom_printf("PCIC: Error, cannot map "
345 "PCI Configuration Space Data.\n");
346 prom_halt();
347 }
348
349 pbm = &pcic->pbm;
350 pbm->prom_node = node;
351 prom_getstring(node, "name", namebuf, 63); namebuf[63] = 0;
352 strcpy(pbm->prom_name, namebuf);
353
354 {
355 extern volatile int t_nmi[1];
356 extern int pcic_nmi_trap_patch[1];
357
358 t_nmi[0] = pcic_nmi_trap_patch[0];
359 t_nmi[1] = pcic_nmi_trap_patch[1];
360 t_nmi[2] = pcic_nmi_trap_patch[2];
361 t_nmi[3] = pcic_nmi_trap_patch[3];
362 swift_flush_dcache();
363 pcic_regs = pcic->pcic_regs;
364 }
365
366 prom_getstring(prom_root_node, "name", namebuf, 63); namebuf[63] = 0;
367 {
368 struct pcic_sn2list *p;
369
370 for (p = pcic_known_sysnames; p->sysname != NULL; p++) {
371 if (strcmp(namebuf, p->sysname) == 0)
372 break;
373 }
374 pcic->pcic_imap = p->intmap;
375 pcic->pcic_imdim = p->mapdim;
376 }
377 if (pcic->pcic_imap == NULL) {
378 /*
379 * We do not panic here for the sake of embedded systems.
380 */
381 printk("PCIC: System %s is unknown, cannot route interrupts\n",
382 namebuf);
383 }
384
385 return 0;
386}
387
388static void __init pcic_pbm_scan_bus(struct linux_pcic *pcic)
389{
390 struct linux_pbm_info *pbm = &pcic->pbm;
391
392 pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno, &pcic_ops, pbm);
393#if 0 /* deadwood transplanted from sparc64 */
394 pci_fill_in_pbm_cookies(pbm->pci_bus, pbm, pbm->prom_node);
395 pci_record_assignments(pbm, pbm->pci_bus);
396 pci_assign_unassigned(pbm, pbm->pci_bus);
397 pci_fixup_irq(pbm, pbm->pci_bus);
398#endif
399}
400
401/*
402 * Main entry point from the PCI subsystem.
403 */
404static int __init pcic_init(void)
405{
406 struct linux_pcic *pcic;
407
408 /*
409 * PCIC should be initialized at start of the timer.
410 * So, here we report the presence of PCIC and do some magic passes.
411 */
412 if(!pcic0_up)
413 return 0;
414 pcic = &pcic0;
415
416 /*
417 * Switch off IOTLB translation.
418 */
419 writeb(PCI_DVMA_CONTROL_IOTLB_DISABLE,
420 pcic->pcic_regs+PCI_DVMA_CONTROL);
421
422 /*
423 * Increase mapped size for PCI memory space (DMA access).
424 * Should be done in that order (size first, address second).
425	 * Why couldn't we just set up 4GB and forget about it? XXX
426 */
427 writel(0xF0000000UL, pcic->pcic_regs+PCI_SIZE_0);
428 writel(0+PCI_BASE_ADDRESS_SPACE_MEMORY,
429 pcic->pcic_regs+PCI_BASE_ADDRESS_0);
430
431 pcic_pbm_scan_bus(pcic);
432
433 ebus_init();
434 return 0;
435}
436
437int pcic_present(void)
438{
439 return pcic0_up;
440}
441
442static int __init pdev_to_pnode(struct linux_pbm_info *pbm,
443 struct pci_dev *pdev)
444{
445 struct linux_prom_pci_registers regs[PROMREG_MAX];
446 int err;
447 int node = prom_getchild(pbm->prom_node);
448
449 while(node) {
450 err = prom_getproperty(node, "reg",
451 (char *)&regs[0], sizeof(regs));
452 if(err != 0 && err != -1) {
453 unsigned long devfn = (regs[0].which_io >> 8) & 0xff;
454 if(devfn == pdev->devfn)
455 return node;
456 }
457 node = prom_getsibling(node);
458 }
459 return 0;
460}
461
462static inline struct pcidev_cookie *pci_devcookie_alloc(void)
463{
464 return kmalloc(sizeof(struct pcidev_cookie), GFP_ATOMIC);
465}
466
467static void pcic_map_pci_device(struct linux_pcic *pcic,
468 struct pci_dev *dev, int node)
469{
470 char namebuf[64];
471 unsigned long address;
472 unsigned long flags;
473 int j;
474
475 if (node == 0 || node == -1) {
476 strcpy(namebuf, "???");
477 } else {
478 prom_getstring(node, "name", namebuf, 63); namebuf[63] = 0;
479 }
480
481 for (j = 0; j < 6; j++) {
482 address = dev->resource[j].start;
483 if (address == 0) break; /* are sequential */
484 flags = dev->resource[j].flags;
485 if ((flags & IORESOURCE_IO) != 0) {
486 if (address < 0x10000) {
487 /*
488 * A device responds to I/O cycles on PCI.
489 * We generate these cycles with memory
490 * access into the fixed map (phys 0x30000000).
491 *
492 * Since a device driver does not want to
493 * do ioremap() before accessing PC-style I/O,
494			 * we supply a virtual, ready-to-access address.
495 *
496 * Ebus devices do not come here even if
497 * CheerIO makes a similar conversion.
498 * See ebus.c for details.
499 *
500 * Note that check_region()/request_region()
501 * work for these devices.
502 *
503 * XXX Neat trick, but it's a *bad* idea
504 * to shit into regions like that.
505 * What if we want to allocate one more
506 * PCI base address...
507 */
508 dev->resource[j].start =
509 pcic->pcic_io + address;
510 dev->resource[j].end = 1; /* XXX */
511 dev->resource[j].flags =
512 (flags & ~IORESOURCE_IO) | IORESOURCE_MEM;
513 } else {
514 /*
515 * OOPS... PCI Spec allows this. Sun does
516 * not have any devices getting above 64K
517			 * so it must be a user with a weird I/O
518 * board in a PCI slot. We must remap it
519 * under 64K but it is not done yet. XXX
520 */
521				printk("PCIC: Skipping I/O space at 0x%lx, "
522				    "this will Oops if a driver attaches; "
523 "device '%s' at %02x:%02x)\n", address,
524 namebuf, dev->bus->number, dev->devfn);
525 }
526 }
527 }
528}
529
530static void
531pcic_fill_irq(struct linux_pcic *pcic, struct pci_dev *dev, int node)
532{
533 struct pcic_ca2irq *p;
534 int i, ivec;
535 char namebuf[64];
536
537 if (node == 0 || node == -1) {
538 strcpy(namebuf, "???");
539 } else {
540 prom_getstring(node, "name", namebuf, sizeof(namebuf));
541 }
542
543 if ((p = pcic->pcic_imap) == 0) {
544 dev->irq = 0;
545 return;
546 }
547 for (i = 0; i < pcic->pcic_imdim; i++) {
548 if (p->busno == dev->bus->number && p->devfn == dev->devfn)
549 break;
550 p++;
551 }
552 if (i >= pcic->pcic_imdim) {
553 printk("PCIC: device %s devfn %02x:%02x not found in %d\n",
554 namebuf, dev->bus->number, dev->devfn, pcic->pcic_imdim);
555 dev->irq = 0;
556 return;
557 }
558
559 i = p->pin;
560 if (i >= 0 && i < 4) {
561 ivec = readw(pcic->pcic_regs+PCI_INT_SELECT_LO);
562 dev->irq = ivec >> (i << 2) & 0xF;
563 } else if (i >= 4 && i < 8) {
564 ivec = readw(pcic->pcic_regs+PCI_INT_SELECT_HI);
565 dev->irq = ivec >> ((i-4) << 2) & 0xF;
566 } else { /* Corrupted map */
567 printk("PCIC: BAD PIN %d\n", i); for (;;) {}
568 }
569/* P3 */ /* printk("PCIC: device %s pin %d ivec 0x%x irq %x\n", namebuf, i, ivec, dev->irq); */
570
571 /*
572 * dev->irq=0 means PROM did not bother to program the upper
573 * half of PCIC. This happens on JS-E with PROM 3.11, for instance.
574 */
575 if (dev->irq == 0 || p->force) {
576 if (p->irq == 0 || p->irq >= 15) { /* Corrupted map */
577 printk("PCIC: BAD IRQ %d\n", p->irq); for (;;) {}
578 }
579 printk("PCIC: setting irq %d at pin %d for device %02x:%02x\n",
580 p->irq, p->pin, dev->bus->number, dev->devfn);
581 dev->irq = p->irq;
582
583 i = p->pin;
584 if (i >= 4) {
585 ivec = readw(pcic->pcic_regs+PCI_INT_SELECT_HI);
586 ivec &= ~(0xF << ((i - 4) << 2));
587 ivec |= p->irq << ((i - 4) << 2);
588 writew(ivec, pcic->pcic_regs+PCI_INT_SELECT_HI);
589 } else {
590 ivec = readw(pcic->pcic_regs+PCI_INT_SELECT_LO);
591 ivec &= ~(0xF << (i << 2));
592 ivec |= p->irq << (i << 2);
593 writew(ivec, pcic->pcic_regs+PCI_INT_SELECT_LO);
594 }
595 }
596
597 return;
598}
599
600/*
601 * Normally called from {do_}pci_scan_bus...
602 */
603void __init pcibios_fixup_bus(struct pci_bus *bus)
604{
605 struct pci_dev *dev;
606 int i, has_io, has_mem;
607 unsigned int cmd;
608 struct linux_pcic *pcic;
609 /* struct linux_pbm_info* pbm = &pcic->pbm; */
610 int node;
611 struct pcidev_cookie *pcp;
612
613 if (!pcic0_up) {
614 printk("pcibios_fixup_bus: no PCIC\n");
615 return;
616 }
617 pcic = &pcic0;
618
619 /*
620 * Next crud is an equivalent of pbm = pcic_bus_to_pbm(bus);
621 */
622 if (bus->number != 0) {
623 printk("pcibios_fixup_bus: nonzero bus 0x%x\n", bus->number);
624 return;
625 }
626
627 list_for_each_entry(dev, &bus->devices, bus_list) {
628
629 /*
630 * Comment from i386 branch:
631 * There are buggy BIOSes that forget to enable I/O and memory
632 * access to PCI devices. We try to fix this, but we need to
633 * be sure that the BIOS didn't forget to assign an address
634 * to the device. [mj]
635 * OBP is a case of such BIOS :-)
636 */
637 has_io = has_mem = 0;
638 for(i=0; i<6; i++) {
639 unsigned long f = dev->resource[i].flags;
640 if (f & IORESOURCE_IO) {
641 has_io = 1;
642 } else if (f & IORESOURCE_MEM)
643 has_mem = 1;
644 }
645 pcic_read_config(dev->bus, dev->devfn, PCI_COMMAND, 2, &cmd);
646 if (has_io && !(cmd & PCI_COMMAND_IO)) {
647 printk("PCIC: Enabling I/O for device %02x:%02x\n",
648 dev->bus->number, dev->devfn);
649 cmd |= PCI_COMMAND_IO;
650 pcic_write_config(dev->bus, dev->devfn,
651 PCI_COMMAND, 2, cmd);
652 }
653 if (has_mem && !(cmd & PCI_COMMAND_MEMORY)) {
654 printk("PCIC: Enabling memory for device %02x:%02x\n",
655 dev->bus->number, dev->devfn);
656 cmd |= PCI_COMMAND_MEMORY;
657 pcic_write_config(dev->bus, dev->devfn,
658 PCI_COMMAND, 2, cmd);
659 }
660
661 node = pdev_to_pnode(&pcic->pbm, dev);
662 if(node == 0)
663 node = -1;
664
665 /* cookies */
666 pcp = pci_devcookie_alloc();
667 pcp->pbm = &pcic->pbm;
668 pcp->prom_node = node;
669 dev->sysdata = pcp;
670
671 /* fixing I/O to look like memory */
672 if ((dev->class>>16) != PCI_BASE_CLASS_BRIDGE)
673 pcic_map_pci_device(pcic, dev, node);
674
675 pcic_fill_irq(pcic, dev, node);
676 }
677}
678
679/*
680 * pcic_pin_to_irq() is exported to ebus.c.
681 */
682unsigned int
683pcic_pin_to_irq(unsigned int pin, char *name)
684{
685 struct linux_pcic *pcic = &pcic0;
686 unsigned int irq;
687 unsigned int ivec;
688
689 if (pin < 4) {
690 ivec = readw(pcic->pcic_regs+PCI_INT_SELECT_LO);
691 irq = ivec >> (pin << 2) & 0xF;
692 } else if (pin < 8) {
693 ivec = readw(pcic->pcic_regs+PCI_INT_SELECT_HI);
694 irq = ivec >> ((pin-4) << 2) & 0xF;
695 } else { /* Corrupted map */
696 printk("PCIC: BAD PIN %d FOR %s\n", pin, name);
697 for (;;) {} /* XXX Cannot panic properly in case of PROLL */
698 }
699/* P3 */ /* printk("PCIC: dev %s pin %d ivec 0x%x irq %x\n", name, pin, ivec, irq); */
700 return irq;
701}
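/*
 * Illustrative sketch (not part of the original file): pcic_fill_irq() and
 * pcic_pin_to_irq() read the same layout -- PCI_INT_SELECT_LO carries one
 * 4-bit irq number per external pin 0-3, PCI_INT_SELECT_HI the same for
 * pins 4-7.  The extraction, isolated from the hardware access, with a
 * made-up register image:
 */
#include <assert.h>
#include <stdint.h>

static unsigned int pin_to_irq_lo(uint16_t ivec, unsigned int pin)
{
	return (ivec >> (pin << 2)) & 0xF;	/* pin must be 0..3 here */
}

int main(void)
{
	/* pins 0..3 routed to irqs 13, 6, 9 and 5 respectively */
	uint16_t ivec = (0x5 << 12) | (0x9 << 8) | (0x6 << 4) | 0xd;

	assert(pin_to_irq_lo(ivec, 0) == 13);
	assert(pin_to_irq_lo(ivec, 1) == 6);
	assert(pin_to_irq_lo(ivec, 3) == 5);
	return 0;
}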
702
703/* Makes compiler happy */
704static volatile int pcic_timer_dummy;
705
706static void pcic_clear_clock_irq(void)
707{
708 pcic_timer_dummy = readl(pcic0.pcic_regs+PCI_SYS_LIMIT);
709}
710
711static irqreturn_t pcic_timer_handler (int irq, void *h, struct pt_regs *regs)
712{
713 write_seqlock(&xtime_lock); /* Dummy, to show that we remember */
714 pcic_clear_clock_irq();
715 do_timer(regs);
716#ifndef CONFIG_SMP
717 update_process_times(user_mode(regs));
718#endif
719 write_sequnlock(&xtime_lock);
720 return IRQ_HANDLED;
721}
722
723#define USECS_PER_JIFFY 10000 /* We have 100HZ "standard" timer for sparc */
724#define TICK_TIMER_LIMIT ((100*1000000/4)/100)
725
726void __init pci_time_init(void)
727{
728 struct linux_pcic *pcic = &pcic0;
729 unsigned long v;
730 int timer_irq, irq;
731
732 /* A hack until do_gettimeofday prototype is moved to arch specific headers
733 and btfixupped. Patch do_gettimeofday with ba pci_do_gettimeofday; nop */
734 ((unsigned int *)do_gettimeofday)[0] =
735 0x10800000 | ((((unsigned long)pci_do_gettimeofday -
736 (unsigned long)do_gettimeofday) >> 2) & 0x003fffff);
737 ((unsigned int *)do_gettimeofday)[1] = 0x01000000;
738 BTFIXUPSET_CALL(bus_do_settimeofday, pci_do_settimeofday, BTFIXUPCALL_NORM);
739 btfixup();
740
741 writel (TICK_TIMER_LIMIT, pcic->pcic_regs+PCI_SYS_LIMIT);
742 /* PROM should set appropriate irq */
743 v = readb(pcic->pcic_regs+PCI_COUNTER_IRQ);
744 timer_irq = PCI_COUNTER_IRQ_SYS(v);
745 writel (PCI_COUNTER_IRQ_SET(timer_irq, 0),
746 pcic->pcic_regs+PCI_COUNTER_IRQ);
747 irq = request_irq(timer_irq, pcic_timer_handler,
748 (SA_INTERRUPT | SA_STATIC_ALLOC), "timer", NULL);
749 if (irq) {
750 prom_printf("time_init: unable to attach IRQ%d\n", timer_irq);
751 prom_halt();
752 }
753 local_irq_enable();
754}
755
756static __inline__ unsigned long do_gettimeoffset(void)
757{
758 /*
759	 * We divide everything by 100
760	 * to get microsecond resolution and to avoid overflow.
761 */
762 unsigned long count =
763 readl(pcic0.pcic_regs+PCI_SYS_COUNTER) & ~PCI_SYS_COUNTER_OVERFLOW;
764 count = ((count/100)*USECS_PER_JIFFY) / (TICK_TIMER_LIMIT/100);
765 return count;
766}
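/*
 * Illustrative sketch (not part of the original file): with HZ=100 the
 * counter apparently runs at 25 MHz (100 MHz / 4), so it counts from 0 to
 * TICK_TIMER_LIMIT = 250000 once per 10 ms jiffy; the fraction
 * count/TICK_TIMER_LIMIT of USECS_PER_JIFFY is then the microseconds
 * elapsed in the current tick, and dividing both sides by 100 first keeps
 * the 32-bit multiply from overflowing.  A user-space check:
 */
#include <assert.h>

#define EX_USECS_PER_JIFFY	10000
#define EX_TICK_TIMER_LIMIT	((100*1000000/4)/100)	/* 250000, as above */

int main(void)
{
	/* half way through a tick the raw counter reads LIMIT/2 ... */
	unsigned long count = EX_TICK_TIMER_LIMIT / 2;

	/* ... and the expression used in do_gettimeoffset() yields 5000 us */
	count = ((count/100)*EX_USECS_PER_JIFFY) / (EX_TICK_TIMER_LIMIT/100);
	assert(count == EX_USECS_PER_JIFFY / 2);
	return 0;
}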
767
768extern unsigned long wall_jiffies;
769
770static void pci_do_gettimeofday(struct timeval *tv)
771{
772 unsigned long flags;
773 unsigned long seq;
774 unsigned long usec, sec;
775 unsigned long max_ntp_tick = tick_usec - tickadj;
776
777 do {
778 unsigned long lost;
779
780 seq = read_seqbegin_irqsave(&xtime_lock, flags);
781 usec = do_gettimeoffset();
782 lost = jiffies - wall_jiffies;
783
784 /*
785 * If time_adjust is negative then NTP is slowing the clock
786 * so make sure not to go into next possible interval.
787 * Better to lose some accuracy than have time go backwards..
788 */
789 if (unlikely(time_adjust < 0)) {
790 usec = min(usec, max_ntp_tick);
791
792 if (lost)
793 usec += lost * max_ntp_tick;
794 }
795 else if (unlikely(lost))
796 usec += lost * tick_usec;
797
798 sec = xtime.tv_sec;
799 usec += (xtime.tv_nsec / 1000);
800 } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
801
802 while (usec >= 1000000) {
803 usec -= 1000000;
804 sec++;
805 }
806
807 tv->tv_sec = sec;
808 tv->tv_usec = usec;
809}
810
811static int pci_do_settimeofday(struct timespec *tv)
812{
813 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
814 return -EINVAL;
815
816 /*
817 * This is revolting. We need to set "xtime" correctly. However, the
818 * value in this location is the value at the most recent update of
819 * wall time. Discover what correction gettimeofday() would have
820 * made, and then undo it!
821 */
822 tv->tv_nsec -= 1000 * (do_gettimeoffset() +
823 (jiffies - wall_jiffies) * (USEC_PER_SEC / HZ));
824 while (tv->tv_nsec < 0) {
825 tv->tv_nsec += NSEC_PER_SEC;
826 tv->tv_sec--;
827 }
828
829 wall_to_monotonic.tv_sec += xtime.tv_sec - tv->tv_sec;
830 wall_to_monotonic.tv_nsec += xtime.tv_nsec - tv->tv_nsec;
831
832 if (wall_to_monotonic.tv_nsec > NSEC_PER_SEC) {
833 wall_to_monotonic.tv_nsec -= NSEC_PER_SEC;
834 wall_to_monotonic.tv_sec++;
835 }
836 if (wall_to_monotonic.tv_nsec < 0) {
837 wall_to_monotonic.tv_nsec += NSEC_PER_SEC;
838 wall_to_monotonic.tv_sec--;
839 }
840
841 xtime.tv_sec = tv->tv_sec;
842 xtime.tv_nsec = tv->tv_nsec;
843 time_adjust = 0; /* stop active adjtime() */
844 time_status |= STA_UNSYNC;
845 time_maxerror = NTP_PHASE_LIMIT;
846 time_esterror = NTP_PHASE_LIMIT;
847 return 0;
848}
849
850#if 0
851static void watchdog_reset() {
852 writeb(0, pcic->pcic_regs+PCI_SYS_STATUS);
853}
854#endif
855
856/*
857 * Other archs parse arguments here.
858 */
859char * __init pcibios_setup(char *str)
860{
861 return str;
862}
863
864void pcibios_align_resource(void *data, struct resource *res,
865 unsigned long size, unsigned long align)
866{
867}
868
869int pcibios_enable_device(struct pci_dev *pdev, int mask)
870{
871 return 0;
872}
873
874/*
875 * NMI
876 */
877void pcic_nmi(unsigned int pend, struct pt_regs *regs)
878{
879
880 pend = flip_dword(pend);
881
882 if (!pcic_speculative || (pend & PCI_SYS_INT_PENDING_PIO) == 0) {
883 /*
884 * XXX On CP-1200 PCI #SERR may happen, we do not know
885 * what to do about it yet.
886 */
887 printk("Aiee, NMI pend 0x%x pc 0x%x spec %d, hanging\n",
888 pend, (int)regs->pc, pcic_speculative);
889 for (;;) { }
890 }
891 pcic_speculative = 0;
892 pcic_trapped = 1;
893 regs->pc = regs->npc;
894 regs->npc += 4;
895}
896
897static inline unsigned long get_irqmask(int irq_nr)
898{
899 return 1 << irq_nr;
900}
901
902static inline char *pcic_irq_itoa(unsigned int irq)
903{
904 static char buff[16];
905 sprintf(buff, "%d", irq);
906 return buff;
907}
908
909static void pcic_disable_irq(unsigned int irq_nr)
910{
911 unsigned long mask, flags;
912
913 mask = get_irqmask(irq_nr);
914 local_irq_save(flags);
915 writel(mask, pcic0.pcic_regs+PCI_SYS_INT_TARGET_MASK_SET);
916 local_irq_restore(flags);
917}
918
919static void pcic_enable_irq(unsigned int irq_nr)
920{
921 unsigned long mask, flags;
922
923 mask = get_irqmask(irq_nr);
924 local_irq_save(flags);
925 writel(mask, pcic0.pcic_regs+PCI_SYS_INT_TARGET_MASK_CLEAR);
926 local_irq_restore(flags);
927}
928
929static void pcic_clear_profile_irq(int cpu)
930{
931 printk("PCIC: unimplemented code: FILE=%s LINE=%d", __FILE__, __LINE__);
932}
933
934static void pcic_load_profile_irq(int cpu, unsigned int limit)
935{
936 printk("PCIC: unimplemented code: FILE=%s LINE=%d", __FILE__, __LINE__);
937}
938
939/* We assume the caller has disabled local interrupts when these are called,
940 * or else very bizarre behavior will result.
941 */
942static void pcic_disable_pil_irq(unsigned int pil)
943{
944 writel(get_irqmask(pil), pcic0.pcic_regs+PCI_SYS_INT_TARGET_MASK_SET);
945}
946
947static void pcic_enable_pil_irq(unsigned int pil)
948{
949 writel(get_irqmask(pil), pcic0.pcic_regs+PCI_SYS_INT_TARGET_MASK_CLEAR);
950}
951
952void __init sun4m_pci_init_IRQ(void)
953{
954 BTFIXUPSET_CALL(enable_irq, pcic_enable_irq, BTFIXUPCALL_NORM);
955 BTFIXUPSET_CALL(disable_irq, pcic_disable_irq, BTFIXUPCALL_NORM);
956 BTFIXUPSET_CALL(enable_pil_irq, pcic_enable_pil_irq, BTFIXUPCALL_NORM);
957 BTFIXUPSET_CALL(disable_pil_irq, pcic_disable_pil_irq, BTFIXUPCALL_NORM);
958 BTFIXUPSET_CALL(clear_clock_irq, pcic_clear_clock_irq, BTFIXUPCALL_NORM);
959 BTFIXUPSET_CALL(clear_profile_irq, pcic_clear_profile_irq, BTFIXUPCALL_NORM);
960 BTFIXUPSET_CALL(load_profile_irq, pcic_load_profile_irq, BTFIXUPCALL_NORM);
961 BTFIXUPSET_CALL(__irq_itoa, pcic_irq_itoa, BTFIXUPCALL_NORM);
962}
963
964int pcibios_assign_resource(struct pci_dev *pdev, int resource)
965{
966 return -ENXIO;
967}
968
969/*
970 * This probably belongs here rather than ioport.c because
971 * we do not want this crud linked into SBus kernels.
972 * Also, think for a moment about the likes of floppy.c that
973 * include architecture-specific parts. They may want to redefine ins/outs.
974 *
975 * We do not use horrible macros here because we want to
976 * advance the pointer by sizeof(size).
977 */
978void outsb(unsigned long addr, const void *src, unsigned long count)
979{
980 while (count) {
981 count -= 1;
982 outb(*(const char *)src, addr);
983 src += 1;
984 /* addr += 1; */
985 }
986}
987
988void outsw(unsigned long addr, const void *src, unsigned long count)
989{
990 while (count) {
991 count -= 2;
992 outw(*(const short *)src, addr);
993 src += 2;
994 /* addr += 2; */
995 }
996}
997
998void outsl(unsigned long addr, const void *src, unsigned long count)
999{
1000 while (count) {
1001 count -= 4;
1002 outl(*(const long *)src, addr);
1003 src += 4;
1004 /* addr += 4; */
1005 }
1006}
1007
1008void insb(unsigned long addr, void *dst, unsigned long count)
1009{
1010 while (count) {
1011 count -= 1;
1012 *(unsigned char *)dst = inb(addr);
1013 dst += 1;
1014 /* addr += 1; */
1015 }
1016}
1017
1018void insw(unsigned long addr, void *dst, unsigned long count)
1019{
1020 while (count) {
1021 count -= 2;
1022 *(unsigned short *)dst = inw(addr);
1023 dst += 2;
1024 /* addr += 2; */
1025 }
1026}
1027
1028void insl(unsigned long addr, void *dst, unsigned long count)
1029{
1030 while (count) {
1031 count -= 4;
1032 /*
1033 * XXX I am sure we are in for an unaligned trap here.
1034 */
1035 *(unsigned long *)dst = inl(addr);
1036 dst += 4;
1037 /* addr += 4; */
1038 }
1039}
1040
1041subsys_initcall(pcic_init);
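The string-I/O loops above advance only the memory pointer and, as written, treat count as a byte count (it is decremented by the element size on each pass) while the port address stays fixed. A minimal, hypothetical usage sketch follows; DEV_DATA_PORT, the buffer, and both helper functions are invented names for illustration only.

#include <asm/io.h>	/* outsw()/insw() as implemented above */

#define DEV_DATA_PORT	0x1f0	/* hypothetical fixed-address data register */

/* Write one 512-byte sector to the device, 16 bits per outw(). */
static void copy_sector_to_device(const unsigned short *buf)
{
	outsw(DEV_DATA_PORT, buf, 512);	/* count is in bytes in this implementation */
}

/* Read one 512-byte sector back from the same port. */
static void read_sector_from_device(unsigned short *buf)
{
	insw(DEV_DATA_PORT, buf, 512);
}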
diff --git a/arch/sparc/kernel/pmc.c b/arch/sparc/kernel/pmc.c
new file mode 100644
index 000000000000..7eca8871ff47
--- /dev/null
+++ b/arch/sparc/kernel/pmc.c
@@ -0,0 +1,99 @@
1/* pmc - Driver implementation for power management functions
2 * of Power Management Controller (PMC) on SPARCstation-Voyager.
3 *
4 * Copyright (c) 2002 Eric Brower (ebrower@usa.net)
5 */
6
7#include <linux/kernel.h>
8#include <linux/fs.h>
9#include <linux/errno.h>
10#include <linux/init.h>
11#include <linux/miscdevice.h>
12#include <linux/pm.h>
13
14#include <asm/io.h>
15#include <asm/sbus.h>
16#include <asm/oplib.h>
17#include <asm/uaccess.h>
18#include <asm/auxio.h>
19
20/* Debug
21 *
22 * #define PMC_DEBUG_LED
23 * #define PMC_NO_IDLE
24 */
25
26#define PMC_MINOR MISC_DYNAMIC_MINOR
27#define PMC_OBPNAME "SUNW,pmc"
28#define PMC_DEVNAME "pmc"
29
30#define PMC_IDLE_REG 0x00
31#define PMC_IDLE_ON 0x01
32
33static volatile u8 __iomem *regs;
34static int pmc_regsize;
35
36#define pmc_readb(offs) (sbus_readb(regs+offs))
37#define pmc_writeb(val, offs) (sbus_writeb(val, regs+offs))
38
39/*
40 * CPU idle callback function
41 * See .../arch/sparc/kernel/process.c
42 */
43void pmc_swift_idle(void)
44{
45#ifdef PMC_DEBUG_LED
46 set_auxio(0x00, AUXIO_LED);
47#endif
48
49 pmc_writeb(pmc_readb(PMC_IDLE_REG) | PMC_IDLE_ON, PMC_IDLE_REG);
50
51#ifdef PMC_DEBUG_LED
52 set_auxio(AUXIO_LED, 0x00);
53#endif
54}
55
56static inline void pmc_free(void)
57{
58 sbus_iounmap(regs, pmc_regsize);
59}
60
61static int __init pmc_probe(void)
62{
63 struct sbus_bus *sbus = NULL;
64 struct sbus_dev *sdev = NULL;
65 for_each_sbus(sbus) {
66 for_each_sbusdev(sdev, sbus) {
67 if (!strcmp(sdev->prom_name, PMC_OBPNAME)) {
68 goto sbus_done;
69 }
70 }
71 }
72
73sbus_done:
74 if (!sdev) {
75 return -ENODEV;
76 }
77
78 pmc_regsize = sdev->reg_addrs[0].reg_size;
79 regs = sbus_ioremap(&sdev->resource[0], 0,
80 pmc_regsize, PMC_OBPNAME);
81 if (!regs) {
82 printk(KERN_ERR "%s: unable to map registers\n", PMC_DEVNAME);
83 return -ENODEV;
84 }
85
86#ifndef PMC_NO_IDLE
87 /* Assign power management IDLE handler */
88 pm_idle = pmc_swift_idle;
89#endif
90
91 printk(KERN_INFO "%s: power management initialized\n", PMC_DEVNAME);
92 return 0;
93}
94
95/* This driver is not critical to the boot process
96 * and is easiest to ioremap when SBus is already
97 * initialized, so we install ourselves thusly:
98 */
99__initcall(pmc_probe);
diff --git a/arch/sparc/kernel/process.c b/arch/sparc/kernel/process.c
new file mode 100644
index 000000000000..143fe2f3c1c4
--- /dev/null
+++ b/arch/sparc/kernel/process.c
@@ -0,0 +1,746 @@
1/* $Id: process.c,v 1.161 2002/01/23 11:27:32 davem Exp $
2 * linux/arch/sparc/kernel/process.c
3 *
4 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
6 */
7
8/*
9 * This file handles the architecture-dependent parts of process handling.
10 */
11
12#include <stdarg.h>
13
14#include <linux/errno.h>
15#include <linux/module.h>
16#include <linux/sched.h>
17#include <linux/kernel.h>
18#include <linux/kallsyms.h>
19#include <linux/mm.h>
20#include <linux/stddef.h>
21#include <linux/ptrace.h>
22#include <linux/slab.h>
23#include <linux/user.h>
24#include <linux/a.out.h>
25#include <linux/config.h>
26#include <linux/smp.h>
27#include <linux/smp_lock.h>
28#include <linux/reboot.h>
29#include <linux/delay.h>
30#include <linux/pm.h>
31#include <linux/init.h>
32
33#include <asm/auxio.h>
34#include <asm/oplib.h>
35#include <asm/uaccess.h>
36#include <asm/system.h>
37#include <asm/page.h>
38#include <asm/pgalloc.h>
39#include <asm/pgtable.h>
40#include <asm/delay.h>
41#include <asm/processor.h>
42#include <asm/psr.h>
43#include <asm/elf.h>
44#include <asm/unistd.h>
45
46/*
47 * Power management idle function
48 * Set in pm platform drivers (apc.c and pmc.c)
49 */
50void (*pm_idle)(void);
51
52/*
53 * Power-off handler instantiation for pm.h compliance.
54 * This is done via auxio, but could be used as a fallback
55 * handler when auxio is not present; unused for now.
56 */
57void (*pm_power_off)(void);
58
59/*
60 * sysctl - toggle power-off restriction for serial console
61 * systems in machine_power_off()
62 */
63int scons_pwroff = 1;
64
65extern void fpsave(unsigned long *, unsigned long *, void *, unsigned long *);
66
67struct task_struct *last_task_used_math = NULL;
68struct thread_info *current_set[NR_CPUS];
69
70/*
71 * default_idle is new in 2.5. XXX Review, currently stolen from sparc64.
72 */
73void default_idle(void)
74{
75}
76
77#ifndef CONFIG_SMP
78
79#define SUN4C_FAULT_HIGH 100
80
81/*
82 * the idle loop on a Sparc... ;)
83 */
84void cpu_idle(void)
85{
86 if (current->pid != 0)
87 goto out;
88
89 /* endless idle loop with no priority at all */
90 for (;;) {
91 if (ARCH_SUN4C_SUN4) {
92 static int count = HZ;
93 static unsigned long last_jiffies;
94 static unsigned long last_faults;
95 static unsigned long fps;
96 unsigned long now;
97 unsigned long faults;
98 unsigned long flags;
99
100 extern unsigned long sun4c_kernel_faults;
101 extern void sun4c_grow_kernel_ring(void);
102
103 local_irq_save(flags);
104 now = jiffies;
105 count -= (now - last_jiffies);
106 last_jiffies = now;
107 if (count < 0) {
108 count += HZ;
109 faults = sun4c_kernel_faults;
110 fps = (fps + (faults - last_faults)) >> 1;
111 last_faults = faults;
112#if 0
113 printk("kernel faults / second = %ld\n", fps);
114#endif
115 if (fps >= SUN4C_FAULT_HIGH) {
116 sun4c_grow_kernel_ring();
117 }
118 }
119 local_irq_restore(flags);
120 }
121
122 while((!need_resched()) && pm_idle) {
123 (*pm_idle)();
124 }
125
126 schedule();
127 check_pgt_cache();
128 }
129out:
130 return;
131}
132
133#else
134
135/* This is being executed in task 0 'user space'. */
136void cpu_idle(void)
137{
138 /* endless idle loop with no priority at all */
139 while(1) {
140 if(need_resched()) {
141 schedule();
142 check_pgt_cache();
143 }
144 barrier(); /* or else gcc optimizes... */
145 }
146}
147
148#endif
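cpu_idle() above only calls through pm_idle once a platform driver has installed a hook; pmc.c earlier in this patch does exactly that. The sketch below restates the pattern with invented names (my_platform_idle, my_platform_probe) and is illustration only, not part of the kernel.

#include <linux/init.h>
#include <linux/pm.h>	/* declares pm_idle (assumed, as in pmc.c above) */

/* Hypothetical idle callback, invoked from cpu_idle() while nothing is runnable. */
static void my_platform_idle(void)
{
	/* touch the platform's idle/standby register here */
}

static int __init my_platform_probe(void)
{
	pm_idle = my_platform_idle;	/* picked up by the loop in cpu_idle() */
	return 0;
}

__initcall(my_platform_probe);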
149
150extern char reboot_command [];
151
152extern void (*prom_palette)(int);
153
154/* XXX cli/sti -> local_irq_xxx here, check this works once SMP is fixed. */
155void machine_halt(void)
156{
157 local_irq_enable();
158 mdelay(8);
159 local_irq_disable();
160 if (!serial_console && prom_palette)
161 prom_palette (1);
162 prom_halt();
163 panic("Halt failed!");
164}
165
166EXPORT_SYMBOL(machine_halt);
167
168void machine_restart(char * cmd)
169{
170 char *p;
171
172 local_irq_enable();
173 mdelay(8);
174 local_irq_disable();
175
176 p = strchr (reboot_command, '\n');
177 if (p) *p = 0;
178 if (!serial_console && prom_palette)
179 prom_palette (1);
180 if (cmd)
181 prom_reboot(cmd);
182 if (*reboot_command)
183 prom_reboot(reboot_command);
184 prom_feval ("reset");
185 panic("Reboot failed!");
186}
187
188EXPORT_SYMBOL(machine_restart);
189
190void machine_power_off(void)
191{
192#ifdef CONFIG_SUN_AUXIO
193 if (auxio_power_register && (!serial_console || scons_pwroff))
194 *auxio_power_register |= AUXIO_POWER_OFF;
195#endif
196 machine_halt();
197}
198
199EXPORT_SYMBOL(machine_power_off);
200
201static DEFINE_SPINLOCK(sparc_backtrace_lock);
202
203void __show_backtrace(unsigned long fp)
204{
205 struct reg_window *rw;
206 unsigned long flags;
207 int cpu = smp_processor_id();
208
209 spin_lock_irqsave(&sparc_backtrace_lock, flags);
210
211 rw = (struct reg_window *)fp;
212 while(rw && (((unsigned long) rw) >= PAGE_OFFSET) &&
213 !(((unsigned long) rw) & 0x7)) {
214 printk("CPU[%d]: ARGS[%08lx,%08lx,%08lx,%08lx,%08lx,%08lx] "
215 "FP[%08lx] CALLER[%08lx]: ", cpu,
216 rw->ins[0], rw->ins[1], rw->ins[2], rw->ins[3],
217 rw->ins[4], rw->ins[5],
218 rw->ins[6],
219 rw->ins[7]);
220 print_symbol("%s\n", rw->ins[7]);
221 rw = (struct reg_window *) rw->ins[6];
222 }
223 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
224}
225
226#define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
227#define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
228#define __GET_FP(fp) __asm__ __volatile__("mov %%i6, %0" : "=r" (fp))
229
230void show_backtrace(void)
231{
232 unsigned long fp;
233
234 __SAVE; __SAVE; __SAVE; __SAVE;
235 __SAVE; __SAVE; __SAVE; __SAVE;
236 __RESTORE; __RESTORE; __RESTORE; __RESTORE;
237 __RESTORE; __RESTORE; __RESTORE; __RESTORE;
238
239 __GET_FP(fp);
240
241 __show_backtrace(fp);
242}
243
244#ifdef CONFIG_SMP
245void smp_show_backtrace_all_cpus(void)
246{
247 xc0((smpfunc_t) show_backtrace);
248 show_backtrace();
249}
250#endif
251
252#if 0
253void show_stackframe(struct sparc_stackf *sf)
254{
255 unsigned long size;
256 unsigned long *stk;
257 int i;
258
259 printk("l0: %08lx l1: %08lx l2: %08lx l3: %08lx "
260 "l4: %08lx l5: %08lx l6: %08lx l7: %08lx\n",
261 sf->locals[0], sf->locals[1], sf->locals[2], sf->locals[3],
262 sf->locals[4], sf->locals[5], sf->locals[6], sf->locals[7]);
263 printk("i0: %08lx i1: %08lx i2: %08lx i3: %08lx "
264 "i4: %08lx i5: %08lx fp: %08lx i7: %08lx\n",
265 sf->ins[0], sf->ins[1], sf->ins[2], sf->ins[3],
266 sf->ins[4], sf->ins[5], (unsigned long)sf->fp, sf->callers_pc);
267 printk("sp: %08lx x0: %08lx x1: %08lx x2: %08lx "
268 "x3: %08lx x4: %08lx x5: %08lx xx: %08lx\n",
269 (unsigned long)sf->structptr, sf->xargs[0], sf->xargs[1],
270 sf->xargs[2], sf->xargs[3], sf->xargs[4], sf->xargs[5],
271 sf->xxargs[0]);
272 size = ((unsigned long)sf->fp) - ((unsigned long)sf);
273 size -= STACKFRAME_SZ;
274 stk = (unsigned long *)((unsigned long)sf + STACKFRAME_SZ);
275 i = 0;
276 do {
277 printk("s%d: %08lx\n", i++, *stk++);
278 } while ((size -= sizeof(unsigned long)));
279}
280#endif
281
282void show_regs(struct pt_regs *r)
283{
284 struct reg_window *rw = (struct reg_window *) r->u_regs[14];
285
286 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
287 r->psr, r->pc, r->npc, r->y, print_tainted());
288 print_symbol("PC: <%s>\n", r->pc);
289 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
290 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
291 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
292 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
293 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
294 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
295 print_symbol("RPC: <%s>\n", r->u_regs[15]);
296
297 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
298 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
299 rw->locals[4], rw->locals[5], rw->locals[6], rw->locals[7]);
300 printk("%%I: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
301 rw->ins[0], rw->ins[1], rw->ins[2], rw->ins[3],
302 rw->ins[4], rw->ins[5], rw->ins[6], rw->ins[7]);
303}
304
305/*
306 * show_stack() is an external API which we do not use ourselves.
307 * The oops is printed in die_if_kernel.
308 */
309void show_stack(struct task_struct *tsk, unsigned long *_ksp)
310{
311 unsigned long pc, fp;
312 unsigned long task_base;
313 struct reg_window *rw;
314 int count = 0;
315
316 if (tsk != NULL)
317 task_base = (unsigned long) tsk->thread_info;
318 else
319 task_base = (unsigned long) current_thread_info();
320
321 fp = (unsigned long) _ksp;
322 do {
323 /* Bogus frame pointer? */
324 if (fp < (task_base + sizeof(struct thread_info)) ||
325 fp >= (task_base + (PAGE_SIZE << 1)))
326 break;
327 rw = (struct reg_window *) fp;
328 pc = rw->ins[7];
329 printk("[%08lx : ", pc);
330 print_symbol("%s ] ", pc);
331 fp = rw->ins[6];
332 } while (++count < 16);
333 printk("\n");
334}
335
336/*
337 * Note: sparc64 has a pretty intricate thread_saved_pc, check it out.
338 */
339unsigned long thread_saved_pc(struct task_struct *tsk)
340{
341 return tsk->thread_info->kpc;
342}
343
344/*
345 * Free current thread data structures, etc.
346 */
347void exit_thread(void)
348{
349#ifndef CONFIG_SMP
350 if(last_task_used_math == current) {
351#else
352 if(current_thread_info()->flags & _TIF_USEDFPU) {
353#endif
354 /* Keep process from leaving FPU in a bogon state. */
355 put_psr(get_psr() | PSR_EF);
356 fpsave(&current->thread.float_regs[0], &current->thread.fsr,
357 &current->thread.fpqueue[0], &current->thread.fpqdepth);
358#ifndef CONFIG_SMP
359 last_task_used_math = NULL;
360#else
361 current_thread_info()->flags &= ~_TIF_USEDFPU;
362#endif
363 }
364}
365
366void flush_thread(void)
367{
368 current_thread_info()->w_saved = 0;
369
370 /* No new signal delivery by default */
371 current->thread.new_signal = 0;
372#ifndef CONFIG_SMP
373 if(last_task_used_math == current) {
374#else
375 if(current_thread_info()->flags & _TIF_USEDFPU) {
376#endif
377 /* Clean the fpu. */
378 put_psr(get_psr() | PSR_EF);
379 fpsave(&current->thread.float_regs[0], &current->thread.fsr,
380 &current->thread.fpqueue[0], &current->thread.fpqdepth);
381#ifndef CONFIG_SMP
382 last_task_used_math = NULL;
383#else
384 current_thread_info()->flags &= ~_TIF_USEDFPU;
385#endif
386 }
387
388 /* Now, this task is no longer a kernel thread. */
389 current->thread.current_ds = USER_DS;
390 if (current->thread.flags & SPARC_FLAG_KTHREAD) {
391 current->thread.flags &= ~SPARC_FLAG_KTHREAD;
392
393 /* We must fixup kregs as well. */
394 /* XXX This was not fixed for ti for a while, worked. Unused? */
395 current->thread.kregs = (struct pt_regs *)
396 ((char *)current->thread_info + (THREAD_SIZE - TRACEREG_SZ));
397 }
398}
399
400static __inline__ struct sparc_stackf __user *
401clone_stackframe(struct sparc_stackf __user *dst,
402 struct sparc_stackf __user *src)
403{
404 unsigned long size, fp;
405 struct sparc_stackf *tmp;
406 struct sparc_stackf __user *sp;
407
408 if (get_user(tmp, &src->fp))
409 return NULL;
410
411 fp = (unsigned long) tmp;
412 size = (fp - ((unsigned long) src));
413 fp = (unsigned long) dst;
414 sp = (struct sparc_stackf __user *)(fp - size);
415
416	/* do_fork() grabs the parent semaphore; we must release it
417 * temporarily so we can build the child clone stack frame
418 * without deadlocking.
419 */
420 if (__copy_user(sp, src, size))
421 sp = NULL;
422 else if (put_user(fp, &sp->fp))
423 sp = NULL;
424
425 return sp;
426}
427
428asmlinkage int sparc_do_fork(unsigned long clone_flags,
429 unsigned long stack_start,
430 struct pt_regs *regs,
431 unsigned long stack_size)
432{
433 unsigned long parent_tid_ptr, child_tid_ptr;
434
435 parent_tid_ptr = regs->u_regs[UREG_I2];
436 child_tid_ptr = regs->u_regs[UREG_I4];
437
438 return do_fork(clone_flags, stack_start,
439 regs, stack_size,
440 (int __user *) parent_tid_ptr,
441 (int __user *) child_tid_ptr);
442}
443
444/* Copy a Sparc thread. The fork() return value conventions
445 * under SunOS are nothing short of bletcherous:
446 * Parent --> %o0 == child's pid, %o1 == 0
447 * Child  --> %o0 == parent's pid, %o1 == 1
448 *
449 * NOTE: We have a separate fork kpsr/kwim because
450 * the parent could change these values between
451 * sys_fork invocation and when we reach here
452 * if the parent should sleep while trying to
453 * allocate the task_struct and kernel stack in
454 * do_fork().
455 * XXX See comment above sys_vfork in sparc64. todo.
456 */
457extern void ret_from_fork(void);
458
459int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
460 unsigned long unused,
461 struct task_struct *p, struct pt_regs *regs)
462{
463 struct thread_info *ti = p->thread_info;
464 struct pt_regs *childregs;
465 char *new_stack;
466
467#ifndef CONFIG_SMP
468 if(last_task_used_math == current) {
469#else
470 if(current_thread_info()->flags & _TIF_USEDFPU) {
471#endif
472 put_psr(get_psr() | PSR_EF);
473 fpsave(&p->thread.float_regs[0], &p->thread.fsr,
474 &p->thread.fpqueue[0], &p->thread.fpqdepth);
475#ifdef CONFIG_SMP
476 current_thread_info()->flags &= ~_TIF_USEDFPU;
477#endif
478 }
479
480 /*
481 * p->thread_info new_stack childregs
482 * ! ! ! {if(PSR_PS) }
483 * V V (stk.fr.) V (pt_regs) { (stk.fr.) }
484 * +----- - - - - - ------+===========+============={+==========}+
485 */
486 new_stack = (char*)ti + THREAD_SIZE;
487 if (regs->psr & PSR_PS)
488 new_stack -= STACKFRAME_SZ;
489 new_stack -= STACKFRAME_SZ + TRACEREG_SZ;
490 memcpy(new_stack, (char *)regs - STACKFRAME_SZ, STACKFRAME_SZ + TRACEREG_SZ);
491 childregs = (struct pt_regs *) (new_stack + STACKFRAME_SZ);
492
493 /*
494 * A new process must start with interrupts disabled in 2.5,
495 * because this is how Mingo's scheduler works (see schedule_tail
496 * and finish_arch_switch). If we do not do it, a timer interrupt hits
497 * before we unlock, attempts to re-take the rq->lock, and then we die.
498 * Thus, kpsr|=PSR_PIL.
499 */
500 ti->ksp = (unsigned long) new_stack;
501 ti->kpc = (((unsigned long) ret_from_fork) - 0x8);
502 ti->kpsr = current->thread.fork_kpsr | PSR_PIL;
503 ti->kwim = current->thread.fork_kwim;
504
505 if(regs->psr & PSR_PS) {
506 extern struct pt_regs fake_swapper_regs;
507
508 p->thread.kregs = &fake_swapper_regs;
509 new_stack += STACKFRAME_SZ + TRACEREG_SZ;
510 childregs->u_regs[UREG_FP] = (unsigned long) new_stack;
511 p->thread.flags |= SPARC_FLAG_KTHREAD;
512 p->thread.current_ds = KERNEL_DS;
513 memcpy(new_stack, (void *)regs->u_regs[UREG_FP], STACKFRAME_SZ);
514 childregs->u_regs[UREG_G6] = (unsigned long) ti;
515 } else {
516 p->thread.kregs = childregs;
517 childregs->u_regs[UREG_FP] = sp;
518 p->thread.flags &= ~SPARC_FLAG_KTHREAD;
519 p->thread.current_ds = USER_DS;
520
521 if (sp != regs->u_regs[UREG_FP]) {
522 struct sparc_stackf __user *childstack;
523 struct sparc_stackf __user *parentstack;
524
525 /*
526 * This is a clone() call with supplied user stack.
527 * Set some valid stack frames to give to the child.
528 */
529 childstack = (struct sparc_stackf __user *)
530 (sp & ~0x7UL);
531 parentstack = (struct sparc_stackf __user *)
532 regs->u_regs[UREG_FP];
533
534#if 0
535 printk("clone: parent stack:\n");
536 show_stackframe(parentstack);
537#endif
538
539 childstack = clone_stackframe(childstack, parentstack);
540 if (!childstack)
541 return -EFAULT;
542
543#if 0
544 printk("clone: child stack:\n");
545 show_stackframe(childstack);
546#endif
547
548 childregs->u_regs[UREG_FP] = (unsigned long)childstack;
549 }
550 }
551
552#ifdef CONFIG_SMP
553 /* FPU must be disabled on SMP. */
554 childregs->psr &= ~PSR_EF;
555#endif
556
557 /* Set the return value for the child. */
558 childregs->u_regs[UREG_I0] = current->pid;
559 childregs->u_regs[UREG_I1] = 1;
560
561 /* Set the return value for the parent. */
562 regs->u_regs[UREG_I1] = 0;
563
564 if (clone_flags & CLONE_SETTLS)
565 childregs->u_regs[UREG_G7] = regs->u_regs[UREG_I3];
566
567 return 0;
568}
569
570/*
571 * fill in the user structure for a core dump..
572 */
573void dump_thread(struct pt_regs * regs, struct user * dump)
574{
575 unsigned long first_stack_page;
576
577 dump->magic = SUNOS_CORE_MAGIC;
578 dump->len = sizeof(struct user);
579 dump->regs.psr = regs->psr;
580 dump->regs.pc = regs->pc;
581 dump->regs.npc = regs->npc;
582 dump->regs.y = regs->y;
583 /* fuck me plenty */
584 memcpy(&dump->regs.regs[0], &regs->u_regs[1], (sizeof(unsigned long) * 15));
585 dump->uexec = current->thread.core_exec;
586 dump->u_tsize = (((unsigned long) current->mm->end_code) -
587 ((unsigned long) current->mm->start_code)) & ~(PAGE_SIZE - 1);
588 dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1)));
589 dump->u_dsize -= dump->u_tsize;
590 dump->u_dsize &= ~(PAGE_SIZE - 1);
591 first_stack_page = (regs->u_regs[UREG_FP] & ~(PAGE_SIZE - 1));
592 dump->u_ssize = (TASK_SIZE - first_stack_page) & ~(PAGE_SIZE - 1);
593 memcpy(&dump->fpu.fpstatus.fregs.regs[0], &current->thread.float_regs[0], (sizeof(unsigned long) * 32));
594 dump->fpu.fpstatus.fsr = current->thread.fsr;
595 dump->fpu.fpstatus.flags = dump->fpu.fpstatus.extra = 0;
596 dump->fpu.fpstatus.fpq_count = current->thread.fpqdepth;
597 memcpy(&dump->fpu.fpstatus.fpq[0], &current->thread.fpqueue[0],
598 ((sizeof(unsigned long) * 2) * 16));
599 dump->sigcode = 0;
600}
601
602/*
603 * fill in the fpu structure for a core dump.
604 */
605int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs)
606{
607 if (used_math()) {
608 memset(fpregs, 0, sizeof(*fpregs));
609 fpregs->pr_q_entrysize = 8;
610 return 1;
611 }
612#ifdef CONFIG_SMP
613 if (current_thread_info()->flags & _TIF_USEDFPU) {
614 put_psr(get_psr() | PSR_EF);
615 fpsave(&current->thread.float_regs[0], &current->thread.fsr,
616 &current->thread.fpqueue[0], &current->thread.fpqdepth);
617 if (regs != NULL) {
618 regs->psr &= ~(PSR_EF);
619 current_thread_info()->flags &= ~(_TIF_USEDFPU);
620 }
621 }
622#else
623 if (current == last_task_used_math) {
624 put_psr(get_psr() | PSR_EF);
625 fpsave(&current->thread.float_regs[0], &current->thread.fsr,
626 &current->thread.fpqueue[0], &current->thread.fpqdepth);
627 if (regs != NULL) {
628 regs->psr &= ~(PSR_EF);
629 last_task_used_math = NULL;
630 }
631 }
632#endif
633 memcpy(&fpregs->pr_fr.pr_regs[0],
634 &current->thread.float_regs[0],
635 (sizeof(unsigned long) * 32));
636 fpregs->pr_fsr = current->thread.fsr;
637 fpregs->pr_qcnt = current->thread.fpqdepth;
638 fpregs->pr_q_entrysize = 8;
639 fpregs->pr_en = 1;
640 if(fpregs->pr_qcnt != 0) {
641 memcpy(&fpregs->pr_q[0],
642 &current->thread.fpqueue[0],
643 sizeof(struct fpq) * fpregs->pr_qcnt);
644 }
645 /* Zero out the rest. */
646 memset(&fpregs->pr_q[fpregs->pr_qcnt], 0,
647 sizeof(struct fpq) * (32 - fpregs->pr_qcnt));
648 return 1;
649}
650
651/*
652 * sparc_execve() executes a new program after the asm stub has set
653 * things up for us. This should basically do what I want it to.
654 */
655asmlinkage int sparc_execve(struct pt_regs *regs)
656{
657 int error, base = 0;
658 char *filename;
659
660 /* Check for indirect call. */
661 if(regs->u_regs[UREG_G1] == 0)
662 base = 1;
663
664 filename = getname((char __user *)regs->u_regs[base + UREG_I0]);
665 error = PTR_ERR(filename);
666 if(IS_ERR(filename))
667 goto out;
668 error = do_execve(filename,
669 (char __user * __user *)regs->u_regs[base + UREG_I1],
670 (char __user * __user *)regs->u_regs[base + UREG_I2],
671 regs);
672 putname(filename);
673 if (error == 0) {
674 task_lock(current);
675 current->ptrace &= ~PT_DTRACE;
676 task_unlock(current);
677 }
678out:
679 return error;
680}
681
682/*
683 * This is the mechanism for creating a new kernel thread.
684 *
685 * NOTE! Only a kernel-only process (i.e. the swapper or direct descendants
686 * who haven't done an "execve()") should use this: it will work within
687 * a system call from a "real" process, but the process memory space will
688 * not be freed until both the parent and the child have exited.
689 */
690pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
691{
692 long retval;
693
694 __asm__ __volatile__("mov %4, %%g2\n\t" /* Set aside fn ptr... */
695 "mov %5, %%g3\n\t" /* and arg. */
696 "mov %1, %%g1\n\t"
697 "mov %2, %%o0\n\t" /* Clone flags. */
698 "mov 0, %%o1\n\t" /* usp arg == 0 */
699 "t 0x10\n\t" /* Linux/Sparc clone(). */
700 "cmp %%o1, 0\n\t"
701 "be 1f\n\t" /* The parent, just return. */
702 " nop\n\t" /* Delay slot. */
703 "jmpl %%g2, %%o7\n\t" /* Call the function. */
704 " mov %%g3, %%o0\n\t" /* Get back the arg in delay. */
705 "mov %3, %%g1\n\t"
706 "t 0x10\n\t" /* Linux/Sparc exit(). */
707 /* Notreached by child. */
708 "1: mov %%o0, %0\n\t" :
709 "=r" (retval) :
710 "i" (__NR_clone), "r" (flags | CLONE_VM | CLONE_UNTRACED),
711 "i" (__NR_exit), "r" (fn), "r" (arg) :
712 "g1", "g2", "g3", "o0", "o1", "memory", "cc");
713 return retval;
714}
715
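kernel_thread() above issues the clone trap directly and tests %o1 to tell parent from child, matching the fork return convention described in the comment above copy_thread(). A hypothetical caller could look like the sketch below; my_thread_fn and start_my_thread are invented names, and the headers are the usual ones assumed for this era.

#include <linux/sched.h>	/* kernel_thread(), CLONE_* (assumed) */
#include <linux/kernel.h>

/* Hypothetical worker: runs in its own kernel thread until it returns. */
static int my_thread_fn(void *arg)
{
	/* ... do the work ... */
	return 0;
}

static void start_my_thread(void)
{
	pid_t pid = kernel_thread(my_thread_fn, NULL, CLONE_FS | CLONE_FILES);

	if (pid < 0)
		printk(KERN_ERR "could not start kernel thread: %d\n", (int) pid);
}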
716unsigned long get_wchan(struct task_struct *task)
717{
718 unsigned long pc, fp, bias = 0;
719 unsigned long task_base = (unsigned long) task;
720 unsigned long ret = 0;
721 struct reg_window *rw;
722 int count = 0;
723
724 if (!task || task == current ||
725 task->state == TASK_RUNNING)
726 goto out;
727
728 fp = task->thread_info->ksp + bias;
729 do {
730 /* Bogus frame pointer? */
731 if (fp < (task_base + sizeof(struct thread_info)) ||
732 fp >= (task_base + (2 * PAGE_SIZE)))
733 break;
734 rw = (struct reg_window *) fp;
735 pc = rw->ins[7];
736 if (!in_sched_functions(pc)) {
737 ret = pc;
738 goto out;
739 }
740 fp = rw->ins[6] + bias;
741 } while (++count < 16);
742
743out:
744 return ret;
745}
746
diff --git a/arch/sparc/kernel/ptrace.c b/arch/sparc/kernel/ptrace.c
new file mode 100644
index 000000000000..fc4ad69357b8
--- /dev/null
+++ b/arch/sparc/kernel/ptrace.c
@@ -0,0 +1,632 @@
1/* ptrace.c: Sparc process tracing support.
2 *
3 * Copyright (C) 1996 David S. Miller (davem@caipfs.rutgers.edu)
4 *
5 * Based upon code written by Ross Biro, Linus Torvalds, Bob Manson,
6 * and David Mosberger.
7 *
8 * Added Linux support -miguel (weird, eh? The original code was meant
9 * to emulate SunOS).
10 */
11
12#include <linux/kernel.h>
13#include <linux/sched.h>
14#include <linux/mm.h>
15#include <linux/errno.h>
16#include <linux/ptrace.h>
17#include <linux/user.h>
18#include <linux/smp.h>
19#include <linux/smp_lock.h>
20#include <linux/security.h>
21
22#include <asm/pgtable.h>
23#include <asm/system.h>
24#include <asm/uaccess.h>
25
26#define MAGIC_CONSTANT 0x80000000
27
28
29/* Returning from ptrace is a bit tricky because the low-level syscall
30 * return code assumes that any negative return value which is a valid
31 * errno means the condition codes should be set to indicate an error
32 * return. That doesn't work here, so we have this hook.
33 */
34static inline void pt_error_return(struct pt_regs *regs, unsigned long error)
35{
36 regs->u_regs[UREG_I0] = error;
37 regs->psr |= PSR_C;
38 regs->pc = regs->npc;
39 regs->npc += 4;
40}
41
42static inline void pt_succ_return(struct pt_regs *regs, unsigned long value)
43{
44 regs->u_regs[UREG_I0] = value;
45 regs->psr &= ~PSR_C;
46 regs->pc = regs->npc;
47 regs->npc += 4;
48}
49
50static void
51pt_succ_return_linux(struct pt_regs *regs, unsigned long value, long __user *addr)
52{
53 if (put_user(value, addr)) {
54 pt_error_return(regs, EFAULT);
55 return;
56 }
57 regs->u_regs[UREG_I0] = 0;
58 regs->psr &= ~PSR_C;
59 regs->pc = regs->npc;
60 regs->npc += 4;
61}
62
63static void
64pt_os_succ_return (struct pt_regs *regs, unsigned long val, long __user *addr)
65{
66 if (current->personality == PER_SUNOS)
67 pt_succ_return (regs, val);
68 else
69 pt_succ_return_linux (regs, val, addr);
70}
71
72/* Fuck me gently with a chainsaw... */
73static inline void read_sunos_user(struct pt_regs *regs, unsigned long offset,
74 struct task_struct *tsk, long __user *addr)
75{
76 struct pt_regs *cregs = tsk->thread.kregs;
77 struct thread_info *t = tsk->thread_info;
78 int v;
79
80 if(offset >= 1024)
81 offset -= 1024; /* whee... */
82 if(offset & ((sizeof(unsigned long) - 1))) {
83 pt_error_return(regs, EIO);
84 return;
85 }
86 if(offset >= 16 && offset < 784) {
87 offset -= 16; offset >>= 2;
88 pt_os_succ_return(regs, *(((unsigned long *)(&t->reg_window[0]))+offset), addr);
89 return;
90 }
91 if(offset >= 784 && offset < 832) {
92 offset -= 784; offset >>= 2;
93 pt_os_succ_return(regs, *(((unsigned long *)(&t->rwbuf_stkptrs[0]))+offset), addr);
94 return;
95 }
96 switch(offset) {
97 case 0:
98 v = t->ksp;
99 break;
100 case 4:
101 v = t->kpc;
102 break;
103 case 8:
104 v = t->kpsr;
105 break;
106 case 12:
107 v = t->uwinmask;
108 break;
109 case 832:
110 v = t->w_saved;
111 break;
112 case 896:
113 v = cregs->u_regs[UREG_I0];
114 break;
115 case 900:
116 v = cregs->u_regs[UREG_I1];
117 break;
118 case 904:
119 v = cregs->u_regs[UREG_I2];
120 break;
121 case 908:
122 v = cregs->u_regs[UREG_I3];
123 break;
124 case 912:
125 v = cregs->u_regs[UREG_I4];
126 break;
127 case 916:
128 v = cregs->u_regs[UREG_I5];
129 break;
130 case 920:
131 v = cregs->u_regs[UREG_I6];
132 break;
133 case 924:
134 if(tsk->thread.flags & MAGIC_CONSTANT)
135 v = cregs->u_regs[UREG_G1];
136 else
137 v = 0;
138 break;
139 case 940:
140 v = cregs->u_regs[UREG_I0];
141 break;
142 case 944:
143 v = cregs->u_regs[UREG_I1];
144 break;
145
146 case 948:
147 /* Isn't binary compatibility _fun_??? */
148 if(cregs->psr & PSR_C)
149 v = cregs->u_regs[UREG_I0] << 24;
150 else
151 v = 0;
152 break;
153
154 /* Rest of them are completely unsupported. */
155 default:
156 printk("%s [%d]: Wants to read user offset %ld\n",
157 current->comm, current->pid, offset);
158 pt_error_return(regs, EIO);
159 return;
160 }
161 if (current->personality == PER_SUNOS)
162 pt_succ_return (regs, v);
163 else
164 pt_succ_return_linux (regs, v, addr);
165 return;
166}
167
168static inline void write_sunos_user(struct pt_regs *regs, unsigned long offset,
169 struct task_struct *tsk)
170{
171 struct pt_regs *cregs = tsk->thread.kregs;
172 struct thread_info *t = tsk->thread_info;
173 unsigned long value = regs->u_regs[UREG_I3];
174
175 if(offset >= 1024)
176 offset -= 1024; /* whee... */
177 if(offset & ((sizeof(unsigned long) - 1)))
178 goto failure;
179 if(offset >= 16 && offset < 784) {
180 offset -= 16; offset >>= 2;
181 *(((unsigned long *)(&t->reg_window[0]))+offset) = value;
182 goto success;
183 }
184 if(offset >= 784 && offset < 832) {
185 offset -= 784; offset >>= 2;
186 *(((unsigned long *)(&t->rwbuf_stkptrs[0]))+offset) = value;
187 goto success;
188 }
189 switch(offset) {
190 case 896:
191 cregs->u_regs[UREG_I0] = value;
192 break;
193 case 900:
194 cregs->u_regs[UREG_I1] = value;
195 break;
196 case 904:
197 cregs->u_regs[UREG_I2] = value;
198 break;
199 case 908:
200 cregs->u_regs[UREG_I3] = value;
201 break;
202 case 912:
203 cregs->u_regs[UREG_I4] = value;
204 break;
205 case 916:
206 cregs->u_regs[UREG_I5] = value;
207 break;
208 case 920:
209 cregs->u_regs[UREG_I6] = value;
210 break;
211 case 924:
212 cregs->u_regs[UREG_I7] = value;
213 break;
214 case 940:
215 cregs->u_regs[UREG_I0] = value;
216 break;
217 case 944:
218 cregs->u_regs[UREG_I1] = value;
219 break;
220
221 /* Rest of them are completely unsupported or "no-touch". */
222 default:
223 printk("%s [%d]: Wants to write user offset %ld\n",
224 current->comm, current->pid, offset);
225 goto failure;
226 }
227success:
228 pt_succ_return(regs, 0);
229 return;
230failure:
231 pt_error_return(regs, EIO);
232 return;
233}
234
235/* #define ALLOW_INIT_TRACING */
236/* #define DEBUG_PTRACE */
237
238#ifdef DEBUG_PTRACE
239char *pt_rq [] = {
240 /* 0 */ "TRACEME", "PEEKTEXT", "PEEKDATA", "PEEKUSR",
241 /* 4 */ "POKETEXT", "POKEDATA", "POKEUSR", "CONT",
242 /* 8 */ "KILL", "SINGLESTEP", "SUNATTACH", "SUNDETACH",
243 /* 12 */ "GETREGS", "SETREGS", "GETFPREGS", "SETFPREGS",
244 /* 16 */ "READDATA", "WRITEDATA", "READTEXT", "WRITETEXT",
245 /* 20 */ "GETFPAREGS", "SETFPAREGS", "unknown", "unknown",
246 /* 24 */ "SYSCALL", ""
247};
248#endif
249
250/*
251 * Called by kernel/ptrace.c when detaching.
252 *
253 * Make sure single-step bits etc. are not set.
254 */
255void ptrace_disable(struct task_struct *child)
256{
257 /* nothing to do */
258}
259
260asmlinkage void do_ptrace(struct pt_regs *regs)
261{
262 unsigned long request = regs->u_regs[UREG_I0];
263 unsigned long pid = regs->u_regs[UREG_I1];
264 unsigned long addr = regs->u_regs[UREG_I2];
265 unsigned long data = regs->u_regs[UREG_I3];
266 unsigned long addr2 = regs->u_regs[UREG_I4];
267 struct task_struct *child;
268 int ret;
269
270 lock_kernel();
271#ifdef DEBUG_PTRACE
272 {
273 char *s;
274
275 if ((request >= 0) && (request <= 24))
276 s = pt_rq [request];
277 else
278 s = "unknown";
279
280 if (request == PTRACE_POKEDATA && data == 0x91d02001){
281 printk ("do_ptrace: breakpoint pid=%d, addr=%08lx addr2=%08lx\n",
282 pid, addr, addr2);
283 } else
284 printk("do_ptrace: rq=%s(%d) pid=%d addr=%08lx data=%08lx addr2=%08lx\n",
285 s, (int) request, (int) pid, addr, data, addr2);
286 }
287#endif
288 if (request == PTRACE_TRACEME) {
289 int my_ret;
290
291 /* are we already being traced? */
292 if (current->ptrace & PT_PTRACED) {
293 pt_error_return(regs, EPERM);
294 goto out;
295 }
296 my_ret = security_ptrace(current->parent, current);
297 if (my_ret) {
298 pt_error_return(regs, -my_ret);
299 goto out;
300 }
301
302 /* set the ptrace bit in the process flags. */
303 current->ptrace |= PT_PTRACED;
304 pt_succ_return(regs, 0);
305 goto out;
306 }
307#ifndef ALLOW_INIT_TRACING
308 if (pid == 1) {
309 /* Can't dork with init. */
310 pt_error_return(regs, EPERM);
311 goto out;
312 }
313#endif
314 read_lock(&tasklist_lock);
315 child = find_task_by_pid(pid);
316 if (child)
317 get_task_struct(child);
318 read_unlock(&tasklist_lock);
319
320 if (!child) {
321 pt_error_return(regs, ESRCH);
322 goto out;
323 }
324
325 if ((current->personality == PER_SUNOS && request == PTRACE_SUNATTACH)
326 || (current->personality != PER_SUNOS && request == PTRACE_ATTACH)) {
327 if (ptrace_attach(child)) {
328 pt_error_return(regs, EPERM);
329 goto out_tsk;
330 }
331 pt_succ_return(regs, 0);
332 goto out_tsk;
333 }
334
335 ret = ptrace_check_attach(child, request == PTRACE_KILL);
336 if (ret < 0) {
337 pt_error_return(regs, -ret);
338 goto out_tsk;
339 }
340
341 switch(request) {
342 case PTRACE_PEEKTEXT: /* read word at location addr. */
343 case PTRACE_PEEKDATA: {
344 unsigned long tmp;
345
346 if (access_process_vm(child, addr,
347 &tmp, sizeof(tmp), 0) == sizeof(tmp))
348 pt_os_succ_return(regs, tmp, (long __user *)data);
349 else
350 pt_error_return(regs, EIO);
351 goto out_tsk;
352 }
353
354 case PTRACE_PEEKUSR:
355 read_sunos_user(regs, addr, child, (long __user *) data);
356 goto out_tsk;
357
358 case PTRACE_POKEUSR:
359 write_sunos_user(regs, addr, child);
360 goto out_tsk;
361
362 case PTRACE_POKETEXT: /* write the word at location addr. */
363 case PTRACE_POKEDATA: {
364 if (access_process_vm(child, addr,
365 &data, sizeof(data), 1) == sizeof(data))
366 pt_succ_return(regs, 0);
367 else
368 pt_error_return(regs, EIO);
369 goto out_tsk;
370 }
371
372 case PTRACE_GETREGS: {
373 struct pt_regs __user *pregs = (struct pt_regs __user *) addr;
374 struct pt_regs *cregs = child->thread.kregs;
375 int rval;
376
377 if (!access_ok(VERIFY_WRITE, pregs, sizeof(struct pt_regs))) {
378 rval = -EFAULT;
379 pt_error_return(regs, -rval);
380 goto out_tsk;
381 }
382 __put_user(cregs->psr, (&pregs->psr));
383 __put_user(cregs->pc, (&pregs->pc));
384 __put_user(cregs->npc, (&pregs->npc));
385 __put_user(cregs->y, (&pregs->y));
386 for(rval = 1; rval < 16; rval++)
387 __put_user(cregs->u_regs[rval], (&pregs->u_regs[rval - 1]));
388 pt_succ_return(regs, 0);
389#ifdef DEBUG_PTRACE
390 printk ("PC=%x nPC=%x o7=%x\n", cregs->pc, cregs->npc, cregs->u_regs [15]);
391#endif
392 goto out_tsk;
393 }
394
395 case PTRACE_SETREGS: {
396 struct pt_regs __user *pregs = (struct pt_regs __user *) addr;
397 struct pt_regs *cregs = child->thread.kregs;
398 unsigned long psr, pc, npc, y;
399 int i;
400
401		/* Must be careful: the tracing process can only set certain
402 * bits in the psr.
403 */
404 if (!access_ok(VERIFY_READ, pregs, sizeof(struct pt_regs))) {
405 pt_error_return(regs, EFAULT);
406 goto out_tsk;
407 }
408 __get_user(psr, (&pregs->psr));
409 __get_user(pc, (&pregs->pc));
410 __get_user(npc, (&pregs->npc));
411 __get_user(y, (&pregs->y));
412 psr &= PSR_ICC;
413 cregs->psr &= ~PSR_ICC;
414 cregs->psr |= psr;
415 if (!((pc | npc) & 3)) {
416 cregs->pc = pc;
417			cregs->npc = npc;
418 }
419 cregs->y = y;
420 for(i = 1; i < 16; i++)
421 __get_user(cregs->u_regs[i], (&pregs->u_regs[i-1]));
422 pt_succ_return(regs, 0);
423 goto out_tsk;
424 }
425
426 case PTRACE_GETFPREGS: {
427 struct fps {
428 unsigned long regs[32];
429 unsigned long fsr;
430 unsigned long flags;
431 unsigned long extra;
432 unsigned long fpqd;
433 struct fq {
434 unsigned long *insnaddr;
435 unsigned long insn;
436 } fpq[16];
437 };
438 struct fps __user *fps = (struct fps __user *) addr;
439 int i;
440
441 if (!access_ok(VERIFY_WRITE, fps, sizeof(struct fps))) {
442 i = -EFAULT;
443 pt_error_return(regs, -i);
444 goto out_tsk;
445 }
446 for(i = 0; i < 32; i++)
447 __put_user(child->thread.float_regs[i], (&fps->regs[i]));
448 __put_user(child->thread.fsr, (&fps->fsr));
449 __put_user(child->thread.fpqdepth, (&fps->fpqd));
450 __put_user(0, (&fps->flags));
451 __put_user(0, (&fps->extra));
452 for(i = 0; i < 16; i++) {
453 __put_user(child->thread.fpqueue[i].insn_addr,
454 (&fps->fpq[i].insnaddr));
455 __put_user(child->thread.fpqueue[i].insn, (&fps->fpq[i].insn));
456 }
457 pt_succ_return(regs, 0);
458 goto out_tsk;
459 }
460
461 case PTRACE_SETFPREGS: {
462 struct fps {
463 unsigned long regs[32];
464 unsigned long fsr;
465 unsigned long flags;
466 unsigned long extra;
467 unsigned long fpqd;
468 struct fq {
469 unsigned long *insnaddr;
470 unsigned long insn;
471 } fpq[16];
472 };
473 struct fps __user *fps = (struct fps __user *) addr;
474 int i;
475
476 if (!access_ok(VERIFY_READ, fps, sizeof(struct fps))) {
477 i = -EFAULT;
478 pt_error_return(regs, -i);
479 goto out_tsk;
480 }
481 copy_from_user(&child->thread.float_regs[0], &fps->regs[0], (32 * sizeof(unsigned long)));
482 __get_user(child->thread.fsr, (&fps->fsr));
483 __get_user(child->thread.fpqdepth, (&fps->fpqd));
484 for(i = 0; i < 16; i++) {
485 __get_user(child->thread.fpqueue[i].insn_addr,
486 (&fps->fpq[i].insnaddr));
487 __get_user(child->thread.fpqueue[i].insn, (&fps->fpq[i].insn));
488 }
489 pt_succ_return(regs, 0);
490 goto out_tsk;
491 }
492
493 case PTRACE_READTEXT:
494 case PTRACE_READDATA: {
495 int res = ptrace_readdata(child, addr,
496 (void __user *) addr2, data);
497
498 if (res == data) {
499 pt_succ_return(regs, 0);
500 goto out_tsk;
501 }
502 /* Partial read is an IO failure */
503 if (res >= 0)
504 res = -EIO;
505 pt_error_return(regs, -res);
506 goto out_tsk;
507 }
508
509 case PTRACE_WRITETEXT:
510 case PTRACE_WRITEDATA: {
511 int res = ptrace_writedata(child, (void __user *) addr2,
512 addr, data);
513
514 if (res == data) {
515 pt_succ_return(regs, 0);
516 goto out_tsk;
517 }
518 /* Partial write is an IO failure */
519 if (res >= 0)
520 res = -EIO;
521 pt_error_return(regs, -res);
522 goto out_tsk;
523 }
524
525 case PTRACE_SYSCALL: /* continue and stop at (return from) syscall */
526 addr = 1;
527
528 case PTRACE_CONT: { /* restart after signal. */
529 if (data > _NSIG) {
530 pt_error_return(regs, EIO);
531 goto out_tsk;
532 }
533 if (addr != 1) {
534 if (addr & 3) {
535 pt_error_return(regs, EINVAL);
536 goto out_tsk;
537 }
538#ifdef DEBUG_PTRACE
539 printk ("Original: %08lx %08lx\n", child->thread.kregs->pc, child->thread.kregs->npc);
540 printk ("Continuing with %08lx %08lx\n", addr, addr+4);
541#endif
542 child->thread.kregs->pc = addr;
543 child->thread.kregs->npc = addr + 4;
544 }
545
546 if (request == PTRACE_SYSCALL)
547 set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
548 else
549 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
550
551 child->exit_code = data;
552#ifdef DEBUG_PTRACE
553 printk("CONT: %s [%d]: set exit_code = %x %lx %lx\n",
554 child->comm, child->pid, child->exit_code,
555 child->thread.kregs->pc,
556 child->thread.kregs->npc);
557#endif
558 wake_up_process(child);
559 pt_succ_return(regs, 0);
560 goto out_tsk;
561 }
562
563/*
564 * Make the child exit. The best I can do is send it a SIGKILL;
565 * perhaps it should be put in the status that it wants to
566 * exit.
567 */
568 case PTRACE_KILL: {
569 if (child->exit_state == EXIT_ZOMBIE) { /* already dead */
570 pt_succ_return(regs, 0);
571 goto out_tsk;
572 }
573 wake_up_process(child);
574 child->exit_code = SIGKILL;
575 pt_succ_return(regs, 0);
576 goto out_tsk;
577 }
578
579 case PTRACE_SUNDETACH: { /* detach a process that was attached. */
580 int err = ptrace_detach(child, data);
581 if (err) {
582 pt_error_return(regs, EIO);
583 goto out_tsk;
584 }
585 pt_succ_return(regs, 0);
586 goto out_tsk;
587 }
588
589 /* PTRACE_DUMPCORE unsupported... */
590
591 default: {
592 int err = ptrace_request(child, request, addr, data);
593 if (err)
594 pt_error_return(regs, -err);
595 else
596 pt_succ_return(regs, 0);
597 goto out_tsk;
598 }
599 }
600out_tsk:
601 if (child)
602 put_task_struct(child);
603out:
604 unlock_kernel();
605}
606
607asmlinkage void syscall_trace(void)
608{
609#ifdef DEBUG_PTRACE
610 printk("%s [%d]: syscall_trace\n", current->comm, current->pid);
611#endif
612 if (!test_thread_flag(TIF_SYSCALL_TRACE))
613 return;
614 if (!(current->ptrace & PT_PTRACED))
615 return;
616 current->thread.flags ^= MAGIC_CONSTANT;
617 ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
618 ? 0x80 : 0));
619 /*
620 * this isn't the same as continuing with a signal, but it will do
621 * for normal use. strace only continues with a signal if the
622 * stopping signal is not SIGTRAP. -brl
623 */
624#ifdef DEBUG_PTRACE
625 printk("%s [%d]: syscall_trace exit= %x\n", current->comm,
626 current->pid, current->exit_code);
627#endif
628 if (current->exit_code) {
629 send_sig (current->exit_code, current, 1);
630 current->exit_code = 0;
631 }
632}
diff --git a/arch/sparc/kernel/rtrap.S b/arch/sparc/kernel/rtrap.S
new file mode 100644
index 000000000000..f7460d897e79
--- /dev/null
+++ b/arch/sparc/kernel/rtrap.S
@@ -0,0 +1,319 @@
1/* $Id: rtrap.S,v 1.58 2002/01/31 03:30:05 davem Exp $
2 * rtrap.S: Return from Sparc trap low-level code.
3 *
4 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
5 */
6
7#include <asm/page.h>
8#include <asm/ptrace.h>
9#include <asm/psr.h>
10#include <asm/asi.h>
11#include <asm/smp.h>
12#include <asm/contregs.h>
13#include <asm/winmacro.h>
14#include <asm/asmmacro.h>
15#include <asm/thread_info.h>
16
17#define t_psr l0
18#define t_pc l1
19#define t_npc l2
20#define t_wim l3
21#define twin_tmp1 l4
22#define glob_tmp g4
23#define curptr g6
24
25 /* 7 WINDOW SPARC PATCH INSTRUCTIONS */
26 .globl rtrap_7win_patch1, rtrap_7win_patch2, rtrap_7win_patch3
27 .globl rtrap_7win_patch4, rtrap_7win_patch5
28rtrap_7win_patch1: srl %t_wim, 0x6, %glob_tmp
29rtrap_7win_patch2: and %glob_tmp, 0x7f, %glob_tmp
30rtrap_7win_patch3: srl %g1, 7, %g2
31rtrap_7win_patch4: srl %g2, 6, %g2
32rtrap_7win_patch5: and %g1, 0x7f, %g1
33 /* END OF PATCH INSTRUCTIONS */
34
35 /* We need to check for a few things which are:
36	 * 1) The need to call schedule() because this
37	 *    process's quantum is up.
38	 * 2) Pending signals for this process; if any
39	 *    exist we need to call do_signal() to do
40	 *    the needful.
41	 *
42	 * Else we just check if the rett would land us
43	 * in an invalid window; if so, we need to grab
44	 * it off the user/kernel stack first.
45 */
46
47 .globl ret_trap_entry, rtrap_patch1, rtrap_patch2
48 .globl rtrap_patch3, rtrap_patch4, rtrap_patch5
49 .globl ret_trap_lockless_ipi
50ret_trap_entry:
51ret_trap_lockless_ipi:
52 andcc %t_psr, PSR_PS, %g0
53 be 1f
54 nop
55
56 wr %t_psr, 0x0, %psr
57 b ret_trap_kernel
58 nop
59
601:
61 ld [%curptr + TI_FLAGS], %g2
62 andcc %g2, (_TIF_NEED_RESCHED), %g0
63 be signal_p
64 nop
65
66 call schedule
67 nop
68
69 ld [%curptr + TI_FLAGS], %g2
70signal_p:
71 andcc %g2, (_TIF_NOTIFY_RESUME|_TIF_SIGPENDING), %g0
72 bz,a ret_trap_continue
73 ld [%sp + STACKFRAME_SZ + PT_PSR], %t_psr
74
75 clr %o0
76 mov %l5, %o2
77 mov %l6, %o3
78 call do_signal
79 add %sp, STACKFRAME_SZ, %o1 ! pt_regs ptr
80
81 /* Fall through. */
82 ld [%sp + STACKFRAME_SZ + PT_PSR], %t_psr
83 clr %l6
84ret_trap_continue:
85 wr %t_psr, 0x0, %psr
86 WRITE_PAUSE
87
88 ld [%curptr + TI_W_SAVED], %twin_tmp1
89 orcc %g0, %twin_tmp1, %g0
90 be ret_trap_nobufwins
91 nop
92
93 wr %t_psr, PSR_ET, %psr
94 WRITE_PAUSE
95
96 mov 1, %o1
97 call try_to_clear_window_buffer
98 add %sp, STACKFRAME_SZ, %o0
99
100 b signal_p
101 ld [%curptr + TI_FLAGS], %g2
102
103ret_trap_nobufwins:
104 /* Load up the user's out registers so we can pull
105 * a window from the stack, if necessary.
106 */
107 LOAD_PT_INS(sp)
108
109 /* If there are already live user windows in the
110 * set we can return from trap safely.
111 */
112 ld [%curptr + TI_UWINMASK], %twin_tmp1
113 orcc %g0, %twin_tmp1, %g0
114 bne ret_trap_userwins_ok
115 nop
116
117	/* Calculate the new %wim; we have to pull a register
118	 * window from the user's stack.
119 */
120ret_trap_pull_one_window:
121 rd %wim, %t_wim
122 sll %t_wim, 0x1, %twin_tmp1
123rtrap_patch1: srl %t_wim, 0x7, %glob_tmp
124 or %glob_tmp, %twin_tmp1, %glob_tmp
125rtrap_patch2: and %glob_tmp, 0xff, %glob_tmp
126
127 wr %glob_tmp, 0x0, %wim
128
129 /* Here comes the architecture specific
130 * branch to the user stack checking routine
131 * for return from traps.
132 */
133 .globl rtrap_mmu_patchme
134rtrap_mmu_patchme: b sun4c_rett_stackchk
135 andcc %fp, 0x7, %g0
136
137ret_trap_userwins_ok:
138 LOAD_PT_PRIV(sp, t_psr, t_pc, t_npc)
139 or %t_pc, %t_npc, %g2
140 andcc %g2, 0x3, %g0
141 be 1f
142 nop
143
144 b ret_trap_unaligned_pc
145 add %sp, STACKFRAME_SZ, %o0
146
1471:
148 LOAD_PT_YREG(sp, g1)
149 LOAD_PT_GLOBALS(sp)
150
151 wr %t_psr, 0x0, %psr
152 WRITE_PAUSE
153
154 jmp %t_pc
155 rett %t_npc
156
157ret_trap_unaligned_pc:
158 ld [%sp + STACKFRAME_SZ + PT_PC], %o1
159 ld [%sp + STACKFRAME_SZ + PT_NPC], %o2
160 ld [%sp + STACKFRAME_SZ + PT_PSR], %o3
161
162 wr %t_wim, 0x0, %wim ! or else...
163
164 wr %t_psr, PSR_ET, %psr
165 WRITE_PAUSE
166
167 call do_memaccess_unaligned
168 nop
169
170 b signal_p
171 ld [%curptr + TI_FLAGS], %g2
172
173ret_trap_kernel:
174 /* Will the rett land us in the invalid window? */
175 mov 2, %g1
176 sll %g1, %t_psr, %g1
177rtrap_patch3: srl %g1, 8, %g2
178 or %g1, %g2, %g1
179 rd %wim, %g2
180 andcc %g2, %g1, %g0
181 be 1f ! Nope, just return from the trap
182 sll %g2, 0x1, %g1
183
184 /* We have to grab a window before returning. */
185rtrap_patch4: srl %g2, 7, %g2
186 or %g1, %g2, %g1
187rtrap_patch5: and %g1, 0xff, %g1
188
189 wr %g1, 0x0, %wim
190
191 /* Grrr, make sure we load from the right %sp... */
192 LOAD_PT_ALL(sp, t_psr, t_pc, t_npc, g1)
193
194 restore %g0, %g0, %g0
195 LOAD_WINDOW(sp)
196 b 2f
197 save %g0, %g0, %g0
198
199 /* Reload the entire frame in case this is from a
200 * kernel system call or whatever...
201 */
2021:
203 LOAD_PT_ALL(sp, t_psr, t_pc, t_npc, g1)
2042:
205 wr %t_psr, 0x0, %psr
206 WRITE_PAUSE
207
208 jmp %t_pc
209 rett %t_npc
210
211ret_trap_user_stack_is_bolixed:
212 wr %t_wim, 0x0, %wim
213
214 wr %t_psr, PSR_ET, %psr
215 WRITE_PAUSE
216
217 call window_ret_fault
218 add %sp, STACKFRAME_SZ, %o0
219
220 b signal_p
221 ld [%curptr + TI_FLAGS], %g2
222
223
224 .globl sun4c_rett_stackchk
225sun4c_rett_stackchk:
226 be 1f
227 and %fp, 0xfff, %g1 ! delay slot
228
229 b ret_trap_user_stack_is_bolixed + 0x4
230 wr %t_wim, 0x0, %wim
231
232 /* See if we have to check the sanity of one page or two */
2331:
234 add %g1, 0x38, %g1
235 sra %fp, 29, %g2
236 add %g2, 0x1, %g2
237 andncc %g2, 0x1, %g0
238 be 1f
239 andncc %g1, 0xff8, %g0
240
241 /* %sp is in vma hole, yuck */
242 b ret_trap_user_stack_is_bolixed + 0x4
243 wr %t_wim, 0x0, %wim
244
2451:
246 be sun4c_rett_onepage /* Only one page to check */
247 lda [%fp] ASI_PTE, %g2
248
249sun4c_rett_twopages:
250 add %fp, 0x38, %g1
251 sra %g1, 29, %g2
252 add %g2, 0x1, %g2
253 andncc %g2, 0x1, %g0
254 be 1f
255 lda [%g1] ASI_PTE, %g2
256
257 /* Second page is in vma hole */
258 b ret_trap_user_stack_is_bolixed + 0x4
259 wr %t_wim, 0x0, %wim
260
2611:
262 srl %g2, 29, %g2
263 andcc %g2, 0x4, %g0
264 bne sun4c_rett_onepage
265 lda [%fp] ASI_PTE, %g2
266
267 /* Second page has bad perms */
268 b ret_trap_user_stack_is_bolixed + 0x4
269 wr %t_wim, 0x0, %wim
270
271sun4c_rett_onepage:
272 srl %g2, 29, %g2
273 andcc %g2, 0x4, %g0
274 bne,a 1f
275 restore %g0, %g0, %g0
276
277 /* A page had bad page permissions, losing... */
278 b ret_trap_user_stack_is_bolixed + 0x4
279 wr %t_wim, 0x0, %wim
280
281 /* Whee, things are ok, load the window and continue. */
2821:
283 LOAD_WINDOW(sp)
284
285 b ret_trap_userwins_ok
286 save %g0, %g0, %g0
287
288 .globl srmmu_rett_stackchk
289srmmu_rett_stackchk:
290 bne ret_trap_user_stack_is_bolixed
291 sethi %hi(PAGE_OFFSET), %g1
292 cmp %g1, %fp
293 bleu ret_trap_user_stack_is_bolixed
294 mov AC_M_SFSR, %g1
295 lda [%g1] ASI_M_MMUREGS, %g0
296
297 lda [%g0] ASI_M_MMUREGS, %g1
298 or %g1, 0x2, %g1
299 sta %g1, [%g0] ASI_M_MMUREGS
300
301 restore %g0, %g0, %g0
302
303 LOAD_WINDOW(sp)
304
305 save %g0, %g0, %g0
306
307 andn %g1, 0x2, %g1
308 sta %g1, [%g0] ASI_M_MMUREGS
309
310 mov AC_M_SFAR, %g2
311 lda [%g2] ASI_M_MMUREGS, %g2
312
313 mov AC_M_SFSR, %g1
314 lda [%g1] ASI_M_MMUREGS, %g1
315 andcc %g1, 0x2, %g0
316 be ret_trap_userwins_ok
317 nop
318
319 b,a ret_trap_user_stack_is_bolixed
diff --git a/arch/sparc/kernel/sclow.S b/arch/sparc/kernel/sclow.S
new file mode 100644
index 000000000000..3a867fc19927
--- /dev/null
+++ b/arch/sparc/kernel/sclow.S
@@ -0,0 +1,86 @@
1/* sclow.S: Low level special syscall handling.
2 * Basically these are cases where we can completely
3 * handle the system call without saving any state
4 * because we know that the process will not sleep.
5 *
6 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
7 */
8
9#include <asm/ptrace.h>
10#include <asm/asm_offsets.h>
11#include <asm/errno.h>
12#include <asm/winmacro.h>
13#include <asm/thread_info.h>
14#include <asm/psr.h>
15#include <asm/page.h>
16
17#define CC_AND_RETT \
18 set PSR_C, %l4; \
19 andn %l0, %l4, %l4; \
20 wr %l4, 0x0, %psr; \
21 nop; nop; nop; \
22 jmp %l2; \
23 rett %l2 + 4;
24
25#define SC_AND_RETT \
26 set PSR_C, %l4; \
27 or %l0, %l4, %l4; \
28 wr %l4, 0x0, %psr; \
29 nop; nop; nop; \
30 jmp %l2; \
31 rett %l2 + 4;
32
33#define LABEL(func) func##_low
34
35 .globl LABEL(sunosnop)
36LABEL(sunosnop):
37 CC_AND_RETT
38
39#if (ASIZ_task_uid == 2 && ASIZ_task_euid == 2)
40 .globl LABEL(sunosgetuid)
41LABEL(sunosgetuid):
42 LOAD_CURRENT(l4, l5)
43 ld [%l4 + TI_TASK], %l4
44 lduh [%l4 + AOFF_task_uid], %i0
45 lduh [%l4 + AOFF_task_euid], %i1
46 CC_AND_RETT
47#endif
48
49#if (ASIZ_task_gid == 2 && ASIZ_task_egid == 2)
50 .globl LABEL(sunosgetgid)
51LABEL(sunosgetgid):
52 LOAD_CURRENT(l4, l5)
53 ld [%l4 + TI_TASK], %l4
54 lduh [%l4 + AOFF_task_gid], %i0
55 lduh [%l4 + AOFF_task_egid], %i1
56 CC_AND_RETT
57#endif
58
59 .globl LABEL(sunosmctl)
60LABEL(sunosmctl):
61 mov 0, %i0
62 CC_AND_RETT
63
64 .globl LABEL(sunosgdtsize)
65LABEL(sunosgdtsize):
66 mov 256, %i0
67 CC_AND_RETT
68
69 .globl LABEL(getpagesize)
70LABEL(getpagesize):
71 set PAGE_SIZE, %i0
72 CC_AND_RETT
73
74 /* XXX sys_nice() XXX */
75 /* XXX sys_setpriority() XXX */
76 /* XXX sys_getpriority() XXX */
77 /* XXX sys_setregid() XXX */
78 /* XXX sys_setgid() XXX */
79 /* XXX sys_setreuid() XXX */
80 /* XXX sys_setuid() XXX */
81 /* XXX sys_setfsuid() XXX */
82 /* XXX sys_setfsgid() XXX */
83 /* XXX sys_setpgid() XXX */
84 /* XXX sys_getpgid() XXX */
85 /* XXX sys_setsid() XXX */
86 /* XXX sys_getsid() XXX */
diff --git a/arch/sparc/kernel/semaphore.c b/arch/sparc/kernel/semaphore.c
new file mode 100644
index 000000000000..0c37c1a7cd7e
--- /dev/null
+++ b/arch/sparc/kernel/semaphore.c
@@ -0,0 +1,155 @@
1/* $Id: semaphore.c,v 1.7 2001/04/18 21:06:05 davem Exp $ */
2
3/* sparc32 semaphore implementation, based on i386 version */
4
5#include <linux/sched.h>
6#include <linux/errno.h>
7#include <linux/init.h>
8
9#include <asm/semaphore.h>
10
11/*
12 * Semaphores are implemented using a two-way counter:
13 * The "count" variable is decremented for each process
14 * that tries to acquire the semaphore, while the "sleepers"
15 * variable is a count of such acquires.
16 *
17 * Notably, the inline "up()" and "down()" functions can
18 * efficiently test if they need to do any extra work (up
19 * needs to do something only if count was negative before
20 * the increment operation).
21 *
22 * "sleepers" and the contention-routine ordering are
23 * protected by the semaphore spinlock.
24 *
25 * Note that these functions are only called when there is
26 * contention on the lock, and as such all this is the
27 * "non-critical" part of the whole semaphore business. The
28 * critical part is the inline stuff in <asm/semaphore.h>
29 * where we want to avoid any extra jumps and calls.
30 */
31
32/*
33 * Logic:
34 * - only on a boundary condition do we need to care. When we go
35 * from a negative count to a non-negative, we wake people up.
36 * - when we go from a non-negative count to a negative one, we
37 * (a) synchronize with the "sleeper" count and (b) make sure
38 * that we're on the wakeup list before we synchronize so that
39 * we cannot lose wakeup events.
40 */
41
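For orientation, a hypothetical caller is sketched below (my_lock and my_operation are invented names). The uncontended down()/up() fast paths are inlined from <asm/semaphore.h>, as the comment above notes; only contended calls reach the __down()/__up() routines in this file.

#include <asm/semaphore.h>
#include <linux/errno.h>

static DECLARE_MUTEX(my_lock);		/* semaphore with count initialized to 1 */

static int my_operation(void)
{
	if (down_interruptible(&my_lock))
		return -ERESTARTSYS;	/* a signal arrived while sleeping */

	/* ... critical section protected by my_lock ... */

	up(&my_lock);
	return 0;
}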
42void __up(struct semaphore *sem)
43{
44 wake_up(&sem->wait);
45}
46
47static DEFINE_SPINLOCK(semaphore_lock);
48
49void __sched __down(struct semaphore * sem)
50{
51 struct task_struct *tsk = current;
52 DECLARE_WAITQUEUE(wait, tsk);
53 tsk->state = TASK_UNINTERRUPTIBLE;
54 add_wait_queue_exclusive(&sem->wait, &wait);
55
56 spin_lock_irq(&semaphore_lock);
57 sem->sleepers++;
58 for (;;) {
59 int sleepers = sem->sleepers;
60
61 /*
62 * Add "everybody else" into it. They aren't
63 * playing, because we own the spinlock.
64 */
65 if (!atomic24_add_negative(sleepers - 1, &sem->count)) {
66 sem->sleepers = 0;
67 break;
68 }
69 sem->sleepers = 1; /* us - see -1 above */
70 spin_unlock_irq(&semaphore_lock);
71
72 schedule();
73 tsk->state = TASK_UNINTERRUPTIBLE;
74 spin_lock_irq(&semaphore_lock);
75 }
76 spin_unlock_irq(&semaphore_lock);
77 remove_wait_queue(&sem->wait, &wait);
78 tsk->state = TASK_RUNNING;
79 wake_up(&sem->wait);
80}
81
82int __sched __down_interruptible(struct semaphore * sem)
83{
84 int retval = 0;
85 struct task_struct *tsk = current;
86 DECLARE_WAITQUEUE(wait, tsk);
87 tsk->state = TASK_INTERRUPTIBLE;
88 add_wait_queue_exclusive(&sem->wait, &wait);
89
90 spin_lock_irq(&semaphore_lock);
91 sem->sleepers ++;
92 for (;;) {
93 int sleepers = sem->sleepers;
94
95 /*
96 * With signals pending, this turns into
97 * the trylock failure case - we won't be
98		 * sleeping, and we can't get the lock as
99 * it has contention. Just correct the count
100 * and exit.
101 */
102 if (signal_pending(current)) {
103 retval = -EINTR;
104 sem->sleepers = 0;
105 atomic24_add(sleepers, &sem->count);
106 break;
107 }
108
109 /*
110 * Add "everybody else" into it. They aren't
111 * playing, because we own the spinlock. The
112 * "-1" is because we're still hoping to get
113 * the lock.
114 */
115 if (!atomic24_add_negative(sleepers - 1, &sem->count)) {
116 sem->sleepers = 0;
117 break;
118 }
119 sem->sleepers = 1; /* us - see -1 above */
120 spin_unlock_irq(&semaphore_lock);
121
122 schedule();
123 tsk->state = TASK_INTERRUPTIBLE;
124 spin_lock_irq(&semaphore_lock);
125 }
126 spin_unlock_irq(&semaphore_lock);
127 tsk->state = TASK_RUNNING;
128 remove_wait_queue(&sem->wait, &wait);
129 wake_up(&sem->wait);
130 return retval;
131}
132
133/*
134 * Trylock failed - make sure we correct for
135 * having decremented the count.
136 */
137int __down_trylock(struct semaphore * sem)
138{
139 int sleepers;
140 unsigned long flags;
141
142 spin_lock_irqsave(&semaphore_lock, flags);
143 sleepers = sem->sleepers + 1;
144 sem->sleepers = 0;
145
146 /*
147 * Add "everybody else" and us into it. They aren't
148 * playing, because we own the spinlock.
149 */
150 if (!atomic24_add_negative(sleepers, &sem->count))
151 wake_up(&sem->wait);
152
153 spin_unlock_irqrestore(&semaphore_lock, flags);
154 return 1;
155}
diff --git a/arch/sparc/kernel/setup.c b/arch/sparc/kernel/setup.c
new file mode 100644
index 000000000000..55352ed85e8a
--- /dev/null
+++ b/arch/sparc/kernel/setup.c
@@ -0,0 +1,476 @@
1/* $Id: setup.c,v 1.126 2001/11/13 00:49:27 davem Exp $
2 * linux/arch/sparc/kernel/setup.c
3 *
4 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 2000 Anton Blanchard (anton@samba.org)
6 */
7
8#include <linux/errno.h>
9#include <linux/sched.h>
10#include <linux/kernel.h>
11#include <linux/mm.h>
12#include <linux/stddef.h>
13#include <linux/unistd.h>
14#include <linux/ptrace.h>
15#include <linux/slab.h>
16#include <linux/initrd.h>
17#include <asm/smp.h>
18#include <linux/user.h>
19#include <linux/a.out.h>
20#include <linux/tty.h>
21#include <linux/delay.h>
22#include <linux/config.h>
23#include <linux/fs.h>
24#include <linux/seq_file.h>
25#include <linux/syscalls.h>
26#include <linux/kdev_t.h>
27#include <linux/major.h>
28#include <linux/string.h>
29#include <linux/init.h>
30#include <linux/interrupt.h>
31#include <linux/console.h>
32#include <linux/spinlock.h>
33#include <linux/root_dev.h>
34
35#include <asm/segment.h>
36#include <asm/system.h>
37#include <asm/io.h>
38#include <asm/processor.h>
39#include <asm/oplib.h>
40#include <asm/page.h>
41#include <asm/pgtable.h>
42#include <asm/traps.h>
43#include <asm/vaddrs.h>
44#include <asm/kdebug.h>
45#include <asm/mbus.h>
46#include <asm/idprom.h>
47#include <asm/machines.h>
48#include <asm/cpudata.h>
49#include <asm/setup.h>
50
51struct screen_info screen_info = {
52 0, 0, /* orig-x, orig-y */
53 0, /* unused */
54 0, /* orig-video-page */
55 0, /* orig-video-mode */
56 128, /* orig-video-cols */
57 0,0,0, /* ega_ax, ega_bx, ega_cx */
58 54, /* orig-video-lines */
59 0, /* orig-video-isVGA */
60 16 /* orig-video-points */
61};
62
63/* Typing sync at the prom prompt calls the function pointed to by
64 * romvec->pv_synchook which I set to the following function.
65 * This should sync all filesystems and return; for now it just
66 * prints out pretty messages and returns.
67 */
68
69extern unsigned long trapbase;
70void (*prom_palette)(int);
71
72/* Pretty sick eh? */
73void prom_sync_me(void)
74{
75 unsigned long prom_tbr, flags;
76
77 /* XXX Badly broken. FIX! - Anton */
78 local_irq_save(flags);
79 __asm__ __volatile__("rd %%tbr, %0\n\t" : "=r" (prom_tbr));
80 __asm__ __volatile__("wr %0, 0x0, %%tbr\n\t"
81 "nop\n\t"
82 "nop\n\t"
83 "nop\n\t" : : "r" (&trapbase));
84
85 if (prom_palette)
86 prom_palette(1);
87 prom_printf("PROM SYNC COMMAND...\n");
88 show_free_areas();
89 if(current->pid != 0) {
90 local_irq_enable();
91 sys_sync();
92 local_irq_disable();
93 }
94 prom_printf("Returning to prom\n");
95
96 __asm__ __volatile__("wr %0, 0x0, %%tbr\n\t"
97 "nop\n\t"
98 "nop\n\t"
99 "nop\n\t" : : "r" (prom_tbr));
100 local_irq_restore(flags);
101
102 return;
103}
104
105unsigned int boot_flags __initdata = 0;
106#define BOOTME_DEBUG 0x1
107#define BOOTME_SINGLE 0x2
108
109/* Exported for mm/init.c:paging_init. */
110unsigned long cmdline_memory_size __initdata = 0;
111
112static void
113prom_console_write(struct console *con, const char *s, unsigned n)
114{
115 prom_write(s, n);
116}
117
118static struct console prom_debug_console = {
119 .name = "debug",
120 .write = prom_console_write,
121 .flags = CON_PRINTBUFFER,
122 .index = -1,
123};
124
125int obp_system_intr(void)
126{
127 if (boot_flags & BOOTME_DEBUG) {
128 printk("OBP: system interrupted\n");
129 prom_halt();
130 return 1;
131 }
132 return 0;
133}
134
135/*
136 * Process kernel command line switches that are specific to the
137 * SPARC or that require special low-level processing.
138 */
139static void __init process_switch(char c)
140{
141 switch (c) {
142 case 'd':
143 boot_flags |= BOOTME_DEBUG;
144 break;
145 case 's':
146 boot_flags |= BOOTME_SINGLE;
147 break;
148 case 'h':
149 prom_printf("boot_flags_init: Halt!\n");
150 prom_halt();
151 break;
152 case 'p':
153 /* Use PROM debug console. */
154 register_console(&prom_debug_console);
155 break;
156 default:
157 printk("Unknown boot switch (-%c)\n", c);
158 break;
159 }
160}
161
162static void __init process_console(char *commands)
163{
164 serial_console = 0;
165 commands += 8;
166 /* Linux-style serial */
167 if (!strncmp(commands, "ttyS", 4))
168 serial_console = simple_strtoul(commands + 4, NULL, 10) + 1;
169 else if (!strncmp(commands, "tty", 3)) {
170 char c = *(commands + 3);
171 /* Solaris-style serial */
172 if (c == 'a' || c == 'b')
173 serial_console = c - 'a' + 1;
174 /* else Linux-style fbcon, not serial */
175 }
176#if defined(CONFIG_PROM_CONSOLE)
177 if (!strncmp(commands, "prom", 4)) {
178 char *p;
179
180 for (p = commands - 8; *p && *p != ' '; p++)
181 *p = ' ';
182 conswitchp = &prom_con;
183 }
184#endif
185}
186
187static void __init boot_flags_init(char *commands)
188{
189 while (*commands) {
190 /* Move to the start of the next "argument". */
191 while (*commands && *commands == ' ')
192 commands++;
193
194 /* Process any command switches, otherwise skip it. */
195 if (*commands == '\0')
196 break;
197 if (*commands == '-') {
198 commands++;
199 while (*commands && *commands != ' ')
200 process_switch(*commands++);
201 continue;
202 }
203 if (!strncmp(commands, "console=", 8)) {
204 process_console(commands);
205 } else if (!strncmp(commands, "mem=", 4)) {
206 /*
207 * "mem=XXX[kKmM]" overrides the PROM-reported
208 * memory size.
209 */
210 cmdline_memory_size = simple_strtoul(commands + 4,
211 &commands, 0);
212 if (*commands == 'K' || *commands == 'k') {
213 cmdline_memory_size <<= 10;
214 commands++;
215 } else if (*commands=='M' || *commands=='m') {
216 cmdline_memory_size <<= 20;
217 commands++;
218 }
219 }
220 while (*commands && *commands != ' ')
221 commands++;
222 }
223}
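
/*
 * Worked example, illustrative only (not part of the original source): for
 * a command line containing "mem=64M", simple_strtoul() above returns 64,
 * the 'M' suffix shifts it left by 20, and cmdline_memory_size becomes
 * 64 << 20 == 67108864 bytes.  "mem=65536K" reaches the same value through
 * the << 10 branch; with no suffix the number is taken as raw bytes.
 */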
224
225/* This routine will in the future do all the nasty prom stuff
226 * to probe for the mmu type and its parameters, etc. This will
227 * also be where SMP things happen, plus the Sparc-specific
228 * physical memory probe as on the Alpha.
229 */
230
231extern int prom_probe_memory(void);
232extern void sun4c_probe_vac(void);
233extern char cputypval;
234extern unsigned long start, end;
235extern void panic_setup(char *, int *);
236
237extern unsigned short root_flags;
238extern unsigned short root_dev;
239extern unsigned short ram_flags;
240#define RAMDISK_IMAGE_START_MASK 0x07FF
241#define RAMDISK_PROMPT_FLAG 0x8000
242#define RAMDISK_LOAD_FLAG 0x4000
243
244extern int root_mountflags;
245
246char reboot_command[COMMAND_LINE_SIZE];
247enum sparc_cpu sparc_cpu_model;
248
249struct tt_entry *sparc_ttable;
250
251struct pt_regs fake_swapper_regs;
252
253extern void paging_init(void);
254
255void __init setup_arch(char **cmdline_p)
256{
257 int i;
258 unsigned long highest_paddr;
259
260 sparc_ttable = (struct tt_entry *) &start;
261
262 /* Initialize PROM console and command line. */
263 *cmdline_p = prom_getbootargs();
264 strcpy(saved_command_line, *cmdline_p);
265
266 /* Set sparc_cpu_model */
267 sparc_cpu_model = sun_unknown;
268 if(!strcmp(&cputypval,"sun4 ")) { sparc_cpu_model=sun4; }
269 if(!strcmp(&cputypval,"sun4c")) { sparc_cpu_model=sun4c; }
270 if(!strcmp(&cputypval,"sun4m")) { sparc_cpu_model=sun4m; }
271 if(!strcmp(&cputypval,"sun4s")) { sparc_cpu_model=sun4m; } /* CP-1200 with PROM 2.30 -E */
272 if(!strcmp(&cputypval,"sun4d")) { sparc_cpu_model=sun4d; }
273 if(!strcmp(&cputypval,"sun4e")) { sparc_cpu_model=sun4e; }
274 if(!strcmp(&cputypval,"sun4u")) { sparc_cpu_model=sun4u; }
275
276#ifdef CONFIG_SUN4
277 if (sparc_cpu_model != sun4) {
278 prom_printf("This kernel is for Sun4 architecture only.\n");
279 prom_halt();
280 }
281#endif
282 printk("ARCH: ");
283 switch(sparc_cpu_model) {
284 case sun4:
285 printk("SUN4\n");
286 break;
287 case sun4c:
288 printk("SUN4C\n");
289 break;
290 case sun4m:
291 printk("SUN4M\n");
292 break;
293 case sun4d:
294 printk("SUN4D\n");
295 break;
296 case sun4e:
297 printk("SUN4E\n");
298 break;
299 case sun4u:
300 printk("SUN4U\n");
301 break;
302 default:
303 printk("UNKNOWN!\n");
304 break;
305 };
306
307#ifdef CONFIG_DUMMY_CONSOLE
308 conswitchp = &dummy_con;
309#elif defined(CONFIG_PROM_CONSOLE)
310 conswitchp = &prom_con;
311#endif
312 boot_flags_init(*cmdline_p);
313
314 idprom_init();
315 if (ARCH_SUN4C_SUN4)
316 sun4c_probe_vac();
317 load_mmu();
318 (void) prom_probe_memory();
319
320 phys_base = 0xffffffffUL;
321 highest_paddr = 0UL;
322 for (i = 0; sp_banks[i].num_bytes != 0; i++) {
323 unsigned long top;
324
325 if (sp_banks[i].base_addr < phys_base)
326 phys_base = sp_banks[i].base_addr;
327 top = sp_banks[i].base_addr +
328 sp_banks[i].num_bytes;
329 if (highest_paddr < top)
330 highest_paddr = top;
331 }
332 pfn_base = phys_base >> PAGE_SHIFT;
333
334 if (!root_flags)
335 root_mountflags &= ~MS_RDONLY;
336 ROOT_DEV = old_decode_dev(root_dev);
337#ifdef CONFIG_BLK_DEV_INITRD
338 rd_image_start = ram_flags & RAMDISK_IMAGE_START_MASK;
339 rd_prompt = ((ram_flags & RAMDISK_PROMPT_FLAG) != 0);
340 rd_doload = ((ram_flags & RAMDISK_LOAD_FLAG) != 0);
341#endif
342
343 prom_setsync(prom_sync_me);
344
345 if((boot_flags&BOOTME_DEBUG) && (linux_dbvec!=0) &&
346 ((*(short *)linux_dbvec) != -1)) {
347 printk("Booted under KADB. Syncing trap table.\n");
348 (*(linux_dbvec->teach_debugger))();
349 }
350
351 init_mm.context = (unsigned long) NO_CONTEXT;
352 init_task.thread.kregs = &fake_swapper_regs;
353
354 paging_init();
355}
356
357static int __init set_preferred_console(void)
358{
359 int idev, odev;
360
361 /* The user has requested a console so this is already set up. */
362 if (serial_console >= 0)
363 return -EBUSY;
364
365 idev = prom_query_input_device();
366 odev = prom_query_output_device();
367 if (idev == PROMDEV_IKBD && odev == PROMDEV_OSCREEN) {
368 serial_console = 0;
369 } else if (idev == PROMDEV_ITTYA && odev == PROMDEV_OTTYA) {
370 serial_console = 1;
371 } else if (idev == PROMDEV_ITTYB && odev == PROMDEV_OTTYB) {
372 serial_console = 2;
373 } else if (idev == PROMDEV_I_UNK && odev == PROMDEV_OTTYA) {
374 prom_printf("MrCoffee ttya\n");
375 serial_console = 1;
376 } else if (idev == PROMDEV_I_UNK && odev == PROMDEV_OSCREEN) {
377 serial_console = 0;
378 prom_printf("MrCoffee keyboard\n");
379 } else {
380 prom_printf("Confusing console (idev %d, odev %d)\n",
381 idev, odev);
382 serial_console = 1;
383 }
384
385 if (serial_console)
386 return add_preferred_console("ttyS", serial_console - 1, NULL);
387
388 return -ENODEV;
389}
390console_initcall(set_preferred_console);
391
392extern char *sparc_cpu_type;
393extern char *sparc_fpu_type;
394
395static int show_cpuinfo(struct seq_file *m, void *__unused)
396{
397 seq_printf(m,
398 "cpu\t\t: %s\n"
399 "fpu\t\t: %s\n"
400 "promlib\t\t: Version %d Revision %d\n"
401 "prom\t\t: %d.%d\n"
402 "type\t\t: %s\n"
403 "ncpus probed\t: %d\n"
404 "ncpus active\t: %d\n"
405#ifndef CONFIG_SMP
406 "CPU0Bogo\t: %lu.%02lu\n"
407 "CPU0ClkTck\t: %ld\n"
408#endif
409 ,
410 sparc_cpu_type ? sparc_cpu_type : "undetermined",
411 sparc_fpu_type ? sparc_fpu_type : "undetermined",
412 romvec->pv_romvers,
413 prom_rev,
414 romvec->pv_printrev >> 16,
415 romvec->pv_printrev & 0xffff,
416 &cputypval,
417 num_possible_cpus(),
418 num_online_cpus()
419#ifndef CONFIG_SMP
420 , cpu_data(0).udelay_val/(500000/HZ),
421 (cpu_data(0).udelay_val/(5000/HZ)) % 100,
422 cpu_data(0).clock_tick
423#endif
424 );
425
426#ifdef CONFIG_SMP
427 smp_bogo(m);
428#endif
429 mmu_info(m);
430#ifdef CONFIG_SMP
431 smp_info(m);
432#endif
433 return 0;
434}
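
/*
 * Worked example, illustrative only (not part of the original source): the
 * BogoMIPS lines above are derived from udelay_val (i.e. loops_per_jiffy).
 * Assuming HZ == 100, the divisors are 500000/HZ == 5000 and 5000/HZ == 50,
 * so a udelay_val of 249856 prints as 249856/5000 == 49 and
 * (249856/50) % 100 == 97, i.e. "CPU0Bogo: 49.97".
 */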
435
436static void *c_start(struct seq_file *m, loff_t *pos)
437{
438 /* The pointer we are returning is arbitrary,
439 * it just has to be non-NULL and not IS_ERR
440 * in the success case.
441 */
442 return *pos == 0 ? &c_start : NULL;
443}
444
445static void *c_next(struct seq_file *m, void *v, loff_t *pos)
446{
447 ++*pos;
448 return c_start(m, pos);
449}
450
451static void c_stop(struct seq_file *m, void *v)
452{
453}
454
455struct seq_operations cpuinfo_op = {
456 .start =c_start,
457 .next = c_next,
458 .stop = c_stop,
459 .show = show_cpuinfo,
460};
461
462extern int stop_a_enabled;
463
464void sun_do_break(void)
465{
466 if (!stop_a_enabled)
467 return;
468
469 printk("\n");
470 flush_user_windows();
471
472 prom_cmdline();
473}
474
475int serial_console = -1;
476int stop_a_enabled = 1;
diff --git a/arch/sparc/kernel/signal.c b/arch/sparc/kernel/signal.c
new file mode 100644
index 000000000000..011ff35057a5
--- /dev/null
+++ b/arch/sparc/kernel/signal.c
@@ -0,0 +1,1181 @@
1/* $Id: signal.c,v 1.110 2002/02/08 03:57:14 davem Exp $
2 * linux/arch/sparc/kernel/signal.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
6 * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
7 * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
8 */
9
10#include <linux/config.h>
11#include <linux/sched.h>
12#include <linux/kernel.h>
13#include <linux/signal.h>
14#include <linux/errno.h>
15#include <linux/wait.h>
16#include <linux/ptrace.h>
17#include <linux/unistd.h>
18#include <linux/mm.h>
19#include <linux/tty.h>
20#include <linux/smp.h>
21#include <linux/smp_lock.h>
22#include <linux/binfmts.h> /* do_coredum */
23#include <linux/bitops.h>
24
25#include <asm/uaccess.h>
26#include <asm/ptrace.h>
27#include <asm/svr4.h>
28#include <asm/pgalloc.h>
29#include <asm/pgtable.h>
30#include <asm/cacheflush.h> /* flush_sig_insns */
31
32#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
33
34extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
35 void *fpqueue, unsigned long *fpqdepth);
36extern void fpload(unsigned long *fpregs, unsigned long *fsr);
37
38asmlinkage int do_signal(sigset_t *oldset, struct pt_regs * regs,
39 unsigned long orig_o0, int restart_syscall);
40
41/* Signal frames: the original one (compatible with SunOS):
42 *
43 * Set up a signal frame... Make the stack look the way SunOS
44 * expects it to look which is basically:
45 *
46 * ---------------------------------- <-- %sp at signal time
47 * Struct sigcontext
48 * Signal address
49 * Ptr to sigcontext area above
50 * Signal code
51 * The signal number itself
52 * One register window
53 * ---------------------------------- <-- New %sp
54 */
55struct signal_sframe {
56 struct reg_window sig_window;
57 int sig_num;
58 int sig_code;
59 struct sigcontext __user *sig_scptr;
60 int sig_address;
61 struct sigcontext sig_context;
62 unsigned int extramask[_NSIG_WORDS - 1];
63};
64
65/*
66 * And the new one, intended to be used for Linux applications only
67 * (we have enough in there to work with clone).
68 * All the interesting bits are in the info field.
69 */
70
71struct new_signal_frame {
72 struct sparc_stackf ss;
73 __siginfo_t info;
74 __siginfo_fpu_t __user *fpu_save;
75 unsigned long insns[2] __attribute__ ((aligned (8)));
76 unsigned int extramask[_NSIG_WORDS - 1];
77 unsigned int extra_size; /* Should be 0 */
78 __siginfo_fpu_t fpu_state;
79};
80
81struct rt_signal_frame {
82 struct sparc_stackf ss;
83 siginfo_t info;
84 struct pt_regs regs;
85 sigset_t mask;
86 __siginfo_fpu_t __user *fpu_save;
87 unsigned int insns[2];
88 stack_t stack;
89 unsigned int extra_size; /* Should be 0 */
90 __siginfo_fpu_t fpu_state;
91};
92
93/* Align macros */
94#define SF_ALIGNEDSZ (((sizeof(struct signal_sframe) + 7) & (~7)))
95#define NF_ALIGNEDSZ (((sizeof(struct new_signal_frame) + 7) & (~7)))
96#define RT_ALIGNEDSZ (((sizeof(struct rt_signal_frame) + 7) & (~7)))
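/*
 * Worked example, illustrative only (not part of the original source): the
 * "(size + 7) & ~7" idiom rounds a frame size up to the next multiple of 8,
 * e.g. a 92-byte frame becomes (92 + 7) & ~7 == 96, so the signal frame keeps
 * the stack pointer doubleword-aligned as the SPARC ABI expects.
 */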
97
98/*
99 * atomically swap in the new signal mask, and wait for a signal.
100 * This is really tricky on the Sparc, watch out...
101 */
102asmlinkage void _sigpause_common(old_sigset_t set, struct pt_regs *regs)
103{
104 sigset_t saveset;
105
106 set &= _BLOCKABLE;
107 spin_lock_irq(&current->sighand->siglock);
108 saveset = current->blocked;
109 siginitset(&current->blocked, set);
110 recalc_sigpending();
111 spin_unlock_irq(&current->sighand->siglock);
112
113 regs->pc = regs->npc;
114 regs->npc += 4;
115
116 /* Condition codes and return value were set here for sigpause,
117 * and so got used by setup_frame, which again causes sigreturn()
118 * to return -EINTR.
119 */
120 while (1) {
121 current->state = TASK_INTERRUPTIBLE;
122 schedule();
123 /*
124 * Return -EINTR and set condition code here,
125 * so the interrupted system call actually returns
126 * these.
127 */
128 regs->psr |= PSR_C;
129 regs->u_regs[UREG_I0] = EINTR;
130 if (do_signal(&saveset, regs, 0, 0))
131 return;
132 }
133}
134
135asmlinkage void do_sigpause(unsigned int set, struct pt_regs *regs)
136{
137 _sigpause_common(set, regs);
138}
139
140asmlinkage void do_sigsuspend (struct pt_regs *regs)
141{
142 _sigpause_common(regs->u_regs[UREG_I0], regs);
143}
144
145asmlinkage void do_rt_sigsuspend(sigset_t __user *uset, size_t sigsetsize,
146 struct pt_regs *regs)
147{
148 sigset_t oldset, set;
149
150 /* XXX: Don't preclude handling different sized sigset_t's. */
151 if (sigsetsize != sizeof(sigset_t)) {
152 regs->psr |= PSR_C;
153 regs->u_regs[UREG_I0] = EINVAL;
154 return;
155 }
156
157 if (copy_from_user(&set, uset, sizeof(set))) {
158 regs->psr |= PSR_C;
159 regs->u_regs[UREG_I0] = EFAULT;
160 return;
161 }
162
163 sigdelsetmask(&set, ~_BLOCKABLE);
164 spin_lock_irq(&current->sighand->siglock);
165 oldset = current->blocked;
166 current->blocked = set;
167 recalc_sigpending();
168 spin_unlock_irq(&current->sighand->siglock);
169
170 regs->pc = regs->npc;
171 regs->npc += 4;
172
173 /* Condition codes and return value were set here for sigpause,
174 * and so got used by setup_frame, which again causes sigreturn()
175 * to return -EINTR.
176 */
177 while (1) {
178 current->state = TASK_INTERRUPTIBLE;
179 schedule();
180 /*
181 * Return -EINTR and set condition code here,
182 * so the interrupted system call actually returns
183 * these.
184 */
185 regs->psr |= PSR_C;
186 regs->u_regs[UREG_I0] = EINTR;
187 if (do_signal(&oldset, regs, 0, 0))
188 return;
189 }
190}
191
192static inline int
193restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
194{
195 int err;
196#ifdef CONFIG_SMP
197 if (test_tsk_thread_flag(current, TIF_USEDFPU))
198 regs->psr &= ~PSR_EF;
199#else
200 if (current == last_task_used_math) {
201 last_task_used_math = NULL;
202 regs->psr &= ~PSR_EF;
203 }
204#endif
205 set_used_math();
206 clear_tsk_thread_flag(current, TIF_USEDFPU);
207
208 if (!access_ok(VERIFY_READ, fpu, sizeof(*fpu)))
209 return -EFAULT;
210
211 err = __copy_from_user(&current->thread.float_regs[0], &fpu->si_float_regs[0],
212 (sizeof(unsigned long) * 32));
213 err |= __get_user(current->thread.fsr, &fpu->si_fsr);
214 err |= __get_user(current->thread.fpqdepth, &fpu->si_fpqdepth);
215 if (current->thread.fpqdepth != 0)
216 err |= __copy_from_user(&current->thread.fpqueue[0],
217 &fpu->si_fpqueue[0],
218 ((sizeof(unsigned long) +
219 (sizeof(unsigned long *)))*16));
220 return err;
221}
222
223static inline void do_new_sigreturn (struct pt_regs *regs)
224{
225 struct new_signal_frame __user *sf;
226 unsigned long up_psr, pc, npc;
227 sigset_t set;
228 __siginfo_fpu_t __user *fpu_save;
229 int err;
230
231 sf = (struct new_signal_frame __user *) regs->u_regs[UREG_FP];
232
233 /* 1. Make sure we are not getting garbage from the user */
234 if (!access_ok(VERIFY_READ, sf, sizeof(*sf)))
235 goto segv_and_exit;
236
237 if (((unsigned long) sf) & 3)
238 goto segv_and_exit;
239
240 err = __get_user(pc, &sf->info.si_regs.pc);
241 err |= __get_user(npc, &sf->info.si_regs.npc);
242
243 if ((pc | npc) & 3)
244 goto segv_and_exit;
245
246 /* 2. Restore the state */
247 up_psr = regs->psr;
248 err |= __copy_from_user(regs, &sf->info.si_regs, sizeof(struct pt_regs));
249
250 /* User can only change condition codes and FPU enabling in %psr. */
251 regs->psr = (up_psr & ~(PSR_ICC | PSR_EF))
252 | (regs->psr & (PSR_ICC | PSR_EF));
253
254 err |= __get_user(fpu_save, &sf->fpu_save);
255
256 if (fpu_save)
257 err |= restore_fpu_state(regs, fpu_save);
258
259 /* This is pretty much atomic, no amount of locking would prevent
260 * the races which exist anyway.
261 */
262 err |= __get_user(set.sig[0], &sf->info.si_mask);
263 err |= __copy_from_user(&set.sig[1], &sf->extramask,
264 (_NSIG_WORDS-1) * sizeof(unsigned int));
265
266 if (err)
267 goto segv_and_exit;
268
269 sigdelsetmask(&set, ~_BLOCKABLE);
270 spin_lock_irq(&current->sighand->siglock);
271 current->blocked = set;
272 recalc_sigpending();
273 spin_unlock_irq(&current->sighand->siglock);
274 return;
275
276segv_and_exit:
277 force_sig(SIGSEGV, current);
278}
279
280asmlinkage void do_sigreturn(struct pt_regs *regs)
281{
282 struct sigcontext __user *scptr;
283 unsigned long pc, npc, psr;
284 sigset_t set;
285 int err;
286
287 /* Always make any pending restarted system calls return -EINTR */
288 current_thread_info()->restart_block.fn = do_no_restart_syscall;
289
290 synchronize_user_stack();
291
292 if (current->thread.new_signal) {
293 do_new_sigreturn(regs);
294 return;
295 }
296
297 scptr = (struct sigcontext __user *) regs->u_regs[UREG_I0];
298
299 /* Check sanity of the user arg. */
300 if (!access_ok(VERIFY_READ, scptr, sizeof(struct sigcontext)) ||
301 (((unsigned long) scptr) & 3))
302 goto segv_and_exit;
303
304 err = __get_user(pc, &scptr->sigc_pc);
305 err |= __get_user(npc, &scptr->sigc_npc);
306
307 if ((pc | npc) & 3)
308 goto segv_and_exit;
309
310 /* This is pretty much atomic, no amount of locking would prevent
311 * the races which exist anyway.
312 */
313 err |= __get_user(set.sig[0], &scptr->sigc_mask);
314 /* Note that scptr + 1 points to extramask */
315 err |= __copy_from_user(&set.sig[1], scptr + 1,
316 (_NSIG_WORDS - 1) * sizeof(unsigned int));
317
318 if (err)
319 goto segv_and_exit;
320
321 sigdelsetmask(&set, ~_BLOCKABLE);
322 spin_lock_irq(&current->sighand->siglock);
323 current->blocked = set;
324 recalc_sigpending();
325 spin_unlock_irq(&current->sighand->siglock);
326
327 regs->pc = pc;
328 regs->npc = npc;
329
330 err = __get_user(regs->u_regs[UREG_FP], &scptr->sigc_sp);
331 err |= __get_user(regs->u_regs[UREG_I0], &scptr->sigc_o0);
332 err |= __get_user(regs->u_regs[UREG_G1], &scptr->sigc_g1);
333
334 /* User can only change condition codes in %psr. */
335 err |= __get_user(psr, &scptr->sigc_psr);
336 if (err)
337 goto segv_and_exit;
338
339 regs->psr &= ~(PSR_ICC);
340 regs->psr |= (psr & PSR_ICC);
341 return;
342
343segv_and_exit:
344 force_sig(SIGSEGV, current);
345}
346
347asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
348{
349 struct rt_signal_frame __user *sf;
350 unsigned int psr, pc, npc;
351 __siginfo_fpu_t __user *fpu_save;
352 mm_segment_t old_fs;
353 sigset_t set;
354 stack_t st;
355 int err;
356
357 synchronize_user_stack();
358 sf = (struct rt_signal_frame __user *) regs->u_regs[UREG_FP];
359 if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
360 (((unsigned long) sf) & 0x03))
361 goto segv;
362
363 err = __get_user(pc, &sf->regs.pc);
364 err |= __get_user(npc, &sf->regs.npc);
365 err |= ((pc | npc) & 0x03);
366
367 err |= __get_user(regs->y, &sf->regs.y);
368 err |= __get_user(psr, &sf->regs.psr);
369
370 err |= __copy_from_user(&regs->u_regs[UREG_G1],
371 &sf->regs.u_regs[UREG_G1], 15 * sizeof(u32));
372
373 regs->psr = (regs->psr & ~PSR_ICC) | (psr & PSR_ICC);
374
375 err |= __get_user(fpu_save, &sf->fpu_save);
376
377 if (fpu_save)
378 err |= restore_fpu_state(regs, fpu_save);
379 err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t));
380
381 err |= __copy_from_user(&st, &sf->stack, sizeof(stack_t));
382
383 if (err)
384 goto segv;
385
386 regs->pc = pc;
387 regs->npc = npc;
388
389 /* It is more difficult to avoid calling this function than to
390 * call it and ignore errors.
391 */
392 old_fs = get_fs();
393 set_fs(KERNEL_DS);
394 do_sigaltstack((const stack_t __user *) &st, NULL, (unsigned long)sf);
395 set_fs(old_fs);
396
397 sigdelsetmask(&set, ~_BLOCKABLE);
398 spin_lock_irq(&current->sighand->siglock);
399 current->blocked = set;
400 recalc_sigpending();
401 spin_unlock_irq(&current->sighand->siglock);
402 return;
403segv:
404 force_sig(SIGSEGV, current);
405}
406
407/* Checks if the fp is valid */
408static inline int invalid_frame_pointer(void __user *fp, int fplen)
409{
410 if ((((unsigned long) fp) & 7) ||
411 !__access_ok((unsigned long)fp, fplen) ||
412 ((sparc_cpu_model == sun4 || sparc_cpu_model == sun4c) &&
413 ((unsigned long) fp < 0xe0000000 && (unsigned long) fp >= 0x20000000)))
414 return 1;
415
416 return 0;
417}
418
419static inline void __user *get_sigframe(struct sigaction *sa, struct pt_regs *regs, unsigned long framesize)
420{
421 unsigned long sp;
422
423 sp = regs->u_regs[UREG_FP];
424
425 /* This is the X/Open sanctioned signal stack switching. */
426 if (sa->sa_flags & SA_ONSTACK) {
427 if (!on_sig_stack(sp) && !((current->sas_ss_sp + current->sas_ss_size) & 7))
428 sp = current->sas_ss_sp + current->sas_ss_size;
429 }
430 return (void __user *)(sp - framesize);
431}
432
433static inline void
434setup_frame(struct sigaction *sa, struct pt_regs *regs, int signr, sigset_t *oldset, siginfo_t *info)
435{
436 struct signal_sframe __user *sframep;
437 struct sigcontext __user *sc;
438 int window = 0, err;
439 unsigned long pc = regs->pc;
440 unsigned long npc = regs->npc;
441 struct thread_info *tp = current_thread_info();
442 void __user *sig_address;
443 int sig_code;
444
445 synchronize_user_stack();
446 sframep = (struct signal_sframe __user *)
447 get_sigframe(sa, regs, SF_ALIGNEDSZ);
448 if (invalid_frame_pointer(sframep, sizeof(*sframep))){
449 /* Don't change signal code and address, so that
450 * post mortem debuggers can have a look.
451 */
452 goto sigill_and_return;
453 }
454
455 sc = &sframep->sig_context;
456
457 /* We've already made sure frame pointer isn't in kernel space... */
458 err = __put_user((sas_ss_flags(regs->u_regs[UREG_FP]) == SS_ONSTACK),
459 &sc->sigc_onstack);
460 err |= __put_user(oldset->sig[0], &sc->sigc_mask);
461 err |= __copy_to_user(sframep->extramask, &oldset->sig[1],
462 (_NSIG_WORDS - 1) * sizeof(unsigned int));
463 err |= __put_user(regs->u_regs[UREG_FP], &sc->sigc_sp);
464 err |= __put_user(pc, &sc->sigc_pc);
465 err |= __put_user(npc, &sc->sigc_npc);
466 err |= __put_user(regs->psr, &sc->sigc_psr);
467 err |= __put_user(regs->u_regs[UREG_G1], &sc->sigc_g1);
468 err |= __put_user(regs->u_regs[UREG_I0], &sc->sigc_o0);
469 err |= __put_user(tp->w_saved, &sc->sigc_oswins);
470 if (tp->w_saved)
471 for (window = 0; window < tp->w_saved; window++) {
472 put_user((char *)tp->rwbuf_stkptrs[window],
473 &sc->sigc_spbuf[window]);
474 err |= __copy_to_user(&sc->sigc_wbuf[window],
475 &tp->reg_window[window],
476 sizeof(struct reg_window));
477 }
478 else
479 err |= __copy_to_user(sframep, (char *) regs->u_regs[UREG_FP],
480 sizeof(struct reg_window));
481
482 tp->w_saved = 0; /* So process is allowed to execute. */
483
484 err |= __put_user(signr, &sframep->sig_num);
485 sig_address = NULL;
486 sig_code = 0;
487 if (SI_FROMKERNEL (info) && (info->si_code & __SI_MASK) == __SI_FAULT) {
488 sig_address = info->si_addr;
489 switch (signr) {
490 case SIGSEGV:
491 switch (info->si_code) {
492 case SEGV_MAPERR: sig_code = SUBSIG_NOMAPPING; break;
493 default: sig_code = SUBSIG_PROTECTION; break;
494 }
495 break;
496 case SIGILL:
497 switch (info->si_code) {
498 case ILL_ILLOPC: sig_code = SUBSIG_ILLINST; break;
499 case ILL_PRVOPC: sig_code = SUBSIG_PRIVINST; break;
500 case ILL_ILLTRP: sig_code = SUBSIG_BADTRAP(info->si_trapno); break;
501 default: sig_code = SUBSIG_STACK; break;
502 }
503 break;
504 case SIGFPE:
505 switch (info->si_code) {
506 case FPE_INTDIV: sig_code = SUBSIG_IDIVZERO; break;
507 case FPE_INTOVF: sig_code = SUBSIG_FPINTOVFL; break;
508 case FPE_FLTDIV: sig_code = SUBSIG_FPDIVZERO; break;
509 case FPE_FLTOVF: sig_code = SUBSIG_FPOVFLOW; break;
510 case FPE_FLTUND: sig_code = SUBSIG_FPUNFLOW; break;
511 case FPE_FLTRES: sig_code = SUBSIG_FPINEXACT; break;
512 case FPE_FLTINV: sig_code = SUBSIG_FPOPERROR; break;
513 default: sig_code = SUBSIG_FPERROR; break;
514 }
515 break;
516 case SIGBUS:
517 switch (info->si_code) {
518 case BUS_ADRALN: sig_code = SUBSIG_ALIGNMENT; break;
519 case BUS_ADRERR: sig_code = SUBSIG_MISCERROR; break;
520 default: sig_code = SUBSIG_BUSTIMEOUT; break;
521 }
522 break;
523 case SIGEMT:
524 switch (info->si_code) {
525 case EMT_TAGOVF: sig_code = SUBSIG_TAG; break;
526 }
527 break;
528 case SIGSYS:
529 if (info->si_code == (__SI_FAULT|0x100)) {
530 /* See sys_sunos.c */
531 sig_code = info->si_trapno;
532 break;
533 }
534 default:
535 sig_address = NULL;
536 }
537 }
538 err |= __put_user((unsigned long)sig_address, &sframep->sig_address);
539 err |= __put_user(sig_code, &sframep->sig_code);
540 err |= __put_user(sc, &sframep->sig_scptr);
541 if (err)
542 goto sigsegv;
543
544 regs->u_regs[UREG_FP] = (unsigned long) sframep;
545 regs->pc = (unsigned long) sa->sa_handler;
546 regs->npc = (regs->pc + 4);
547 return;
548
549sigill_and_return:
550 do_exit(SIGILL);
551sigsegv:
552 force_sigsegv(signr, current);
553}
554
555
556static inline int
557save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
558{
559 int err = 0;
560#ifdef CONFIG_SMP
561 if (test_tsk_thread_flag(current, TIF_USEDFPU)) {
562 put_psr(get_psr() | PSR_EF);
563 fpsave(&current->thread.float_regs[0], &current->thread.fsr,
564 &current->thread.fpqueue[0], &current->thread.fpqdepth);
565 regs->psr &= ~(PSR_EF);
566 clear_tsk_thread_flag(current, TIF_USEDFPU);
567 }
568#else
569 if (current == last_task_used_math) {
570 put_psr(get_psr() | PSR_EF);
571 fpsave(&current->thread.float_regs[0], &current->thread.fsr,
572 &current->thread.fpqueue[0], &current->thread.fpqdepth);
573 last_task_used_math = NULL;
574 regs->psr &= ~(PSR_EF);
575 }
576#endif
577 err |= __copy_to_user(&fpu->si_float_regs[0],
578 &current->thread.float_regs[0],
579 (sizeof(unsigned long) * 32));
580 err |= __put_user(current->thread.fsr, &fpu->si_fsr);
581 err |= __put_user(current->thread.fpqdepth, &fpu->si_fpqdepth);
582 if (current->thread.fpqdepth != 0)
583 err |= __copy_to_user(&fpu->si_fpqueue[0],
584 &current->thread.fpqueue[0],
585 ((sizeof(unsigned long) +
586 (sizeof(unsigned long *)))*16));
587 clear_used_math();
588 return err;
589}
590
591static inline void
592new_setup_frame(struct k_sigaction *ka, struct pt_regs *regs,
593 int signo, sigset_t *oldset)
594{
595 struct new_signal_frame __user *sf;
596 int sigframe_size, err;
597
598 /* 1. Make sure everything is clean */
599 synchronize_user_stack();
600
601 sigframe_size = NF_ALIGNEDSZ;
602 if (!used_math())
603 sigframe_size -= sizeof(__siginfo_fpu_t);
604
605 sf = (struct new_signal_frame __user *)
606 get_sigframe(&ka->sa, regs, sigframe_size);
607
608 if (invalid_frame_pointer(sf, sigframe_size))
609 goto sigill_and_return;
610
611 if (current_thread_info()->w_saved != 0)
612 goto sigill_and_return;
613
614 /* 2. Save the current process state */
615 err = __copy_to_user(&sf->info.si_regs, regs, sizeof(struct pt_regs));
616
617 err |= __put_user(0, &sf->extra_size);
618
619 if (used_math()) {
620 err |= save_fpu_state(regs, &sf->fpu_state);
621 err |= __put_user(&sf->fpu_state, &sf->fpu_save);
622 } else {
623 err |= __put_user(0, &sf->fpu_save);
624 }
625
626 err |= __put_user(oldset->sig[0], &sf->info.si_mask);
627 err |= __copy_to_user(sf->extramask, &oldset->sig[1],
628 (_NSIG_WORDS - 1) * sizeof(unsigned int));
629 err |= __copy_to_user(sf, (char *) regs->u_regs[UREG_FP],
630 sizeof(struct reg_window));
631 if (err)
632 goto sigsegv;
633
634 /* 3. signal handler back-trampoline and parameters */
635 regs->u_regs[UREG_FP] = (unsigned long) sf;
636 regs->u_regs[UREG_I0] = signo;
637 regs->u_regs[UREG_I1] = (unsigned long) &sf->info;
638 regs->u_regs[UREG_I2] = (unsigned long) &sf->info;
639
640 /* 4. signal handler */
641 regs->pc = (unsigned long) ka->sa.sa_handler;
642 regs->npc = (regs->pc + 4);
643
644 /* 5. return to kernel instructions */
645 if (ka->ka_restorer)
646 regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
647 else {
648 regs->u_regs[UREG_I7] = (unsigned long)(&(sf->insns[0]) - 2);
649
650 /* mov __NR_sigreturn, %g1 */
651 err |= __put_user(0x821020d8, &sf->insns[0]);
652
653 /* t 0x10 */
654 err |= __put_user(0x91d02010, &sf->insns[1]);
655 if (err)
656 goto sigsegv;
657
658 /* Flush instruction space. */
659 flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
660 }
661 return;
662
663sigill_and_return:
664 do_exit(SIGILL);
665sigsegv:
666 force_sigsegv(signo, current);
667}
668
669static inline void
670new_setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
671 int signo, sigset_t *oldset, siginfo_t *info)
672{
673 struct rt_signal_frame __user *sf;
674 int sigframe_size;
675 unsigned int psr;
676 int err;
677
678 synchronize_user_stack();
679 sigframe_size = RT_ALIGNEDSZ;
680 if (!used_math())
681 sigframe_size -= sizeof(__siginfo_fpu_t);
682 sf = (struct rt_signal_frame __user *)
683 get_sigframe(&ka->sa, regs, sigframe_size);
684 if (invalid_frame_pointer(sf, sigframe_size))
685 goto sigill;
686 if (current_thread_info()->w_saved != 0)
687 goto sigill;
688
689 err = __put_user(regs->pc, &sf->regs.pc);
690 err |= __put_user(regs->npc, &sf->regs.npc);
691 err |= __put_user(regs->y, &sf->regs.y);
692 psr = regs->psr;
693 if (used_math())
694 psr |= PSR_EF;
695 err |= __put_user(psr, &sf->regs.psr);
696 err |= __copy_to_user(&sf->regs.u_regs, regs->u_regs, sizeof(regs->u_regs));
697 err |= __put_user(0, &sf->extra_size);
698
699 if (psr & PSR_EF) {
700 err |= save_fpu_state(regs, &sf->fpu_state);
701 err |= __put_user(&sf->fpu_state, &sf->fpu_save);
702 } else {
703 err |= __put_user(0, &sf->fpu_save);
704 }
705 err |= __copy_to_user(&sf->mask, &oldset->sig[0], sizeof(sigset_t));
706
707 /* Setup sigaltstack */
708 err |= __put_user(current->sas_ss_sp, &sf->stack.ss_sp);
709 err |= __put_user(sas_ss_flags(regs->u_regs[UREG_FP]), &sf->stack.ss_flags);
710 err |= __put_user(current->sas_ss_size, &sf->stack.ss_size);
711
712 err |= __copy_to_user(sf, (char *) regs->u_regs[UREG_FP],
713 sizeof(struct reg_window));
714
715 err |= copy_siginfo_to_user(&sf->info, info);
716
717 if (err)
718 goto sigsegv;
719
720 regs->u_regs[UREG_FP] = (unsigned long) sf;
721 regs->u_regs[UREG_I0] = signo;
722 regs->u_regs[UREG_I1] = (unsigned long) &sf->info;
723 regs->u_regs[UREG_I2] = (unsigned long) &sf->regs;
724
725 regs->pc = (unsigned long) ka->sa.sa_handler;
726 regs->npc = (regs->pc + 4);
727
728 if (ka->ka_restorer)
729 regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
730 else {
731 regs->u_regs[UREG_I7] = (unsigned long)(&(sf->insns[0]) - 2);
732
733 /* mov __NR_sigreturn, %g1 */
734 err |= __put_user(0x821020d8, &sf->insns[0]);
735
736 /* t 0x10 */
737 err |= __put_user(0x91d02010, &sf->insns[1]);
738 if (err)
739 goto sigsegv;
740
741 /* Flush instruction space. */
742 flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
743 }
744 return;
745
746sigill:
747 do_exit(SIGILL);
748sigsegv:
749 force_sigsegv(signo, current);
750}
751
752/* Setup a Solaris stack frame */
753static inline void
754setup_svr4_frame(struct sigaction *sa, unsigned long pc, unsigned long npc,
755 struct pt_regs *regs, int signr, sigset_t *oldset)
756{
757 svr4_signal_frame_t __user *sfp;
758 svr4_gregset_t __user *gr;
759 svr4_siginfo_t __user *si;
760 svr4_mcontext_t __user *mc;
761 svr4_gwindows_t __user *gw;
762 svr4_ucontext_t __user *uc;
763 svr4_sigset_t setv;
764 struct thread_info *tp = current_thread_info();
765 int window = 0, err;
766
767 synchronize_user_stack();
768 sfp = (svr4_signal_frame_t __user *)
769 get_sigframe(sa, regs, SVR4_SF_ALIGNED + sizeof(struct reg_window));
770
771 if (invalid_frame_pointer(sfp, sizeof(*sfp)))
772 goto sigill_and_return;
773
774 /* Start with a clean frame pointer and fill it */
775 err = __clear_user(sfp, sizeof(*sfp));
776
777 /* Setup convenience variables */
778 si = &sfp->si;
779 uc = &sfp->uc;
780 gw = &sfp->gw;
781 mc = &uc->mcontext;
782 gr = &mc->greg;
783
784 /* FIXME: where am I supposed to put this?
785 * sc->sigc_onstack = old_status;
786 * anyways, it does not look like it is used for anything at all.
787 */
788 setv.sigbits[0] = oldset->sig[0];
789 setv.sigbits[1] = oldset->sig[1];
790 if (_NSIG_WORDS >= 4) {
791 setv.sigbits[2] = oldset->sig[2];
792 setv.sigbits[3] = oldset->sig[3];
793 err |= __copy_to_user(&uc->sigmask, &setv, sizeof(svr4_sigset_t));
794 } else
795 err |= __copy_to_user(&uc->sigmask, &setv,
796 2 * sizeof(unsigned int));
797
798 /* Store registers */
799 err |= __put_user(regs->pc, &((*gr)[SVR4_PC]));
800 err |= __put_user(regs->npc, &((*gr)[SVR4_NPC]));
801 err |= __put_user(regs->psr, &((*gr)[SVR4_PSR]));
802 err |= __put_user(regs->y, &((*gr)[SVR4_Y]));
803
804 /* Copy g[1..7] and o[0..7] registers */
805 err |= __copy_to_user(&(*gr)[SVR4_G1], &regs->u_regs[UREG_G1],
806 sizeof(long) * 7);
807 err |= __copy_to_user(&(*gr)[SVR4_O0], &regs->u_regs[UREG_I0],
808 sizeof(long) * 8);
809
810 /* Setup sigaltstack */
811 err |= __put_user(current->sas_ss_sp, &uc->stack.sp);
812 err |= __put_user(sas_ss_flags(regs->u_regs[UREG_FP]), &uc->stack.flags);
813 err |= __put_user(current->sas_ss_size, &uc->stack.size);
814
815 /* Save the current window file: */
816
817 /* 1. Link sfp->uc->gwins to our windows */
818 err |= __put_user(gw, &mc->gwin);
819
820 /* 2. Number of windows to restore at setcontext(): */
821 err |= __put_user(tp->w_saved, &gw->count);
822
823 /* 3. Save each valid window
824 * Currently, it makes a copy of the windows from the kernel copy.
825 * David's code for SunOS makes the copy but keeps the pointer to
826 * the kernel. My version makes the pointer point to a userland
827 * copy of those. Mhm, I wonder if I shouldn't just ignore those
828 * on setcontext and use those that are on the kernel; the signal
829 * handler should not be modifying those, mhm.
830 *
831 * These windows are just used in case synchronize_user_stack failed
832 * to flush the user windows.
833 */
834 for (window = 0; window < tp->w_saved; window++) {
835 err |= __put_user((int __user *) &(gw->win[window]), &gw->winptr[window]);
836 err |= __copy_to_user(&gw->win[window],
837 &tp->reg_window[window],
838 sizeof(svr4_rwindow_t));
839 err |= __put_user(0, gw->winptr[window]);
840 }
841
842 /* 4. We just pay attention to the gw->count field on setcontext */
843 tp->w_saved = 0; /* So process is allowed to execute. */
844
845 /* Setup the signal information. Solaris expects a bunch of
846 * information to be passed to the signal handler, we don't provide
847 * that much currently, should use siginfo.
848 */
849 err |= __put_user(signr, &si->siginfo.signo);
850 err |= __put_user(SVR4_SINOINFO, &si->siginfo.code);
851 if (err)
852 goto sigsegv;
853
854 regs->u_regs[UREG_FP] = (unsigned long) sfp;
855 regs->pc = (unsigned long) sa->sa_handler;
856 regs->npc = (regs->pc + 4);
857
858 /* Arguments passed to signal handler */
859 if (regs->u_regs[14]){
860 struct reg_window __user *rw = (struct reg_window __user *)
861 regs->u_regs[14];
862
863 err |= __put_user(signr, &rw->ins[0]);
864 err |= __put_user(si, &rw->ins[1]);
865 err |= __put_user(uc, &rw->ins[2]);
866 err |= __put_user(sfp, &rw->ins[6]); /* frame pointer */
867 if (err)
868 goto sigsegv;
869
870 regs->u_regs[UREG_I0] = signr;
871 regs->u_regs[UREG_I1] = (unsigned long) si;
872 regs->u_regs[UREG_I2] = (unsigned long) uc;
873 }
874 return;
875
876sigill_and_return:
877 do_exit(SIGILL);
878sigsegv:
879 force_sigsegv(signr, current);
880}
881
882asmlinkage int svr4_getcontext(svr4_ucontext_t __user *uc, struct pt_regs *regs)
883{
884 svr4_gregset_t __user *gr;
885 svr4_mcontext_t __user *mc;
886 svr4_sigset_t setv;
887 int err = 0;
888
889 synchronize_user_stack();
890
891 if (current_thread_info()->w_saved)
892 return -EFAULT;
893
894 err = clear_user(uc, sizeof(*uc));
895 if (err)
896 return -EFAULT;
897
898 /* Setup convenience variables */
899 mc = &uc->mcontext;
900 gr = &mc->greg;
901
902 setv.sigbits[0] = current->blocked.sig[0];
903 setv.sigbits[1] = current->blocked.sig[1];
904 if (_NSIG_WORDS >= 4) {
905 setv.sigbits[2] = current->blocked.sig[2];
906 setv.sigbits[3] = current->blocked.sig[3];
907 err |= __copy_to_user(&uc->sigmask, &setv, sizeof(svr4_sigset_t));
908 } else
909 err |= __copy_to_user(&uc->sigmask, &setv,
910 2 * sizeof(unsigned int));
911
912 /* Store registers */
913 err |= __put_user(regs->pc, &uc->mcontext.greg[SVR4_PC]);
914 err |= __put_user(regs->npc, &uc->mcontext.greg[SVR4_NPC]);
915 err |= __put_user(regs->psr, &uc->mcontext.greg[SVR4_PSR]);
916 err |= __put_user(regs->y, &uc->mcontext.greg[SVR4_Y]);
917
918 /* Copy g[1..7] and o[0..7] registers */
919 err |= __copy_to_user(&(*gr)[SVR4_G1], &regs->u_regs[UREG_G1],
920 sizeof(uint) * 7);
921 err |= __copy_to_user(&(*gr)[SVR4_O0], &regs->u_regs[UREG_I0],
922 sizeof(uint) * 8);
923
924 /* Setup sigaltstack */
925 err |= __put_user(current->sas_ss_sp, &uc->stack.sp);
926 err |= __put_user(sas_ss_flags(regs->u_regs[UREG_FP]), &uc->stack.flags);
927 err |= __put_user(current->sas_ss_size, &uc->stack.size);
928
929 /* The register file is not saved;
930 * we have already stuffed all of it with sync_user_stack.
931 */
932 return (err ? -EFAULT : 0);
933}
934
935/* Set the context for a svr4 application, this is Solaris way to sigreturn */
936asmlinkage int svr4_setcontext(svr4_ucontext_t __user *c, struct pt_regs *regs)
937{
938 svr4_gregset_t __user *gr;
939 unsigned long pc, npc, psr;
940 mm_segment_t old_fs;
941 sigset_t set;
942 svr4_sigset_t setv;
943 int err;
944 stack_t st;
945
946 /* Fixme: restore windows, or is this already taken care of in
947 * svr4_setup_frame when sync_user_windows is done?
948 */
949 flush_user_windows();
950
951 if (current_thread_info()->w_saved)
952 goto sigsegv_and_return;
953
954 if (((unsigned long) c) & 3)
955 goto sigsegv_and_return;
956
957 if (!__access_ok((unsigned long)c, sizeof(*c)))
958 goto sigsegv_and_return;
959
960 /* Check for valid PC and nPC */
961 gr = &c->mcontext.greg;
962 err = __get_user(pc, &((*gr)[SVR4_PC]));
963 err |= __get_user(npc, &((*gr)[SVR4_NPC]));
964
965 if ((pc | npc) & 3)
966 goto sigsegv_and_return;
967
968 /* Retrieve information from passed ucontext */
969 /* note that nPC is or'ed with 1; this is used to inform entry.S */
970 /* that we don't want it to mess with our PC and nPC */
971
972 /* This is pretty much atomic, no amount of locking would prevent
973 * the races which exist anyway.
974 */
975 err |= __copy_from_user(&setv, &c->sigmask, sizeof(svr4_sigset_t));
976
977 err |= __get_user(st.ss_sp, &c->stack.sp);
978 err |= __get_user(st.ss_flags, &c->stack.flags);
979 err |= __get_user(st.ss_size, &c->stack.size);
980
981 if (err)
982 goto sigsegv_and_return;
983
984 /* It is more difficult to avoid calling this function than to
985 call it and ignore errors. */
986 old_fs = get_fs();
987 set_fs(KERNEL_DS);
988 do_sigaltstack((const stack_t __user *) &st, NULL,
989 regs->u_regs[UREG_I6]);
990 set_fs(old_fs);
991
992 set.sig[0] = setv.sigbits[0];
993 set.sig[1] = setv.sigbits[1];
994 if (_NSIG_WORDS >= 4) {
995 set.sig[2] = setv.sigbits[2];
996 set.sig[3] = setv.sigbits[3];
997 }
998 sigdelsetmask(&set, ~_BLOCKABLE);
999 spin_lock_irq(&current->sighand->siglock);
1000 current->blocked = set;
1001 recalc_sigpending();
1002 spin_unlock_irq(&current->sighand->siglock);
1003 regs->pc = pc;
1004 regs->npc = npc | 1;
1005 err |= __get_user(regs->y, &((*gr)[SVR4_Y]));
1006 err |= __get_user(psr, &((*gr)[SVR4_PSR]));
1007 regs->psr &= ~(PSR_ICC);
1008 regs->psr |= (psr & PSR_ICC);
1009
1010 /* Restore g[1..7] and o[0..7] registers */
1011 err |= __copy_from_user(&regs->u_regs[UREG_G1], &(*gr)[SVR4_G1],
1012 sizeof(long) * 7);
1013 err |= __copy_from_user(&regs->u_regs[UREG_I0], &(*gr)[SVR4_O0],
1014 sizeof(long) * 8);
1015 return (err ? -EFAULT : 0);
1016
1017sigsegv_and_return:
1018 force_sig(SIGSEGV, current);
1019 return -EFAULT;
1020}
1021
1022static inline void
1023handle_signal(unsigned long signr, struct k_sigaction *ka,
1024 siginfo_t *info, sigset_t *oldset, struct pt_regs *regs,
1025 int svr4_signal)
1026{
1027 if (svr4_signal)
1028 setup_svr4_frame(&ka->sa, regs->pc, regs->npc, regs, signr, oldset);
1029 else {
1030 if (ka->sa.sa_flags & SA_SIGINFO)
1031 new_setup_rt_frame(ka, regs, signr, oldset, info);
1032 else if (current->thread.new_signal)
1033 new_setup_frame(ka, regs, signr, oldset);
1034 else
1035 setup_frame(&ka->sa, regs, signr, oldset, info);
1036 }
1037 if (!(ka->sa.sa_flags & SA_NOMASK)) {
1038 spin_lock_irq(&current->sighand->siglock);
1039 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
1040 sigaddset(&current->blocked, signr);
1041 recalc_sigpending();
1042 spin_unlock_irq(&current->sighand->siglock);
1043 }
1044}
1045
1046static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
1047 struct sigaction *sa)
1048{
1049 switch(regs->u_regs[UREG_I0]) {
1050 case ERESTART_RESTARTBLOCK:
1051 case ERESTARTNOHAND:
1052 no_system_call_restart:
1053 regs->u_regs[UREG_I0] = EINTR;
1054 regs->psr |= PSR_C;
1055 break;
1056 case ERESTARTSYS:
1057 if (!(sa->sa_flags & SA_RESTART))
1058 goto no_system_call_restart;
1059 /* fallthrough */
1060 case ERESTARTNOINTR:
1061 regs->u_regs[UREG_I0] = orig_i0;
1062 regs->pc -= 4;
1063 regs->npc -= 4;
1064 }
1065}
1066
1067/* Note that 'init' is a special process: it doesn't get signals it doesn't
1068 * want to handle. Thus you cannot kill init with a SIGKILL, even by
1069 * mistake.
1070 */
1071asmlinkage int do_signal(sigset_t *oldset, struct pt_regs * regs,
1072 unsigned long orig_i0, int restart_syscall)
1073{
1074 siginfo_t info;
1075 struct sparc_deliver_cookie cookie;
1076 struct k_sigaction ka;
1077 int signr;
1078
1079 /*
1080 * XXX Disable svr4 signal handling until solaris emulation works.
1081 * It is buggy - Anton
1082 */
1083#define SVR4_SIGNAL_BROKEN 1
1084#ifdef SVR4_SIGNAL_BROKEN
1085 int svr4_signal = 0;
1086#else
1087 int svr4_signal = current->personality == PER_SVR4;
1088#endif
1089
1090 cookie.restart_syscall = restart_syscall;
1091 cookie.orig_i0 = orig_i0;
1092
1093 if (!oldset)
1094 oldset = &current->blocked;
1095
1096 signr = get_signal_to_deliver(&info, &ka, regs, &cookie);
1097 if (signr > 0) {
1098 if (cookie.restart_syscall)
1099 syscall_restart(cookie.orig_i0, regs, &ka.sa);
1100 handle_signal(signr, &ka, &info, oldset,
1101 regs, svr4_signal);
1102 return 1;
1103 }
1104 if (cookie.restart_syscall &&
1105 (regs->u_regs[UREG_I0] == ERESTARTNOHAND ||
1106 regs->u_regs[UREG_I0] == ERESTARTSYS ||
1107 regs->u_regs[UREG_I0] == ERESTARTNOINTR)) {
1108 /* replay the system call when we are done */
1109 regs->u_regs[UREG_I0] = cookie.orig_i0;
1110 regs->pc -= 4;
1111 regs->npc -= 4;
1112 }
1113 if (cookie.restart_syscall &&
1114 regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) {
1115 regs->u_regs[UREG_G1] = __NR_restart_syscall;
1116 regs->pc -= 4;
1117 regs->npc -= 4;
1118 }
1119 return 0;
1120}
1121
1122asmlinkage int
1123do_sys_sigstack(struct sigstack __user *ssptr, struct sigstack __user *ossptr,
1124 unsigned long sp)
1125{
1126 int ret = -EFAULT;
1127
1128 /* First see if old state is wanted. */
1129 if (ossptr) {
1130 if (put_user(current->sas_ss_sp + current->sas_ss_size,
1131 &ossptr->the_stack) ||
1132 __put_user(on_sig_stack(sp), &ossptr->cur_status))
1133 goto out;
1134 }
1135
1136 /* Now see if we want to update the new state. */
1137 if (ssptr) {
1138 char *ss_sp;
1139
1140 if (get_user(ss_sp, &ssptr->the_stack))
1141 goto out;
1142 /* If the current stack was set with sigaltstack, don't
1143 swap stacks while we are on it. */
1144 ret = -EPERM;
1145 if (current->sas_ss_sp && on_sig_stack(sp))
1146 goto out;
1147
1148 /* Since we don't know the extent of the stack, and we don't
1149 track onstack-ness, but rather calculate it, we must
1150 presume a size. Ho hum this interface is lossy. */
1151 current->sas_ss_sp = (unsigned long)ss_sp - SIGSTKSZ;
1152 current->sas_ss_size = SIGSTKSZ;
1153 }
1154 ret = 0;
1155out:
1156 return ret;
1157}
1158
1159void ptrace_signal_deliver(struct pt_regs *regs, void *cookie)
1160{
1161 struct sparc_deliver_cookie *cp = cookie;
1162
1163 if (cp->restart_syscall &&
1164 (regs->u_regs[UREG_I0] == ERESTARTNOHAND ||
1165 regs->u_regs[UREG_I0] == ERESTARTSYS ||
1166 regs->u_regs[UREG_I0] == ERESTARTNOINTR)) {
1167 /* replay the system call when we are done */
1168 regs->u_regs[UREG_I0] = cp->orig_i0;
1169 regs->pc -= 4;
1170 regs->npc -= 4;
1171 cp->restart_syscall = 0;
1172 }
1173
1174 if (cp->restart_syscall &&
1175 regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) {
1176 regs->u_regs[UREG_G1] = __NR_restart_syscall;
1177 regs->pc -= 4;
1178 regs->npc -= 4;
1179 cp->restart_syscall = 0;
1180 }
1181}
diff --git a/arch/sparc/kernel/smp.c b/arch/sparc/kernel/smp.c
new file mode 100644
index 000000000000..c6e721d8f477
--- /dev/null
+++ b/arch/sparc/kernel/smp.c
@@ -0,0 +1,295 @@
1/* smp.c: Sparc SMP support.
2 *
3 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
4 * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
5 * Copyright (C) 2004 Keith M Wesolowski (wesolows@foobazco.org)
6 */
7
8#include <asm/head.h>
9
10#include <linux/kernel.h>
11#include <linux/sched.h>
12#include <linux/threads.h>
13#include <linux/smp.h>
14#include <linux/smp_lock.h>
15#include <linux/interrupt.h>
16#include <linux/kernel_stat.h>
17#include <linux/init.h>
18#include <linux/spinlock.h>
19#include <linux/mm.h>
20#include <linux/fs.h>
21#include <linux/seq_file.h>
22#include <linux/cache.h>
23#include <linux/delay.h>
24
25#include <asm/ptrace.h>
26#include <asm/atomic.h>
27
28#include <asm/irq.h>
29#include <asm/page.h>
30#include <asm/pgalloc.h>
31#include <asm/pgtable.h>
32#include <asm/oplib.h>
33#include <asm/cacheflush.h>
34#include <asm/tlbflush.h>
35#include <asm/cpudata.h>
36
37volatile int smp_processors_ready = 0;
38int smp_num_cpus = 1;
39volatile unsigned long cpu_callin_map[NR_CPUS] __initdata = {0,};
40unsigned char boot_cpu_id = 0;
41unsigned char boot_cpu_id4 = 0; /* boot_cpu_id << 2 */
42int smp_activated = 0;
43volatile int __cpu_number_map[NR_CPUS];
44volatile int __cpu_logical_map[NR_CPUS];
45
46cpumask_t cpu_online_map = CPU_MASK_NONE;
47cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
48
49/* The only guaranteed locking primitive available on all Sparc
50 * processors is 'ldstub [%reg + immediate], %dest_reg' which atomically
51 * places the current byte at the effective address into dest_reg and
52 * places 0xff there afterwards. Pretty lame locking primitive
53 * compared to the Alpha and the Intel, no? Most Sparcs have a 'swap'
54 * instruction, which is much better...
55 */
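
/*
 * Minimal sketch, illustrative only (not part of the original file): a
 * busy-wait lock built on nothing but ldstub, as described above.  The
 * names example_ldstub_lock/example_ldstub_unlock are made up; the real
 * kernel locking primitives live elsewhere (the spinlock and bitops helpers).
 */
static inline void example_ldstub_lock(volatile unsigned char *lock)
{
	unsigned char prev;

	do {
		/* Atomically read the lock byte and store 0xff into it. */
		__asm__ __volatile__("ldstub	[%1], %0"
				     : "=&r" (prev)
				     : "r" (lock)
				     : "memory");
	} while (prev);		/* 0x00 means we got it, 0xff means spin */
}

static inline void example_ldstub_unlock(volatile unsigned char *lock)
{
	/* A plain byte store of zero releases the lock. */
	__asm__ __volatile__("stb	%%g0, [%0]" : : "r" (lock) : "memory");
}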
56
57/* Used to make bitops atomic */
58unsigned char bitops_spinlock = 0;
59
60volatile unsigned long ipi_count;
61
62volatile int smp_process_available=0;
63volatile int smp_commenced = 0;
64
65void __init smp_store_cpu_info(int id)
66{
67 int cpu_node;
68
69 cpu_data(id).udelay_val = loops_per_jiffy;
70
71 cpu_find_by_mid(id, &cpu_node);
72 cpu_data(id).clock_tick = prom_getintdefault(cpu_node,
73 "clock-frequency", 0);
74 cpu_data(id).prom_node = cpu_node;
75 cpu_data(id).mid = cpu_get_hwmid(cpu_node);
76 if (cpu_data(id).mid < 0)
77 panic("No MID found for CPU%d at node 0x%08x", id, cpu_node);
78}
79
80void __init smp_cpus_done(unsigned int max_cpus)
81{
82}
83
84void cpu_panic(void)
85{
86 printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
87 panic("SMP bolixed\n");
88}
89
90struct linux_prom_registers smp_penguin_ctable __initdata = { 0 };
91
92void __init smp_boot_cpus(void)
93{
94 extern void smp4m_boot_cpus(void);
95 extern void smp4d_boot_cpus(void);
96
97 if (sparc_cpu_model == sun4m)
98 smp4m_boot_cpus();
99 else
100 smp4d_boot_cpus();
101}
102
103void smp_send_reschedule(int cpu)
104{
105 /* See sparc64 */
106}
107
108void smp_send_stop(void)
109{
110}
111
112void smp_flush_cache_all(void)
113{
114 xc0((smpfunc_t) BTFIXUP_CALL(local_flush_cache_all));
115 local_flush_cache_all();
116}
117
118void smp_flush_tlb_all(void)
119{
120 xc0((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_all));
121 local_flush_tlb_all();
122}
123
124void smp_flush_cache_mm(struct mm_struct *mm)
125{
126 if(mm->context != NO_CONTEXT) {
127 cpumask_t cpu_mask = mm->cpu_vm_mask;
128 cpu_clear(smp_processor_id(), cpu_mask);
129 if (!cpus_empty(cpu_mask))
130 xc1((smpfunc_t) BTFIXUP_CALL(local_flush_cache_mm), (unsigned long) mm);
131 local_flush_cache_mm(mm);
132 }
133}
134
135void smp_flush_tlb_mm(struct mm_struct *mm)
136{
137 if(mm->context != NO_CONTEXT) {
138 cpumask_t cpu_mask = mm->cpu_vm_mask;
139 cpu_clear(smp_processor_id(), cpu_mask);
140 if (!cpus_empty(cpu_mask)) {
141 xc1((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_mm), (unsigned long) mm);
142 if(atomic_read(&mm->mm_users) == 1 && current->active_mm == mm)
143 mm->cpu_vm_mask = cpumask_of_cpu(smp_processor_id());
144 }
145 local_flush_tlb_mm(mm);
146 }
147}
148
149void smp_flush_cache_range(struct vm_area_struct *vma, unsigned long start,
150 unsigned long end)
151{
152 struct mm_struct *mm = vma->vm_mm;
153
154 if (mm->context != NO_CONTEXT) {
155 cpumask_t cpu_mask = mm->cpu_vm_mask;
156 cpu_clear(smp_processor_id(), cpu_mask);
157 if (!cpus_empty(cpu_mask))
158 xc3((smpfunc_t) BTFIXUP_CALL(local_flush_cache_range), (unsigned long) vma, start, end);
159 local_flush_cache_range(vma, start, end);
160 }
161}
162
163void smp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
164 unsigned long end)
165{
166 struct mm_struct *mm = vma->vm_mm;
167
168 if (mm->context != NO_CONTEXT) {
169 cpumask_t cpu_mask = mm->cpu_vm_mask;
170 cpu_clear(smp_processor_id(), cpu_mask);
171 if (!cpus_empty(cpu_mask))
172 xc3((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_range), (unsigned long) vma, start, end);
173 local_flush_tlb_range(vma, start, end);
174 }
175}
176
177void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
178{
179 struct mm_struct *mm = vma->vm_mm;
180
181 if(mm->context != NO_CONTEXT) {
182 cpumask_t cpu_mask = mm->cpu_vm_mask;
183 cpu_clear(smp_processor_id(), cpu_mask);
184 if (!cpus_empty(cpu_mask))
185 xc2((smpfunc_t) BTFIXUP_CALL(local_flush_cache_page), (unsigned long) vma, page);
186 local_flush_cache_page(vma, page);
187 }
188}
189
190void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
191{
192 struct mm_struct *mm = vma->vm_mm;
193
194 if(mm->context != NO_CONTEXT) {
195 cpumask_t cpu_mask = mm->cpu_vm_mask;
196 cpu_clear(smp_processor_id(), cpu_mask);
197 if (!cpus_empty(cpu_mask))
198 xc2((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_page), (unsigned long) vma, page);
199 local_flush_tlb_page(vma, page);
200 }
201}
202
203void smp_reschedule_irq(void)
204{
205 set_need_resched();
206}
207
208void smp_flush_page_to_ram(unsigned long page)
209{
210 /* Current theory is that those who call this are the ones
211 * who have just dirtied their cache with the page's contents
212 * in kernel space, therefore we only run this on the local cpu.
213 *
214 * XXX This experiment failed, research further... -DaveM
215 */
216#if 1
217 xc1((smpfunc_t) BTFIXUP_CALL(local_flush_page_to_ram), page);
218#endif
219 local_flush_page_to_ram(page);
220}
221
222void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
223{
224 cpumask_t cpu_mask = mm->cpu_vm_mask;
225 cpu_clear(smp_processor_id(), cpu_mask);
226 if (!cpus_empty(cpu_mask))
227 xc2((smpfunc_t) BTFIXUP_CALL(local_flush_sig_insns), (unsigned long) mm, insn_addr);
228 local_flush_sig_insns(mm, insn_addr);
229}
230
231extern unsigned int lvl14_resolution;
232
233/* /proc/profile writes can call this, don't __init it please. */
234static DEFINE_SPINLOCK(prof_setup_lock);
235
236int setup_profiling_timer(unsigned int multiplier)
237{
238 int i;
239 unsigned long flags;
240
241 /* Prevent level14 ticker IRQ flooding. */
242 if((!multiplier) || (lvl14_resolution / multiplier) < 500)
243 return -EINVAL;
244
245 spin_lock_irqsave(&prof_setup_lock, flags);
246 for(i = 0; i < NR_CPUS; i++) {
247 if (cpu_possible(i))
248 load_profile_irq(i, lvl14_resolution / multiplier);
249 prof_multiplier(i) = multiplier;
250 }
251 spin_unlock_irqrestore(&prof_setup_lock, flags);
252
253 return 0;
254}
255
256void __init smp_prepare_cpus(unsigned int maxcpus)
257{
258}
259
260void __devinit smp_prepare_boot_cpu(void)
261{
262 current_thread_info()->cpu = hard_smp_processor_id();
263 cpu_set(smp_processor_id(), cpu_online_map);
264 cpu_set(smp_processor_id(), phys_cpu_present_map);
265}
266
267int __devinit __cpu_up(unsigned int cpu)
268{
269 panic("smp doesn't work\n");
270}
271
272void smp_bogo(struct seq_file *m)
273{
274 int i;
275
276 for (i = 0; i < NR_CPUS; i++) {
277 if (cpu_online(i))
278 seq_printf(m,
279 "Cpu%dBogo\t: %lu.%02lu\n",
280 i,
281 cpu_data(i).udelay_val/(500000/HZ),
282 (cpu_data(i).udelay_val/(5000/HZ))%100);
283 }
284}
285
286void smp_info(struct seq_file *m)
287{
288 int i;
289
290 seq_printf(m, "State:\n");
291 for (i = 0; i < NR_CPUS; i++) {
292 if (cpu_online(i))
293 seq_printf(m, "CPU%d\t\t: online\n", i);
294 }
295}
diff --git a/arch/sparc/kernel/sparc-stub.c b/arch/sparc/kernel/sparc-stub.c
new file mode 100644
index 000000000000..e84f815e6903
--- /dev/null
+++ b/arch/sparc/kernel/sparc-stub.c
@@ -0,0 +1,724 @@
1/* $Id: sparc-stub.c,v 1.28 2001/10/30 04:54:21 davem Exp $
2 * sparc-stub.c: KGDB support for the Linux kernel.
3 *
4 * Modifications to run under Linux
5 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
6 *
7 * This file originally came from the gdb sources, and the
8 * copyright notices have been retained below.
9 */
10
11/****************************************************************************
12
13 THIS SOFTWARE IS NOT COPYRIGHTED
14
15 HP offers the following for use in the public domain. HP makes no
16 warranty with regard to the software or its performance and the
17 user accepts the software "AS IS" with all faults.
18
19 HP DISCLAIMS ANY WARRANTIES, EXPRESS OR IMPLIED, WITH REGARD
20 TO THIS SOFTWARE INCLUDING BUT NOT LIMITED TO THE WARRANTIES
21 OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
22
23****************************************************************************/
24
25/****************************************************************************
26 * Header: remcom.c,v 1.34 91/03/09 12:29:49 glenne Exp $
27 *
28 * Module name: remcom.c $
29 * Revision: 1.34 $
30 * Date: 91/03/09 12:29:49 $
31 * Contributor: Lake Stevens Instrument Division$
32 *
33 * Description: low level support for gdb debugger. $
34 *
35 * Considerations: only works on target hardware $
36 *
37 * Written by: Glenn Engel $
38 * ModuleState: Experimental $
39 *
40 * NOTES: See Below $
41 *
42 * Modified for SPARC by Stu Grossman, Cygnus Support.
43 *
44 * This code has been extensively tested on the Fujitsu SPARClite demo board.
45 *
46 * To enable debugger support, two things need to happen. One, a
47 * call to set_debug_traps() is necessary in order to allow any breakpoints
48 * or error conditions to be properly intercepted and reported to gdb.
49 * Two, a breakpoint needs to be generated to begin communication. This
50 * is most easily accomplished by a call to breakpoint(). Breakpoint()
51 * simulates a breakpoint by executing a trap #1.
52 *
53 *************
54 *
55 * The following gdb commands are supported:
56 *
57 * command function Return value
58 *
59 * g return the value of the CPU registers hex data or ENN
60 * G set the value of the CPU registers OK or ENN
61 *
62 * mAA..AA,LLLL Read LLLL bytes at address AA..AA hex data or ENN
63 * MAA..AA,LLLL: Write LLLL bytes at address AA.AA OK or ENN
64 *
65 * c Resume at current address SNN ( signal NN)
66 * cAA..AA Continue at address AA..AA SNN
67 *
68 * s Step one instruction SNN
69 * sAA..AA Step one instruction from AA..AA SNN
70 *
71 * k kill
72 *
73 * ? What was the last sigval ? SNN (signal NN)
74 *
75 * bBB..BB Set baud rate to BB..BB OK or BNN, then sets
76 * baud rate
77 *
78 * All commands and responses are sent with a packet which includes a
79 * checksum. A packet consists of
80 *
81 * $<packet info>#<checksum>.
82 *
83 * where
84 * <packet info> :: <characters representing the command or response>
85 * <checksum> :: < two hex digits computed as modulo 256 sum of <packetinfo>>
86 *
87 * When a packet is received, it is first acknowledged with either '+' or '-'.
88 * '+' indicates a successful transfer. '-' indicates a failed transfer.
89 *
90 * Example:
91 *
92 * Host: Reply:
93 * $m0,10#2a +$00010203040506070809101112131415#42
94 *
95 ****************************************************************************/
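/* Illustrative sketch added in this listing (not part of the original stub):
 * the modulo-256 checksum described above, kept under #if 0 so it is never
 * built.  The helper name gdb_packet_csum() is hypothetical.
 */
#if 0
static unsigned char gdb_packet_csum(const char *packet_info)
{
	unsigned char sum = 0;		/* unsigned char gives the mod-256 wrap */

	while (*packet_info)
		sum += (unsigned char) *packet_info++;
	return sum;
}
/* For the host request "m0,10" shown above, the sum of 'm','0',',','1','0'
 * is 0x12a, i.e. 0x2a after the mod-256 wrap -- hence the "#2a" trailer.
 */
#endif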
96
97#include <linux/kernel.h>
98#include <linux/string.h>
99#include <linux/mm.h>
100#include <linux/smp.h>
101#include <linux/smp_lock.h>
102
103#include <asm/system.h>
104#include <asm/signal.h>
105#include <asm/oplib.h>
106#include <asm/head.h>
107#include <asm/traps.h>
108#include <asm/vac-ops.h>
109#include <asm/kgdb.h>
110#include <asm/pgalloc.h>
111#include <asm/pgtable.h>
112#include <asm/cacheflush.h>
113
114/*
115 *
116 * external low-level support routines
117 */
118
119extern void putDebugChar(char); /* write a single character */
120extern char getDebugChar(void); /* read and return a single char */
121
122/*
123 * BUFMAX defines the maximum number of characters in inbound/outbound buffers
124 * at least NUMREGBYTES*2 are needed for register packets
125 */
126#define BUFMAX 2048
127
128static int initialized; /* !0 means we've been initialized */
129
130static const char hexchars[]="0123456789abcdef";
131
132#define NUMREGS 72
133
134/* Number of bytes of registers. */
135#define NUMREGBYTES (NUMREGS * 4)
136enum regnames {G0, G1, G2, G3, G4, G5, G6, G7,
137 O0, O1, O2, O3, O4, O5, SP, O7,
138 L0, L1, L2, L3, L4, L5, L6, L7,
139 I0, I1, I2, I3, I4, I5, FP, I7,
140
141 F0, F1, F2, F3, F4, F5, F6, F7,
142 F8, F9, F10, F11, F12, F13, F14, F15,
143 F16, F17, F18, F19, F20, F21, F22, F23,
144 F24, F25, F26, F27, F28, F29, F30, F31,
145 Y, PSR, WIM, TBR, PC, NPC, FPSR, CPSR };
146
147
148extern void trap_low(void); /* In arch/sparc/kernel/entry.S */
149
150unsigned long get_sun4cpte(unsigned long addr)
151{
152 unsigned long entry;
153
154 __asm__ __volatile__("\n\tlda [%1] %2, %0\n\t" :
155 "=r" (entry) :
156 "r" (addr), "i" (ASI_PTE));
157 return entry;
158}
159
160unsigned long get_sun4csegmap(unsigned long addr)
161{
162 unsigned long entry;
163
164 __asm__ __volatile__("\n\tlduba [%1] %2, %0\n\t" :
165 "=r" (entry) :
166 "r" (addr), "i" (ASI_SEGMAP));
167 return entry;
168}
169
170#if 0
171/* Have to sort this out. This cannot be done after initialization. */
172static void flush_cache_all_nop(void) {}
173#endif
174
175/* Place where we save old trap entries for restoration */
176struct tt_entry kgdb_savettable[256];
177typedef void (*trapfunc_t)(void);
178
179/* Helper routine for manipulation of kgdb_savettable */
180static inline void copy_ttentry(struct tt_entry *src, struct tt_entry *dest)
181{
182 dest->inst_one = src->inst_one;
183 dest->inst_two = src->inst_two;
184 dest->inst_three = src->inst_three;
185 dest->inst_four = src->inst_four;
186}
187
188/* Initialize the kgdb_savettable so that debugging can commence */
189static void eh_init(void)
190{
191 int i;
192
193 for(i=0; i < 256; i++)
194 copy_ttentry(&sparc_ttable[i], &kgdb_savettable[i]);
195}
196
197/* Install an exception handler for kgdb */
198static void exceptionHandler(int tnum, trapfunc_t trap_entry)
199{
200 unsigned long te_addr = (unsigned long) trap_entry;
201
202 /* Make new vector */
203 sparc_ttable[tnum].inst_one =
204 SPARC_BRANCH((unsigned long) te_addr,
205 (unsigned long) &sparc_ttable[tnum].inst_one);
206 sparc_ttable[tnum].inst_two = SPARC_RD_PSR_L0;
207 sparc_ttable[tnum].inst_three = SPARC_NOP;
208 sparc_ttable[tnum].inst_four = SPARC_NOP;
209}
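/* Annotation added in this listing (not in the original source): each entry of
 * sparc_ttable is a four-instruction slot, so the patched vector becomes a
 * branch to trap_entry (with the displacement computed relative to the slot
 * itself), followed by "rd %psr, %l0" and two nops.
 */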
210
211/* Convert ch from a hex digit to an int */
212static int
213hex(unsigned char ch)
214{
215 if (ch >= 'a' && ch <= 'f')
216 return ch-'a'+10;
217 if (ch >= '0' && ch <= '9')
218 return ch-'0';
219 if (ch >= 'A' && ch <= 'F')
220 return ch-'A'+10;
221 return -1;
222}
223
224/* scan for the sequence $<data>#<checksum> */
225static void
226getpacket(char *buffer)
227{
228 unsigned char checksum;
229 unsigned char xmitcsum;
230 int i;
231 int count;
232 unsigned char ch;
233
234 do {
235 /* wait around for the start character, ignore all other characters */
236 while ((ch = (getDebugChar() & 0x7f)) != '$') ;
237
238 checksum = 0;
239 xmitcsum = -1;
240
241 count = 0;
242
243 /* now, read until a # or end of buffer is found */
244 while (count < BUFMAX) {
245 ch = getDebugChar() & 0x7f;
246 if (ch == '#')
247 break;
248 checksum = checksum + ch;
249 buffer[count] = ch;
250 count = count + 1;
251 }
252
253 if (count >= BUFMAX)
254 continue;
255
256 buffer[count] = 0;
257
258 if (ch == '#') {
259 xmitcsum = hex(getDebugChar() & 0x7f) << 4;
260 xmitcsum |= hex(getDebugChar() & 0x7f);
261 if (checksum != xmitcsum)
262 putDebugChar('-'); /* failed checksum */
263 else {
264 putDebugChar('+'); /* successful transfer */
265 /* if a sequence char is present, reply the ID */
266 if (buffer[2] == ':') {
267 putDebugChar(buffer[0]);
268 putDebugChar(buffer[1]);
269 /* remove sequence chars from buffer */
270 count = strlen(buffer);
271 for (i=3; i <= count; i++)
272 buffer[i-3] = buffer[i];
273 }
274 }
275 }
276 } while (checksum != xmitcsum);
277}
278
279/* send the packet in buffer. */
280
281static void
282putpacket(unsigned char *buffer)
283{
284 unsigned char checksum;
285 int count;
286 unsigned char ch, recv;
287
288 /* $<packet info>#<checksum>. */
289 do {
290 putDebugChar('$');
291 checksum = 0;
292 count = 0;
293
294 while ((ch = buffer[count])) {
295 putDebugChar(ch);
296 checksum += ch;
297 count += 1;
298 }
299
300 putDebugChar('#');
301 putDebugChar(hexchars[checksum >> 4]);
302 putDebugChar(hexchars[checksum & 0xf]);
303 recv = getDebugChar();
304 } while ((recv & 0x7f) != '+');
305}
306
307static char remcomInBuffer[BUFMAX];
308static char remcomOutBuffer[BUFMAX];
309
310/* Convert the memory pointed to by mem into hex, placing the result in buf.
311 * Return a pointer to the last char put in buf (the NUL terminator); in case
312 * of a memory fault, return 0.
313 */
314
315static unsigned char *
316mem2hex(char *mem, char *buf, int count)
317{
318 unsigned char ch;
319
320 while (count-- > 0) {
321 /* This assembler code is basically: ch = *mem++;
322 * except that we use the SPARC/Linux exception table
323 * mechanism (see how "fixup" works in kernel_mna_trap_fault)
324 * to arrange for a "return 0" upon a memory fault
325 */
326 __asm__(
327 "\n1:\n\t"
328 "ldub [%0], %1\n\t"
329 "inc %0\n\t"
330 ".section .fixup,#alloc,#execinstr\n\t"
331 ".align 4\n"
332 "2:\n\t"
333 "retl\n\t"
334 " mov 0, %%o0\n\t"
335 ".section __ex_table, #alloc\n\t"
336 ".align 4\n\t"
337 ".word 1b, 2b\n\t"
338 ".text\n"
339 : "=r" (mem), "=r" (ch) : "0" (mem));
340 *buf++ = hexchars[ch >> 4];
341 *buf++ = hexchars[ch & 0xf];
342 }
343
344 *buf = 0;
345 return buf;
346}
347
348/* Convert the hex array pointed to by buf into binary to be placed in mem.
349 * Return a pointer to the character AFTER the last byte written.
350 */
351static char *
352hex2mem(char *buf, char *mem, int count)
353{
354 int i;
355 unsigned char ch;
356
357 for (i=0; i<count; i++) {
358
359 ch = hex(*buf++) << 4;
360 ch |= hex(*buf++);
361 /* Assembler code is *mem++ = ch; with return 0 on fault */
362 __asm__(
363 "\n1:\n\t"
364 "stb %1, [%0]\n\t"
365 "inc %0\n\t"
366 ".section .fixup,#alloc,#execinstr\n\t"
367 ".align 4\n"
368 "2:\n\t"
369 "retl\n\t"
370 " mov 0, %%o0\n\t"
371 ".section __ex_table, #alloc\n\t"
372 ".align 4\n\t"
373 ".word 1b, 2b\n\t"
374 ".text\n"
375 : "=r" (mem) : "r" (ch) , "0" (mem));
376 }
377 return mem;
378}
379
380/* This table contains the mapping between SPARC hardware trap types and
381 signals, which are primarily what GDB understands. It also indicates
382 which hardware traps we need to commandeer when initializing the stub. */
383
384static struct hard_trap_info
385{
386 unsigned char tt; /* Trap type code for SPARC */
387 unsigned char signo; /* Signal that we map this trap into */
388} hard_trap_info[] = {
389 {SP_TRAP_SBPT, SIGTRAP}, /* ta 1 - Linux/KGDB software breakpoint */
390 {0, 0} /* Must be last */
391};
392
393/* Set up exception handlers for tracing and breakpoints */
394
395void
396set_debug_traps(void)
397{
398 struct hard_trap_info *ht;
399 unsigned long flags;
400
401 local_irq_save(flags);
402#if 0
403/* Have to sort this out. This cannot be done after initialization. */
404 BTFIXUPSET_CALL(flush_cache_all, flush_cache_all_nop, BTFIXUPCALL_NOP);
405#endif
406
407 /* Initialize our copy of the Linux Sparc trap table */
408 eh_init();
409
410 for (ht = hard_trap_info; ht->tt && ht->signo; ht++) {
411 /* Only if it doesn't destroy our fault handlers */
412 if((ht->tt != SP_TRAP_TFLT) &&
413 (ht->tt != SP_TRAP_DFLT))
414 exceptionHandler(ht->tt, trap_low);
415 }
416
417 /* In case GDB is started before us, ack any packets (presumably
418 * "$?#xx") sitting there.
419 *
420 * I've found this code causes more problems than it solves,
421 * so that's why it's commented out. GDB seems to work fine
422 * now starting either before or after the kernel -bwb
423 */
424#if 0
425 while((c = getDebugChar()) != '$');
426 while((c = getDebugChar()) != '#');
427 c = getDebugChar(); /* eat first csum byte */
428 c = getDebugChar(); /* eat second csum byte */
429 putDebugChar('+'); /* ack it */
430#endif
431
432 initialized = 1; /* connect! */
433 local_irq_restore(flags);
434}
435
436/* Convert the SPARC hardware trap type code to a unix signal number. */
437
438static int
439computeSignal(int tt)
440{
441 struct hard_trap_info *ht;
442
443 for (ht = hard_trap_info; ht->tt && ht->signo; ht++)
444 if (ht->tt == tt)
445 return ht->signo;
446
447 return SIGHUP; /* default for things we don't know about */
448}
449
450/*
451 * While we find nice hex chars, build an int.
452 * Return number of chars processed.
453 */
454
455static int
456hexToInt(char **ptr, int *intValue)
457{
458 int numChars = 0;
459 int hexValue;
460
461 *intValue = 0;
462
463 while (**ptr) {
464 hexValue = hex(**ptr);
465 if (hexValue < 0)
466 break;
467
468 *intValue = (*intValue << 4) | hexValue;
469 numChars ++;
470
471 (*ptr)++;
472 }
473
474 return (numChars);
475}
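/* Annotation added in this listing (not in the original source): for a buffer
 * such as "4000,10" with *ptr at the '4', hexToInt() consumes "4000", leaves
 * *ptr pointing at the ',', sets *intValue to 0x4000 and returns 4; the 'm'
 * and 'M' handlers below then step over the ',' and parse the length.
 */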
476
477/*
478 * This function does all command processing for interfacing to gdb. It
479 * returns 1 if you should skip the instruction at the trap address, 0
480 * otherwise.
481 */
482
483extern void breakinst(void);
484
485void
486handle_exception (unsigned long *registers)
487{
488 int tt; /* Trap type */
489 int sigval;
490 int addr;
491 int length;
492 char *ptr;
493 unsigned long *sp;
494
495 /* First, we must force all of the windows to be spilled out */
496
497 asm("save %sp, -64, %sp\n\t"
498 "save %sp, -64, %sp\n\t"
499 "save %sp, -64, %sp\n\t"
500 "save %sp, -64, %sp\n\t"
501 "save %sp, -64, %sp\n\t"
502 "save %sp, -64, %sp\n\t"
503 "save %sp, -64, %sp\n\t"
504 "save %sp, -64, %sp\n\t"
505 "restore\n\t"
506 "restore\n\t"
507 "restore\n\t"
508 "restore\n\t"
509 "restore\n\t"
510 "restore\n\t"
511 "restore\n\t"
512 "restore\n\t");
513
514 lock_kernel();
515 if (registers[PC] == (unsigned long)breakinst) {
516 /* Skip over breakpoint trap insn */
517 registers[PC] = registers[NPC];
518 registers[NPC] += 4;
519 }
520
521 sp = (unsigned long *)registers[SP];
522
523 tt = (registers[TBR] >> 4) & 0xff;
524
525 /* reply to host that an exception has occurred */
526 sigval = computeSignal(tt);
527 ptr = remcomOutBuffer;
528
529 *ptr++ = 'T';
530 *ptr++ = hexchars[sigval >> 4];
531 *ptr++ = hexchars[sigval & 0xf];
532
533 *ptr++ = hexchars[PC >> 4];
534 *ptr++ = hexchars[PC & 0xf];
535 *ptr++ = ':';
536 ptr = mem2hex((char *)&registers[PC], ptr, 4);
537 *ptr++ = ';';
538
539 *ptr++ = hexchars[FP >> 4];
540 *ptr++ = hexchars[FP & 0xf];
541 *ptr++ = ':';
542 ptr = mem2hex((char *) (sp + 8 + 6), ptr, 4); /* FP */
543 *ptr++ = ';';
544
545 *ptr++ = hexchars[SP >> 4];
546 *ptr++ = hexchars[SP & 0xf];
547 *ptr++ = ':';
548 ptr = mem2hex((char *)&sp, ptr, 4);
549 *ptr++ = ';';
550
551 *ptr++ = hexchars[NPC >> 4];
552 *ptr++ = hexchars[NPC & 0xf];
553 *ptr++ = ':';
554 ptr = mem2hex((char *)&registers[NPC], ptr, 4);
555 *ptr++ = ';';
556
557 *ptr++ = hexchars[O7 >> 4];
558 *ptr++ = hexchars[O7 & 0xf];
559 *ptr++ = ':';
560 ptr = mem2hex((char *)&registers[O7], ptr, 4);
561 *ptr++ = ';';
562
563 *ptr++ = 0;
564
565 putpacket(remcomOutBuffer);
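	/* Annotation added in this listing (not in the original source): for a
	 * breakpoint (SIGTRAP) the reply built above is a stop packet of the
	 * form "T0544:<pc>;1e:<fp>;0e:<sp>;45:<npc>;0f:<o7>;", i.e. signal 05
	 * followed by register-number:value pairs, where 0x44/0x1e/0x0e/0x45/
	 * 0x0f are the PC, FP, SP, NPC and O7 indices from the enum above and
	 * each value is 8 hex digits.
	 */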
566
567 /* XXX We may want to add some features dealing with poking the
568 * XXX page tables, the real ones on the srmmu, and what is currently
569 * XXX loaded in the sun4/sun4c tlb at this point in time. But this
570	 * XXX also requires hacking the gdb sources directly...
571 */
572
573 while (1) {
574 remcomOutBuffer[0] = 0;
575
576 getpacket(remcomInBuffer);
577 switch (remcomInBuffer[0]) {
578 case '?':
579 remcomOutBuffer[0] = 'S';
580 remcomOutBuffer[1] = hexchars[sigval >> 4];
581 remcomOutBuffer[2] = hexchars[sigval & 0xf];
582 remcomOutBuffer[3] = 0;
583 break;
584
585 case 'd':
586 /* toggle debug flag */
587 break;
588
589 case 'g': /* return the value of the CPU registers */
590 {
591 ptr = remcomOutBuffer;
592 /* G & O regs */
593 ptr = mem2hex((char *)registers, ptr, 16 * 4);
594 /* L & I regs */
595 ptr = mem2hex((char *) (sp + 0), ptr, 16 * 4);
596 /* Floating point */
597 memset(ptr, '0', 32 * 8);
598 /* Y, PSR, WIM, TBR, PC, NPC, FPSR, CPSR */
599 mem2hex((char *)&registers[Y], (ptr + 32 * 4 * 2), (8 * 4));
600 }
601 break;
602
603 case 'G': /* set the value of the CPU registers - return OK */
604 {
605 unsigned long *newsp, psr;
606
607 psr = registers[PSR];
608
609 ptr = &remcomInBuffer[1];
610 /* G & O regs */
611 hex2mem(ptr, (char *)registers, 16 * 4);
612 /* L & I regs */
613 hex2mem(ptr + 16 * 4 * 2, (char *) (sp + 0), 16 * 4);
614 /* Y, PSR, WIM, TBR, PC, NPC, FPSR, CPSR */
615 hex2mem(ptr + 64 * 4 * 2, (char *)&registers[Y], 8 * 4);
616
617 /* See if the stack pointer has moved. If so,
618 * then copy the saved locals and ins to the
619 * new location. This keeps the window
620 * overflow and underflow routines happy.
621 */
622
623 newsp = (unsigned long *)registers[SP];
624 if (sp != newsp)
625 sp = memcpy(newsp, sp, 16 * 4);
626
627 /* Don't allow CWP to be modified. */
628
629 if (psr != registers[PSR])
630 registers[PSR] = (psr & 0x1f) | (registers[PSR] & ~0x1f);
631
632 strcpy(remcomOutBuffer,"OK");
633 }
634 break;
635
636 case 'm': /* mAA..AA,LLLL Read LLLL bytes at address AA..AA */
637 /* Try to read %x,%x. */
638
639 ptr = &remcomInBuffer[1];
640
641 if (hexToInt(&ptr, &addr)
642 && *ptr++ == ','
643 && hexToInt(&ptr, &length)) {
644 if (mem2hex((char *)addr, remcomOutBuffer, length))
645 break;
646
647 strcpy (remcomOutBuffer, "E03");
648 } else {
649 strcpy(remcomOutBuffer,"E01");
650 }
651 break;
652
653 case 'M': /* MAA..AA,LLLL: Write LLLL bytes at address AA.AA return OK */
654 /* Try to read '%x,%x:'. */
655
656 ptr = &remcomInBuffer[1];
657
658 if (hexToInt(&ptr, &addr)
659 && *ptr++ == ','
660 && hexToInt(&ptr, &length)
661 && *ptr++ == ':') {
662 if (hex2mem(ptr, (char *)addr, length)) {
663 strcpy(remcomOutBuffer, "OK");
664 } else {
665 strcpy(remcomOutBuffer, "E03");
666 }
667 } else {
668 strcpy(remcomOutBuffer, "E02");
669 }
670 break;
671
672 case 'c': /* cAA..AA Continue at address AA..AA(optional) */
673 /* try to read optional parameter, pc unchanged if no parm */
674
675 ptr = &remcomInBuffer[1];
676 if (hexToInt(&ptr, &addr)) {
677 registers[PC] = addr;
678 registers[NPC] = addr + 4;
679 }
680
681/* Need to flush the instruction cache here, as we may have deposited a
682 * breakpoint, and the icache probably has no way of knowing that a data ref to
683 * some location may have changed something that is in the instruction cache.
684 */
685 flush_cache_all();
686 unlock_kernel();
687 return;
688
689 /* kill the program */
690 case 'k' : /* do nothing */
691 break;
692 case 'r': /* Reset */
693 asm ("call 0\n\t"
694 "nop\n\t");
695 break;
696 } /* switch */
697
698 /* reply to the request */
699 putpacket(remcomOutBuffer);
700 } /* while(1) */
701}
702
703/* This function will generate a breakpoint exception. It is used at the
704 beginning of a program to sync up with a debugger and can be used
705 otherwise as a quick means to stop program execution and "break" into
706 the debugger. */
707
708void
709breakpoint(void)
710{
711 if (!initialized)
712 return;
713
714 /* Again, watch those c-prefixes for ELF kernels */
715#if defined(__svr4__) || defined(__ELF__)
716 asm(".globl breakinst\n"
717 "breakinst:\n\t"
718 "ta 1\n");
719#else
720 asm(".globl _breakinst\n"
721 "_breakinst:\n\t"
722 "ta 1\n");
723#endif
724}
diff --git a/arch/sparc/kernel/sparc_ksyms.c b/arch/sparc/kernel/sparc_ksyms.c
new file mode 100644
index 000000000000..f91b0e8d0dc8
--- /dev/null
+++ b/arch/sparc/kernel/sparc_ksyms.c
@@ -0,0 +1,334 @@
1/* $Id: sparc_ksyms.c,v 1.107 2001/07/17 16:17:33 anton Exp $
2 * arch/sparc/kernel/ksyms.c: Sparc specific ksyms support.
3 *
4 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
6 */
7
8/* Tell string.h we don't want memcpy etc. as cpp defines */
9#define EXPORT_SYMTAB_STROPS
10#define PROMLIB_INTERNAL
11
12#include <linux/config.h>
13#include <linux/module.h>
14#include <linux/init.h>
15#include <linux/smp.h>
16#include <linux/types.h>
17#include <linux/string.h>
18#include <linux/sched.h>
19#include <linux/interrupt.h>
20#include <linux/in6.h>
21#include <linux/spinlock.h>
22#include <linux/mm.h>
23#ifdef CONFIG_PCI
24#include <linux/pci.h>
25#endif
26#include <linux/pm.h>
27#ifdef CONFIG_HIGHMEM
28#include <linux/highmem.h>
29#endif
30
31#include <asm/oplib.h>
32#include <asm/delay.h>
33#include <asm/system.h>
34#include <asm/auxio.h>
35#include <asm/pgtable.h>
36#include <asm/io.h>
37#include <asm/irq.h>
38#include <asm/idprom.h>
39#include <asm/svr4.h>
40#include <asm/head.h>
41#include <asm/smp.h>
42#include <asm/mostek.h>
43#include <asm/ptrace.h>
44#include <asm/user.h>
45#include <asm/uaccess.h>
46#include <asm/checksum.h>
47#ifdef CONFIG_SBUS
48#include <asm/sbus.h>
49#include <asm/dma.h>
50#endif
51#ifdef CONFIG_PCI
52#include <asm/ebus.h>
53#endif
54#include <asm/a.out.h>
55#include <asm/io-unit.h>
56#include <asm/bug.h>
57
58extern spinlock_t rtc_lock;
59
60struct poll {
61 int fd;
62 short events;
63 short revents;
64};
65
66extern int svr4_getcontext (svr4_ucontext_t *, struct pt_regs *);
67extern int svr4_setcontext (svr4_ucontext_t *, struct pt_regs *);
68void _sigpause_common (unsigned int set, struct pt_regs *);
69extern void (*__copy_1page)(void *, const void *);
70extern void __memmove(void *, const void *, __kernel_size_t);
71extern void (*bzero_1page)(void *);
72extern void *__bzero(void *, size_t);
73extern void *__memscan_zero(void *, size_t);
74extern void *__memscan_generic(void *, int, size_t);
75extern int __memcmp(const void *, const void *, __kernel_size_t);
76extern int __strncmp(const char *, const char *, __kernel_size_t);
77
78extern int __ashrdi3(int, int);
79extern int __ashldi3(int, int);
80extern int __lshrdi3(int, int);
81extern int __muldi3(int, int);
82extern int __divdi3(int, int);
83
84extern void dump_thread(struct pt_regs *, struct user *);
85
86/* Private functions with odd calling conventions. */
87extern void ___atomic24_add(void);
88extern void ___atomic24_sub(void);
89extern void ___set_bit(void);
90extern void ___clear_bit(void);
91extern void ___change_bit(void);
92
93/* Alias functions whose names begin with "." and export the aliases.
94 * The module references will be fixed up by module_frob_arch_sections.
95 */
96#define DOT_ALIAS2(__ret, __x, __arg1, __arg2) \
97 extern __ret __x(__arg1, __arg2) \
98 __attribute__((weak, alias("." # __x)));
99
100DOT_ALIAS2(int, div, int, int)
101DOT_ALIAS2(int, mul, int, int)
102DOT_ALIAS2(int, rem, int, int)
103DOT_ALIAS2(unsigned, udiv, unsigned, unsigned)
104DOT_ALIAS2(unsigned, umul, unsigned, unsigned)
105DOT_ALIAS2(unsigned, urem, unsigned, unsigned)
106
107#undef DOT_ALIAS2
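/* Annotation added in this listing (not in the original source): for example,
 * DOT_ALIAS2(int, div, int, int) expands to
 *
 *	extern int div(int, int) __attribute__((weak, alias(".div")));
 *
 * so the C-visible name is a weak alias for the assembler entry point ".div",
 * which EXPORT_SYMBOL(div) below can then export to modules.
 */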
108
109/* used by various drivers */
110EXPORT_SYMBOL(sparc_cpu_model);
111EXPORT_SYMBOL(kernel_thread);
112#ifdef CONFIG_DEBUG_SPINLOCK
113#ifdef CONFIG_SMP
114EXPORT_SYMBOL(_do_spin_lock);
115EXPORT_SYMBOL(_do_spin_unlock);
116EXPORT_SYMBOL(_spin_trylock);
117EXPORT_SYMBOL(_do_read_lock);
118EXPORT_SYMBOL(_do_read_unlock);
119EXPORT_SYMBOL(_do_write_lock);
120EXPORT_SYMBOL(_do_write_unlock);
121#endif
122#else
123// XXX find what uses (or used) these.
124// EXPORT_SYMBOL_PRIVATE(_rw_read_enter);
125// EXPORT_SYMBOL_PRIVATE(_rw_read_exit);
126// EXPORT_SYMBOL_PRIVATE(_rw_write_enter);
127#endif
128/* semaphores */
129EXPORT_SYMBOL(__up);
130EXPORT_SYMBOL(__down);
131EXPORT_SYMBOL(__down_trylock);
132EXPORT_SYMBOL(__down_interruptible);
133
134EXPORT_SYMBOL(sparc_valid_addr_bitmap);
135EXPORT_SYMBOL(phys_base);
136EXPORT_SYMBOL(pfn_base);
137
138/* Atomic operations. */
139EXPORT_SYMBOL(___atomic24_add);
140EXPORT_SYMBOL(___atomic24_sub);
141
142/* Bit operations. */
143EXPORT_SYMBOL(___set_bit);
144EXPORT_SYMBOL(___clear_bit);
145EXPORT_SYMBOL(___change_bit);
146
147#ifdef CONFIG_SMP
148/* IRQ implementation. */
149EXPORT_SYMBOL(synchronize_irq);
150
151/* Misc SMP information */
152EXPORT_SYMBOL(__cpu_number_map);
153EXPORT_SYMBOL(__cpu_logical_map);
154#endif
155
156EXPORT_SYMBOL(__udelay);
157EXPORT_SYMBOL(__ndelay);
158EXPORT_SYMBOL(rtc_lock);
159EXPORT_SYMBOL(mostek_lock);
160EXPORT_SYMBOL(mstk48t02_regs);
161#ifdef CONFIG_SUN_AUXIO
162EXPORT_SYMBOL(set_auxio);
163EXPORT_SYMBOL(get_auxio);
164#endif
165EXPORT_SYMBOL(request_fast_irq);
166EXPORT_SYMBOL(io_remap_page_range);
167EXPORT_SYMBOL(io_remap_pfn_range);
168 /* P3: iounit_xxx may be needed, sun4d users */
169/* EXPORT_SYMBOL(iounit_map_dma_init); */
170/* EXPORT_SYMBOL(iounit_map_dma_page); */
171
172#ifndef CONFIG_SMP
173EXPORT_SYMBOL(BTFIXUP_CALL(___xchg32));
174#else
175EXPORT_SYMBOL(BTFIXUP_CALL(__hard_smp_processor_id));
176#endif
177EXPORT_SYMBOL(BTFIXUP_CALL(enable_irq));
178EXPORT_SYMBOL(BTFIXUP_CALL(disable_irq));
179EXPORT_SYMBOL(BTFIXUP_CALL(__irq_itoa));
180EXPORT_SYMBOL(BTFIXUP_CALL(mmu_unlockarea));
181EXPORT_SYMBOL(BTFIXUP_CALL(mmu_lockarea));
182EXPORT_SYMBOL(BTFIXUP_CALL(mmu_get_scsi_sgl));
183EXPORT_SYMBOL(BTFIXUP_CALL(mmu_get_scsi_one));
184EXPORT_SYMBOL(BTFIXUP_CALL(mmu_release_scsi_sgl));
185EXPORT_SYMBOL(BTFIXUP_CALL(mmu_release_scsi_one));
186
187#ifdef CONFIG_SBUS
188EXPORT_SYMBOL(sbus_root);
189EXPORT_SYMBOL(dma_chain);
190EXPORT_SYMBOL(sbus_set_sbus64);
191EXPORT_SYMBOL(sbus_alloc_consistent);
192EXPORT_SYMBOL(sbus_free_consistent);
193EXPORT_SYMBOL(sbus_map_single);
194EXPORT_SYMBOL(sbus_unmap_single);
195EXPORT_SYMBOL(sbus_map_sg);
196EXPORT_SYMBOL(sbus_unmap_sg);
197EXPORT_SYMBOL(sbus_dma_sync_single_for_cpu);
198EXPORT_SYMBOL(sbus_dma_sync_single_for_device);
199EXPORT_SYMBOL(sbus_dma_sync_sg_for_cpu);
200EXPORT_SYMBOL(sbus_dma_sync_sg_for_device);
201EXPORT_SYMBOL(sbus_iounmap);
202EXPORT_SYMBOL(sbus_ioremap);
203#endif
204#ifdef CONFIG_PCI
205EXPORT_SYMBOL(ebus_chain);
206EXPORT_SYMBOL(insb);
207EXPORT_SYMBOL(outsb);
208EXPORT_SYMBOL(insw);
209EXPORT_SYMBOL(outsw);
210EXPORT_SYMBOL(insl);
211EXPORT_SYMBOL(outsl);
212EXPORT_SYMBOL(pci_alloc_consistent);
213EXPORT_SYMBOL(pci_free_consistent);
214EXPORT_SYMBOL(pci_map_single);
215EXPORT_SYMBOL(pci_unmap_single);
216EXPORT_SYMBOL(pci_dma_sync_single_for_cpu);
217EXPORT_SYMBOL(pci_dma_sync_single_for_device);
218EXPORT_SYMBOL(pci_dma_sync_sg_for_cpu);
219EXPORT_SYMBOL(pci_dma_sync_sg_for_device);
220EXPORT_SYMBOL(pci_map_sg);
221EXPORT_SYMBOL(pci_unmap_sg);
222EXPORT_SYMBOL(pci_map_page);
223EXPORT_SYMBOL(pci_unmap_page);
224/* Actually, ioremap/iounmap are not PCI specific. But it is ok for drivers. */
225EXPORT_SYMBOL(ioremap);
226EXPORT_SYMBOL(iounmap);
227#endif
228
229/* in arch/sparc/mm/highmem.c */
230#ifdef CONFIG_HIGHMEM
231EXPORT_SYMBOL(kmap_atomic);
232EXPORT_SYMBOL(kunmap_atomic);
233#endif
234
235/* Solaris/SunOS binary compatibility */
236EXPORT_SYMBOL(svr4_setcontext);
237EXPORT_SYMBOL(svr4_getcontext);
238EXPORT_SYMBOL(_sigpause_common);
239
240EXPORT_SYMBOL(dump_thread);
241
242/* prom symbols */
243EXPORT_SYMBOL(idprom);
244EXPORT_SYMBOL(prom_root_node);
245EXPORT_SYMBOL(prom_getchild);
246EXPORT_SYMBOL(prom_getsibling);
247EXPORT_SYMBOL(prom_searchsiblings);
248EXPORT_SYMBOL(prom_firstprop);
249EXPORT_SYMBOL(prom_nextprop);
250EXPORT_SYMBOL(prom_getproplen);
251EXPORT_SYMBOL(prom_getproperty);
252EXPORT_SYMBOL(prom_node_has_property);
253EXPORT_SYMBOL(prom_setprop);
254EXPORT_SYMBOL(saved_command_line);
255EXPORT_SYMBOL(prom_apply_obio_ranges);
256EXPORT_SYMBOL(prom_getname);
257EXPORT_SYMBOL(prom_feval);
258EXPORT_SYMBOL(prom_getbool);
259EXPORT_SYMBOL(prom_getstring);
260EXPORT_SYMBOL(prom_getint);
261EXPORT_SYMBOL(prom_getintdefault);
262EXPORT_SYMBOL(prom_finddevice);
263EXPORT_SYMBOL(romvec);
264EXPORT_SYMBOL(__prom_getchild);
265EXPORT_SYMBOL(__prom_getsibling);
266
267/* sparc library symbols */
268EXPORT_SYMBOL(memchr);
269EXPORT_SYMBOL(memscan);
270EXPORT_SYMBOL(strlen);
271EXPORT_SYMBOL(strnlen);
272EXPORT_SYMBOL(strcpy);
273EXPORT_SYMBOL(strncpy);
274EXPORT_SYMBOL(strcat);
275EXPORT_SYMBOL(strncat);
276EXPORT_SYMBOL(strcmp);
277EXPORT_SYMBOL(strncmp);
278EXPORT_SYMBOL(strchr);
279EXPORT_SYMBOL(strrchr);
280EXPORT_SYMBOL(strpbrk);
281EXPORT_SYMBOL(strstr);
282EXPORT_SYMBOL(page_kernel);
283
284/* Special internal versions of library functions. */
285EXPORT_SYMBOL(__copy_1page);
286EXPORT_SYMBOL(__memcpy);
287EXPORT_SYMBOL(__memset);
288EXPORT_SYMBOL(bzero_1page);
289EXPORT_SYMBOL(__bzero);
290EXPORT_SYMBOL(__memscan_zero);
291EXPORT_SYMBOL(__memscan_generic);
292EXPORT_SYMBOL(__memcmp);
293EXPORT_SYMBOL(__strncmp);
294EXPORT_SYMBOL(__memmove);
295
296/* Moving data to/from userspace. */
297EXPORT_SYMBOL(__copy_user);
298EXPORT_SYMBOL(__strncpy_from_user);
299
300/* Networking helper routines. */
301EXPORT_SYMBOL(__csum_partial_copy_sparc_generic);
302EXPORT_SYMBOL(csum_partial);
303
304/* Cache flushing. */
305EXPORT_SYMBOL(sparc_flush_page_to_ram);
306
307/* For when serial stuff is built as modules. */
308EXPORT_SYMBOL(sun_do_break);
309
310EXPORT_SYMBOL(__ret_efault);
311
312EXPORT_SYMBOL(memcmp);
313EXPORT_SYMBOL(memcpy);
314EXPORT_SYMBOL(memset);
315EXPORT_SYMBOL(memmove);
316EXPORT_SYMBOL(__ashrdi3);
317EXPORT_SYMBOL(__ashldi3);
318EXPORT_SYMBOL(__lshrdi3);
319EXPORT_SYMBOL(__muldi3);
320EXPORT_SYMBOL(__divdi3);
321
322EXPORT_SYMBOL(rem);
323EXPORT_SYMBOL(urem);
324EXPORT_SYMBOL(mul);
325EXPORT_SYMBOL(umul);
326EXPORT_SYMBOL(div);
327EXPORT_SYMBOL(udiv);
328
329#ifdef CONFIG_DEBUG_BUGVERBOSE
330EXPORT_SYMBOL(do_BUG);
331#endif
332
333/* Sun Power Management Idle Handler */
334EXPORT_SYMBOL(pm_idle);
diff --git a/arch/sparc/kernel/sun4c_irq.c b/arch/sparc/kernel/sun4c_irq.c
new file mode 100644
index 000000000000..3d6a99073c42
--- /dev/null
+++ b/arch/sparc/kernel/sun4c_irq.c
@@ -0,0 +1,250 @@
1/* sun4c_irq.c
2 * arch/sparc/kernel/sun4c_irq.c:
3 *
4 * djhr: Hacked out of irq.c into a CPU dependent version.
5 *
6 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
7 * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
8 * Copyright (C) 1995 Pete A. Zaitcev (zaitcev@yahoo.com)
9 * Copyright (C) 1996 Dave Redman (djhr@tadpole.co.uk)
10 */
11
12#include <linux/config.h>
13#include <linux/errno.h>
14#include <linux/linkage.h>
15#include <linux/kernel_stat.h>
16#include <linux/signal.h>
17#include <linux/sched.h>
18#include <linux/ptrace.h>
19#include <linux/interrupt.h>
20#include <linux/slab.h>
21#include <linux/init.h>
22
23#include <asm/ptrace.h>
24#include <asm/processor.h>
25#include <asm/system.h>
26#include <asm/psr.h>
27#include <asm/vaddrs.h>
28#include <asm/timer.h>
29#include <asm/openprom.h>
30#include <asm/oplib.h>
31#include <asm/traps.h>
32#include <asm/irq.h>
33#include <asm/io.h>
34#include <asm/sun4paddr.h>
35#include <asm/idprom.h>
36#include <asm/machines.h>
37#include <asm/sbus.h>
38
39#if 0
40static struct resource sun4c_timer_eb = { "sun4c_timer" };
41static struct resource sun4c_intr_eb = { "sun4c_intr" };
42#endif
43
44/* Pointer to the interrupt enable byte
45 *
46 * Dave Redman (djhr@tadpole.co.uk)
47 * What you may not be aware of is that entry.S requires this variable.
48 *
49 * --- linux_trap_nmi_sun4c --
50 *
51 * so don't go making it static, like I tried. sigh.
52 */
53unsigned char *interrupt_enable = NULL;
54
55static int sun4c_pil_map[] = { 0, 1, 2, 3, 5, 7, 8, 9 };
56
57unsigned int sun4c_sbint_to_irq(struct sbus_dev *sdev, unsigned int sbint)
58{
59 if (sbint >= sizeof(sun4c_pil_map)) {
60 printk(KERN_ERR "%s: bogus SBINT %d\n", sdev->prom_name, sbint);
61 BUG();
62 }
63 return sun4c_pil_map[sbint];
64}
65
66static void sun4c_disable_irq(unsigned int irq_nr)
67{
68 unsigned long flags;
69 unsigned char current_mask, new_mask;
70
71 local_irq_save(flags);
72 irq_nr &= (NR_IRQS - 1);
73 current_mask = *interrupt_enable;
74 switch(irq_nr) {
75 case 1:
76 new_mask = ((current_mask) & (~(SUN4C_INT_E1)));
77 break;
78 case 8:
79 new_mask = ((current_mask) & (~(SUN4C_INT_E8)));
80 break;
81 case 10:
82 new_mask = ((current_mask) & (~(SUN4C_INT_E10)));
83 break;
84 case 14:
85 new_mask = ((current_mask) & (~(SUN4C_INT_E14)));
86 break;
87 default:
88 local_irq_restore(flags);
89 return;
90 }
91 *interrupt_enable = new_mask;
92 local_irq_restore(flags);
93}
94
95static void sun4c_enable_irq(unsigned int irq_nr)
96{
97 unsigned long flags;
98 unsigned char current_mask, new_mask;
99
100 local_irq_save(flags);
101 irq_nr &= (NR_IRQS - 1);
102 current_mask = *interrupt_enable;
103 switch(irq_nr) {
104 case 1:
105 new_mask = ((current_mask) | SUN4C_INT_E1);
106 break;
107 case 8:
108 new_mask = ((current_mask) | SUN4C_INT_E8);
109 break;
110 case 10:
111 new_mask = ((current_mask) | SUN4C_INT_E10);
112 break;
113 case 14:
114 new_mask = ((current_mask) | SUN4C_INT_E14);
115 break;
116 default:
117 local_irq_restore(flags);
118 return;
119 }
120 *interrupt_enable = new_mask;
121 local_irq_restore(flags);
122}
123
124#define TIMER_IRQ 10 /* Also at level 14, but we ignore that one. */
125#define PROFILE_IRQ 14 /* Level14 ticker.. used by OBP for polling */
126
127volatile struct sun4c_timer_info *sun4c_timers;
128
129#ifdef CONFIG_SUN4
130/* This is an ugly hack to work around the
131 current timer code, and make it work with
132 the sun4/260 intersil
133 */
134volatile struct sun4c_timer_info sun4_timer;
135#endif
136
137static void sun4c_clear_clock_irq(void)
138{
139 volatile unsigned int clear_intr;
140#ifdef CONFIG_SUN4
141 if (idprom->id_machtype == (SM_SUN4 | SM_4_260))
142 clear_intr = sun4_timer.timer_limit10;
143 else
144#endif
145 clear_intr = sun4c_timers->timer_limit10;
146}
147
148static void sun4c_clear_profile_irq(int cpu)
149{
150 /* Errm.. not sure how to do this.. */
151}
152
153static void sun4c_load_profile_irq(int cpu, unsigned int limit)
154{
155 /* Errm.. not sure how to do this.. */
156}
157
158static void __init sun4c_init_timers(irqreturn_t (*counter_fn)(int, void *, struct pt_regs *))
159{
160 int irq;
161
162 /* Map the Timer chip, this is implemented in hardware inside
163 * the cache chip on the sun4c.
164 */
165#ifdef CONFIG_SUN4
166 if (idprom->id_machtype == (SM_SUN4 | SM_4_260))
167 sun4c_timers = &sun4_timer;
168 else
169#endif
170 sun4c_timers = ioremap(SUN_TIMER_PHYSADDR,
171 sizeof(struct sun4c_timer_info));
172
173	/* Have the level 10 timer tick at 100 Hz.  We don't touch the
174	 * level 14 timer limit since we are letting the PROM handle
175	 * it until we have a real console driver, so L1-A works.
176 */
177 sun4c_timers->timer_limit10 = (((1000000/HZ) + 1) << 10);
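	/* Annotation added in this listing (not in the original source): with
	 * HZ = 100 this programs (10000 + 1) << 10 into the limit register;
	 * the counter field sits in the upper bits, hence the << 10, and the
	 * sun4d timer code below uses the same encoding.
	 */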
178 master_l10_counter = &sun4c_timers->cur_count10;
179 master_l10_limit = &sun4c_timers->timer_limit10;
180
181 irq = request_irq(TIMER_IRQ,
182 counter_fn,
183 (SA_INTERRUPT | SA_STATIC_ALLOC),
184 "timer", NULL);
185 if (irq) {
186 prom_printf("time_init: unable to attach IRQ%d\n",TIMER_IRQ);
187 prom_halt();
188 }
189
190#if 0
191 /* This does not work on 4/330 */
192 sun4c_enable_irq(10);
193#endif
194 claim_ticker14(NULL, PROFILE_IRQ, 0);
195}
196
197#ifdef CONFIG_SMP
198static void sun4c_nop(void) {}
199#endif
200
201extern char *sun4m_irq_itoa(unsigned int irq);
202
203void __init sun4c_init_IRQ(void)
204{
205 struct linux_prom_registers int_regs[2];
206 int ie_node;
207
208 if (ARCH_SUN4) {
209 interrupt_enable = (char *)
210 ioremap(sun4_ie_physaddr, PAGE_SIZE);
211 } else {
212 struct resource phyres;
213
214 ie_node = prom_searchsiblings (prom_getchild(prom_root_node),
215 "interrupt-enable");
216 if(ie_node == 0)
217 panic("Cannot find /interrupt-enable node");
218
219 /* Depending on the "address" property is bad news... */
220 interrupt_enable = NULL;
221 if (prom_getproperty(ie_node, "reg", (char *) int_regs,
222 sizeof(int_regs)) != -1) {
223 memset(&phyres, 0, sizeof(struct resource));
224 phyres.flags = int_regs[0].which_io;
225 phyres.start = int_regs[0].phys_addr;
226 interrupt_enable = (char *) sbus_ioremap(&phyres, 0,
227 int_regs[0].reg_size, "sun4c_intr");
228 }
229 }
230 if (!interrupt_enable)
231 panic("Cannot map interrupt_enable");
232
233 BTFIXUPSET_CALL(sbint_to_irq, sun4c_sbint_to_irq, BTFIXUPCALL_NORM);
234 BTFIXUPSET_CALL(enable_irq, sun4c_enable_irq, BTFIXUPCALL_NORM);
235 BTFIXUPSET_CALL(disable_irq, sun4c_disable_irq, BTFIXUPCALL_NORM);
236 BTFIXUPSET_CALL(enable_pil_irq, sun4c_enable_irq, BTFIXUPCALL_NORM);
237 BTFIXUPSET_CALL(disable_pil_irq, sun4c_disable_irq, BTFIXUPCALL_NORM);
238 BTFIXUPSET_CALL(clear_clock_irq, sun4c_clear_clock_irq, BTFIXUPCALL_NORM);
239 BTFIXUPSET_CALL(clear_profile_irq, sun4c_clear_profile_irq, BTFIXUPCALL_NOP);
240 BTFIXUPSET_CALL(load_profile_irq, sun4c_load_profile_irq, BTFIXUPCALL_NOP);
241 BTFIXUPSET_CALL(__irq_itoa, sun4m_irq_itoa, BTFIXUPCALL_NORM);
242 sparc_init_timers = sun4c_init_timers;
243#ifdef CONFIG_SMP
244 BTFIXUPSET_CALL(set_cpu_int, sun4c_nop, BTFIXUPCALL_NOP);
245 BTFIXUPSET_CALL(clear_cpu_int, sun4c_nop, BTFIXUPCALL_NOP);
246 BTFIXUPSET_CALL(set_irq_udt, sun4c_nop, BTFIXUPCALL_NOP);
247#endif
248 *interrupt_enable = (SUN4C_INT_ENABLE);
249 /* Cannot enable interrupts until OBP ticker is disabled. */
250}
diff --git a/arch/sparc/kernel/sun4d_irq.c b/arch/sparc/kernel/sun4d_irq.c
new file mode 100644
index 000000000000..52621348a56c
--- /dev/null
+++ b/arch/sparc/kernel/sun4d_irq.c
@@ -0,0 +1,594 @@
1/* $Id: sun4d_irq.c,v 1.29 2001/12/11 04:55:51 davem Exp $
2 * arch/sparc/kernel/sun4d_irq.c:
3 * SS1000/SC2000 interrupt handling.
4 *
5 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
6 * Heavily based on arch/sparc/kernel/irq.c.
7 */
8
9#include <linux/config.h>
10#include <linux/errno.h>
11#include <linux/linkage.h>
12#include <linux/kernel_stat.h>
13#include <linux/signal.h>
14#include <linux/sched.h>
15#include <linux/ptrace.h>
16#include <linux/interrupt.h>
17#include <linux/slab.h>
18#include <linux/random.h>
19#include <linux/init.h>
20#include <linux/smp.h>
21#include <linux/smp_lock.h>
22#include <linux/spinlock.h>
23#include <linux/seq_file.h>
24
25#include <asm/ptrace.h>
26#include <asm/processor.h>
27#include <asm/system.h>
28#include <asm/psr.h>
29#include <asm/smp.h>
30#include <asm/vaddrs.h>
31#include <asm/timer.h>
32#include <asm/openprom.h>
33#include <asm/oplib.h>
34#include <asm/traps.h>
35#include <asm/irq.h>
36#include <asm/io.h>
37#include <asm/pgalloc.h>
38#include <asm/pgtable.h>
39#include <asm/sbus.h>
40#include <asm/sbi.h>
41#include <asm/cacheflush.h>
42
43/* If you trust the current SCSI layer to handle different SCSI IRQs, enable this. I don't trust it... -jj */
44/* #define DISTRIBUTE_IRQS */
45
46struct sun4d_timer_regs *sun4d_timers;
47#define TIMER_IRQ 10
48
49#define MAX_STATIC_ALLOC 4
50extern struct irqaction static_irqaction[MAX_STATIC_ALLOC];
51extern int static_irq_count;
52unsigned char cpu_leds[32];
53#ifdef CONFIG_SMP
54unsigned char sbus_tid[32];
55#endif
56
57extern struct irqaction *irq_action[];
58extern spinlock_t irq_action_lock;
59
60struct sbus_action {
61 struct irqaction *action;
62 /* For SMP this needs to be extended */
63} *sbus_actions;
64
65static int pil_to_sbus[] = {
66 0, 0, 1, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0, 0,
67};
68
69static int sbus_to_pil[] = {
70 0, 2, 3, 5, 7, 9, 11, 13,
71};
72
73static int nsbi;
74#ifdef CONFIG_SMP
75DEFINE_SPINLOCK(sun4d_imsk_lock);
76#endif
77
78int show_sun4d_interrupts(struct seq_file *p, void *v)
79{
80 int i = *(loff_t *) v, j = 0, k = 0, sbusl;
81 struct irqaction * action;
82 unsigned long flags;
83#ifdef CONFIG_SMP
84 int x;
85#endif
86
87 spin_lock_irqsave(&irq_action_lock, flags);
88 if (i < NR_IRQS) {
89 sbusl = pil_to_sbus[i];
90 if (!sbusl) {
91 action = *(i + irq_action);
92 if (!action)
93 goto out_unlock;
94 } else {
95 for (j = 0; j < nsbi; j++) {
96 for (k = 0; k < 4; k++)
97 if ((action = sbus_actions [(j << 5) + (sbusl << 2) + k].action))
98 goto found_it;
99 }
100 goto out_unlock;
101 }
102found_it: seq_printf(p, "%3d: ", i);
103#ifndef CONFIG_SMP
104 seq_printf(p, "%10u ", kstat_irqs(i));
105#else
106 for (x = 0; x < NR_CPUS; x++) {
107 if (cpu_online(x))
108 seq_printf(p, "%10u ",
109 kstat_cpu(cpu_logical_map(x)).irqs[i]);
110 }
111#endif
112 seq_printf(p, "%c %s",
113 (action->flags & SA_INTERRUPT) ? '+' : ' ',
114 action->name);
115 action = action->next;
116 for (;;) {
117 for (; action; action = action->next) {
118 seq_printf(p, ",%s %s",
119 (action->flags & SA_INTERRUPT) ? " +" : "",
120 action->name);
121 }
122 if (!sbusl) break;
123 k++;
124 if (k < 4)
125 action = sbus_actions [(j << 5) + (sbusl << 2) + k].action;
126 else {
127 j++;
128 if (j == nsbi) break;
129 k = 0;
130 action = sbus_actions [(j << 5) + (sbusl << 2)].action;
131 }
132 }
133 seq_putc(p, '\n');
134 }
135out_unlock:
136 spin_unlock_irqrestore(&irq_action_lock, flags);
137 return 0;
138}
139
140void sun4d_free_irq(unsigned int irq, void *dev_id)
141{
142 struct irqaction *action, **actionp;
143 struct irqaction *tmp = NULL;
144 unsigned long flags;
145
146 spin_lock_irqsave(&irq_action_lock, flags);
147 if (irq < 15)
148 actionp = irq + irq_action;
149 else
150 actionp = &(sbus_actions[irq - (1 << 5)].action);
151 action = *actionp;
152 if (!action) {
153 printk("Trying to free free IRQ%d\n",irq);
154 goto out_unlock;
155 }
156 if (dev_id) {
157 for (; action; action = action->next) {
158 if (action->dev_id == dev_id)
159 break;
160 tmp = action;
161 }
162 if (!action) {
163 printk("Trying to free free shared IRQ%d\n",irq);
164 goto out_unlock;
165 }
166 } else if (action->flags & SA_SHIRQ) {
167 printk("Trying to free shared IRQ%d with NULL device ID\n", irq);
168 goto out_unlock;
169 }
170 if (action->flags & SA_STATIC_ALLOC)
171 {
172 /* This interrupt is marked as specially allocated
173 * so it is a bad idea to free it.
174 */
175 printk("Attempt to free statically allocated IRQ%d (%s)\n",
176 irq, action->name);
177 goto out_unlock;
178 }
179
180 if (action && tmp)
181 tmp->next = action->next;
182 else
183 *actionp = action->next;
184
185 spin_unlock_irqrestore(&irq_action_lock, flags);
186
187 synchronize_irq(irq);
188
189 spin_lock_irqsave(&irq_action_lock, flags);
190
191 kfree(action);
192
193 if (!(*actionp))
194 disable_irq(irq);
195
196out_unlock:
197 spin_unlock_irqrestore(&irq_action_lock, flags);
198}
199
200extern void unexpected_irq(int, void *, struct pt_regs *);
201
202void sun4d_handler_irq(int irq, struct pt_regs * regs)
203{
204 struct irqaction * action;
205 int cpu = smp_processor_id();
206 /* SBUS IRQ level (1 - 7) */
207 int sbusl = pil_to_sbus[irq];
208
209 /* FIXME: Is this necessary?? */
210 cc_get_ipen();
211
212 cc_set_iclr(1 << irq);
213
214 irq_enter();
215 kstat_cpu(cpu).irqs[irq]++;
216 if (!sbusl) {
217 action = *(irq + irq_action);
218 if (!action)
219 unexpected_irq(irq, NULL, regs);
220 do {
221 action->handler(irq, action->dev_id, regs);
222 action = action->next;
223 } while (action);
224 } else {
225 int bus_mask = bw_get_intr_mask(sbusl) & 0x3ffff;
226 int sbino;
227 struct sbus_action *actionp;
228 unsigned mask, slot;
229 int sbil = (sbusl << 2);
230
231 bw_clear_intr_mask(sbusl, bus_mask);
232
233 /* Loop for each pending SBI */
234 for (sbino = 0; bus_mask; sbino++, bus_mask >>= 1)
235 if (bus_mask & 1) {
236 mask = acquire_sbi(SBI2DEVID(sbino), 0xf << sbil);
237 mask &= (0xf << sbil);
238 actionp = sbus_actions + (sbino << 5) + (sbil);
239 /* Loop for each pending SBI slot */
240 for (slot = (1 << sbil); mask; slot <<= 1, actionp++)
241 if (mask & slot) {
242 mask &= ~slot;
243 action = actionp->action;
244
245 if (!action)
246 unexpected_irq(irq, NULL, regs);
247 do {
248 action->handler(irq, action->dev_id, regs);
249 action = action->next;
250 } while (action);
251 release_sbi(SBI2DEVID(sbino), slot);
252 }
253 }
254 }
255 irq_exit();
256}
257
258unsigned int sun4d_build_irq(struct sbus_dev *sdev, int irq)
259{
260 int sbusl = pil_to_sbus[irq];
261
262 if (sbusl)
263 return ((sdev->bus->board + 1) << 5) + (sbusl << 2) + sdev->slot;
264 else
265 return irq;
266}
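/* Annotation added in this listing (not in the original source): encoded SBus
 * interrupts start at (1 << 5); e.g. PIL 5 (sbusl 3) on board 0, slot 1 maps
 * to ((0 + 1) << 5) + (3 << 2) + 1 = 45, while values below (1 << 5) are plain
 * processor PILs -- which is why sun4d_request_irq() below treats
 * irq >= (1 << 5) as an SBus interrupt and rejects the 15..31 gap.
 */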
267
268unsigned int sun4d_sbint_to_irq(struct sbus_dev *sdev, unsigned int sbint)
269{
270 if (sbint >= sizeof(sbus_to_pil)) {
271 printk(KERN_ERR "%s: bogus SBINT %d\n", sdev->prom_name, sbint);
272 BUG();
273 }
274 return sun4d_build_irq(sdev, sbus_to_pil[sbint]);
275}
276
277int sun4d_request_irq(unsigned int irq,
278 irqreturn_t (*handler)(int, void *, struct pt_regs *),
279 unsigned long irqflags, const char * devname, void *dev_id)
280{
281 struct irqaction *action, *tmp = NULL, **actionp;
282 unsigned long flags;
283 int ret;
284
285 if(irq > 14 && irq < (1 << 5)) {
286 ret = -EINVAL;
287 goto out;
288 }
289
290 if (!handler) {
291 ret = -EINVAL;
292 goto out;
293 }
294
295 spin_lock_irqsave(&irq_action_lock, flags);
296
297 if (irq >= (1 << 5))
298 actionp = &(sbus_actions[irq - (1 << 5)].action);
299 else
300 actionp = irq + irq_action;
301 action = *actionp;
302
303 if (action) {
304 if ((action->flags & SA_SHIRQ) && (irqflags & SA_SHIRQ)) {
305 for (tmp = action; tmp->next; tmp = tmp->next);
306 } else {
307 ret = -EBUSY;
308 goto out_unlock;
309 }
310 if ((action->flags & SA_INTERRUPT) ^ (irqflags & SA_INTERRUPT)) {
311 printk("Attempt to mix fast and slow interrupts on IRQ%d denied\n", irq);
312 ret = -EBUSY;
313 goto out_unlock;
314 }
315 action = NULL; /* Or else! */
316 }
317
318 /* If this is flagged as statically allocated then we use our
319 * private struct which is never freed.
320 */
321 if (irqflags & SA_STATIC_ALLOC) {
322 if (static_irq_count < MAX_STATIC_ALLOC)
323 action = &static_irqaction[static_irq_count++];
324 else
325 printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n", irq, devname);
326 }
327
328 if (action == NULL)
329 action = (struct irqaction *)kmalloc(sizeof(struct irqaction),
330 GFP_ATOMIC);
331
332 if (!action) {
333 ret = -ENOMEM;
334 goto out_unlock;
335 }
336
337 action->handler = handler;
338 action->flags = irqflags;
339 cpus_clear(action->mask);
340 action->name = devname;
341 action->next = NULL;
342 action->dev_id = dev_id;
343
344 if (tmp)
345 tmp->next = action;
346 else
347 *actionp = action;
348
349 enable_irq(irq);
350
351 ret = 0;
352out_unlock:
353 spin_unlock_irqrestore(&irq_action_lock, flags);
354out:
355 return ret;
356}
357
358static void sun4d_disable_irq(unsigned int irq)
359{
360#ifdef CONFIG_SMP
361 int tid = sbus_tid[(irq >> 5) - 1];
362 unsigned long flags;
363#endif
364
365 if (irq < NR_IRQS) return;
366#ifdef CONFIG_SMP
367 spin_lock_irqsave(&sun4d_imsk_lock, flags);
368 cc_set_imsk_other(tid, cc_get_imsk_other(tid) | (1 << sbus_to_pil[(irq >> 2) & 7]));
369 spin_unlock_irqrestore(&sun4d_imsk_lock, flags);
370#else
371 cc_set_imsk(cc_get_imsk() | (1 << sbus_to_pil[(irq >> 2) & 7]));
372#endif
373}
374
375static void sun4d_enable_irq(unsigned int irq)
376{
377#ifdef CONFIG_SMP
378 int tid = sbus_tid[(irq >> 5) - 1];
379 unsigned long flags;
380#endif
381
382 if (irq < NR_IRQS) return;
383#ifdef CONFIG_SMP
384 spin_lock_irqsave(&sun4d_imsk_lock, flags);
385 cc_set_imsk_other(tid, cc_get_imsk_other(tid) & ~(1 << sbus_to_pil[(irq >> 2) & 7]));
386 spin_unlock_irqrestore(&sun4d_imsk_lock, flags);
387#else
388 cc_set_imsk(cc_get_imsk() & ~(1 << sbus_to_pil[(irq >> 2) & 7]));
389#endif
390}
391
392#ifdef CONFIG_SMP
393static void sun4d_set_cpu_int(int cpu, int level)
394{
395 sun4d_send_ipi(cpu, level);
396}
397
398static void sun4d_clear_ipi(int cpu, int level)
399{
400}
401
402static void sun4d_set_udt(int cpu)
403{
404}
405
406/* Setup IRQ distribution scheme. */
407void __init sun4d_distribute_irqs(void)
408{
409#ifdef DISTRIBUTE_IRQS
410 struct sbus_bus *sbus;
411 unsigned long sbus_serving_map;
412
413 sbus_serving_map = cpu_present_map;
414 for_each_sbus(sbus) {
415 if ((sbus->board * 2) == boot_cpu_id && (cpu_present_map & (1 << (sbus->board * 2 + 1))))
416 sbus_tid[sbus->board] = (sbus->board * 2 + 1);
417 else if (cpu_present_map & (1 << (sbus->board * 2)))
418 sbus_tid[sbus->board] = (sbus->board * 2);
419 else if (cpu_present_map & (1 << (sbus->board * 2 + 1)))
420 sbus_tid[sbus->board] = (sbus->board * 2 + 1);
421 else
422 sbus_tid[sbus->board] = 0xff;
423 if (sbus_tid[sbus->board] != 0xff)
424 sbus_serving_map &= ~(1 << sbus_tid[sbus->board]);
425 }
426 for_each_sbus(sbus)
427 if (sbus_tid[sbus->board] == 0xff) {
428 int i = 31;
429
430 if (!sbus_serving_map)
431 sbus_serving_map = cpu_present_map;
432 while (!(sbus_serving_map & (1 << i)))
433 i--;
434 sbus_tid[sbus->board] = i;
435 sbus_serving_map &= ~(1 << i);
436 }
437 for_each_sbus(sbus) {
438 printk("sbus%d IRQs directed to CPU%d\n", sbus->board, sbus_tid[sbus->board]);
439 set_sbi_tid(sbus->devid, sbus_tid[sbus->board] << 3);
440 }
441#else
442 struct sbus_bus *sbus;
443 int cpuid = cpu_logical_map(1);
444
445 if (cpuid == -1)
446 cpuid = cpu_logical_map(0);
447 for_each_sbus(sbus) {
448 sbus_tid[sbus->board] = cpuid;
449 set_sbi_tid(sbus->devid, cpuid << 3);
450 }
451 printk("All sbus IRQs directed to CPU%d\n", cpuid);
452#endif
453}
454#endif
455
456static void sun4d_clear_clock_irq(void)
457{
458 volatile unsigned int clear_intr;
459 clear_intr = sun4d_timers->l10_timer_limit;
460}
461
462static void sun4d_clear_profile_irq(int cpu)
463{
464 bw_get_prof_limit(cpu);
465}
466
467static void sun4d_load_profile_irq(int cpu, unsigned int limit)
468{
469 bw_set_prof_limit(cpu, limit);
470}
471
472static void __init sun4d_init_timers(irqreturn_t (*counter_fn)(int, void *, struct pt_regs *))
473{
474 int irq;
475 int cpu;
476 struct resource r;
477 int mid;
478
479 /* Map the User Timer registers. */
480 memset(&r, 0, sizeof(r));
481#ifdef CONFIG_SMP
482 r.start = CSR_BASE(boot_cpu_id)+BW_TIMER_LIMIT;
483#else
484 r.start = CSR_BASE(0)+BW_TIMER_LIMIT;
485#endif
486 r.flags = 0xf;
487 sun4d_timers = (struct sun4d_timer_regs *) sbus_ioremap(&r, 0,
488 PAGE_SIZE, "user timer");
489
490 sun4d_timers->l10_timer_limit = (((1000000/HZ) + 1) << 10);
491 master_l10_counter = &sun4d_timers->l10_cur_count;
492 master_l10_limit = &sun4d_timers->l10_timer_limit;
493
494 irq = request_irq(TIMER_IRQ,
495 counter_fn,
496 (SA_INTERRUPT | SA_STATIC_ALLOC),
497 "timer", NULL);
498 if (irq) {
499 prom_printf("time_init: unable to attach IRQ%d\n",TIMER_IRQ);
500 prom_halt();
501 }
502
503 /* Enable user timer free run for CPU 0 in BW */
504 /* bw_set_ctrl(0, bw_get_ctrl(0) | BW_CTRL_USER_TIMER); */
505
506 cpu = 0;
507 while (!cpu_find_by_instance(cpu, NULL, &mid)) {
508 sun4d_load_profile_irq(mid >> 3, 0);
509 cpu++;
510 }
511
512#ifdef CONFIG_SMP
513 {
514 unsigned long flags;
515 extern unsigned long lvl14_save[4];
516 struct tt_entry *trap_table = &sparc_ttable[SP_TRAP_IRQ1 + (14 - 1)];
517 extern unsigned int real_irq_entry[], smp4d_ticker[];
518 extern unsigned int patchme_maybe_smp_msg[];
519
520 /* Adjust so that we jump directly to smp4d_ticker */
521 lvl14_save[2] += smp4d_ticker - real_irq_entry;
522
523		/* For SMP we use the level 14 ticker; however, the bootup code
524		 * has copied the firmware's level 14 vector into the boot cpu's
525		 * trap table, so we must fix this now or we get squashed.
526 */
527 local_irq_save(flags);
528 patchme_maybe_smp_msg[0] = 0x01000000; /* NOP out the branch */
529 trap_table->inst_one = lvl14_save[0];
530 trap_table->inst_two = lvl14_save[1];
531 trap_table->inst_three = lvl14_save[2];
532 trap_table->inst_four = lvl14_save[3];
533 local_flush_cache_all();
534 local_irq_restore(flags);
535 }
536#endif
537}
538
539void __init sun4d_init_sbi_irq(void)
540{
541 struct sbus_bus *sbus;
542 unsigned mask;
543
544 nsbi = 0;
545 for_each_sbus(sbus)
546 nsbi++;
547 sbus_actions = (struct sbus_action *)kmalloc (nsbi * 8 * 4 * sizeof(struct sbus_action), GFP_ATOMIC);
548 memset (sbus_actions, 0, (nsbi * 8 * 4 * sizeof(struct sbus_action)));
549 for_each_sbus(sbus) {
550#ifdef CONFIG_SMP
551 extern unsigned char boot_cpu_id;
552
553 set_sbi_tid(sbus->devid, boot_cpu_id << 3);
554 sbus_tid[sbus->board] = boot_cpu_id;
555#endif
556 /* Get rid of pending irqs from PROM */
557 mask = acquire_sbi(sbus->devid, 0xffffffff);
558 if (mask) {
559 printk ("Clearing pending IRQs %08x on SBI %d\n", mask, sbus->board);
560 release_sbi(sbus->devid, mask);
561 }
562 }
563}
564
565static char *sun4d_irq_itoa(unsigned int irq)
566{
567 static char buff[16];
568
569 if (irq < (1 << 5))
570 sprintf(buff, "%d", irq);
571 else
572 sprintf(buff, "%d,%x", sbus_to_pil[(irq >> 2) & 7], irq);
573 return buff;
574}
575
576void __init sun4d_init_IRQ(void)
577{
578 local_irq_disable();
579
580 BTFIXUPSET_CALL(sbint_to_irq, sun4d_sbint_to_irq, BTFIXUPCALL_NORM);
581 BTFIXUPSET_CALL(enable_irq, sun4d_enable_irq, BTFIXUPCALL_NORM);
582 BTFIXUPSET_CALL(disable_irq, sun4d_disable_irq, BTFIXUPCALL_NORM);
583 BTFIXUPSET_CALL(clear_clock_irq, sun4d_clear_clock_irq, BTFIXUPCALL_NORM);
584 BTFIXUPSET_CALL(clear_profile_irq, sun4d_clear_profile_irq, BTFIXUPCALL_NORM);
585 BTFIXUPSET_CALL(load_profile_irq, sun4d_load_profile_irq, BTFIXUPCALL_NORM);
586 BTFIXUPSET_CALL(__irq_itoa, sun4d_irq_itoa, BTFIXUPCALL_NORM);
587 sparc_init_timers = sun4d_init_timers;
588#ifdef CONFIG_SMP
589 BTFIXUPSET_CALL(set_cpu_int, sun4d_set_cpu_int, BTFIXUPCALL_NORM);
590 BTFIXUPSET_CALL(clear_cpu_int, sun4d_clear_ipi, BTFIXUPCALL_NOP);
591 BTFIXUPSET_CALL(set_irq_udt, sun4d_set_udt, BTFIXUPCALL_NOP);
592#endif
593 /* Cannot enable interrupts until OBP ticker is disabled. */
594}
diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c
new file mode 100644
index 000000000000..cc1fc898495c
--- /dev/null
+++ b/arch/sparc/kernel/sun4d_smp.c
@@ -0,0 +1,486 @@
1/* sun4d_smp.c: Sparc SS1000/SC2000 SMP support.
2 *
3 * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
4 *
5 * Based on sun4m's smp.c, which is:
6 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
7 */
8
9#include <asm/head.h>
10
11#include <linux/kernel.h>
12#include <linux/sched.h>
13#include <linux/threads.h>
14#include <linux/smp.h>
15#include <linux/smp_lock.h>
16#include <linux/interrupt.h>
17#include <linux/kernel_stat.h>
18#include <linux/init.h>
19#include <linux/spinlock.h>
20#include <linux/mm.h>
21#include <linux/swap.h>
22#include <linux/profile.h>
23
24#include <asm/ptrace.h>
25#include <asm/atomic.h>
26
27#include <asm/delay.h>
28#include <asm/irq.h>
29#include <asm/page.h>
30#include <asm/pgalloc.h>
31#include <asm/pgtable.h>
32#include <asm/oplib.h>
33#include <asm/sbus.h>
34#include <asm/sbi.h>
35#include <asm/tlbflush.h>
36#include <asm/cacheflush.h>
37#include <asm/cpudata.h>
38
39#define IRQ_CROSS_CALL 15
40
41extern ctxd_t *srmmu_ctx_table_phys;
42
43extern void calibrate_delay(void);
44
45extern volatile int smp_processors_ready;
46extern int smp_num_cpus;
47static int smp_highest_cpu;
48extern volatile unsigned long cpu_callin_map[NR_CPUS];
49extern struct cpuinfo_sparc cpu_data[NR_CPUS];
50extern unsigned char boot_cpu_id;
51extern int smp_activated;
52extern volatile int __cpu_number_map[NR_CPUS];
53extern volatile int __cpu_logical_map[NR_CPUS];
54extern volatile unsigned long ipi_count;
55extern volatile int smp_process_available;
56extern volatile int smp_commenced;
57extern int __smp4d_processor_id(void);
58
59/* #define SMP_DEBUG */
60
61#ifdef SMP_DEBUG
62#define SMP_PRINTK(x) printk x
63#else
64#define SMP_PRINTK(x)
65#endif
66
67static inline unsigned long swap(volatile unsigned long *ptr, unsigned long val)
68{
69 __asm__ __volatile__("swap [%1], %0\n\t" :
70 "=&r" (val), "=&r" (ptr) :
71 "0" (val), "1" (ptr));
72 return val;
73}
74
75static void smp_setup_percpu_timer(void);
76extern void cpu_probe(void);
77extern void sun4d_distribute_irqs(void);
78
79void __init smp4d_callin(void)
80{
81 int cpuid = hard_smp4d_processor_id();
82 extern spinlock_t sun4d_imsk_lock;
83 unsigned long flags;
84
85 /* Show we are alive */
86 cpu_leds[cpuid] = 0x6;
87 show_leds(cpuid);
88
89 /* Enable level15 interrupt, disable level14 interrupt for now */
90 cc_set_imsk((cc_get_imsk() & ~0x8000) | 0x4000);
91
92 local_flush_cache_all();
93 local_flush_tlb_all();
94
95 /*
96	 * Unblock the master CPU _only_ once the scheduler state
97	 * of all secondary CPUs is up-to-date, so that after
98	 * SMP initialization the master is allowed to call
99	 * into the scheduler code.
100 */
101 /* Get our local ticker going. */
102 smp_setup_percpu_timer();
103
104 calibrate_delay();
105 smp_store_cpu_info(cpuid);
106 local_flush_cache_all();
107 local_flush_tlb_all();
108
109 /* Allow master to continue. */
110 swap((unsigned long *)&cpu_callin_map[cpuid], 1);
111 local_flush_cache_all();
112 local_flush_tlb_all();
113
114 cpu_probe();
115
116 while((unsigned long)current_set[cpuid] < PAGE_OFFSET)
117 barrier();
118
119 while(current_set[cpuid]->cpu != cpuid)
120 barrier();
121
122 /* Fix idle thread fields. */
123 __asm__ __volatile__("ld [%0], %%g6\n\t"
124 : : "r" (&current_set[cpuid])
125 : "memory" /* paranoid */);
126
127 cpu_leds[cpuid] = 0x9;
128 show_leds(cpuid);
129
130 /* Attach to the address space of init_task. */
131 atomic_inc(&init_mm.mm_count);
132 current->active_mm = &init_mm;
133
134 local_flush_cache_all();
135 local_flush_tlb_all();
136
137 local_irq_enable(); /* We don't allow PIL 14 yet */
138
139 while(!smp_commenced)
140 barrier();
141
142 spin_lock_irqsave(&sun4d_imsk_lock, flags);
143 cc_set_imsk(cc_get_imsk() & ~0x4000); /* Allow PIL 14 as well */
144 spin_unlock_irqrestore(&sun4d_imsk_lock, flags);
145}
146
147extern void init_IRQ(void);
148extern void cpu_panic(void);
149
150/*
151 * Cycle through the processors asking the PROM to start each one.
152 */
153
154extern struct linux_prom_registers smp_penguin_ctable;
155extern unsigned long trapbase_cpu1[];
156extern unsigned long trapbase_cpu2[];
157extern unsigned long trapbase_cpu3[];
158
159void __init smp4d_boot_cpus(void)
160{
161 int cpucount = 0;
162 int i, mid;
163
164 printk("Entering SMP Mode...\n");
165
166 if (boot_cpu_id)
167 current_set[0] = NULL;
168
169 local_irq_enable();
170 cpus_clear(cpu_present_map);
171
172 /* XXX This whole thing has to go. See sparc64. */
173 for (i = 0; !cpu_find_by_instance(i, NULL, &mid); i++)
174 cpu_set(mid, cpu_present_map);
175 SMP_PRINTK(("cpu_present_map %08lx\n", cpus_addr(cpu_present_map)[0]));
176 for(i=0; i < NR_CPUS; i++)
177 __cpu_number_map[i] = -1;
178 for(i=0; i < NR_CPUS; i++)
179 __cpu_logical_map[i] = -1;
180 __cpu_number_map[boot_cpu_id] = 0;
181 __cpu_logical_map[0] = boot_cpu_id;
182 current_thread_info()->cpu = boot_cpu_id;
183 smp_store_cpu_info(boot_cpu_id);
184 smp_setup_percpu_timer();
185 local_flush_cache_all();
186 if (cpu_find_by_instance(1, NULL, NULL))
187 return; /* Not an MP box. */
188 SMP_PRINTK(("Iterating over CPUs\n"));
189 for(i = 0; i < NR_CPUS; i++) {
190 if(i == boot_cpu_id)
191 continue;
192
193 if (cpu_isset(i, cpu_present_map)) {
194 extern unsigned long sun4d_cpu_startup;
195 unsigned long *entry = &sun4d_cpu_startup;
196 struct task_struct *p;
197 int timeout;
198 int no;
199
200 /* Cook up an idler for this guy. */
201 p = fork_idle(i);
202 cpucount++;
203 current_set[i] = p->thread_info;
204 for (no = 0; !cpu_find_by_instance(no, NULL, &mid)
205 && mid != i; no++) ;
206
207 /*
208 * Initialize the contexts table
209 * Since the call to prom_startcpu() trashes the structure,
210 * we need to re-initialize it for each cpu
211 */
212 smp_penguin_ctable.which_io = 0;
213 smp_penguin_ctable.phys_addr = (unsigned int) srmmu_ctx_table_phys;
214 smp_penguin_ctable.reg_size = 0;
215
216 /* whirrr, whirrr, whirrrrrrrrr... */
217 SMP_PRINTK(("Starting CPU %d at %p task %d node %08x\n", i, entry, cpucount, cpu_data(no).prom_node));
218 local_flush_cache_all();
219 prom_startcpu(cpu_data(no).prom_node,
220 &smp_penguin_ctable, 0, (char *)entry);
221
222 SMP_PRINTK(("prom_startcpu returned :)\n"));
223
224 /* wheee... it's going... */
225 for(timeout = 0; timeout < 10000; timeout++) {
226 if(cpu_callin_map[i])
227 break;
228 udelay(200);
229 }
230
231 if(cpu_callin_map[i]) {
232 /* Another "Red Snapper". */
233 __cpu_number_map[i] = cpucount;
234 __cpu_logical_map[cpucount] = i;
235 } else {
236 cpucount--;
237 printk("Processor %d is stuck.\n", i);
238 }
239 }
240 if(!(cpu_callin_map[i])) {
241 cpu_clear(i, cpu_present_map);
242 __cpu_number_map[i] = -1;
243 }
244 }
245 local_flush_cache_all();
246 if(cpucount == 0) {
247 printk("Error: only one Processor found.\n");
248 cpu_present_map = cpumask_of_cpu(hard_smp4d_processor_id());
249 } else {
250 unsigned long bogosum = 0;
251
252 for(i = 0; i < NR_CPUS; i++) {
253 if (cpu_isset(i, cpu_present_map)) {
254 bogosum += cpu_data(i).udelay_val;
255 smp_highest_cpu = i;
256 }
257 }
258 SMP_PRINTK(("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n", cpucount + 1, bogosum/(500000/HZ), (bogosum/(5000/HZ))%100));
259 printk("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n",
260 cpucount + 1,
261 bogosum/(500000/HZ),
262 (bogosum/(5000/HZ))%100);
263 smp_activated = 1;
264 smp_num_cpus = cpucount + 1;
265 }
266
267 /* Free unneeded trap tables */
268 ClearPageReserved(virt_to_page(trapbase_cpu1));
269 set_page_count(virt_to_page(trapbase_cpu1), 1);
270 free_page((unsigned long)trapbase_cpu1);
271 totalram_pages++;
272 num_physpages++;
273
274 ClearPageReserved(virt_to_page(trapbase_cpu2));
275 set_page_count(virt_to_page(trapbase_cpu2), 1);
276 free_page((unsigned long)trapbase_cpu2);
277 totalram_pages++;
278 num_physpages++;
279
280 ClearPageReserved(virt_to_page(trapbase_cpu3));
281 set_page_count(virt_to_page(trapbase_cpu3), 1);
282 free_page((unsigned long)trapbase_cpu3);
283 totalram_pages++;
284 num_physpages++;
285
286 /* Ok, they are spinning and ready to go. */
287 smp_processors_ready = 1;
288 sun4d_distribute_irqs();
289}
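The BogoMIPS figure printed by smp4d_boot_cpus() above is derived from the summed per-cpu udelay_val (delay-loop iterations per timer tick): iterations per tick times HZ ticks per second, divided by 500000 loops per BogoMIPS, with the second expression recovering two decimal places. A stand-alone check of that integer arithmetic, assuming HZ = 100 and a made-up udelay_val chosen only for illustration:

#include <stdio.h>

#define HZ 100			/* assumed; matches sparc32 kernels of this era */

int main(void)
{
	/* Hypothetical loops-per-jiffy values for two CPUs. */
	unsigned long udelay_val[2] = { 249856UL, 249856UL };
	unsigned long bogosum = udelay_val[0] + udelay_val[1];

	/* Same expressions as the printk() in smp4d_boot_cpus(). */
	unsigned long whole = bogosum / (500000 / HZ);        /* integer BogoMIPS   */
	unsigned long frac  = (bogosum / (5000 / HZ)) % 100;  /* two decimal digits */

	/* Prints "99.94" for these inputs. */
	printf("Total of 2 Processors activated (%lu.%02lu BogoMIPS).\n",
	       whole, frac);
	return 0;
}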
290
291static struct smp_funcall {
292 smpfunc_t func;
293 unsigned long arg1;
294 unsigned long arg2;
295 unsigned long arg3;
296 unsigned long arg4;
297 unsigned long arg5;
298 unsigned char processors_in[NR_CPUS]; /* Set when ipi entered. */
299 unsigned char processors_out[NR_CPUS]; /* Set when ipi exited. */
300} ccall_info __attribute__((aligned(8)));
301
302static DEFINE_SPINLOCK(cross_call_lock);
303
304/* Cross calls must be serialized, at least currently. */
305void smp4d_cross_call(smpfunc_t func, unsigned long arg1, unsigned long arg2,
306 unsigned long arg3, unsigned long arg4, unsigned long arg5)
307{
308 if(smp_processors_ready) {
309 register int high = smp_highest_cpu;
310 unsigned long flags;
311
312 spin_lock_irqsave(&cross_call_lock, flags);
313
314 {
315 /* If you make changes here, make sure gcc generates proper code... */
316 register smpfunc_t f asm("i0") = func;
317 register unsigned long a1 asm("i1") = arg1;
318 register unsigned long a2 asm("i2") = arg2;
319 register unsigned long a3 asm("i3") = arg3;
320 register unsigned long a4 asm("i4") = arg4;
321 register unsigned long a5 asm("i5") = arg5;
322
323 __asm__ __volatile__(
324 "std %0, [%6]\n\t"
325 "std %2, [%6 + 8]\n\t"
326 "std %4, [%6 + 16]\n\t" : :
327 "r"(f), "r"(a1), "r"(a2), "r"(a3), "r"(a4), "r"(a5),
328 "r" (&ccall_info.func));
329 }
330
331 /* Init receive/complete mapping, plus fire the IPI's off. */
332 {
333 cpumask_t mask;
334 register int i;
335
336 mask = cpumask_of_cpu(hard_smp4d_processor_id());
337 cpus_andnot(mask, cpu_present_map, mask);
338 for(i = 0; i <= high; i++) {
339 if (cpu_isset(i, mask)) {
340 ccall_info.processors_in[i] = 0;
341 ccall_info.processors_out[i] = 0;
342 sun4d_send_ipi(i, IRQ_CROSS_CALL);
343 }
344 }
345 }
346
347 {
348 register int i;
349
350 i = 0;
351 do {
352 while(!ccall_info.processors_in[i])
353 barrier();
354 } while(++i <= high);
355
356 i = 0;
357 do {
358 while(!ccall_info.processors_out[i])
359 barrier();
360 } while(++i <= high);
361 }
362
363 spin_unlock_irqrestore(&cross_call_lock, flags);
364 }
365}
366
367/* Running cross calls. */
368void smp4d_cross_call_irq(void)
369{
370 int i = hard_smp4d_processor_id();
371
372 ccall_info.processors_in[i] = 1;
373 ccall_info.func(ccall_info.arg1, ccall_info.arg2, ccall_info.arg3,
374 ccall_info.arg4, ccall_info.arg5);
375 ccall_info.processors_out[i] = 1;
376}
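Taken together, smp4d_cross_call() and smp4d_cross_call_irq() form a two-phase handshake: the sender clears each target's processors_in/processors_out slot, fires the IPIs, then spins first until every target has entered the handler and then until every target has left it; sun4d_init_smp() later pre-sets both arrays to 1 so CPUs that never receive an IPI already look finished. A rough user-space analogue of that protocol, with threads standing in for CPUs (the thread count and the dummy function are invented for this sketch):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NCPUS 3

static atomic_int in[NCPUS], out[NCPUS];
static atomic_int pending[NCPUS];		/* stands in for the IPI */
static void (*ccall_func)(int);			/* set before the threads start */

static void dump_cpu(int cpu) { printf("hello from cpu %d\n", cpu); }

static void *cpu_thread(void *arg)
{
	int me = (int)(long)arg;

	while (!atomic_load(&pending[me]))	/* wait for the "IPI" */
		;
	atomic_store(&in[me], 1);		/* phase 1: entered the handler */
	ccall_func(me);				/* run the requested function   */
	atomic_store(&out[me], 1);		/* phase 2: left the handler    */
	return NULL;
}

int main(void)
{
	pthread_t t[NCPUS];
	int i;

	ccall_func = dump_cpu;
	for (i = 0; i < NCPUS; i++) {
		atomic_store(&in[i], 0);	/* reset, as the sender does */
		atomic_store(&out[i], 0);
		pthread_create(&t[i], NULL, cpu_thread, (void *)(long)i);
		atomic_store(&pending[i], 1);	/* "send the IPI" */
	}

	for (i = 0; i < NCPUS; i++)		/* wait for everyone to enter */
		while (!atomic_load(&in[i]))
			;
	for (i = 0; i < NCPUS; i++)		/* ...and then to finish      */
		while (!atomic_load(&out[i]))
			;

	for (i = 0; i < NCPUS; i++)
		pthread_join(t[i], NULL);
	printf("cross call complete\n");
	return 0;
}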
377
378static int smp4d_stop_cpu_sender;
379
380static void smp4d_stop_cpu(void)
381{
382 int me = hard_smp4d_processor_id();
383
384 if (me != smp4d_stop_cpu_sender)
385 while(1) barrier();
386}
387
388/* Cross calls, in order to work efficiently and atomically do all
389 * the message passing work themselves, only stopcpu and reschedule
390 * messages come through here.
391 */
392void smp4d_message_pass(int target, int msg, unsigned long data, int wait)
393{
394 int me = hard_smp4d_processor_id();
395
396 SMP_PRINTK(("smp4d_message_pass %d %d %08lx %d\n", target, msg, data, wait));
397 if (msg == MSG_STOP_CPU && target == MSG_ALL_BUT_SELF) {
398 unsigned long flags;
399 static DEFINE_SPINLOCK(stop_cpu_lock);
400 spin_lock_irqsave(&stop_cpu_lock, flags);
401 smp4d_stop_cpu_sender = me;
402 smp4d_cross_call((smpfunc_t)smp4d_stop_cpu, 0, 0, 0, 0, 0);
403 spin_unlock_irqrestore(&stop_cpu_lock, flags);
404 }
405 printk("Yeeee, trying to send SMP msg(%d) to %d on cpu %d\n", msg, target, me);
406 panic("Bogon SMP message pass.");
407}
408
409void smp4d_percpu_timer_interrupt(struct pt_regs *regs)
410{
411 int cpu = hard_smp4d_processor_id();
412 static int cpu_tick[NR_CPUS];
413 static char led_mask[] = { 0xe, 0xd, 0xb, 0x7, 0xb, 0xd };
414
415 bw_get_prof_limit(cpu);
416 bw_clear_intr_mask(0, 1); /* INTR_TABLE[0] & 1 is Profile IRQ */
417
418 cpu_tick[cpu]++;
419 if (!(cpu_tick[cpu] & 15)) {
420 if (cpu_tick[cpu] == 0x60)
421 cpu_tick[cpu] = 0;
422 cpu_leds[cpu] = led_mask[cpu_tick[cpu] >> 4];
423 show_leds(cpu);
424 }
425
426 profile_tick(CPU_PROFILING, regs);
427
428 if(!--prof_counter(cpu)) {
429 int user = user_mode(regs);
430
431 irq_enter();
432 update_process_times(user);
433 irq_exit();
434
435 prof_counter(cpu) = prof_multiplier(cpu);
436 }
437}
438
439extern unsigned int lvl14_resolution;
440
441static void __init smp_setup_percpu_timer(void)
442{
443 int cpu = hard_smp4d_processor_id();
444
445 prof_counter(cpu) = prof_multiplier(cpu) = 1;
446 load_profile_irq(cpu, lvl14_resolution);
447}
448
449void __init smp4d_blackbox_id(unsigned *addr)
450{
451 int rd = *addr & 0x3e000000;
452
453 addr[0] = 0xc0800800 | rd; /* lda [%g0] ASI_M_VIKING_TMP1, reg */
454 addr[1] = 0x01000000; /* nop */
455 addr[2] = 0x01000000; /* nop */
456}
457
458void __init smp4d_blackbox_current(unsigned *addr)
459{
460 int rd = *addr & 0x3e000000;
461
462 addr[0] = 0xc0800800 | rd; /* lda [%g0] ASI_M_VIKING_TMP1, reg */
463 addr[2] = 0x81282002 | rd | (rd >> 11); /* sll reg, 2, reg */
464 addr[4] = 0x01000000; /* nop */
465}
466
467void __init sun4d_init_smp(void)
468{
469 int i;
470 extern unsigned int t_nmi[], linux_trap_ipi15_sun4d[], linux_trap_ipi15_sun4m[];
471
472 /* Patch ipi15 trap table */
473 t_nmi[1] = t_nmi[1] + (linux_trap_ipi15_sun4d - linux_trap_ipi15_sun4m);
474
475 /* And set btfixup... */
476 BTFIXUPSET_BLACKBOX(hard_smp_processor_id, smp4d_blackbox_id);
477 BTFIXUPSET_BLACKBOX(load_current, smp4d_blackbox_current);
478 BTFIXUPSET_CALL(smp_cross_call, smp4d_cross_call, BTFIXUPCALL_NORM);
479 BTFIXUPSET_CALL(smp_message_pass, smp4d_message_pass, BTFIXUPCALL_NORM);
480 BTFIXUPSET_CALL(__hard_smp_processor_id, __smp4d_processor_id, BTFIXUPCALL_NORM);
481
482 for (i = 0; i < NR_CPUS; i++) {
483 ccall_info.processors_in[i] = 1;
484 ccall_info.processors_out[i] = 1;
485 }
486}
diff --git a/arch/sparc/kernel/sun4m_irq.c b/arch/sparc/kernel/sun4m_irq.c
new file mode 100644
index 000000000000..39d712c3c809
--- /dev/null
+++ b/arch/sparc/kernel/sun4m_irq.c
@@ -0,0 +1,399 @@
1/* sun4m_irq.c
2 * arch/sparc/kernel/sun4m_irq.c:
3 *
4 * djhr: Hacked out of irq.c into a CPU dependent version.
5 *
6 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
7 * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
8 * Copyright (C) 1995 Pete A. Zaitcev (zaitcev@yahoo.com)
9 * Copyright (C) 1996 Dave Redman (djhr@tadpole.co.uk)
10 */
11
12#include <linux/config.h>
13#include <linux/errno.h>
14#include <linux/linkage.h>
15#include <linux/kernel_stat.h>
16#include <linux/signal.h>
17#include <linux/sched.h>
18#include <linux/ptrace.h>
19#include <linux/smp.h>
20#include <linux/interrupt.h>
21#include <linux/slab.h>
22#include <linux/init.h>
23#include <linux/ioport.h>
24
25#include <asm/ptrace.h>
26#include <asm/processor.h>
27#include <asm/system.h>
28#include <asm/psr.h>
29#include <asm/vaddrs.h>
30#include <asm/timer.h>
31#include <asm/openprom.h>
32#include <asm/oplib.h>
33#include <asm/traps.h>
34#include <asm/pgalloc.h>
35#include <asm/pgtable.h>
36#include <asm/smp.h>
37#include <asm/irq.h>
38#include <asm/io.h>
39#include <asm/sbus.h>
40#include <asm/cacheflush.h>
41
42static unsigned long dummy;
43
44struct sun4m_intregs *sun4m_interrupts;
45unsigned long *irq_rcvreg = &dummy;
46
47/* These tables only apply to interrupts greater than 15.
48 *
49 * Any intr value below 0x10 is treated as a soft interrupt; that
50 * may or may not be useful, but it is how the scheme works, and
51 * it will not clash with what OBP tells us about devices.
52 *
53 * Take an encoded intr value, check whether it is valid, and
54 * then fetch the matching mask bits from irq_mask.
55 *
56 * P3: The translation from irq 0x0d to mask 0x2000 is for MrCoffee.
57 */
58static unsigned char irq_xlate[32] = {
59 /* 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, a, b, c, d, e, f */
60 0, 0, 0, 0, 1, 0, 2, 0, 3, 0, 4, 5, 6, 14, 0, 7,
61 0, 0, 8, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 0
62};
63
64static unsigned long irq_mask[] = {
65 0, /* illegal index */
66 SUN4M_INT_SCSI, /* 1 irq 4 */
67 SUN4M_INT_ETHERNET, /* 2 irq 6 */
68 SUN4M_INT_VIDEO, /* 3 irq 8 */
69 SUN4M_INT_REALTIME, /* 4 irq 10 */
70 SUN4M_INT_FLOPPY, /* 5 irq 11 */
71 (SUN4M_INT_SERIAL | SUN4M_INT_KBDMS), /* 6 irq 12 */
72 SUN4M_INT_MODULE_ERR, /* 7 irq 15 */
73 SUN4M_INT_SBUS(0), /* 8 irq 2 */
74 SUN4M_INT_SBUS(1), /* 9 irq 3 */
75 SUN4M_INT_SBUS(2), /* 10 irq 5 */
76 SUN4M_INT_SBUS(3), /* 11 irq 7 */
77 SUN4M_INT_SBUS(4), /* 12 irq 9 */
78 SUN4M_INT_SBUS(5), /* 13 irq 11 */
79 SUN4M_INT_SBUS(6) /* 14 irq 13 */
80};
81
82static int sun4m_pil_map[] = { 0, 2, 3, 5, 7, 9, 11, 13 };
83
84unsigned int sun4m_sbint_to_irq(struct sbus_dev *sdev, unsigned int sbint)
85{
86 if (sbint >= sizeof(sun4m_pil_map)) {
87 printk(KERN_ERR "%s: bogus SBINT %d\n", sdev->prom_name, sbint);
88 BUG();
89 }
90 return sun4m_pil_map[sbint] | 0x30;
91}
92
93inline unsigned long sun4m_get_irqmask(unsigned int irq)
94{
95 unsigned long mask;
96
97 if (irq > 0x20) {
98 /* OBIO/SBUS interrupts */
99 irq &= 0x1f;
100 mask = irq_mask[irq_xlate[irq]];
101 if (!mask)
102 printk("sun4m_get_irqmask: IRQ%d has no valid mask!\n",irq);
103 } else {
104 /* Soft Interrupts will come here.
105 * Currently there is no way to trigger them but I'm sure
106 * something could be cooked up.
107 */
108 irq &= 0xf;
109 mask = SUN4M_SOFT_INT(irq);
110 }
111 return mask;
112}
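As a worked example of the tables above: SBUS interrupt level 2 goes through sun4m_sbint_to_irq() to processor level 3 and gets tagged with 0x30; sun4m_get_irqmask() then strips the tag, runs the value through irq_xlate[] and lands on slot 9 of irq_mask[], i.e. SUN4M_INT_SBUS(1). The stand-alone trace below copies just the two lookup tables from this file; the mask slots are shown by index and label rather than by their register bit values, which live in the sun4m headers:

#include <stdio.h>

/* Copies of the tables defined in sun4m_irq.c. */
static unsigned char irq_xlate[32] = {
	0,  0,  0,  0,  1,  0,  2,  0,  3,  0,  4,  5,  6, 14,  0,  7,
	0,  0,  8,  9,  0, 10,  0, 11,  0, 12,  0, 13,  0, 14,  0,  0
};
static int sun4m_pil_map[] = { 0, 2, 3, 5, 7, 9, 11, 13 };

/* Labels for the irq_mask[] slots, for printing only. */
static const char *mask_name[15] = {
	"illegal", "SCSI", "ETHERNET", "VIDEO", "REALTIME", "FLOPPY",
	"SERIAL|KBDMS", "MODULE_ERR", "SBUS(0)", "SBUS(1)", "SBUS(2)",
	"SBUS(3)", "SBUS(4)", "SBUS(5)", "SBUS(6)"
};

int main(void)
{
	unsigned int sbint = 2;				   /* SBUS interrupt level  */
	unsigned int irq   = sun4m_pil_map[sbint] | 0x30;  /* sun4m_sbint_to_irq() */
	unsigned int idx;

	/* sun4m_get_irqmask(): irq > 0x20, so mask the tag and translate. */
	idx = irq_xlate[irq & 0x1f];
	printf("sbint %u -> irq 0x%02x -> xlate index %u -> irq_mask[%u] = SUN4M_INT_%s\n",
	       sbint, irq, idx, idx, mask_name[idx]);
	return 0;
}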
113
114static void sun4m_disable_irq(unsigned int irq_nr)
115{
116 unsigned long mask, flags;
117 int cpu = smp_processor_id();
118
119 mask = sun4m_get_irqmask(irq_nr);
120 local_irq_save(flags);
121 if (irq_nr > 15)
122 sun4m_interrupts->set = mask;
123 else
124 sun4m_interrupts->cpu_intregs[cpu].set = mask;
125 local_irq_restore(flags);
126}
127
128static void sun4m_enable_irq(unsigned int irq_nr)
129{
130 unsigned long mask, flags;
131 int cpu = smp_processor_id();
132
133 /* Dreadful floppy hack. When we use 0x2b instead of
134 * 0x0b the system blows (it starts to whistle!).
135 * So we continue to use 0x0b. Fixme ASAP. --P3
136 */
137 if (irq_nr != 0x0b) {
138 mask = sun4m_get_irqmask(irq_nr);
139 local_irq_save(flags);
140 if (irq_nr > 15)
141 sun4m_interrupts->clear = mask;
142 else
143 sun4m_interrupts->cpu_intregs[cpu].clear = mask;
144 local_irq_restore(flags);
145 } else {
146 local_irq_save(flags);
147 sun4m_interrupts->clear = SUN4M_INT_FLOPPY;
148 local_irq_restore(flags);
149 }
150}
151
152static unsigned long cpu_pil_to_imask[16] = {
153/*0*/ 0x00000000,
154/*1*/ 0x00000000,
155/*2*/ SUN4M_INT_SBUS(0) | SUN4M_INT_VME(0),
156/*3*/ SUN4M_INT_SBUS(1) | SUN4M_INT_VME(1),
157/*4*/ SUN4M_INT_SCSI,
158/*5*/ SUN4M_INT_SBUS(2) | SUN4M_INT_VME(2),
159/*6*/ SUN4M_INT_ETHERNET,
160/*7*/ SUN4M_INT_SBUS(3) | SUN4M_INT_VME(3),
161/*8*/ SUN4M_INT_VIDEO,
162/*9*/ SUN4M_INT_SBUS(4) | SUN4M_INT_VME(4) | SUN4M_INT_MODULE_ERR,
163/*10*/ SUN4M_INT_REALTIME,
164/*11*/ SUN4M_INT_SBUS(5) | SUN4M_INT_VME(5) | SUN4M_INT_FLOPPY,
165/*12*/ SUN4M_INT_SERIAL | SUN4M_INT_KBDMS,
166/*13*/ SUN4M_INT_AUDIO,
167/*14*/ SUN4M_INT_E14,
168/*15*/ 0x00000000
169};
170
171/* We assume the caller has disabled local interrupts when these are called,
172 * or else very bizarre behavior will result.
173 */
174static void sun4m_disable_pil_irq(unsigned int pil)
175{
176 sun4m_interrupts->set = cpu_pil_to_imask[pil];
177}
178
179static void sun4m_enable_pil_irq(unsigned int pil)
180{
181 sun4m_interrupts->clear = cpu_pil_to_imask[pil];
182}
183
184#ifdef CONFIG_SMP
185static void sun4m_send_ipi(int cpu, int level)
186{
187 unsigned long mask;
188
189 mask = sun4m_get_irqmask(level);
190 sun4m_interrupts->cpu_intregs[cpu].set = mask;
191}
192
193static void sun4m_clear_ipi(int cpu, int level)
194{
195 unsigned long mask;
196
197 mask = sun4m_get_irqmask(level);
198 sun4m_interrupts->cpu_intregs[cpu].clear = mask;
199}
200
201static void sun4m_set_udt(int cpu)
202{
203 sun4m_interrupts->undirected_target = cpu;
204}
205#endif
206
207#define OBIO_INTR 0x20
208#define TIMER_IRQ (OBIO_INTR | 10)
209#define PROFILE_IRQ (OBIO_INTR | 14)
210
211struct sun4m_timer_regs *sun4m_timers;
212unsigned int lvl14_resolution = (((1000000/HZ) + 1) << 10);
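lvl14_resolution here and the l10_timer_limit written in sun4m_init_timers() use the same expression: the tick period in microseconds, plus one, shifted left by 10, which appears to match how the sun4m counter limit registers lay out their count field in the upper bits of the word. Assuming the usual HZ of 100, the value works out as below; the HZ value is an assumption made only for the arithmetic:

#include <stdio.h>

#define HZ 100	/* assumed; the common setting for sparc32 kernels of this era */

int main(void)
{
	unsigned int limit = ((1000000 / HZ) + 1) << 10;

	/* 10000 us per tick -> (10000 + 1) << 10 = 10241024 = 0x009c4400 */
	printf("period = %d us, limit register value = %u (0x%08x)\n",
	       1000000 / HZ, limit, limit);
	return 0;
}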
213
214static void sun4m_clear_clock_irq(void)
215{
216 volatile unsigned int clear_intr;
217 clear_intr = sun4m_timers->l10_timer_limit;
218}
219
220static void sun4m_clear_profile_irq(int cpu)
221{
222 volatile unsigned int clear;
223
224 clear = sun4m_timers->cpu_timers[cpu].l14_timer_limit;
225}
226
227static void sun4m_load_profile_irq(int cpu, unsigned int limit)
228{
229 sun4m_timers->cpu_timers[cpu].l14_timer_limit = limit;
230}
231
232char *sun4m_irq_itoa(unsigned int irq)
233{
234 static char buff[16];
235 sprintf(buff, "%d", irq);
236 return buff;
237}
238
239static void __init sun4m_init_timers(irqreturn_t (*counter_fn)(int, void *, struct pt_regs *))
240{
241 int reg_count, irq, cpu;
242 struct linux_prom_registers cnt_regs[PROMREG_MAX];
243 int obio_node, cnt_node;
244 struct resource r;
245
246 cnt_node = 0;
247 if((obio_node =
248 prom_searchsiblings (prom_getchild(prom_root_node), "obio")) == 0 ||
249 (obio_node = prom_getchild (obio_node)) == 0 ||
250 (cnt_node = prom_searchsiblings (obio_node, "counter")) == 0) {
251 prom_printf("Cannot find /obio/counter node\n");
252 prom_halt();
253 }
254 reg_count = prom_getproperty(cnt_node, "reg",
255 (void *) cnt_regs, sizeof(cnt_regs));
256 reg_count = (reg_count/sizeof(struct linux_prom_registers));
257
258 /* Apply the obio ranges to the timer registers. */
259 prom_apply_obio_ranges(cnt_regs, reg_count);
260
261 cnt_regs[4].phys_addr = cnt_regs[reg_count-1].phys_addr;
262 cnt_regs[4].reg_size = cnt_regs[reg_count-1].reg_size;
263 cnt_regs[4].which_io = cnt_regs[reg_count-1].which_io;
264 for(obio_node = 1; obio_node < 4; obio_node++) {
265 cnt_regs[obio_node].phys_addr =
266 cnt_regs[obio_node-1].phys_addr + PAGE_SIZE;
267 cnt_regs[obio_node].reg_size = cnt_regs[obio_node-1].reg_size;
268 cnt_regs[obio_node].which_io = cnt_regs[obio_node-1].which_io;
269 }
270
271 memset((char*)&r, 0, sizeof(struct resource));
272 /* Map the per-cpu Counter registers. */
273 r.flags = cnt_regs[0].which_io;
274 r.start = cnt_regs[0].phys_addr;
275 sun4m_timers = (struct sun4m_timer_regs *) sbus_ioremap(&r, 0,
276 PAGE_SIZE*SUN4M_NCPUS, "sun4m_cpu_cnt");
277 /* Map the system Counter register. */
278	/* XXX Here we expect consecutive calls to yield adjacent maps. */
279 r.flags = cnt_regs[4].which_io;
280 r.start = cnt_regs[4].phys_addr;
281 sbus_ioremap(&r, 0, cnt_regs[4].reg_size, "sun4m_sys_cnt");
282
283 sun4m_timers->l10_timer_limit = (((1000000/HZ) + 1) << 10);
284 master_l10_counter = &sun4m_timers->l10_cur_count;
285 master_l10_limit = &sun4m_timers->l10_timer_limit;
286
287 irq = request_irq(TIMER_IRQ,
288 counter_fn,
289 (SA_INTERRUPT | SA_STATIC_ALLOC),
290 "timer", NULL);
291 if (irq) {
292 prom_printf("time_init: unable to attach IRQ%d\n",TIMER_IRQ);
293 prom_halt();
294 }
295
296 if (!cpu_find_by_instance(1, NULL, NULL)) {
297 for(cpu = 0; cpu < 4; cpu++)
298 sun4m_timers->cpu_timers[cpu].l14_timer_limit = 0;
299 sun4m_interrupts->set = SUN4M_INT_E14;
300 } else {
301 sun4m_timers->cpu_timers[0].l14_timer_limit = 0;
302 }
303#ifdef CONFIG_SMP
304 {
305 unsigned long flags;
306 extern unsigned long lvl14_save[4];
307 struct tt_entry *trap_table = &sparc_ttable[SP_TRAP_IRQ1 + (14 - 1)];
308
309		/* For SMP we use the level 14 ticker; however, the bootup code
310		 * has copied the firmware's level 14 vector into the boot cpu's
311		 * trap table, so we must fix that up now or we get squashed.
312		 */
313 local_irq_save(flags);
314 trap_table->inst_one = lvl14_save[0];
315 trap_table->inst_two = lvl14_save[1];
316 trap_table->inst_three = lvl14_save[2];
317 trap_table->inst_four = lvl14_save[3];
318 local_flush_cache_all();
319 local_irq_restore(flags);
320 }
321#endif
322}
323
324void __init sun4m_init_IRQ(void)
325{
326 int ie_node,i;
327 struct linux_prom_registers int_regs[PROMREG_MAX];
328 int num_regs;
329 struct resource r;
330 int mid;
331
332 local_irq_disable();
333 if((ie_node = prom_searchsiblings(prom_getchild(prom_root_node), "obio")) == 0 ||
334 (ie_node = prom_getchild (ie_node)) == 0 ||
335 (ie_node = prom_searchsiblings (ie_node, "interrupt")) == 0) {
336 prom_printf("Cannot find /obio/interrupt node\n");
337 prom_halt();
338 }
339 num_regs = prom_getproperty(ie_node, "reg", (char *) int_regs,
340 sizeof(int_regs));
341 num_regs = (num_regs/sizeof(struct linux_prom_registers));
342
343 /* Apply the obio ranges to these registers. */
344 prom_apply_obio_ranges(int_regs, num_regs);
345
346 int_regs[4].phys_addr = int_regs[num_regs-1].phys_addr;
347 int_regs[4].reg_size = int_regs[num_regs-1].reg_size;
348 int_regs[4].which_io = int_regs[num_regs-1].which_io;
349 for(ie_node = 1; ie_node < 4; ie_node++) {
350 int_regs[ie_node].phys_addr = int_regs[ie_node-1].phys_addr + PAGE_SIZE;
351 int_regs[ie_node].reg_size = int_regs[ie_node-1].reg_size;
352 int_regs[ie_node].which_io = int_regs[ie_node-1].which_io;
353 }
354
355 memset((char *)&r, 0, sizeof(struct resource));
356 /* Map the interrupt registers for all possible cpus. */
357 r.flags = int_regs[0].which_io;
358 r.start = int_regs[0].phys_addr;
359 sun4m_interrupts = (struct sun4m_intregs *) sbus_ioremap(&r, 0,
360 PAGE_SIZE*SUN4M_NCPUS, "interrupts_percpu");
361
362 /* Map the system interrupt control registers. */
363 r.flags = int_regs[4].which_io;
364 r.start = int_regs[4].phys_addr;
365 sbus_ioremap(&r, 0, int_regs[4].reg_size, "interrupts_system");
366
367 sun4m_interrupts->set = ~SUN4M_INT_MASKALL;
368 for (i = 0; !cpu_find_by_instance(i, NULL, &mid); i++)
369 sun4m_interrupts->cpu_intregs[mid].clear = ~0x17fff;
370
371 if (!cpu_find_by_instance(1, NULL, NULL)) {
372		/* System-wide interrupts go to cpu 0; this should always
373		 * be safe because cpu 0 is guaranteed to be fitted, or OBP
374		 * would not come up.
375		 *
376		 * Not sure, but writing here on SLAVIO systems may puke,
377		 * so don't do it unless there is more than one cpu.
378		 */
379 irq_rcvreg = (unsigned long *)
380 &sun4m_interrupts->undirected_target;
381 sun4m_interrupts->undirected_target = 0;
382 }
383 BTFIXUPSET_CALL(sbint_to_irq, sun4m_sbint_to_irq, BTFIXUPCALL_NORM);
384 BTFIXUPSET_CALL(enable_irq, sun4m_enable_irq, BTFIXUPCALL_NORM);
385 BTFIXUPSET_CALL(disable_irq, sun4m_disable_irq, BTFIXUPCALL_NORM);
386 BTFIXUPSET_CALL(enable_pil_irq, sun4m_enable_pil_irq, BTFIXUPCALL_NORM);
387 BTFIXUPSET_CALL(disable_pil_irq, sun4m_disable_pil_irq, BTFIXUPCALL_NORM);
388 BTFIXUPSET_CALL(clear_clock_irq, sun4m_clear_clock_irq, BTFIXUPCALL_NORM);
389 BTFIXUPSET_CALL(clear_profile_irq, sun4m_clear_profile_irq, BTFIXUPCALL_NORM);
390 BTFIXUPSET_CALL(load_profile_irq, sun4m_load_profile_irq, BTFIXUPCALL_NORM);
391 BTFIXUPSET_CALL(__irq_itoa, sun4m_irq_itoa, BTFIXUPCALL_NORM);
392 sparc_init_timers = sun4m_init_timers;
393#ifdef CONFIG_SMP
394 BTFIXUPSET_CALL(set_cpu_int, sun4m_send_ipi, BTFIXUPCALL_NORM);
395 BTFIXUPSET_CALL(clear_cpu_int, sun4m_clear_ipi, BTFIXUPCALL_NORM);
396 BTFIXUPSET_CALL(set_irq_udt, sun4m_set_udt, BTFIXUPCALL_NORM);
397#endif
398 /* Cannot enable interrupts until OBP ticker is disabled. */
399}
diff --git a/arch/sparc/kernel/sun4m_smp.c b/arch/sparc/kernel/sun4m_smp.c
new file mode 100644
index 000000000000..f113422a3727
--- /dev/null
+++ b/arch/sparc/kernel/sun4m_smp.c
@@ -0,0 +1,451 @@
1/* sun4m_smp.c: Sparc SUN4M SMP support.
2 *
3 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
4 */
5
6#include <asm/head.h>
7
8#include <linux/kernel.h>
9#include <linux/sched.h>
10#include <linux/threads.h>
11#include <linux/smp.h>
12#include <linux/smp_lock.h>
13#include <linux/interrupt.h>
14#include <linux/kernel_stat.h>
15#include <linux/init.h>
16#include <linux/spinlock.h>
17#include <linux/mm.h>
18#include <linux/swap.h>
19#include <linux/profile.h>
20#include <asm/cacheflush.h>
21#include <asm/tlbflush.h>
22
23#include <asm/ptrace.h>
24#include <asm/atomic.h>
25
26#include <asm/delay.h>
27#include <asm/irq.h>
28#include <asm/page.h>
29#include <asm/pgalloc.h>
30#include <asm/pgtable.h>
31#include <asm/oplib.h>
32#include <asm/cpudata.h>
33
34#define IRQ_RESCHEDULE 13
35#define IRQ_STOP_CPU 14
36#define IRQ_CROSS_CALL 15
37
38extern ctxd_t *srmmu_ctx_table_phys;
39
40extern void calibrate_delay(void);
41
42extern volatile int smp_processors_ready;
43extern int smp_num_cpus;
44extern volatile unsigned long cpu_callin_map[NR_CPUS];
45extern unsigned char boot_cpu_id;
46extern int smp_activated;
47extern volatile int __cpu_number_map[NR_CPUS];
48extern volatile int __cpu_logical_map[NR_CPUS];
49extern volatile unsigned long ipi_count;
50extern volatile int smp_process_available;
51extern volatile int smp_commenced;
52extern int __smp4m_processor_id(void);
53
54/*#define SMP_DEBUG*/
55
56#ifdef SMP_DEBUG
57#define SMP_PRINTK(x) printk x
58#else
59#define SMP_PRINTK(x)
60#endif
61
62static inline unsigned long swap(volatile unsigned long *ptr, unsigned long val)
63{
64 __asm__ __volatile__("swap [%1], %0\n\t" :
65 "=&r" (val), "=&r" (ptr) :
66 "0" (val), "1" (ptr));
67 return val;
68}
69
70static void smp_setup_percpu_timer(void);
71extern void cpu_probe(void);
72
73void __init smp4m_callin(void)
74{
75 int cpuid = hard_smp_processor_id();
76
77 local_flush_cache_all();
78 local_flush_tlb_all();
79
80 set_irq_udt(boot_cpu_id);
81
82 /* Get our local ticker going. */
83 smp_setup_percpu_timer();
84
85 calibrate_delay();
86 smp_store_cpu_info(cpuid);
87
88 local_flush_cache_all();
89 local_flush_tlb_all();
90
91	/*
92	 * Unblock the master CPU _only_ once the scheduler state
93	 * of all secondary CPUs is up to date, so that after
94	 * SMP initialization the master is allowed to call into
95	 * the scheduler code.
96	 */
97 /* Allow master to continue. */
98 swap((unsigned long *)&cpu_callin_map[cpuid], 1);
99
100 local_flush_cache_all();
101 local_flush_tlb_all();
102
103 cpu_probe();
104
105 /* Fix idle thread fields. */
106 __asm__ __volatile__("ld [%0], %%g6\n\t"
107 : : "r" (&current_set[cpuid])
108 : "memory" /* paranoid */);
109
110 /* Attach to the address space of init_task. */
111 atomic_inc(&init_mm.mm_count);
112 current->active_mm = &init_mm;
113
114 while(!smp_commenced)
115 barrier();
116
117 local_flush_cache_all();
118 local_flush_tlb_all();
119
120 local_irq_enable();
121}
122
123extern void init_IRQ(void);
124extern void cpu_panic(void);
125
126/*
127 * Cycle through the processors asking the PROM to start each one.
128 */
129
130extern struct linux_prom_registers smp_penguin_ctable;
131extern unsigned long trapbase_cpu1[];
132extern unsigned long trapbase_cpu2[];
133extern unsigned long trapbase_cpu3[];
134
135void __init smp4m_boot_cpus(void)
136{
137 int cpucount = 0;
138 int i, mid;
139
140 printk("Entering SMP Mode...\n");
141
142 local_irq_enable();
143 cpus_clear(cpu_present_map);
144
145 for (i = 0; !cpu_find_by_instance(i, NULL, &mid); i++)
146 cpu_set(mid, cpu_present_map);
147
148 for(i=0; i < NR_CPUS; i++) {
149 __cpu_number_map[i] = -1;
150 __cpu_logical_map[i] = -1;
151 }
152
153 __cpu_number_map[boot_cpu_id] = 0;
154 __cpu_logical_map[0] = boot_cpu_id;
155 current_thread_info()->cpu = boot_cpu_id;
156
157 smp_store_cpu_info(boot_cpu_id);
158 set_irq_udt(boot_cpu_id);
159 smp_setup_percpu_timer();
160 local_flush_cache_all();
161 if(cpu_find_by_instance(1, NULL, NULL))
162 return; /* Not an MP box. */
163 for(i = 0; i < NR_CPUS; i++) {
164 if(i == boot_cpu_id)
165 continue;
166
167 if (cpu_isset(i, cpu_present_map)) {
168 extern unsigned long sun4m_cpu_startup;
169 unsigned long *entry = &sun4m_cpu_startup;
170 struct task_struct *p;
171 int timeout;
172
173 /* Cook up an idler for this guy. */
174 p = fork_idle(i);
175 cpucount++;
176 current_set[i] = p->thread_info;
177 /* See trampoline.S for details... */
178 entry += ((i-1) * 3);
179
180 /*
181 * Initialize the contexts table
182 * Since the call to prom_startcpu() trashes the structure,
183 * we need to re-initialize it for each cpu
184 */
185 smp_penguin_ctable.which_io = 0;
186 smp_penguin_ctable.phys_addr = (unsigned int) srmmu_ctx_table_phys;
187 smp_penguin_ctable.reg_size = 0;
188
189 /* whirrr, whirrr, whirrrrrrrrr... */
190 printk("Starting CPU %d at %p\n", i, entry);
191 local_flush_cache_all();
192 prom_startcpu(cpu_data(i).prom_node,
193 &smp_penguin_ctable, 0, (char *)entry);
194
195 /* wheee... it's going... */
196 for(timeout = 0; timeout < 10000; timeout++) {
197 if(cpu_callin_map[i])
198 break;
199 udelay(200);
200 }
201 if(cpu_callin_map[i]) {
202 /* Another "Red Snapper". */
203 __cpu_number_map[i] = i;
204 __cpu_logical_map[i] = i;
205 } else {
206 cpucount--;
207 printk("Processor %d is stuck.\n", i);
208 }
209 }
210 if(!(cpu_callin_map[i])) {
211 cpu_clear(i, cpu_present_map);
212 __cpu_number_map[i] = -1;
213 }
214 }
215 local_flush_cache_all();
216 if(cpucount == 0) {
217 printk("Error: only one Processor found.\n");
218 cpu_present_map = cpumask_of_cpu(smp_processor_id());
219 } else {
220 unsigned long bogosum = 0;
221 for(i = 0; i < NR_CPUS; i++) {
222 if (cpu_isset(i, cpu_present_map))
223 bogosum += cpu_data(i).udelay_val;
224 }
225 printk("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n",
226 cpucount + 1,
227 bogosum/(500000/HZ),
228 (bogosum/(5000/HZ))%100);
229 smp_activated = 1;
230 smp_num_cpus = cpucount + 1;
231 }
232
233 /* Free unneeded trap tables */
234	if (!cpu_isset(1, cpu_present_map)) {
235 ClearPageReserved(virt_to_page(trapbase_cpu1));
236 set_page_count(virt_to_page(trapbase_cpu1), 1);
237 free_page((unsigned long)trapbase_cpu1);
238 totalram_pages++;
239 num_physpages++;
240 }
241 if (!cpu_isset(2, cpu_present_map)) {
242 ClearPageReserved(virt_to_page(trapbase_cpu2));
243 set_page_count(virt_to_page(trapbase_cpu2), 1);
244 free_page((unsigned long)trapbase_cpu2);
245 totalram_pages++;
246 num_physpages++;
247 }
248 if (!cpu_isset(3, cpu_present_map)) {
249 ClearPageReserved(virt_to_page(trapbase_cpu3));
250 set_page_count(virt_to_page(trapbase_cpu3), 1);
251 free_page((unsigned long)trapbase_cpu3);
252 totalram_pages++;
253 num_physpages++;
254 }
255
256 /* Ok, they are spinning and ready to go. */
257 smp_processors_ready = 1;
258}
259
260/* At each hardware IRQ, we get this called to forward IRQ reception
261 * to the next processor. The caller must disable the IRQ level being
262 * serviced globally so that there are no double interrupts received.
263 *
264 * XXX See sparc64 irq.c.
265 */
266void smp4m_irq_rotate(int cpu)
267{
268}
269
270/* Cross calls, in order to work efficiently and atomically do all
271 * the message passing work themselves, only stopcpu and reschedule
272 * messages come through here.
273 */
274void smp4m_message_pass(int target, int msg, unsigned long data, int wait)
275{
276 static unsigned long smp_cpu_in_msg[NR_CPUS];
277 cpumask_t mask;
278 int me = smp_processor_id();
279 int irq, i;
280
281 if(msg == MSG_RESCHEDULE) {
282 irq = IRQ_RESCHEDULE;
283
284 if(smp_cpu_in_msg[me])
285 return;
286 } else if(msg == MSG_STOP_CPU) {
287 irq = IRQ_STOP_CPU;
288 } else {
289 goto barf;
290 }
291
292 smp_cpu_in_msg[me]++;
293 if(target == MSG_ALL_BUT_SELF || target == MSG_ALL) {
294 mask = cpu_present_map;
295 if(target == MSG_ALL_BUT_SELF)
296 cpu_clear(me, mask);
297 for(i = 0; i < 4; i++) {
298 if (cpu_isset(i, mask))
299 set_cpu_int(i, irq);
300 }
301 } else {
302 set_cpu_int(target, irq);
303 }
304 smp_cpu_in_msg[me]--;
305
306 return;
307barf:
308 printk("Yeeee, trying to send SMP msg(%d) on cpu %d\n", msg, me);
309 panic("Bogon SMP message pass.");
310}
311
312static struct smp_funcall {
313 smpfunc_t func;
314 unsigned long arg1;
315 unsigned long arg2;
316 unsigned long arg3;
317 unsigned long arg4;
318 unsigned long arg5;
319 unsigned long processors_in[NR_CPUS]; /* Set when ipi entered. */
320 unsigned long processors_out[NR_CPUS]; /* Set when ipi exited. */
321} ccall_info;
322
323static DEFINE_SPINLOCK(cross_call_lock);
324
325/* Cross calls must be serialized, at least currently. */
326void smp4m_cross_call(smpfunc_t func, unsigned long arg1, unsigned long arg2,
327 unsigned long arg3, unsigned long arg4, unsigned long arg5)
328{
329 if(smp_processors_ready) {
330 register int ncpus = smp_num_cpus;
331 unsigned long flags;
332
333 spin_lock_irqsave(&cross_call_lock, flags);
334
335 /* Init function glue. */
336 ccall_info.func = func;
337 ccall_info.arg1 = arg1;
338 ccall_info.arg2 = arg2;
339 ccall_info.arg3 = arg3;
340 ccall_info.arg4 = arg4;
341 ccall_info.arg5 = arg5;
342
343 /* Init receive/complete mapping, plus fire the IPI's off. */
344 {
345 cpumask_t mask = cpu_present_map;
346 register int i;
347
348 cpu_clear(smp_processor_id(), mask);
349 for(i = 0; i < ncpus; i++) {
350 if (cpu_isset(i, mask)) {
351 ccall_info.processors_in[i] = 0;
352 ccall_info.processors_out[i] = 0;
353 set_cpu_int(i, IRQ_CROSS_CALL);
354 } else {
355 ccall_info.processors_in[i] = 1;
356 ccall_info.processors_out[i] = 1;
357 }
358 }
359 }
360
361 {
362 register int i;
363
364 i = 0;
365 do {
366 while(!ccall_info.processors_in[i])
367 barrier();
368 } while(++i < ncpus);
369
370 i = 0;
371 do {
372 while(!ccall_info.processors_out[i])
373 barrier();
374 } while(++i < ncpus);
375 }
376
377 spin_unlock_irqrestore(&cross_call_lock, flags);
378 }
379}
380
381/* Running cross calls. */
382void smp4m_cross_call_irq(void)
383{
384 int i = smp_processor_id();
385
386 ccall_info.processors_in[i] = 1;
387 ccall_info.func(ccall_info.arg1, ccall_info.arg2, ccall_info.arg3,
388 ccall_info.arg4, ccall_info.arg5);
389 ccall_info.processors_out[i] = 1;
390}
391
392void smp4m_percpu_timer_interrupt(struct pt_regs *regs)
393{
394 int cpu = smp_processor_id();
395
396 clear_profile_irq(cpu);
397
398 profile_tick(CPU_PROFILING, regs);
399
400 if(!--prof_counter(cpu)) {
401 int user = user_mode(regs);
402
403 irq_enter();
404 update_process_times(user);
405 irq_exit();
406
407 prof_counter(cpu) = prof_multiplier(cpu);
408 }
409}
410
411extern unsigned int lvl14_resolution;
412
413static void __init smp_setup_percpu_timer(void)
414{
415 int cpu = smp_processor_id();
416
417 prof_counter(cpu) = prof_multiplier(cpu) = 1;
418 load_profile_irq(cpu, lvl14_resolution);
419
420 if(cpu == boot_cpu_id)
421 enable_pil_irq(14);
422}
423
424void __init smp4m_blackbox_id(unsigned *addr)
425{
426 int rd = *addr & 0x3e000000;
427 int rs1 = rd >> 11;
428
429 addr[0] = 0x81580000 | rd; /* rd %tbr, reg */
430 addr[1] = 0x8130200c | rd | rs1; /* srl reg, 0xc, reg */
431 addr[2] = 0x80082003 | rd | rs1; /* and reg, 3, reg */
432}
433
434void __init smp4m_blackbox_current(unsigned *addr)
435{
436 int rd = *addr & 0x3e000000;
437 int rs1 = rd >> 11;
438
439 addr[0] = 0x81580000 | rd; /* rd %tbr, reg */
440 addr[2] = 0x8130200a | rd | rs1; /* srl reg, 0xa, reg */
441	addr[4] = 0x8008200c | rd | rs1;	/* and reg, 0xc, reg */
442}
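The two blackbox patchers above rewrite a BTFIXUP call site in place: `*addr & 0x3e000000` keeps only the destination-register field (bits 29..25) of the instruction being patched, and shifting that value right by 11 reuses the same register number in the rs1 field, so the emitted sequence reads %tbr and extracts the CPU id into whatever register the call site expected. A stand-alone sketch of the bit manipulation for a hypothetical call site whose destination register is %g1 (register 1); the instruction words are the ones built by smp4m_blackbox_id() above:

#include <stdio.h>

int main(void)
{
	/* Pretend the first word of the call site targets %g1 (rd field = 1). */
	unsigned int site = 1u << 25;
	unsigned int addr[3];

	int rd  = site & 0x3e000000;	/* destination register field, bits 29..25 */
	int rs1 = rd >> 11;		/* same register number in the rs1 field   */

	addr[0] = 0x81580000 | rd;		/* rd  %tbr, %g1     */
	addr[1] = 0x8130200c | rd | rs1;	/* srl %g1, 0xc, %g1 */
	addr[2] = 0x80082003 | rd | rs1;	/* and %g1, 3,   %g1 */

	/* Expected output: 83580000 8330600c 82086003 */
	printf("patched words: %08x %08x %08x\n", addr[0], addr[1], addr[2]);
	return 0;
}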
443
444void __init sun4m_init_smp(void)
445{
446 BTFIXUPSET_BLACKBOX(hard_smp_processor_id, smp4m_blackbox_id);
447 BTFIXUPSET_BLACKBOX(load_current, smp4m_blackbox_current);
448 BTFIXUPSET_CALL(smp_cross_call, smp4m_cross_call, BTFIXUPCALL_NORM);
449 BTFIXUPSET_CALL(smp_message_pass, smp4m_message_pass, BTFIXUPCALL_NORM);
450 BTFIXUPSET_CALL(__hard_smp_processor_id, __smp4m_processor_id, BTFIXUPCALL_NORM);
451}
diff --git a/arch/sparc/kernel/sun4setup.c b/arch/sparc/kernel/sun4setup.c
new file mode 100644
index 000000000000..229a52f55f16
--- /dev/null
+++ b/arch/sparc/kernel/sun4setup.c
@@ -0,0 +1,75 @@
1/* sun4setup.c: Setup the hardware address of various items in the sun4
2 * architecture. Called from idprom_init
3 *
4 * Copyright (C) 1998 Chris G. Davis (cdavis@cois.on.ca)
5 */
6
7#include <asm/page.h>
8#include <asm/oplib.h>
9#include <asm/idprom.h>
10#include <asm/sun4paddr.h>
11#include <asm/machines.h>
12
13int sun4_memreg_physaddr;
14int sun4_ie_physaddr;
15int sun4_clock_physaddr;
16int sun4_timer_physaddr;
17int sun4_eth_physaddr;
18int sun4_si_physaddr;
19int sun4_bwtwo_physaddr;
20int sun4_zs0_physaddr;
21int sun4_zs1_physaddr;
22int sun4_dma_physaddr;
23int sun4_esp_physaddr;
24int sun4_ie_physaddr;
25
26void __init sun4setup(void)
27{
28 printk("Sun4 Hardware Setup v1.0 18/May/98 Chris Davis (cdavis@cois.on.ca). ");
29 /*
30 setup standard sun4 info
31 */
32 sun4_ie_physaddr=SUN4_IE_PHYSADDR;
33
34 /*
35 setup model specific info
36 */
37 switch(idprom->id_machtype) {
38 case (SM_SUN4 | SM_4_260 ):
39 printk("Setup for a SUN4/260\n");
40 sun4_memreg_physaddr=SUN4_200_MEMREG_PHYSADDR;
41 sun4_clock_physaddr=SUN4_200_CLOCK_PHYSADDR;
42 sun4_timer_physaddr=SUN4_UNUSED_PHYSADDR;
43 sun4_eth_physaddr=SUN4_200_ETH_PHYSADDR;
44 sun4_si_physaddr=SUN4_200_SI_PHYSADDR;
45 sun4_bwtwo_physaddr=SUN4_200_BWTWO_PHYSADDR;
46 sun4_dma_physaddr=SUN4_UNUSED_PHYSADDR;
47 sun4_esp_physaddr=SUN4_UNUSED_PHYSADDR;
48 break;
49 case (SM_SUN4 | SM_4_330 ):
50 printk("Setup for a SUN4/330\n");
51 sun4_memreg_physaddr=SUN4_300_MEMREG_PHYSADDR;
52 sun4_clock_physaddr=SUN4_300_CLOCK_PHYSADDR;
53 sun4_timer_physaddr=SUN4_300_TIMER_PHYSADDR;
54 sun4_eth_physaddr=SUN4_300_ETH_PHYSADDR;
55 sun4_si_physaddr=SUN4_UNUSED_PHYSADDR;
56 sun4_bwtwo_physaddr=SUN4_300_BWTWO_PHYSADDR;
57 sun4_dma_physaddr=SUN4_300_DMA_PHYSADDR;
58 sun4_esp_physaddr=SUN4_300_ESP_PHYSADDR;
59 break;
60 case (SM_SUN4 | SM_4_470 ):
61 printk("Setup for a SUN4/470\n");
62 sun4_memreg_physaddr=SUN4_400_MEMREG_PHYSADDR;
63 sun4_clock_physaddr=SUN4_400_CLOCK_PHYSADDR;
64 sun4_timer_physaddr=SUN4_400_TIMER_PHYSADDR;
65 sun4_eth_physaddr=SUN4_400_ETH_PHYSADDR;
66 sun4_si_physaddr=SUN4_UNUSED_PHYSADDR;
67 sun4_bwtwo_physaddr=SUN4_400_BWTWO_PHYSADDR;
68 sun4_dma_physaddr=SUN4_400_DMA_PHYSADDR;
69 sun4_esp_physaddr=SUN4_400_ESP_PHYSADDR;
70 break;
71 default:
72 ;
73 }
74}
75
diff --git a/arch/sparc/kernel/sunos_asm.S b/arch/sparc/kernel/sunos_asm.S
new file mode 100644
index 000000000000..07fe86014fb5
--- /dev/null
+++ b/arch/sparc/kernel/sunos_asm.S
@@ -0,0 +1,67 @@
1/* $Id: sunos_asm.S,v 1.15 2000/01/11 17:33:21 jj Exp $
2 * sunos_asm.S: SunOS system calls which must have a low-level
3 * entry point to operate correctly.
4 *
5 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
6 *
7 * Based upon preliminary work which is:
8 *
9 * Copyright (C) 1995 Adrian M. Rodriguez (adrian@remus.rutgers.edu)
10 */
11
12#include <asm/ptrace.h>
13
14 .text
15 .align 4
16
17 /* When calling ret_sys_call, %o0 should contain the same
18 * value as in [%sp + STACKFRAME_SZ + PT_I0] */
19
20 /* SunOS getpid() returns pid in %o0 and ppid in %o1 */
21 .globl sunos_getpid
22sunos_getpid:
23 call sys_getppid
24 nop
25
26 call sys_getpid
27 st %o0, [%sp + STACKFRAME_SZ + PT_I1]
28
29 b ret_sys_call
30 st %o0, [%sp + STACKFRAME_SZ + PT_I0]
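A note on the ordering in sunos_getpid (the same trick is used by sunos_getuid and sunos_getgid below): on SPARC the instruction in a call's delay slot executes before control reaches the callee, so the `st %o0, [%sp + STACKFRAME_SZ + PT_I1]` paired with `call sys_getpid` stores the value sys_getppid just returned in %o0 into the pt_regs slot that becomes the user's %o1, before sys_getpid overwrites %o0. That is how the second return value (the ppid, euid or egid) reaches user space in %o1, as the SunOS convention described in the comments expects.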
31
32 /* SunOS getuid() returns uid in %o0 and euid in %o1 */
33 .globl sunos_getuid
34sunos_getuid:
35 call sys_geteuid16
36 nop
37
38 call sys_getuid16
39 st %o0, [%sp + STACKFRAME_SZ + PT_I1]
40
41 b ret_sys_call
42 st %o0, [%sp + STACKFRAME_SZ + PT_I0]
43
44 /* SunOS getgid() returns gid in %o0 and egid in %o1 */
45 .globl sunos_getgid
46sunos_getgid:
47 call sys_getegid16
48 nop
49
50 call sys_getgid16
51 st %o0, [%sp + STACKFRAME_SZ + PT_I1]
52
53 b ret_sys_call
54 st %o0, [%sp + STACKFRAME_SZ + PT_I0]
55
56	/* SunOS's execv() call only specifies the argv argument; the
57	 * environment settings are the same as the calling process's.
58	 */
59 .globl sunos_execv
60sunos_execv:
61 st %g0, [%sp + STACKFRAME_SZ + PT_I2]
62
63 call sparc_execve
64 add %sp, STACKFRAME_SZ, %o0
65
66 b ret_sys_call
67 ld [%sp + STACKFRAME_SZ + PT_I0], %o0
diff --git a/arch/sparc/kernel/sunos_ioctl.c b/arch/sparc/kernel/sunos_ioctl.c
new file mode 100644
index 000000000000..df1c0b31a930
--- /dev/null
+++ b/arch/sparc/kernel/sunos_ioctl.c
@@ -0,0 +1,231 @@
1/* $Id: sunos_ioctl.c,v 1.34 2000/09/03 14:10:56 anton Exp $
2 * sunos_ioctl.c: The Linux Operating system: SunOS ioctl compatibility.
3 *
4 * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
5 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
6 */
7
8#include <asm/uaccess.h>
9
10#include <linux/sched.h>
11#include <linux/errno.h>
12#include <linux/string.h>
13#include <linux/termios.h>
14#include <linux/ioctl.h>
15#include <linux/route.h>
16#include <linux/sockios.h>
17#include <linux/if.h>
18#include <linux/netdevice.h>
19#include <linux/if_arp.h>
20#include <linux/fs.h>
21#include <linux/mm.h>
22#include <linux/smp.h>
23#include <linux/smp_lock.h>
24#include <linux/syscalls.h>
25#include <linux/file.h>
26#include <asm/kbio.h>
27
28#if 0
29extern char sunkbd_type;
30extern char sunkbd_layout;
31#endif
32
33/* NR_OPEN is now larger and dynamic in recent kernels. */
34#define SUNOS_NR_OPEN 256
35
36asmlinkage int sunos_ioctl (int fd, unsigned long cmd, unsigned long arg)
37{
38 int ret = -EBADF;
39
40 if (fd >= SUNOS_NR_OPEN || !fcheck(fd))
41 goto out;
42
43 /* First handle an easy compat. case for tty ldisc. */
44 if (cmd == TIOCSETD) {
45 int __user *p;
46 int ntty = N_TTY, tmp;
47 mm_segment_t oldfs;
48
49 p = (int __user *) arg;
50 ret = -EFAULT;
51 if (get_user(tmp, p))
52 goto out;
53 if (tmp == 2) {
54 oldfs = get_fs();
55 set_fs(KERNEL_DS);
56 ret = sys_ioctl(fd, cmd, (unsigned long) &ntty);
57 set_fs(oldfs);
58 ret = (ret == -EINVAL ? -EOPNOTSUPP : ret);
59 goto out;
60 }
61 }
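The TIOCSETD branch above is the classic get_fs()/set_fs(KERNEL_DS) pattern: sys_ioctl() expects a user-space pointer and runs it through the usual access checks, so the address limit is temporarily widened to let the kernel-stack variable ntty through, the SunOS line-discipline value 2 (presumably the old NTTYDISC) is thereby remapped to Linux's N_TTY, and the original limit is restored right afterwards.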
62
63 /* Binary compatibility is good American knowhow fuckin' up. */
64 if (cmd == TIOCNOTTY) {
65 ret = sys_setsid();
66 goto out;
67 }
68
69 /* SunOS networking ioctls. */
70 switch (cmd) {
71 case _IOW('r', 10, struct rtentry):
72 ret = sys_ioctl(fd, SIOCADDRT, arg);
73 goto out;
74 case _IOW('r', 11, struct rtentry):
75 ret = sys_ioctl(fd, SIOCDELRT, arg);
76 goto out;
77 case _IOW('i', 12, struct ifreq):
78 ret = sys_ioctl(fd, SIOCSIFADDR, arg);
79 goto out;
80 case _IOWR('i', 13, struct ifreq):
81 ret = sys_ioctl(fd, SIOCGIFADDR, arg);
82 goto out;
83 case _IOW('i', 14, struct ifreq):
84 ret = sys_ioctl(fd, SIOCSIFDSTADDR, arg);
85 goto out;
86 case _IOWR('i', 15, struct ifreq):
87 ret = sys_ioctl(fd, SIOCGIFDSTADDR, arg);
88 goto out;
89 case _IOW('i', 16, struct ifreq):
90 ret = sys_ioctl(fd, SIOCSIFFLAGS, arg);
91 goto out;
92 case _IOWR('i', 17, struct ifreq):
93 ret = sys_ioctl(fd, SIOCGIFFLAGS, arg);
94 goto out;
95 case _IOW('i', 18, struct ifreq):
96 ret = sys_ioctl(fd, SIOCSIFMEM, arg);
97 goto out;
98 case _IOWR('i', 19, struct ifreq):
99 ret = sys_ioctl(fd, SIOCGIFMEM, arg);
100 goto out;
101 case _IOWR('i', 20, struct ifconf):
102 ret = sys_ioctl(fd, SIOCGIFCONF, arg);
103 goto out;
104 case _IOW('i', 21, struct ifreq): /* SIOCSIFMTU */
105 ret = sys_ioctl(fd, SIOCSIFMTU, arg);
106 goto out;
107 case _IOWR('i', 22, struct ifreq): /* SIOCGIFMTU */
108 ret = sys_ioctl(fd, SIOCGIFMTU, arg);
109 goto out;
110
111 case _IOWR('i', 23, struct ifreq):
112 ret = sys_ioctl(fd, SIOCGIFBRDADDR, arg);
113 goto out;
114 case _IOW('i', 24, struct ifreq):
115 ret = sys_ioctl(fd, SIOCSIFBRDADDR, arg);
116 goto out;
117 case _IOWR('i', 25, struct ifreq):
118 ret = sys_ioctl(fd, SIOCGIFNETMASK, arg);
119 goto out;
120 case _IOW('i', 26, struct ifreq):
121 ret = sys_ioctl(fd, SIOCSIFNETMASK, arg);
122 goto out;
123 case _IOWR('i', 27, struct ifreq):
124 ret = sys_ioctl(fd, SIOCGIFMETRIC, arg);
125 goto out;
126 case _IOW('i', 28, struct ifreq):
127 ret = sys_ioctl(fd, SIOCSIFMETRIC, arg);
128 goto out;
129
130 case _IOW('i', 30, struct arpreq):
131 ret = sys_ioctl(fd, SIOCSARP, arg);
132 goto out;
133 case _IOWR('i', 31, struct arpreq):
134 ret = sys_ioctl(fd, SIOCGARP, arg);
135 goto out;
136 case _IOW('i', 32, struct arpreq):
137 ret = sys_ioctl(fd, SIOCDARP, arg);
138 goto out;
139
140 case _IOW('i', 40, struct ifreq): /* SIOCUPPER */
141 case _IOW('i', 41, struct ifreq): /* SIOCLOWER */
142 case _IOW('i', 44, struct ifreq): /* SIOCSETSYNC */
143 case _IOW('i', 45, struct ifreq): /* SIOCGETSYNC */
144 case _IOW('i', 46, struct ifreq): /* SIOCSSDSTATS */
145 case _IOW('i', 47, struct ifreq): /* SIOCSSESTATS */
146 case _IOW('i', 48, struct ifreq): /* SIOCSPROMISC */
147 ret = -EOPNOTSUPP;
148 goto out;
149
150 case _IOW('i', 49, struct ifreq):
151 ret = sys_ioctl(fd, SIOCADDMULTI, arg);
152 goto out;
153 case _IOW('i', 50, struct ifreq):
154 ret = sys_ioctl(fd, SIOCDELMULTI, arg);
155 goto out;
156
157 /* FDDI interface ioctls, unsupported. */
158
159 case _IOW('i', 51, struct ifreq): /* SIOCFDRESET */
160 case _IOW('i', 52, struct ifreq): /* SIOCFDSLEEP */
161 case _IOW('i', 53, struct ifreq): /* SIOCSTRTFMWAR */
162 case _IOW('i', 54, struct ifreq): /* SIOCLDNSTRTFW */
163 case _IOW('i', 55, struct ifreq): /* SIOCGETFDSTAT */
164 case _IOW('i', 56, struct ifreq): /* SIOCFDNMIINT */
165 case _IOW('i', 57, struct ifreq): /* SIOCFDEXUSER */
166 case _IOW('i', 58, struct ifreq): /* SIOCFDGNETMAP */
167 case _IOW('i', 59, struct ifreq): /* SIOCFDGIOCTL */
168 printk("FDDI ioctl, returning EOPNOTSUPP\n");
169 ret = -EOPNOTSUPP;
170 goto out;
171
172 case _IOW('t', 125, int):
173 /* More stupid tty sunos ioctls, just
174 * say it worked.
175 */
176 ret = 0;
177 goto out;
178 /* Non posix grp */
179 case _IOW('t', 118, int): {
180 int oldval, newval, __user *ptr;
181
182 cmd = TIOCSPGRP;
183 ptr = (int __user *) arg;
184 ret = -EFAULT;
185 if (get_user(oldval, ptr))
186 goto out;
187 ret = sys_ioctl(fd, cmd, arg);
188 __get_user(newval, ptr);
189 if (newval == -1) {
190 __put_user(oldval, ptr);
191 ret = -EIO;
192 }
193 if (ret == -ENOTTY)
194 ret = -EIO;
195 goto out;
196 }
197
198 case _IOR('t', 119, int): {
199 int oldval, newval, __user *ptr;
200
201 cmd = TIOCGPGRP;
202 ptr = (int __user *) arg;
203 ret = -EFAULT;
204 if (get_user(oldval, ptr))
205 goto out;
206 ret = sys_ioctl(fd, cmd, arg);
207 __get_user(newval, ptr);
208 if (newval == -1) {
209 __put_user(oldval, ptr);
210 ret = -EIO;
211 }
212 if (ret == -ENOTTY)
213 ret = -EIO;
214 goto out;
215 }
216 }
217
218#if 0
219 if ((cmd & 0xff00) == ('k' << 8)) {
220 printk ("[[KBIO: %8.8x\n", (unsigned int) cmd);
221 }
222#endif
223
224 ret = sys_ioctl(fd, cmd, arg);
225 /* so stupid... */
226 ret = (ret == -EINVAL ? -EOPNOTSUPP : ret);
227out:
228 return ret;
229}
230
231
diff --git a/arch/sparc/kernel/sys_solaris.c b/arch/sparc/kernel/sys_solaris.c
new file mode 100644
index 000000000000..fb7578554c78
--- /dev/null
+++ b/arch/sparc/kernel/sys_solaris.c
@@ -0,0 +1,37 @@
1/*
2 * linux/arch/sparc/sys_solaris.c
3 *
4 * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
5 */
6
7#include <linux/config.h>
8#include <linux/sched.h>
9#include <linux/kernel.h>
10#include <linux/string.h>
11#include <linux/errno.h>
12#include <linux/personality.h>
13#include <linux/ptrace.h>
14#include <linux/mm.h>
15#include <linux/smp.h>
16#include <linux/smp_lock.h>
17#include <linux/module.h>
18
19asmlinkage int
20do_solaris_syscall (struct pt_regs *regs)
21{
22 static int cnt = 0;
23 if (++cnt < 10) printk ("No solaris handler\n");
24 force_sig(SIGSEGV, current);
25 return 0;
26}
27
28#ifndef CONFIG_SUNOS_EMUL
29asmlinkage int
30do_sunos_syscall (struct pt_regs *regs)
31{
32 static int cnt = 0;
33 if (++cnt < 10) printk ("SunOS binary emulation not compiled in\n");
34 force_sig (SIGSEGV, current);
35 return 0;
36}
37#endif
diff --git a/arch/sparc/kernel/sys_sparc.c b/arch/sparc/kernel/sys_sparc.c
new file mode 100644
index 000000000000..0cdfc9d294b4
--- /dev/null
+++ b/arch/sparc/kernel/sys_sparc.c
@@ -0,0 +1,485 @@
1/* $Id: sys_sparc.c,v 1.70 2001/04/14 01:12:02 davem Exp $
2 * linux/arch/sparc/kernel/sys_sparc.c
3 *
4 * This file contains various random system calls that
5 * have a non-standard calling sequence on the Linux/sparc
6 * platform.
7 */
8
9#include <linux/errno.h>
10#include <linux/types.h>
11#include <linux/sched.h>
12#include <linux/mm.h>
13#include <linux/fs.h>
14#include <linux/file.h>
15#include <linux/sem.h>
16#include <linux/msg.h>
17#include <linux/shm.h>
18#include <linux/stat.h>
19#include <linux/syscalls.h>
20#include <linux/mman.h>
21#include <linux/utsname.h>
22#include <linux/smp.h>
23#include <linux/smp_lock.h>
24
25#include <asm/uaccess.h>
26#include <asm/ipc.h>
27
28/* #define DEBUG_UNIMP_SYSCALL */
29
30/* XXX Make this per-binary type, this way we can detect the type of
31 * XXX a binary. Every Sparc executable calls this very early on.
32 */
33asmlinkage unsigned long sys_getpagesize(void)
34{
35 return PAGE_SIZE; /* Possibly older binaries want 8192 on sun4's? */
36}
37
38#define COLOUR_ALIGN(addr) (((addr)+SHMLBA-1)&~(SHMLBA-1))
39
40unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
41{
42 struct vm_area_struct * vmm;
43
44 if (flags & MAP_FIXED) {
45 /* We do not accept a shared mapping if it would violate
46 * cache aliasing constraints.
47 */
48 if ((flags & MAP_SHARED) && (addr & (SHMLBA - 1)))
49 return -EINVAL;
50 return addr;
51 }
52
53 /* See asm-sparc/uaccess.h */
54 if (len > TASK_SIZE - PAGE_SIZE)
55 return -ENOMEM;
56 if (ARCH_SUN4C_SUN4 && len > 0x20000000)
57 return -ENOMEM;
58 if (!addr)
59 addr = TASK_UNMAPPED_BASE;
60
61 if (flags & MAP_SHARED)
62 addr = COLOUR_ALIGN(addr);
63 else
64 addr = PAGE_ALIGN(addr);
65
66 for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
67 /* At this point: (!vmm || addr < vmm->vm_end). */
68 if (ARCH_SUN4C_SUN4 && addr < 0xe0000000 && 0x20000000 - len < addr) {
69 addr = PAGE_OFFSET;
70 vmm = find_vma(current->mm, PAGE_OFFSET);
71 }
72 if (TASK_SIZE - PAGE_SIZE - len < addr)
73 return -ENOMEM;
74 if (!vmm || addr + len <= vmm->vm_start)
75 return addr;
76 addr = vmm->vm_end;
77 if (flags & MAP_SHARED)
78 addr = COLOUR_ALIGN(addr);
79 }
80}
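COLOUR_ALIGN() rounds an address up to the next SHMLBA boundary so that shared mappings of the same object get the same cache colour in every address space, which is what the cache-aliasing check at the top of arch_get_unmapped_area() is protecting. A quick stand-alone check of the rounding, using an SHMLBA of 256 KB picked only for the example (the real value comes from asm/shmparam.h):

#include <stdio.h>

#define SHMLBA 0x40000UL	/* illustrative value; the real one is per-arch */
#define COLOUR_ALIGN(addr) (((addr) + SHMLBA - 1) & ~(SHMLBA - 1))

int main(void)
{
	unsigned long addrs[] = { 0x50000000UL, 0x50001000UL, 0x5007ffffUL };

	/* 0x50000000 stays put; the other two round up to 0x50040000/0x50080000. */
	for (int i = 0; i < 3; i++)
		printf("0x%08lx -> 0x%08lx\n", addrs[i], COLOUR_ALIGN(addrs[i]));
	return 0;
}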
81
82asmlinkage unsigned long sparc_brk(unsigned long brk)
83{
84 if(ARCH_SUN4C_SUN4) {
85 if ((brk & 0xe0000000) != (current->mm->brk & 0xe0000000))
86 return current->mm->brk;
87 }
88 return sys_brk(brk);
89}
90
91/*
92 * sys_pipe() is the normal C calling standard for creating
93 * a pipe. It's not the way unix traditionally does this, though.
94 */
95asmlinkage int sparc_pipe(struct pt_regs *regs)
96{
97 int fd[2];
98 int error;
99
100 error = do_pipe(fd);
101 if (error)
102 goto out;
103 regs->u_regs[UREG_I1] = fd[1];
104 error = fd[0];
105out:
106 return error;
107}
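sparc_pipe() shows the SPARC convention for returning two values from a system call: fd[0] comes back as the ordinary return value in %o0, while fd[1] is written into the saved register image (u_regs[UREG_I1]) so it appears in the caller's %o1 when the trap returns; the user-side pipe() stub is then expected to store both registers into the array it was handed.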
108
109/*
110 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
111 *
112 * This is really horribly ugly.
113 */
114
115asmlinkage int sys_ipc (uint call, int first, int second, int third, void __user *ptr, long fifth)
116{
117 int version, err;
118
119 version = call >> 16; /* hack for backward compatibility */
120 call &= 0xffff;
121
122 if (call <= SEMCTL)
123 switch (call) {
124 case SEMOP:
125 err = sys_semtimedop (first, (struct sembuf __user *)ptr, second, NULL);
126 goto out;
127 case SEMTIMEDOP:
128 err = sys_semtimedop (first, (struct sembuf __user *)ptr, second, (const struct timespec __user *) fifth);
129 goto out;
130 case SEMGET:
131 err = sys_semget (first, second, third);
132 goto out;
133 case SEMCTL: {
134 union semun fourth;
135 err = -EINVAL;
136 if (!ptr)
137 goto out;
138 err = -EFAULT;
139 if (get_user(fourth.__pad,
140 (void __user * __user *)ptr))
141 goto out;
142 err = sys_semctl (first, second, third, fourth);
143 goto out;
144 }
145 default:
146 err = -ENOSYS;
147 goto out;
148 }
149 if (call <= MSGCTL)
150 switch (call) {
151 case MSGSND:
152 err = sys_msgsnd (first, (struct msgbuf __user *) ptr,
153 second, third);
154 goto out;
155 case MSGRCV:
156 switch (version) {
157 case 0: {
158 struct ipc_kludge tmp;
159 err = -EINVAL;
160 if (!ptr)
161 goto out;
162 err = -EFAULT;
163 if (copy_from_user(&tmp, (struct ipc_kludge __user *) ptr, sizeof (tmp)))
164 goto out;
165 err = sys_msgrcv (first, tmp.msgp, second, tmp.msgtyp, third);
166 goto out;
167 }
168 case 1: default:
169 err = sys_msgrcv (first,
170 (struct msgbuf __user *) ptr,
171 second, fifth, third);
172 goto out;
173 }
174 case MSGGET:
175 err = sys_msgget ((key_t) first, second);
176 goto out;
177 case MSGCTL:
178 err = sys_msgctl (first, second, (struct msqid_ds __user *) ptr);
179 goto out;
180 default:
181 err = -ENOSYS;
182 goto out;
183 }
184 if (call <= SHMCTL)
185 switch (call) {
186 case SHMAT:
187 switch (version) {
188 case 0: default: {
189 ulong raddr;
190 err = do_shmat (first, (char __user *) ptr, second, &raddr);
191 if (err)
192 goto out;
193 err = -EFAULT;
194 if (put_user (raddr, (ulong __user *) third))
195 goto out;
196 err = 0;
197 goto out;
198 }
199 case 1: /* iBCS2 emulator entry point */
200 err = -EINVAL;
201 goto out;
202 }
203 case SHMDT:
204 err = sys_shmdt ((char __user *)ptr);
205 goto out;
206 case SHMGET:
207 err = sys_shmget (first, second, third);
208 goto out;
209 case SHMCTL:
210 err = sys_shmctl (first, second, (struct shmid_ds __user *) ptr);
211 goto out;
212 default:
213 err = -ENOSYS;
214 goto out;
215 }
216 else
217 err = -ENOSYS;
218out:
219 return err;
220}
221
222/* Linux version of mmap */
223static unsigned long do_mmap2(unsigned long addr, unsigned long len,
224 unsigned long prot, unsigned long flags, unsigned long fd,
225 unsigned long pgoff)
226{
227 struct file * file = NULL;
228 unsigned long retval = -EBADF;
229
230 if (!(flags & MAP_ANONYMOUS)) {
231 file = fget(fd);
232 if (!file)
233 goto out;
234 }
235
236 retval = -EINVAL;
237 len = PAGE_ALIGN(len);
238 if (ARCH_SUN4C_SUN4 &&
239 (len > 0x20000000 ||
240 ((flags & MAP_FIXED) &&
241 addr < 0xe0000000 && addr + len > 0x20000000)))
242 goto out_putf;
243
244 /* See asm-sparc/uaccess.h */
245 if (len > TASK_SIZE - PAGE_SIZE || addr + len > TASK_SIZE - PAGE_SIZE)
246 goto out_putf;
247
248 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
249
250 down_write(&current->mm->mmap_sem);
251 retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
252 up_write(&current->mm->mmap_sem);
253
254out_putf:
255 if (file)
256 fput(file);
257out:
258 return retval;
259}
260
261asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len,
262 unsigned long prot, unsigned long flags, unsigned long fd,
263 unsigned long pgoff)
264{
265 /* Make sure the shift for mmap2 is constant (12), no matter what PAGE_SIZE
266 we have. */
267 return do_mmap2(addr, len, prot, flags, fd, pgoff >> (PAGE_SHIFT - 12));
268}
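The fixed shift of 12 in sys_mmap2() keeps the unit of the user-supplied pgoff at 4 KB no matter what page size the kernel was built with; do_mmap2() then converts it into PAGE_SIZE units with `pgoff >> (PAGE_SHIFT - 12)`. A small stand-alone check of that conversion, assuming 8 KB pages (PAGE_SHIFT = 13, as on sun4) purely for illustration:

#include <stdio.h>

#define PAGE_SHIFT 13				/* assumed: 8 KB pages, sun4-style */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
	unsigned long offset  = 0x6000;			 /* byte offset requested   */
	unsigned long pgoff12 = offset >> 12;		 /* what mmap2 callers pass */
	unsigned long pgoff   = pgoff12 >> (PAGE_SHIFT - 12);  /* kernel units     */

	printf("offset 0x%lx -> mmap2 pgoff %lu -> kernel pgoff %lu (x %lu bytes = 0x%lx)\n",
	       offset, pgoff12, pgoff, PAGE_SIZE, pgoff * PAGE_SIZE);
	return 0;
}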
269
270asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
271 unsigned long prot, unsigned long flags, unsigned long fd,
272 unsigned long off)
273{
274 return do_mmap2(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
275}
276
277long sparc_remap_file_pages(unsigned long start, unsigned long size,
278 unsigned long prot, unsigned long pgoff,
279 unsigned long flags)
280{
281 /* This works on an existing mmap so we don't need to validate
282 * the range as that was done at the original mmap call.
283 */
284 return sys_remap_file_pages(start, size, prot,
285 (pgoff >> (PAGE_SHIFT - 12)), flags);
286}
287
288extern unsigned long do_mremap(unsigned long addr,
289 unsigned long old_len, unsigned long new_len,
290 unsigned long flags, unsigned long new_addr);
291
292asmlinkage unsigned long sparc_mremap(unsigned long addr,
293 unsigned long old_len, unsigned long new_len,
294 unsigned long flags, unsigned long new_addr)
295{
296 struct vm_area_struct *vma;
297 unsigned long ret = -EINVAL;
298 if (ARCH_SUN4C_SUN4) {
299 if (old_len > 0x20000000 || new_len > 0x20000000)
300 goto out;
301 if (addr < 0xe0000000 && addr + old_len > 0x20000000)
302 goto out;
303 }
304 if (old_len > TASK_SIZE - PAGE_SIZE ||
305 new_len > TASK_SIZE - PAGE_SIZE)
306 goto out;
307 down_write(&current->mm->mmap_sem);
308 if (flags & MREMAP_FIXED) {
309 if (ARCH_SUN4C_SUN4 &&
310 new_addr < 0xe0000000 &&
311 new_addr + new_len > 0x20000000)
312 goto out_sem;
313 if (new_addr + new_len > TASK_SIZE - PAGE_SIZE)
314 goto out_sem;
315 } else if ((ARCH_SUN4C_SUN4 && addr < 0xe0000000 &&
316 addr + new_len > 0x20000000) ||
317 addr + new_len > TASK_SIZE - PAGE_SIZE) {
318 unsigned long map_flags = 0;
319 struct file *file = NULL;
320
321 ret = -ENOMEM;
322 if (!(flags & MREMAP_MAYMOVE))
323 goto out_sem;
324
325 vma = find_vma(current->mm, addr);
326 if (vma) {
327 if (vma->vm_flags & VM_SHARED)
328 map_flags |= MAP_SHARED;
329 file = vma->vm_file;
330 }
331
332 new_addr = get_unmapped_area(file, addr, new_len,
333 vma ? vma->vm_pgoff : 0,
334 map_flags);
335 ret = new_addr;
336 if (new_addr & ~PAGE_MASK)
337 goto out_sem;
338 flags |= MREMAP_FIXED;
339 }
340 ret = do_mremap(addr, old_len, new_len, flags, new_addr);
341out_sem:
342 up_write(&current->mm->mmap_sem);
343out:
344 return ret;
345}
346
347/* we come to here via sys_nis_syscall so it can setup the regs argument */
348asmlinkage unsigned long
349c_sys_nis_syscall (struct pt_regs *regs)
350{
351 static int count = 0;
352
353 if (count++ > 5)
354 return -ENOSYS;
355 printk ("%s[%d]: Unimplemented SPARC system call %d\n",
356 current->comm, current->pid, (int)regs->u_regs[1]);
357#ifdef DEBUG_UNIMP_SYSCALL
358 show_regs (regs);
359#endif
360 return -ENOSYS;
361}
362
363/* #define DEBUG_SPARC_BREAKPOINT */
364
365asmlinkage void
366sparc_breakpoint (struct pt_regs *regs)
367{
368 siginfo_t info;
369
370 lock_kernel();
371#ifdef DEBUG_SPARC_BREAKPOINT
372 printk ("TRAP: Entering kernel PC=%x, nPC=%x\n", regs->pc, regs->npc);
373#endif
374 info.si_signo = SIGTRAP;
375 info.si_errno = 0;
376 info.si_code = TRAP_BRKPT;
377 info.si_addr = (void __user *)regs->pc;
378 info.si_trapno = 0;
379 force_sig_info(SIGTRAP, &info, current);
380
381#ifdef DEBUG_SPARC_BREAKPOINT
382 printk ("TRAP: Returning to space: PC=%x nPC=%x\n", regs->pc, regs->npc);
383#endif
384 unlock_kernel();
385}
386
387asmlinkage int
388sparc_sigaction (int sig, const struct old_sigaction __user *act,
389 struct old_sigaction __user *oact)
390{
391 struct k_sigaction new_ka, old_ka;
392 int ret;
393
394 if (sig < 0) {
395 current->thread.new_signal = 1;
396 sig = -sig;
397 }
398
399 if (act) {
400 unsigned long mask;
401
402 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
403 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
404 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
405 return -EFAULT;
406 __get_user(new_ka.sa.sa_flags, &act->sa_flags);
407 __get_user(mask, &act->sa_mask);
408 siginitset(&new_ka.sa.sa_mask, mask);
409 new_ka.ka_restorer = NULL;
410 }
411
412 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
413
414 if (!ret && oact) {
415 /* In the clone() case we could copy half consistent
416 * state to the user, however this could sleep and
417 * deadlock us if we held the signal lock on SMP. So for
418 * now I take the easy way out and do no locking.
419 */
420 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
421 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
422 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
423 return -EFAULT;
424 __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
425 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
426 }
427
428 return ret;
429}
430
431asmlinkage long
432sys_rt_sigaction(int sig,
433 const struct sigaction __user *act,
434 struct sigaction __user *oact,
435 void __user *restorer,
436 size_t sigsetsize)
437{
438 struct k_sigaction new_ka, old_ka;
439 int ret;
440
441 /* XXX: Don't preclude handling different sized sigset_t's. */
442 if (sigsetsize != sizeof(sigset_t))
443 return -EINVAL;
444
445 /* All tasks which use RT signals (effectively) use
446 * new style signals.
447 */
448 current->thread.new_signal = 1;
449
450 if (act) {
451 new_ka.ka_restorer = restorer;
452 if (copy_from_user(&new_ka.sa, act, sizeof(*act)))
453 return -EFAULT;
454 }
455
456 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
457
458 if (!ret && oact) {
459 if (copy_to_user(oact, &old_ka.sa, sizeof(*oact)))
460 return -EFAULT;
461 }
462
463 return ret;
464}
465
466asmlinkage int sys_getdomainname(char __user *name, int len)
467{
468 int nlen;
469 int err = -EFAULT;
470
471 down_read(&uts_sem);
472
473 nlen = strlen(system_utsname.domainname) + 1;
474
475 if (nlen < len)
476 len = nlen;
477 if (len > __NEW_UTS_LEN)
478 goto done;
479 if (copy_to_user(name, system_utsname.domainname, len))
480 goto done;
481 err = 0;
482done:
483 up_read(&uts_sem);
484 return err;
485}
diff --git a/arch/sparc/kernel/sys_sunos.c b/arch/sparc/kernel/sys_sunos.c
new file mode 100644
index 000000000000..81c894acd0db
--- /dev/null
+++ b/arch/sparc/kernel/sys_sunos.c
@@ -0,0 +1,1194 @@
1/* $Id: sys_sunos.c,v 1.137 2002/02/08 03:57:14 davem Exp $
2 * sys_sunos.c: SunOS specific syscall compatibility support.
3 *
4 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
6 *
7 * Based upon preliminary work which is:
8 *
9 * Copyright (C) 1995 Adrian M. Rodriguez (adrian@remus.rutgers.edu)
10 *
11 */
12
13#include <linux/kernel.h>
14#include <linux/sched.h>
15#include <linux/types.h>
16#include <linux/mman.h>
17#include <linux/mm.h>
18#include <linux/swap.h>
19#include <linux/fs.h>
20#include <linux/file.h>
21#include <linux/resource.h>
22#include <linux/ipc.h>
23#include <linux/shm.h>
24#include <linux/msg.h>
25#include <linux/sem.h>
26#include <linux/signal.h>
27#include <linux/uio.h>
28#include <linux/utsname.h>
29#include <linux/major.h>
30#include <linux/stat.h>
31#include <linux/slab.h>
32#include <linux/pagemap.h>
33#include <linux/errno.h>
34#include <linux/smp.h>
35#include <linux/smp_lock.h>
36#include <linux/syscalls.h>
37
38#include <net/sock.h>
39
40#include <asm/uaccess.h>
41#ifndef KERNEL_DS
42#include <linux/segment.h>
43#endif
44
45#include <asm/page.h>
46#include <asm/pgtable.h>
47#include <asm/pconf.h>
48#include <asm/idprom.h> /* for gethostid() */
49#include <asm/unistd.h>
50#include <asm/system.h>
51
52/* For the nfs mount emulation */
53#include <linux/socket.h>
54#include <linux/in.h>
55#include <linux/nfs.h>
56#include <linux/nfs2.h>
57#include <linux/nfs_mount.h>
58
59/* for sunos_select */
60#include <linux/time.h>
61#include <linux/personality.h>
62
63/* NR_OPEN is now larger and dynamic in recent kernels. */
64#define SUNOS_NR_OPEN 256
65
66/* We use the SunOS mmap() semantics. */
67asmlinkage unsigned long sunos_mmap(unsigned long addr, unsigned long len,
68 unsigned long prot, unsigned long flags,
69 unsigned long fd, unsigned long off)
70{
71 struct file * file = NULL;
72 unsigned long retval, ret_type;
73
74 if (flags & MAP_NORESERVE) {
75 static int cnt;
76 if (cnt++ < 10)
77 printk("%s: unimplemented SunOS MAP_NORESERVE mmap() flag\n",
78 current->comm);
79 flags &= ~MAP_NORESERVE;
80 }
81 retval = -EBADF;
82 if (!(flags & MAP_ANONYMOUS)) {
83 if (fd >= SUNOS_NR_OPEN)
84 goto out;
85 file = fget(fd);
86 if (!file)
87 goto out;
88 }
89
90 retval = -EINVAL;
91 /* If this is ld.so or a shared library doing an mmap
92 * of /dev/zero, transform it into an anonymous mapping.
93 * SunOS is so stupid sometimes... hmph!
94 */
95 if (file) {
96 if (imajor(file->f_dentry->d_inode) == MEM_MAJOR &&
97 iminor(file->f_dentry->d_inode) == 5) {
98 flags |= MAP_ANONYMOUS;
99 fput(file);
100 file = NULL;
101 }
102 }
103 ret_type = flags & _MAP_NEW;
104 flags &= ~_MAP_NEW;
105
106 if (!(flags & MAP_FIXED))
107 addr = 0;
108 else {
109 if (ARCH_SUN4C_SUN4 &&
110 (len > 0x20000000 ||
111 ((flags & MAP_FIXED) &&
112 addr < 0xe0000000 && addr + len > 0x20000000)))
113 goto out_putf;
114
115 /* See asm-sparc/uaccess.h */
116 if (len > TASK_SIZE - PAGE_SIZE ||
117 addr + len > TASK_SIZE - PAGE_SIZE)
118 goto out_putf;
119 }
120
121 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
122 down_write(&current->mm->mmap_sem);
123 retval = do_mmap(file, addr, len, prot, flags, off);
124 up_write(&current->mm->mmap_sem);
125 if (!ret_type)
126 retval = ((retval < PAGE_OFFSET) ? 0 : retval);
127
128out_putf:
129 if (file)
130 fput(file);
131out:
132 return retval;
133}
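
The /dev/zero rewrite above relies on the fact that a private mapping of /dev/zero and an anonymous private mapping both hand back zero-filled pages. A small stand-alone sketch of that equivalence, using only the ordinary user-space mmap() interface:

/* Illustrative only: the two requests the code above treats as equivalent. */
#define _DEFAULT_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        size_t len = 4096;

        int fd = open("/dev/zero", O_RDWR);
        char *via_devzero = mmap(NULL, len, PROT_READ | PROT_WRITE,
                                 MAP_PRIVATE, fd, 0);
        char *via_anon = mmap(NULL, len, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        /* Both mappings start out zero-filled and private to this process. */
        printf("devzero[0]=%d anon[0]=%d\n", via_devzero[0], via_anon[0]);

        munmap(via_devzero, len);
        munmap(via_anon, len);
        close(fd);
        return 0;
}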
134
135/* lmbench calls this, just say "yeah, ok" */
136asmlinkage int sunos_mctl(unsigned long addr, unsigned long len, int function, char *arg)
137{
138 return 0;
139}
140
141/* SunOS is completely broken... it returns 0 on success, otherwise
142 * ENOMEM. For sys_sbrk() it wants the old brk value as a return
143 * on success and ENOMEM as before on failure.
144 */
145asmlinkage int sunos_brk(unsigned long brk)
146{
147 int freepages, retval = -ENOMEM;
148 unsigned long rlim;
149 unsigned long newbrk, oldbrk;
150
151 down_write(&current->mm->mmap_sem);
152 if (ARCH_SUN4C_SUN4) {
153 if (brk >= 0x20000000 && brk < 0xe0000000) {
154 goto out;
155 }
156 }
157
158 if (brk < current->mm->end_code)
159 goto out;
160
161 newbrk = PAGE_ALIGN(brk);
162 oldbrk = PAGE_ALIGN(current->mm->brk);
163 retval = 0;
164 if (oldbrk == newbrk) {
165 current->mm->brk = brk;
166 goto out;
167 }
168
169 /*
170 * Always allow shrinking brk
171 */
172 if (brk <= current->mm->brk) {
173 current->mm->brk = brk;
174 do_munmap(current->mm, newbrk, oldbrk-newbrk);
175 goto out;
176 }
177 /*
178 * Check against rlimit and stack..
179 */
180 retval = -ENOMEM;
181 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
182 if (rlim >= RLIM_INFINITY)
183 rlim = ~0;
184 if (brk - current->mm->end_code > rlim)
185 goto out;
186
187 /*
188 * Check against existing mmap mappings.
189 */
190 if (find_vma_intersection(current->mm, oldbrk, newbrk+PAGE_SIZE))
191 goto out;
192
193 /*
194 * Stupid algorithm to decide if we have enough memory: while
195 * simple, it hopefully works in most obvious cases. It is easy to
196 * fool, but it should catch most mistakes.
197 */
198 freepages = get_page_cache_size();
199 freepages >>= 1;
200 freepages += nr_free_pages();
201 freepages += nr_swap_pages;
202 freepages -= num_physpages >> 4;
203 freepages -= (newbrk-oldbrk) >> PAGE_SHIFT;
204 if (freepages < 0)
205 goto out;
206 /*
207 * Ok, we have probably got enough memory - let it rip.
208 */
209 current->mm->brk = brk;
210 do_brk(oldbrk, newbrk-oldbrk);
211 retval = 0;
212out:
213 up_write(&current->mm->mmap_sem);
214 return retval;
215}
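
The free-memory heuristic above is easiest to see with concrete numbers. A user-space sketch of the same arithmetic, where every page count below is a hypothetical figure chosen only to make the calculation visible:

/* Illustrative only: the brk "enough memory" heuristic above, with
 * hypothetical page counts plugged in so the arithmetic is visible.
 */
#include <stdio.h>

int main(void)
{
        long page_cache   = 20000;   /* hypothetical page cache size           */
        long free_pages   = 5000;    /* hypothetical free page count           */
        long swap_pages   = 30000;   /* hypothetical free swap pages           */
        long phys_pages   = 65536;   /* hypothetical num_physpages (256MB/4K)  */
        long growth_pages = 2048;    /* pages requested: newbrk - oldbrk       */

        long freepages = page_cache / 2;
        freepages += free_pages;
        freepages += swap_pages;
        freepages -= phys_pages >> 4;
        freepages -= growth_pages;

        printf("heuristic result: %ld (%s)\n", freepages,
               freepages < 0 ? "refuse with ENOMEM" : "allow the brk");
        return 0;
}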
216
217asmlinkage unsigned long sunos_sbrk(int increment)
218{
219 int error;
220 unsigned long oldbrk;
221
222 /* This should do it hopefully... */
223 lock_kernel();
224 oldbrk = current->mm->brk;
225 error = sunos_brk(((int) current->mm->brk) + increment);
226 if (!error)
227 error = oldbrk;
228 unlock_kernel();
229 return error;
230}
231
232/* XXX Completely undocumented, and completely magic...
233 * XXX I believe it is to increase the size of the stack by
234 * XXX argument 'increment' and return the new end of stack
235 * XXX area. Wheee...
236 */
237asmlinkage unsigned long sunos_sstk(int increment)
238{
239 lock_kernel();
240 printk("%s: Call to sunos_sstk(increment<%d>) is unsupported\n",
241 current->comm, increment);
242 unlock_kernel();
243 return -1;
244}
245
246/* Give hints to the kernel as to what paging strategy to use...
247 * Completely bogus, don't remind me.
248 */
249#define VA_NORMAL 0 /* Normal vm usage expected */
250#define VA_ABNORMAL 1 /* Abnormal/random vm usage probable */
251#define VA_SEQUENTIAL 2 /* Accesses will be of a sequential nature */
252#define VA_INVALIDATE 3 /* Page table entries should be flushed ??? */
253static char *vstrings[] = {
254 "VA_NORMAL",
255 "VA_ABNORMAL",
256 "VA_SEQUENTIAL",
257 "VA_INVALIDATE",
258};
259
260asmlinkage void sunos_vadvise(unsigned long strategy)
261{
262 /* I wanna see who uses this... */
263 lock_kernel();
264 printk("%s: Advises us to use %s paging strategy\n",
265 current->comm,
266 strategy <= 3 ? vstrings[strategy] : "BOGUS");
267 unlock_kernel();
268}
269
270/* This just wants the soft limit (ie. rlim_cur element) of the RLIMIT_NOFILE
271 * resource limit and is for backwards compatibility with older sunos
272 * revs.
273 */
274asmlinkage long sunos_getdtablesize(void)
275{
276 return SUNOS_NR_OPEN;
277}
278
279#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
280
281asmlinkage unsigned long sunos_sigblock(unsigned long blk_mask)
282{
283 unsigned long old;
284
285 spin_lock_irq(&current->sighand->siglock);
286 old = current->blocked.sig[0];
287 current->blocked.sig[0] |= (blk_mask & _BLOCKABLE);
288 recalc_sigpending();
289 spin_unlock_irq(&current->sighand->siglock);
290 return old;
291}
292
293asmlinkage unsigned long sunos_sigsetmask(unsigned long newmask)
294{
295 unsigned long retval;
296
297 spin_lock_irq(&current->sighand->siglock);
298 retval = current->blocked.sig[0];
299 current->blocked.sig[0] = (newmask & _BLOCKABLE);
300 recalc_sigpending();
301 spin_unlock_irq(&current->sighand->siglock);
302 return retval;
303}
304
305/* SunOS getdents is very similar to the newer Linux (iBCS2-compliant) */
306/* getdents system call; the structure just has a different layout */
307/* (d_off+d_ino instead of d_ino+d_off). */
308struct sunos_dirent {
309 long d_off;
310 unsigned long d_ino;
311 unsigned short d_reclen;
312 unsigned short d_namlen;
313 char d_name[1];
314};
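
To make the layout remark above concrete, the following stand-alone sketch prints the field offsets of the SunOS ordering next to a mocked-up old-Linux ordering; both structures here are local demonstration copies, not the kernel definitions:

/* Illustrative only: the field-order difference the comment describes. */
#include <stddef.h>
#include <stdio.h>

struct sunos_dirent_demo {           /* SunOS order: offset first */
        long d_off;
        unsigned long d_ino;
        unsigned short d_reclen;
        unsigned short d_namlen;
        char d_name[1];
};

struct linux_dirent_demo {           /* mock-up of the old Linux order */
        unsigned long d_ino;
        long d_off;
        unsigned short d_reclen;
        char d_name[1];
};

int main(void)
{
        printf("sunos: d_off@%zu d_ino@%zu\n",
               offsetof(struct sunos_dirent_demo, d_off),
               offsetof(struct sunos_dirent_demo, d_ino));
        printf("linux: d_ino@%zu d_off@%zu\n",
               offsetof(struct linux_dirent_demo, d_ino),
               offsetof(struct linux_dirent_demo, d_off));
        return 0;
}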
315
316struct sunos_dirent_callback {
317 struct sunos_dirent __user *curr;
318 struct sunos_dirent __user *previous;
319 int count;
320 int error;
321};
322
323#define NAME_OFFSET(de) ((int) ((de)->d_name - (char __user *) (de)))
324#define ROUND_UP(x) (((x)+sizeof(long)-1) & ~(sizeof(long)-1))
325
326static int sunos_filldir(void * __buf, const char * name, int namlen,
327 loff_t offset, ino_t ino, unsigned int d_type)
328{
329 struct sunos_dirent __user *dirent;
330 struct sunos_dirent_callback * buf = __buf;
331 int reclen = ROUND_UP(NAME_OFFSET(dirent) + namlen + 1);
332
333 buf->error = -EINVAL; /* only used if we fail.. */
334 if (reclen > buf->count)
335 return -EINVAL;
336 dirent = buf->previous;
337 if (dirent)
338 put_user(offset, &dirent->d_off);
339 dirent = buf->curr;
340 buf->previous = dirent;
341 put_user(ino, &dirent->d_ino);
342 put_user(namlen, &dirent->d_namlen);
343 put_user(reclen, &dirent->d_reclen);
344 copy_to_user(dirent->d_name, name, namlen);
345 put_user(0, dirent->d_name + namlen);
346 dirent = (void __user *) dirent + reclen;
347 buf->curr = dirent;
348 buf->count -= reclen;
349 return 0;
350}
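
The record length computed by the filldir callbacks is the name offset plus the name and its terminator, rounded up to long alignment. A small sketch of that arithmetic, assuming a d_name offset of 12 bytes (the 32-bit layout) and an 11-character name purely for illustration:

/* Illustrative only: the record-length arithmetic used by the filldir
 * callbacks above.  The offset and name length are hypothetical values.
 */
#include <stdio.h>

#define DEMO_ROUND_UP(x) (((x) + sizeof(long) - 1) & ~(sizeof(long) - 1))

int main(void)
{
        unsigned int name_offset = 12;   /* offset of d_name, 32-bit layout */
        unsigned int namlen = 11;        /* hypothetical 11-character name  */

        printf("reclen = %zu\n", DEMO_ROUND_UP(name_offset + namlen + 1));
        return 0;
}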
351
352asmlinkage int sunos_getdents(unsigned int fd, void __user *dirent, int cnt)
353{
354 struct file * file;
355 struct sunos_dirent __user *lastdirent;
356 struct sunos_dirent_callback buf;
357 int error = -EBADF;
358
359 if (fd >= SUNOS_NR_OPEN)
360 goto out;
361
362 file = fget(fd);
363 if (!file)
364 goto out;
365
366 error = -EINVAL;
367 if (cnt < (sizeof(struct sunos_dirent) + 255))
368 goto out_putf;
369
370 buf.curr = (struct sunos_dirent __user *) dirent;
371 buf.previous = NULL;
372 buf.count = cnt;
373 buf.error = 0;
374
375 error = vfs_readdir(file, sunos_filldir, &buf);
376 if (error < 0)
377 goto out_putf;
378
379 lastdirent = buf.previous;
380 error = buf.error;
381 if (lastdirent) {
382 put_user(file->f_pos, &lastdirent->d_off);
383 error = cnt - buf.count;
384 }
385
386out_putf:
387 fput(file);
388out:
389 return error;
390}
391
392/* Old sunos getdirentries, severely broken compatibility stuff here. */
393struct sunos_direntry {
394 unsigned long d_ino;
395 unsigned short d_reclen;
396 unsigned short d_namlen;
397 char d_name[1];
398};
399
400struct sunos_direntry_callback {
401 struct sunos_direntry __user *curr;
402 struct sunos_direntry __user *previous;
403 int count;
404 int error;
405};
406
407static int sunos_filldirentry(void * __buf, const char * name, int namlen,
408 loff_t offset, ino_t ino, unsigned int d_type)
409{
410 struct sunos_direntry __user *dirent;
411 struct sunos_direntry_callback *buf = __buf;
412 int reclen = ROUND_UP(NAME_OFFSET(dirent) + namlen + 1);
413
414 buf->error = -EINVAL; /* only used if we fail.. */
415 if (reclen > buf->count)
416 return -EINVAL;
417 /* sunos_direntry has no d_off field, so there is nothing to back-patch. */
418 dirent = buf->curr;
419 buf->previous = dirent;
420 put_user(ino, &dirent->d_ino);
421 put_user(namlen, &dirent->d_namlen);
422 put_user(reclen, &dirent->d_reclen);
423 copy_to_user(dirent->d_name, name, namlen);
424 put_user(0, dirent->d_name + namlen);
425 dirent = (void __user *) dirent + reclen;
426 buf->curr = dirent;
427 buf->count -= reclen;
428 return 0;
429}
430
431asmlinkage int sunos_getdirentries(unsigned int fd, void __user *dirent,
432 int cnt, unsigned int __user *basep)
433{
434 struct file * file;
435 struct sunos_direntry __user *lastdirent;
436 struct sunos_direntry_callback buf;
437 int error = -EBADF;
438
439 if (fd >= SUNOS_NR_OPEN)
440 goto out;
441
442 file = fget(fd);
443 if (!file)
444 goto out;
445
446 error = -EINVAL;
447 if (cnt < (sizeof(struct sunos_direntry) + 255))
448 goto out_putf;
449
450 buf.curr = (struct sunos_direntry __user *) dirent;
451 buf.previous = NULL;
452 buf.count = cnt;
453 buf.error = 0;
454
455 error = vfs_readdir(file, sunos_filldirentry, &buf);
456 if (error < 0)
457 goto out_putf;
458
459 lastdirent = buf.previous;
460 error = buf.error;
461 if (lastdirent) {
462 put_user(file->f_pos, basep);
463 error = cnt - buf.count;
464 }
465
466out_putf:
467 fput(file);
468out:
469 return error;
470}
471
472struct sunos_utsname {
473 char sname[9];
474 char nname[9];
475 char nnext[56];
476 char rel[9];
477 char ver[9];
478 char mach[9];
479};
480
481asmlinkage int sunos_uname(struct sunos_utsname __user *name)
482{
483 int ret;
484 down_read(&uts_sem);
485 ret = copy_to_user(&name->sname[0], &system_utsname.sysname[0], sizeof(name->sname) - 1);
486 if (!ret) {
487 ret |= __copy_to_user(&name->nname[0], &system_utsname.nodename[0], sizeof(name->nname) - 1);
488 ret |= __put_user('\0', &name->nname[8]);
489 ret |= __copy_to_user(&name->rel[0], &system_utsname.release[0], sizeof(name->rel) - 1);
490 ret |= __copy_to_user(&name->ver[0], &system_utsname.version[0], sizeof(name->ver) - 1);
491 ret |= __copy_to_user(&name->mach[0], &system_utsname.machine[0], sizeof(name->mach) - 1);
492 }
493 up_read(&uts_sem);
494 return ret ? -EFAULT : 0;
495}
496
497asmlinkage int sunos_nosys(void)
498{
499 struct pt_regs *regs;
500 siginfo_t info;
501 static int cnt;
502
503 lock_kernel();
504 regs = current->thread.kregs;
505 info.si_signo = SIGSYS;
506 info.si_errno = 0;
507 info.si_code = __SI_FAULT|0x100;
508 info.si_addr = (void __user *)regs->pc;
509 info.si_trapno = regs->u_regs[UREG_G1];
510 send_sig_info(SIGSYS, &info, current);
511 if (cnt++ < 4) {
512 printk("Process makes ni_syscall number %d, register dump:\n",
513 (int) regs->u_regs[UREG_G1]);
514 show_regs(regs);
515 }
516 unlock_kernel();
517 return -ENOSYS;
518}
519
520/* This is not a real and complete implementation yet, just to keep
521 * the easy SunOS binaries happy.
522 */
523asmlinkage int sunos_fpathconf(int fd, int name)
524{
525 int ret;
526
527 switch(name) {
528 case _PCONF_LINK:
529 ret = LINK_MAX;
530 break;
531 case _PCONF_CANON:
532 ret = MAX_CANON;
533 break;
534 case _PCONF_INPUT:
535 ret = MAX_INPUT;
536 break;
537 case _PCONF_NAME:
538 ret = NAME_MAX;
539 break;
540 case _PCONF_PATH:
541 ret = PATH_MAX;
542 break;
543 case _PCONF_PIPE:
544 ret = PIPE_BUF;
545 break;
546 case _PCONF_CHRESTRICT: /* XXX Investigate XXX */
547 ret = 1;
548 break;
549 case _PCONF_NOTRUNC: /* XXX Investigate XXX */
550 case _PCONF_VDISABLE:
551 ret = 0;
552 break;
553 default:
554 ret = -EINVAL;
555 break;
556 }
557 return ret;
558}
559
560asmlinkage int sunos_pathconf(char __user *path, int name)
561{
562 int ret;
563
564 ret = sunos_fpathconf(0, name); /* XXX cheese XXX */
565 return ret;
566}
567
568/* SunOS mount system call emulation */
569
570asmlinkage int sunos_select(int width, fd_set __user *inp, fd_set __user *outp,
571 fd_set __user *exp, struct timeval __user *tvp)
572{
573 int ret;
574
575 /* SunOS binaries expect that select won't change the tvp contents */
576 ret = sys_select (width, inp, outp, exp, tvp);
577 if (ret == -EINTR && tvp) {
578 time_t sec, usec;
579
580 __get_user(sec, &tvp->tv_sec);
581 __get_user(usec, &tvp->tv_usec);
582
583 if (sec == 0 && usec == 0)
584 ret = 0;
585 }
586 return ret;
587}
588
589asmlinkage void sunos_nop(void)
590{
591 return;
592}
593
594/* SunOS mount/umount. */
595#define SMNT_RDONLY 1
596#define SMNT_NOSUID 2
597#define SMNT_NEWTYPE 4
598#define SMNT_GRPID 8
599#define SMNT_REMOUNT 16
600#define SMNT_NOSUB 32
601#define SMNT_MULTI 64
602#define SMNT_SYS5 128
603
604struct sunos_fh_t {
605 char fh_data [NFS_FHSIZE];
606};
607
608struct sunos_nfs_mount_args {
609 struct sockaddr_in __user *addr; /* file server address */
610 struct nfs_fh __user *fh; /* File handle to be mounted */
611 int flags; /* flags */
612 int wsize; /* write size in bytes */
613 int rsize; /* read size in bytes */
614 int timeo; /* initial timeout in .1 secs */
615 int retrans; /* times to retry send */
616 char __user *hostname; /* server's hostname */
617 int acregmin; /* attr cache file min secs */
618 int acregmax; /* attr cache file max secs */
619 int acdirmin; /* attr cache dir min secs */
620 int acdirmax; /* attr cache dir max secs */
621 char __user *netname; /* server's netname */
622};
623
624
625/* Bind the socket on a local reserved port and connect it to the
626 * remote server. This on Linux/i386 is done by the mount program,
627 * not by the kernel.
628 */
629static int
630sunos_nfs_get_server_fd (int fd, struct sockaddr_in *addr)
631{
632 struct sockaddr_in local;
633 struct sockaddr_in server;
634 int try_port;
635 struct socket *socket;
636 struct inode *inode;
637 struct file *file;
638 int ret, result = 0;
639
640 file = fget(fd);
641 if (!file)
642 goto out;
643
644 inode = file->f_dentry->d_inode;
645
646 socket = SOCKET_I(inode);
647 local.sin_family = AF_INET;
648 local.sin_addr.s_addr = INADDR_ANY;
649
650 /* IPPORT_RESERVED = 1024, can't find the definition in the kernel */
651 try_port = 1024;
652 do {
653 local.sin_port = htons (--try_port);
654 ret = socket->ops->bind(socket, (struct sockaddr*)&local,
655 sizeof(local));
656 } while (ret && try_port > (1024 / 2));
657
658 if (ret)
659 goto out_putf;
660
661 server.sin_family = AF_INET;
662 server.sin_addr = addr->sin_addr;
663 server.sin_port = NFS_PORT;
664
665 /* Call sys_connect */
666 ret = socket->ops->connect (socket, (struct sockaddr *) &server,
667 sizeof (server), file->f_flags);
668 if (ret >= 0)
669 result = 1;
670
671out_putf:
672 fput(file);
673out:
674 return result;
675}
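
The reserved-port walk-down above can be reproduced with the ordinary socket API. A user-space sketch following the same rule as the kernel loop (count down from 1023, give up at 512); note that binding below 1024 normally requires the appropriate privilege:

/* Illustrative only: user-space version of the reserved-port walk-down. */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
        int fd = socket(AF_INET, SOCK_DGRAM, 0);
        struct sockaddr_in local;
        int port, ret = -1;

        memset(&local, 0, sizeof(local));
        local.sin_family = AF_INET;
        local.sin_addr.s_addr = htonl(INADDR_ANY);

        for (port = 1023; port > 512; port--) {
                local.sin_port = htons(port);
                ret = bind(fd, (struct sockaddr *)&local, sizeof(local));
                if (ret == 0)
                        break;
        }

        if (ret == 0)
                printf("bound reserved port %d\n", port);
        else
                perror("no reserved port available");

        close(fd);
        return 0;
}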
676
677static int get_default (int value, int def_value)
678{
679 if (value)
680 return value;
681 else
682 return def_value;
683}
684
685static int sunos_nfs_mount(char *dir_name, int linux_flags, void __user *data)
686{
687 int server_fd, err;
688 char *the_name, *mount_page;
689 struct nfs_mount_data linux_nfs_mount;
690 struct sunos_nfs_mount_args sunos_mount;
691
692 /* Ok, here comes the fun part: Linux's nfs mount needs a
693 * socket connection to the server, but SunOS mount does not
694 * require this, so we use the information on the destination
695 * address to create a socket and bind it to a reserved
696 * port on this system
697 */
698 if (copy_from_user(&sunos_mount, data, sizeof(sunos_mount)))
699 return -EFAULT;
700
701 server_fd = sys_socket (AF_INET, SOCK_DGRAM, IPPROTO_UDP);
702 if (server_fd < 0)
703 return -ENXIO;
704
705 if (copy_from_user(&linux_nfs_mount.addr,sunos_mount.addr,
706 sizeof(*sunos_mount.addr)) ||
707 copy_from_user(&linux_nfs_mount.root,sunos_mount.fh,
708 sizeof(*sunos_mount.fh))) {
709 sys_close (server_fd);
710 return -EFAULT;
711 }
712
713 if (!sunos_nfs_get_server_fd (server_fd, &linux_nfs_mount.addr)){
714 sys_close (server_fd);
715 return -ENXIO;
716 }
717
718 /* The socket is already bound; now fill in the Linux NFS mount arguments. */
719 linux_nfs_mount.version = NFS_MOUNT_VERSION;
720 linux_nfs_mount.flags = sunos_mount.flags;
721 linux_nfs_mount.fd = server_fd;
722
723 linux_nfs_mount.rsize = get_default (sunos_mount.rsize, 8192);
724 linux_nfs_mount.wsize = get_default (sunos_mount.wsize, 8192);
725 linux_nfs_mount.timeo = get_default (sunos_mount.timeo, 10);
726 linux_nfs_mount.retrans = sunos_mount.retrans;
727
728 linux_nfs_mount.acregmin = sunos_mount.acregmin;
729 linux_nfs_mount.acregmax = sunos_mount.acregmax;
730 linux_nfs_mount.acdirmin = sunos_mount.acdirmin;
731 linux_nfs_mount.acdirmax = sunos_mount.acdirmax;
732
733 the_name = getname(sunos_mount.hostname);
734 if (IS_ERR(the_name))
735 return PTR_ERR(the_name);
736
737 strlcpy(linux_nfs_mount.hostname, the_name,
738 sizeof(linux_nfs_mount.hostname));
739 putname (the_name);
740
741 mount_page = (char *) get_zeroed_page(GFP_KERNEL);
742 if (!mount_page)
743 return -ENOMEM;
744
745 memcpy(mount_page, &linux_nfs_mount, sizeof(linux_nfs_mount));
746
747 err = do_mount("", dir_name, "nfs", linux_flags, mount_page);
748
749 free_page((unsigned long) mount_page);
750 return err;
751}
752
753asmlinkage int
754sunos_mount(char __user *type, char __user *dir, int flags, void __user *data)
755{
756 int linux_flags = 0;
757 int ret = -EINVAL;
758 char *dev_fname = NULL;
759 char *dir_page, *type_page;
760
761 if (!capable (CAP_SYS_ADMIN))
762 return -EPERM;
763
764 lock_kernel();
765 /* We don't handle the integer fs type */
766 if ((flags & SMNT_NEWTYPE) == 0)
767 goto out;
768
769 /* Do not allow for those flags we don't support */
770 if (flags & (SMNT_GRPID|SMNT_NOSUB|SMNT_MULTI|SMNT_SYS5))
771 goto out;
772
773 if (flags & SMNT_REMOUNT)
774 linux_flags |= MS_REMOUNT;
775 if (flags & SMNT_RDONLY)
776 linux_flags |= MS_RDONLY;
777 if (flags & SMNT_NOSUID)
778 linux_flags |= MS_NOSUID;
779
780 dir_page = getname(dir);
781 ret = PTR_ERR(dir_page);
782 if (IS_ERR(dir_page))
783 goto out;
784
785 type_page = getname(type);
786 ret = PTR_ERR(type_page);
787 if (IS_ERR(type_page))
788 goto out1;
789
790 if (strcmp(type_page, "ext2") == 0) {
791 dev_fname = getname(data);
792 } else if (strcmp(type_page, "iso9660") == 0) {
793 dev_fname = getname(data);
794 } else if (strcmp(type_page, "minix") == 0) {
795 dev_fname = getname(data);
796 } else if (strcmp(type_page, "nfs") == 0) {
797 ret = sunos_nfs_mount (dir_page, flags, data);
798 goto out2;
799 } else if (strcmp(type_page, "ufs") == 0) {
800 printk("Warning: UFS filesystem mounts unsupported.\n");
801 ret = -ENODEV;
802 goto out2;
803 } else if (strcmp(type_page, "proc")) {
804 ret = -ENODEV;
805 goto out2;
806 }
807 ret = PTR_ERR(dev_fname);
808 if (IS_ERR(dev_fname))
809 goto out2;
810 ret = do_mount(dev_fname, dir_page, type_page, linux_flags, NULL);
811 if (dev_fname)
812 putname(dev_fname);
813out2:
814 putname(type_page);
815out1:
816 putname(dir_page);
817out:
818 unlock_kernel();
819 return ret;
820}
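
The flag translation in sunos_mount() maps the SunOS SMNT_* bits onto the Linux MS_* mount flags and rejects the ones with no counterpart. A sketch of that mapping, with the SMNT_* values copied from the definitions earlier in this file and redefined locally for the demo:

/* Illustrative only: the SMNT_* -> MS_* flag translation performed above. */
#include <stdio.h>
#include <sys/mount.h>

#define DEMO_SMNT_RDONLY   1
#define DEMO_SMNT_NOSUID   2
#define DEMO_SMNT_REMOUNT 16

static unsigned long translate_mount_flags(int sunos_flags)
{
        unsigned long linux_flags = 0;

        if (sunos_flags & DEMO_SMNT_REMOUNT)
                linux_flags |= MS_REMOUNT;
        if (sunos_flags & DEMO_SMNT_RDONLY)
                linux_flags |= MS_RDONLY;
        if (sunos_flags & DEMO_SMNT_NOSUID)
                linux_flags |= MS_NOSUID;
        return linux_flags;
}

int main(void)
{
        /* RDONLY | REMOUNT on the SunOS side */
        printf("SunOS flags 0x11 -> Linux 0x%lx\n", translate_mount_flags(0x11));
        return 0;
}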
821
822
823asmlinkage int sunos_setpgrp(pid_t pid, pid_t pgid)
824{
825 int ret;
826
827 /* So stupid... */
828 if ((!pid || pid == current->pid) &&
829 !pgid) {
830 sys_setsid();
831 ret = 0;
832 } else {
833 ret = sys_setpgid(pid, pgid);
834 }
835 return ret;
836}
837
838/* So stupid... */
839asmlinkage int sunos_wait4(pid_t pid, unsigned int __user *stat_addr,
840 int options, struct rusage __user*ru)
841{
842 int ret;
843
844 ret = sys_wait4((pid ? pid : -1), stat_addr, options, ru);
845 return ret;
846}
847
848extern int kill_pg(int, int, int);
849asmlinkage int sunos_killpg(int pgrp, int sig)
850{
851 int ret;
852
853 lock_kernel();
854 ret = kill_pg(pgrp, sig, 0);
855 unlock_kernel();
856 return ret;
857}
858
859asmlinkage int sunos_audit(void)
860{
861 lock_kernel();
862 printk ("sys_audit\n");
863 unlock_kernel();
864 return -1;
865}
866
867asmlinkage unsigned long sunos_gethostid(void)
868{
869 unsigned long ret;
870
871 lock_kernel();
872 ret = ((unsigned long)idprom->id_machtype << 24) |
873 (unsigned long)idprom->id_sernum;
874 unlock_kernel();
875 return ret;
876}
877
878/* sysconf options, for SunOS compatibility */
879#define _SC_ARG_MAX 1
880#define _SC_CHILD_MAX 2
881#define _SC_CLK_TCK 3
882#define _SC_NGROUPS_MAX 4
883#define _SC_OPEN_MAX 5
884#define _SC_JOB_CONTROL 6
885#define _SC_SAVED_IDS 7
886#define _SC_VERSION 8
887
888asmlinkage long sunos_sysconf (int name)
889{
890 long ret;
891
892 switch (name){
893 case _SC_ARG_MAX:
894 ret = ARG_MAX;
895 break;
896 case _SC_CHILD_MAX:
897 ret = CHILD_MAX;
898 break;
899 case _SC_CLK_TCK:
900 ret = HZ;
901 break;
902 case _SC_NGROUPS_MAX:
903 ret = NGROUPS_MAX;
904 break;
905 case _SC_OPEN_MAX:
906 ret = OPEN_MAX;
907 break;
908 case _SC_JOB_CONTROL:
909 ret = 1; /* yes, we do support job control */
910 break;
911 case _SC_SAVED_IDS:
912 ret = 1; /* yes, we do support saved uids */
913 break;
914 case _SC_VERSION:
915 /* mhm, POSIX_VERSION is in /usr/include/unistd.h
916 * should it go in /usr/include/linux?
917 */
918 ret = 199009L;
919 break;
920 default:
921 ret = -1;
922 break;
923 };
924 return ret;
925}
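
The same limits can be queried from user space through the modern sysconf() interface, which is a convenient way to sanity-check the table above:

/* Illustrative only: querying the corresponding limits via sysconf(3). */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        printf("_SC_ARG_MAX   = %ld\n", sysconf(_SC_ARG_MAX));
        printf("_SC_CHILD_MAX = %ld\n", sysconf(_SC_CHILD_MAX));
        printf("_SC_CLK_TCK   = %ld\n", sysconf(_SC_CLK_TCK));
        printf("_SC_OPEN_MAX  = %ld\n", sysconf(_SC_OPEN_MAX));
        return 0;
}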
926
927asmlinkage int sunos_semsys(int op, unsigned long arg1, unsigned long arg2,
928 unsigned long arg3, void *ptr)
929{
930 union semun arg4;
931 int ret;
932
933 switch (op) {
934 case 0:
935 /* Most arguments match on a 1:1 basis but cmd doesn't */
936 switch(arg3) {
937 case 4:
938 arg3=GETPID; break;
939 case 5:
940 arg3=GETVAL; break;
941 case 6:
942 arg3=GETALL; break;
943 case 3:
944 arg3=GETNCNT; break;
945 case 7:
946 arg3=GETZCNT; break;
947 case 8:
948 arg3=SETVAL; break;
949 case 9:
950 arg3=SETALL; break;
951 }
952 /* sys_semctl(): */
953 /* value to modify semaphore to */
954 arg4.__pad = (void __user *) ptr;
955 ret = sys_semctl((int)arg1, (int)arg2, (int)arg3, arg4 );
956 break;
957 case 1:
958 /* sys_semget(): */
959 ret = sys_semget((key_t)arg1, (int)arg2, (int)arg3);
960 break;
961 case 2:
962 /* sys_semop(): */
963 ret = sys_semop((int)arg1, (struct sembuf __user *)arg2, (unsigned)arg3);
964 break;
965 default:
966 ret = -EINVAL;
967 break;
968 };
969 return ret;
970}
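
The first switch above renumbers the SunOS semctl command codes into the Linux GETPID/GETVAL/... constants. Written out as a small lookup helper; the SunOS-side numbers are the ones handled in that switch:

/* Illustrative only: the SunOS -> Linux semctl command renumbering. */
#include <stdio.h>
#include <sys/sem.h>

static int sunos_to_linux_semcmd(int cmd)
{
        switch (cmd) {
        case 3: return GETNCNT;
        case 4: return GETPID;
        case 5: return GETVAL;
        case 6: return GETALL;
        case 7: return GETZCNT;
        case 8: return SETVAL;
        case 9: return SETALL;
        default: return cmd;    /* other commands pass through unchanged */
        }
}

int main(void)
{
        printf("SunOS 5 -> Linux GETVAL (%d)\n", sunos_to_linux_semcmd(5));
        printf("SunOS 8 -> Linux SETVAL (%d)\n", sunos_to_linux_semcmd(8));
        return 0;
}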
971
972asmlinkage int sunos_msgsys(int op, unsigned long arg1, unsigned long arg2,
973 unsigned long arg3, unsigned long arg4)
974{
975 struct sparc_stackf *sp;
976 unsigned long arg5;
977 int rval;
978
979 switch(op) {
980 case 0:
981 rval = sys_msgget((key_t)arg1, (int)arg2);
982 break;
983 case 1:
984 rval = sys_msgctl((int)arg1, (int)arg2,
985 (struct msqid_ds __user *)arg3);
986 break;
987 case 2:
988 lock_kernel();
989 sp = (struct sparc_stackf *)current->thread.kregs->u_regs[UREG_FP];
990 arg5 = sp->xxargs[0];
991 unlock_kernel();
992 rval = sys_msgrcv((int)arg1, (struct msgbuf __user *)arg2,
993 (size_t)arg3, (long)arg4, (int)arg5);
994 break;
995 case 3:
996 rval = sys_msgsnd((int)arg1, (struct msgbuf __user *)arg2,
997 (size_t)arg3, (int)arg4);
998 break;
999 default:
1000 rval = -EINVAL;
1001 break;
1002 }
1003 return rval;
1004}
1005
1006asmlinkage int sunos_shmsys(int op, unsigned long arg1, unsigned long arg2,
1007 unsigned long arg3)
1008{
1009 unsigned long raddr;
1010 int rval;
1011
1012 switch(op) {
1013 case 0:
1014 /* do_shmat(): attach a shared memory area */
1015 rval = do_shmat((int)arg1,(char __user *)arg2,(int)arg3,&raddr);
1016 if (!rval)
1017 rval = (int) raddr;
1018 break;
1019 case 1:
1020 /* sys_shmctl(): modify shared memory area attr. */
1021 rval = sys_shmctl((int)arg1,(int)arg2,(struct shmid_ds __user *)arg3);
1022 break;
1023 case 2:
1024 /* sys_shmdt(): detach a shared memory area */
1025 rval = sys_shmdt((char __user *)arg1);
1026 break;
1027 case 3:
1028 /* sys_shmget(): get a shared memory area */
1029 rval = sys_shmget((key_t)arg1,(int)arg2,(int)arg3);
1030 break;
1031 default:
1032 rval = -EINVAL;
1033 break;
1034 };
1035 return rval;
1036}
1037
1038#define SUNOS_EWOULDBLOCK 35
1039
1040/* see the sunos man page read(2v) for an explanation
1041 of this garbage. We use O_NDELAY to mark
1042 file descriptors that have been set non-blocking
1043 using 4.2BSD style calls. (tridge) */
1044
1045static inline int check_nonblock(int ret, int fd)
1046{
1047 if (ret == -EAGAIN) {
1048 struct file * file = fget(fd);
1049 if (file) {
1050 if (file->f_flags & O_NDELAY)
1051 ret = -SUNOS_EWOULDBLOCK;
1052 fput(file);
1053 }
1054 }
1055 return ret;
1056}
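
check_nonblock() only rewrites -EAGAIN, and only for descriptors marked O_NDELAY, into SunOS's EWOULDBLOCK value of 35. A user-space sketch that produces the EAGAIN case being translated:

/* Illustrative only: producing the EAGAIN case that check_nonblock()
 * translates into SunOS's EWOULDBLOCK value (35).
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int pipefd[2];
        char c;

        pipe(pipefd);
        /* mark the read end non-blocking (O_NDELAY equals O_NONBLOCK on Linux) */
        fcntl(pipefd[0], F_SETFL, O_NONBLOCK);

        if (read(pipefd[0], &c, 1) < 0 && errno == EAGAIN)
                printf("empty non-blocking read: EAGAIN (%d)\n", EAGAIN);

        close(pipefd[0]);
        close(pipefd[1]);
        return 0;
}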
1057
1058asmlinkage int sunos_read(unsigned int fd, char __user *buf, int count)
1059{
1060 int ret;
1061
1062 ret = check_nonblock(sys_read(fd,buf,count),fd);
1063 return ret;
1064}
1065
1066asmlinkage int sunos_readv(unsigned long fd, const struct iovec __user *vector,
1067 long count)
1068{
1069 int ret;
1070
1071 ret = check_nonblock(sys_readv(fd,vector,count),fd);
1072 return ret;
1073}
1074
1075asmlinkage int sunos_write(unsigned int fd, char __user *buf, int count)
1076{
1077 int ret;
1078
1079 ret = check_nonblock(sys_write(fd,buf,count),fd);
1080 return ret;
1081}
1082
1083asmlinkage int sunos_writev(unsigned long fd,
1084 const struct iovec __user *vector, long count)
1085{
1086 int ret;
1087
1088 ret = check_nonblock(sys_writev(fd,vector,count),fd);
1089 return ret;
1090}
1091
1092asmlinkage int sunos_recv(int fd, void __user *ubuf, int size, unsigned flags)
1093{
1094 int ret;
1095
1096 ret = check_nonblock(sys_recv(fd,ubuf,size,flags),fd);
1097 return ret;
1098}
1099
1100asmlinkage int sunos_send(int fd, void __user *buff, int len, unsigned flags)
1101{
1102 int ret;
1103
1104 ret = check_nonblock(sys_send(fd,buff,len,flags),fd);
1105 return ret;
1106}
1107
1108asmlinkage int sunos_accept(int fd, struct sockaddr __user *sa,
1109 int __user *addrlen)
1110{
1111 int ret;
1112
1113 while (1) {
1114 ret = check_nonblock(sys_accept(fd,sa,addrlen),fd);
1115 if (ret != -ENETUNREACH && ret != -EHOSTUNREACH)
1116 break;
1117 }
1118
1119 return ret;
1120}
1121
1122#define SUNOS_SV_INTERRUPT 2
1123
1124asmlinkage int
1125sunos_sigaction(int sig, const struct old_sigaction __user *act,
1126 struct old_sigaction __user *oact)
1127{
1128 struct k_sigaction new_ka, old_ka;
1129 int ret;
1130
1131 if (act) {
1132 old_sigset_t mask;
1133
1134 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
1135 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
1136 __get_user(new_ka.sa.sa_flags, &act->sa_flags))
1137 return -EFAULT;
1138 __get_user(mask, &act->sa_mask);
1139 new_ka.sa.sa_restorer = NULL;
1140 new_ka.ka_restorer = NULL;
1141 siginitset(&new_ka.sa.sa_mask, mask);
1142 new_ka.sa.sa_flags ^= SUNOS_SV_INTERRUPT;
1143 }
1144
1145 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
1146
1147 if (!ret && oact) {
1148 /* In the clone() case we could copy half consistent
1149 * state to the user, however this could sleep and
1150 * deadlock us if we held the signal lock on SMP. So for
1151 * now I take the easy way out and do no locking.
1152 * But then again we don't support SunOS lwp's anyways ;-)
1153 */
1154 old_ka.sa.sa_flags ^= SUNOS_SV_INTERRUPT;
1155 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
1156 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
1157 __put_user(old_ka.sa.sa_flags, &oact->sa_flags))
1158 return -EFAULT;
1159 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
1160 }
1161
1162 return ret;
1163}
1164
1165
1166asmlinkage int sunos_setsockopt(int fd, int level, int optname,
1167 char __user *optval, int optlen)
1168{
1169 int tr_opt = optname;
1170 int ret;
1171
1172 if (level == SOL_IP) {
1173 /* Multicast socketopts (ttl, membership) */
1174 if (tr_opt >=2 && tr_opt <= 6)
1175 tr_opt += 30;
1176 }
1177 ret = sys_setsockopt(fd, level, tr_opt, optval, optlen);
1178 return ret;
1179}
1180
1181asmlinkage int sunos_getsockopt(int fd, int level, int optname,
1182 char __user *optval, int __user *optlen)
1183{
1184 int tr_opt = optname;
1185 int ret;
1186
1187 if (level == SOL_IP) {
1188 /* Multicast socketopts (ttl, membership) */
1189 if (tr_opt >=2 && tr_opt <= 6)
1190 tr_opt += 30;
1191 }
1192 ret = sys_getsockopt(fd, level, tr_opt, optval, optlen);
1193 return ret;
1194}
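
Both socket-option wrappers shift the option number by 30 because the IP multicast options apparently sit at 2..6 on the SunOS side (judging by the range tested) while Linux numbers them from 32 upward. A sketch that prints the translation next to the Linux constants:

/* Illustrative only: the "+ 30" option translation used above. */
#include <netinet/in.h>
#include <stdio.h>

int main(void)
{
        int sunos_opt;

        for (sunos_opt = 2; sunos_opt <= 6; sunos_opt++)
                printf("SunOS opt %d -> Linux opt %d\n",
                       sunos_opt, sunos_opt + 30);

        printf("IP_MULTICAST_IF=%d IP_MULTICAST_TTL=%d IP_DROP_MEMBERSHIP=%d\n",
               IP_MULTICAST_IF, IP_MULTICAST_TTL, IP_DROP_MEMBERSHIP);
        return 0;
}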
diff --git a/arch/sparc/kernel/systbls.S b/arch/sparc/kernel/systbls.S
new file mode 100644
index 000000000000..928ffeb0fabb
--- /dev/null
+++ b/arch/sparc/kernel/systbls.S
@@ -0,0 +1,186 @@
1/* $Id: systbls.S,v 1.103 2002/02/08 03:57:14 davem Exp $
2 * systbls.S: System call entry point tables for OS compatibility.
3 * The native Linux system call table lives here also.
4 *
5 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
6 *
7 * Based upon preliminary work which is:
8 *
9 * Copyright (C) 1995 Adrian M. Rodriguez (adrian@remus.rutgers.edu)
10 */
11
12#include <linux/config.h>
13
14 .data
15 .align 4
16
17 /* First, the Linux native syscall table. */
18
19 .globl sys_call_table
20sys_call_table:
21/*0*/ .long sys_restart_syscall, sys_exit, sys_fork, sys_read, sys_write
22/*5*/ .long sys_open, sys_close, sys_wait4, sys_creat, sys_link
23/*10*/ .long sys_unlink, sunos_execv, sys_chdir, sys_chown16, sys_mknod
24/*15*/ .long sys_chmod, sys_lchown16, sparc_brk, sys_nis_syscall, sys_lseek
25/*20*/ .long sys_getpid, sys_capget, sys_capset, sys_setuid16, sys_getuid16
26/*25*/ .long sys_time, sys_ptrace, sys_alarm, sys_sigaltstack, sys_pause
27/*30*/ .long sys_utime, sys_lchown, sys_fchown, sys_access, sys_nice
28/*35*/ .long sys_chown, sys_sync, sys_kill, sys_newstat, sys_sendfile
29/*40*/ .long sys_newlstat, sys_dup, sys_pipe, sys_times, sys_getuid
30/*45*/ .long sys_umount, sys_setgid16, sys_getgid16, sys_signal, sys_geteuid16
31/*50*/ .long sys_getegid16, sys_acct, sys_nis_syscall, sys_getgid, sys_ioctl
32/*55*/ .long sys_reboot, sys_mmap2, sys_symlink, sys_readlink, sys_execve
33/*60*/ .long sys_umask, sys_chroot, sys_newfstat, sys_fstat64, sys_getpagesize
34/*65*/ .long sys_msync, sys_vfork, sys_pread64, sys_pwrite64, sys_geteuid
35/*70*/ .long sys_getegid, sys_mmap, sys_setreuid, sys_munmap, sys_mprotect
36/*75*/ .long sys_madvise, sys_vhangup, sys_truncate64, sys_mincore, sys_getgroups16
37/*80*/ .long sys_setgroups16, sys_getpgrp, sys_setgroups, sys_setitimer, sys_ftruncate64
38/*85*/ .long sys_swapon, sys_getitimer, sys_setuid, sys_sethostname, sys_setgid
39/*90*/ .long sys_dup2, sys_setfsuid, sys_fcntl, sys_select, sys_setfsgid
40/*95*/ .long sys_fsync, sys_setpriority, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall
41/*100*/ .long sys_getpriority, sys_rt_sigreturn, sys_rt_sigaction, sys_rt_sigprocmask, sys_rt_sigpending
42/*105*/ .long sys_rt_sigtimedwait, sys_rt_sigqueueinfo, sys_rt_sigsuspend, sys_setresuid, sys_getresuid
43/*110*/ .long sys_setresgid, sys_getresgid, sys_setregid, sys_nis_syscall, sys_nis_syscall
44/*115*/ .long sys_getgroups, sys_gettimeofday, sys_getrusage, sys_nis_syscall, sys_getcwd
45/*120*/ .long sys_readv, sys_writev, sys_settimeofday, sys_fchown16, sys_fchmod
46/*125*/ .long sys_nis_syscall, sys_setreuid16, sys_setregid16, sys_rename, sys_truncate
47/*130*/ .long sys_ftruncate, sys_flock, sys_lstat64, sys_nis_syscall, sys_nis_syscall
48/*135*/ .long sys_nis_syscall, sys_mkdir, sys_rmdir, sys_utimes, sys_stat64
49/*140*/ .long sys_sendfile64, sys_nis_syscall, sys_futex, sys_gettid, sys_getrlimit
50/*145*/ .long sys_setrlimit, sys_pivot_root, sys_prctl, sys_pciconfig_read, sys_pciconfig_write
51/*150*/ .long sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_poll, sys_getdents64
52/*155*/ .long sys_fcntl64, sys_ni_syscall, sys_statfs, sys_fstatfs, sys_oldumount
53/*160*/ .long sys_sched_setaffinity, sys_sched_getaffinity, sys_getdomainname, sys_setdomainname, sys_nis_syscall
54/*165*/ .long sys_quotactl, sys_set_tid_address, sys_mount, sys_ustat, sys_setxattr
55/*170*/ .long sys_lsetxattr, sys_fsetxattr, sys_getxattr, sys_lgetxattr, sys_getdents
56/*175*/ .long sys_setsid, sys_fchdir, sys_fgetxattr, sys_listxattr, sys_llistxattr
57/*180*/ .long sys_flistxattr, sys_removexattr, sys_lremovexattr, sys_sigpending, sys_ni_syscall
58/*185*/ .long sys_setpgid, sys_fremovexattr, sys_tkill, sys_exit_group, sys_newuname
59/*190*/ .long sys_init_module, sys_personality, sparc_remap_file_pages, sys_epoll_create, sys_epoll_ctl
60/*195*/ .long sys_epoll_wait, sys_nis_syscall, sys_getppid, sparc_sigaction, sys_sgetmask
61/*200*/ .long sys_ssetmask, sys_sigsuspend, sys_newlstat, sys_uselib, old_readdir
62/*205*/ .long sys_readahead, sys_socketcall, sys_syslog, sys_lookup_dcookie, sys_fadvise64
63/*210*/ .long sys_fadvise64_64, sys_tgkill, sys_waitpid, sys_swapoff, sys_sysinfo
64/*215*/ .long sys_ipc, sys_sigreturn, sys_clone, sys_nis_syscall, sys_adjtimex
65/*220*/ .long sys_sigprocmask, sys_ni_syscall, sys_delete_module, sys_ni_syscall, sys_getpgid
66/*225*/ .long sys_bdflush, sys_sysfs, sys_nis_syscall, sys_setfsuid16, sys_setfsgid16
67/*230*/ .long sys_select, sys_time, sys_nis_syscall, sys_stime, sys_statfs64
68 /* "We are the Knights of the Forest of Ni!!" */
69/*235*/ .long sys_fstatfs64, sys_llseek, sys_mlock, sys_munlock, sys_mlockall
70/*240*/ .long sys_munlockall, sys_sched_setparam, sys_sched_getparam, sys_sched_setscheduler, sys_sched_getscheduler
71/*245*/ .long sys_sched_yield, sys_sched_get_priority_max, sys_sched_get_priority_min, sys_sched_rr_get_interval, sys_nanosleep
72/*250*/ .long sparc_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_nfsservctl
73/*255*/ .long sys_nis_syscall, sys_clock_settime, sys_clock_gettime, sys_clock_getres, sys_clock_nanosleep
74/*260*/ .long sys_sched_getaffinity, sys_sched_setaffinity, sys_timer_settime, sys_timer_gettime, sys_timer_getoverrun
75/*265*/ .long sys_timer_delete, sys_timer_create, sys_nis_syscall, sys_io_setup, sys_io_destroy
76/*270*/ .long sys_io_submit, sys_io_cancel, sys_io_getevents, sys_mq_open, sys_mq_unlink
77/*275*/ .long sys_mq_timedsend, sys_mq_timedreceive, sys_mq_notify, sys_mq_getsetattr, sys_waitid
78/*280*/ .long sys_ni_syscall, sys_add_key, sys_request_key, sys_keyctl
79
80#ifdef CONFIG_SUNOS_EMUL
81 /* Now the SunOS syscall table. */
82
83 .align 4
84 .globl sunos_sys_table
85sunos_sys_table:
86/*0*/ .long sunos_indir, sys_exit, sys_fork
87 .long sunos_read, sunos_write, sys_open
88 .long sys_close, sunos_wait4, sys_creat
89 .long sys_link, sys_unlink, sunos_execv
90 .long sys_chdir, sunos_nosys, sys_mknod
91 .long sys_chmod, sys_lchown16, sunos_brk
92 .long sunos_nosys, sys_lseek, sunos_getpid
93 .long sunos_nosys, sunos_nosys, sunos_nosys
94 .long sunos_getuid, sunos_nosys, sys_ptrace
95 .long sunos_nosys, sunos_nosys, sunos_nosys
96 .long sunos_nosys, sunos_nosys, sunos_nosys
97 .long sys_access, sunos_nosys, sunos_nosys
98 .long sys_sync, sys_kill, sys_newstat
99 .long sunos_nosys, sys_newlstat, sys_dup
100 .long sys_pipe, sunos_nosys, sunos_nosys
101 .long sunos_nosys, sunos_nosys, sunos_getgid
102 .long sunos_nosys, sunos_nosys
103/*50*/ .long sunos_nosys, sys_acct, sunos_nosys
104 .long sunos_mctl, sunos_ioctl, sys_reboot
105 .long sunos_nosys, sys_symlink, sys_readlink
106 .long sys_execve, sys_umask, sys_chroot
107 .long sys_newfstat, sunos_nosys, sys_getpagesize
108 .long sys_msync, sys_vfork, sunos_nosys
109 .long sunos_nosys, sunos_sbrk, sunos_sstk
110 .long sunos_mmap, sunos_vadvise, sys_munmap
111 .long sys_mprotect, sys_madvise, sys_vhangup
112 .long sunos_nosys, sys_mincore, sys_getgroups16
113 .long sys_setgroups16, sys_getpgrp, sunos_setpgrp
114 .long sys_setitimer, sunos_nosys, sys_swapon
115 .long sys_getitimer, sys_gethostname, sys_sethostname
116 .long sunos_getdtablesize, sys_dup2, sunos_nop
117 .long sys_fcntl, sunos_select, sunos_nop
118 .long sys_fsync, sys_setpriority, sys_socket
119 .long sys_connect, sunos_accept
120/*100*/ .long sys_getpriority, sunos_send, sunos_recv
121 .long sunos_nosys, sys_bind, sunos_setsockopt
122 .long sys_listen, sunos_nosys, sunos_sigaction
123 .long sunos_sigblock, sunos_sigsetmask, sys_sigpause
124 .long sys_sigstack, sys_recvmsg, sys_sendmsg
125 .long sunos_nosys, sys_gettimeofday, sys_getrusage
126 .long sunos_getsockopt, sunos_nosys, sunos_readv
127 .long sunos_writev, sys_settimeofday, sys_fchown16
128 .long sys_fchmod, sys_recvfrom, sys_setreuid16
129 .long sys_setregid16, sys_rename, sys_truncate
130 .long sys_ftruncate, sys_flock, sunos_nosys
131 .long sys_sendto, sys_shutdown, sys_socketpair
132 .long sys_mkdir, sys_rmdir, sys_utimes
133 .long sys_sigreturn, sunos_nosys, sys_getpeername
134 .long sunos_gethostid, sunos_nosys, sys_getrlimit
135 .long sys_setrlimit, sunos_killpg, sunos_nosys
136 .long sunos_nosys, sunos_nosys
137/*150*/ .long sys_getsockname, sunos_nosys, sunos_nosys
138 .long sys_poll, sunos_nosys, sunos_nosys
139 .long sunos_getdirentries, sys_statfs, sys_fstatfs
140 .long sys_oldumount, sunos_nosys, sunos_nosys
141 .long sys_getdomainname, sys_setdomainname
142 .long sunos_nosys, sys_quotactl, sunos_nosys
143 .long sunos_mount, sys_ustat, sunos_semsys
144 .long sunos_msgsys, sunos_shmsys, sunos_audit
145 .long sunos_nosys, sunos_getdents, sys_setsid
146 .long sys_fchdir, sunos_nosys, sunos_nosys
147 .long sunos_nosys, sunos_nosys, sunos_nosys
148 .long sunos_nosys, sys_sigpending, sunos_nosys
149 .long sys_setpgid, sunos_pathconf, sunos_fpathconf
150 .long sunos_sysconf, sunos_uname, sunos_nosys
151 .long sunos_nosys, sunos_nosys, sunos_nosys
152 .long sunos_nosys, sunos_nosys, sunos_nosys
153 .long sunos_nosys, sunos_nosys, sunos_nosys
154/*200*/ .long sunos_nosys, sunos_nosys, sunos_nosys
155 .long sunos_nosys, sunos_nosys, sunos_nosys
156 .long sunos_nosys, sunos_nosys, sunos_nosys
157 .long sunos_nosys, sunos_nosys, sunos_nosys
158 .long sunos_nosys, sunos_nosys, sunos_nosys
159 .long sunos_nosys, sunos_nosys, sunos_nosys
160 .long sunos_nosys, sunos_nosys, sunos_nosys
161 .long sunos_nosys, sunos_nosys, sunos_nosys
162 .long sunos_nosys, sunos_nosys, sunos_nosys
163 .long sunos_nosys, sunos_nosys, sunos_nosys
164 .long sunos_nosys, sunos_nosys, sunos_nosys
165 .long sunos_nosys, sunos_nosys, sunos_nosys
166 .long sunos_nosys, sunos_nosys, sunos_nosys
167 .long sunos_nosys, sunos_nosys, sunos_nosys
168 .long sunos_nosys, sunos_nosys, sunos_nosys
169 .long sunos_nosys, sunos_nosys, sunos_nosys
170 .long sunos_nosys, sunos_nosys
171/*250*/ .long sunos_nosys, sunos_nosys, sunos_nosys
172 .long sunos_nosys, sunos_nosys, sunos_nosys
173 .long sunos_nosys, sunos_nosys, sunos_nosys
174 .long sunos_nosys
175/*260*/ .long sunos_nosys, sunos_nosys, sunos_nosys
176 .long sunos_nosys, sunos_nosys, sunos_nosys
177 .long sunos_nosys, sunos_nosys, sunos_nosys
178 .long sunos_nosys
179/*270*/ .long sunos_nosys, sunos_nosys, sunos_nosys
180 .long sunos_nosys, sunos_nosys, sunos_nosys
181 .long sunos_nosys, sunos_nosys, sunos_nosys
182 .long sunos_nosys
183/*280*/ .long sunos_nosys, sunos_nosys, sunos_nosys
184 .long sunos_nosys
185
186#endif
diff --git a/arch/sparc/kernel/tadpole.c b/arch/sparc/kernel/tadpole.c
new file mode 100644
index 000000000000..f476a5f4af6a
--- /dev/null
+++ b/arch/sparc/kernel/tadpole.c
@@ -0,0 +1,126 @@
1/* tadpole.c: Probing for the tadpole clock stopping h/w at boot time.
2 *
3 * Copyright (C) 1996 David Redman (djhr@tadpole.co.uk)
4 */
5
6#include <linux/string.h>
7#include <linux/kernel.h>
8#include <linux/sched.h>
9#include <linux/init.h>
10
11#include <asm/asi.h>
12#include <asm/oplib.h>
13#include <asm/io.h>
14
15#define MACIO_SCSI_CSR_ADDR 0x78400000
16#define MACIO_EN_DMA 0x00000200
17#define CLOCK_INIT_DONE 1
18
19static int clk_state;
20static volatile unsigned char *clk_ctrl;
21void (*cpu_pwr_save)(void);
22
23static inline unsigned int ldphys(unsigned int addr)
24{
25 unsigned long data;
26
27 __asm__ __volatile__("\n\tlda [%1] %2, %0\n\t" :
28 "=r" (data) :
29 "r" (addr), "i" (ASI_M_BYPASS));
30 return data;
31}
32
33static void clk_init(void)
34{
35 __asm__ __volatile__("mov 0x6c, %%g1\n\t"
36 "mov 0x4c, %%g2\n\t"
37 "mov 0xdf, %%g3\n\t"
38 "stb %%g1, [%0+3]\n\t"
39 "stb %%g2, [%0+3]\n\t"
40 "stb %%g3, [%0+3]\n\t" : :
41 "r" (clk_ctrl) :
42 "g1", "g2", "g3");
43}
44
45static void clk_slow(void)
46{
47 __asm__ __volatile__("mov 0xcc, %%g2\n\t"
48 "mov 0x4c, %%g3\n\t"
49 "mov 0xcf, %%g4\n\t"
50 "mov 0xdf, %%g5\n\t"
51 "stb %%g2, [%0+3]\n\t"
52 "stb %%g3, [%0+3]\n\t"
53 "stb %%g4, [%0+3]\n\t"
54 "stb %%g5, [%0+3]\n\t" : :
55 "r" (clk_ctrl) :
56 "g2", "g3", "g4", "g5");
57}
58
59/*
60 * Tadpole is guaranteed to be UP, using local_irq_save.
61 */
62static void tsu_clockstop(void)
63{
64 unsigned int mcsr;
65 unsigned long flags;
66
67 if (!clk_ctrl)
68 return;
69 if (!(clk_state & CLOCK_INIT_DONE)) {
70 local_irq_save(flags);
71 clk_init();
72 clk_state |= CLOCK_INIT_DONE; /* all done */
73 local_irq_restore(flags);
74 return;
75 }
76 if (!(clk_ctrl[2] & 1))
77 return; /* no speed up yet */
78
79 local_irq_save(flags);
80
81 /* if SCSI DMA in progress, don't slow clock */
82 mcsr = ldphys(MACIO_SCSI_CSR_ADDR);
83 if ((mcsr&MACIO_EN_DMA) != 0) {
84 local_irq_restore(flags);
85 return;
86 }
87 /* TODO... the minimum clock setting ought to increase the
88 * memory refresh interval.
89 */
90 clk_slow();
91 local_irq_restore(flags);
92}
93
94static void swift_clockstop(void)
95{
96 if (!clk_ctrl)
97 return;
98 clk_ctrl[0] = 0;
99}
100
101void __init clock_stop_probe(void)
102{
103 unsigned int node, clk_nd;
104 char name[20];
105
106 prom_getstring(prom_root_node, "name", name, sizeof(name));
107 if (strncmp(name, "Tadpole", 7))
108 return;
109 node = prom_getchild(prom_root_node);
110 node = prom_searchsiblings(node, "obio");
111 node = prom_getchild(node);
112 clk_nd = prom_searchsiblings(node, "clk-ctrl");
113 if (!clk_nd)
114 return;
115 printk("Clock Stopping h/w detected... ");
116 clk_ctrl = (char *) prom_getint(clk_nd, "address");
117 clk_state = 0;
118 if (name[10] == '\0') {
119 cpu_pwr_save = tsu_clockstop;
120 printk("enabled (S3)\n");
121 } else if ((name[10] == 'X') || (name[10] == 'G')) {
122 cpu_pwr_save = swift_clockstop;
123 printk("enabled (%s)\n",name+7);
124 } else
125 printk("disabled %s\n",name+7);
126}
diff --git a/arch/sparc/kernel/tick14.c b/arch/sparc/kernel/tick14.c
new file mode 100644
index 000000000000..fd8005a3e6bd
--- /dev/null
+++ b/arch/sparc/kernel/tick14.c
@@ -0,0 +1,85 @@
1/* tick14.c
2 * linux/arch/sparc/kernel/tick14.c
3 *
4 * Copyright (C) 1996 David Redman (djhr@tadpole.co.uk)
5 *
6 * This file handles the Sparc specific level 14 ticker.
7 * This is really useful for profiling; OBP uses it for keyboard
8 * aborts and other stuff.
9 *
10 *
11 */
12#include <linux/errno.h>
13#include <linux/sched.h>
14#include <linux/kernel.h>
15#include <linux/param.h>
16#include <linux/string.h>
17#include <linux/mm.h>
18#include <linux/timex.h>
19#include <linux/interrupt.h>
20
21#include <asm/oplib.h>
22#include <asm/segment.h>
23#include <asm/timer.h>
24#include <asm/mostek.h>
25#include <asm/system.h>
26#include <asm/irq.h>
27#include <asm/io.h>
28
29extern unsigned long lvl14_save[5];
30static unsigned long *linux_lvl14 = NULL;
31static unsigned long obp_lvl14[4];
32
33/*
34 * Call with the timer IRQ disabled.
35 * First time we do it with disable_irq, later prom code uses spin_lock_irq().
36 */
37void install_linux_ticker(void)
38{
39
40 if (!linux_lvl14)
41 return;
42 linux_lvl14[0] = lvl14_save[0];
43 linux_lvl14[1] = lvl14_save[1];
44 linux_lvl14[2] = lvl14_save[2];
45 linux_lvl14[3] = lvl14_save[3];
46}
47
48void install_obp_ticker(void)
49{
50
51 if (!linux_lvl14)
52 return;
53 linux_lvl14[0] = obp_lvl14[0];
54 linux_lvl14[1] = obp_lvl14[1];
55 linux_lvl14[2] = obp_lvl14[2];
56 linux_lvl14[3] = obp_lvl14[3];
57}
58
59void claim_ticker14(irqreturn_t (*handler)(int, void *, struct pt_regs *),
60 int irq_nr, unsigned int timeout )
61{
62 int cpu = smp_processor_id();
63
64 /* first we copy the obp handler instructions
65 */
66 disable_irq(irq_nr);
67 if (!handler)
68 return;
69
70 linux_lvl14 = (unsigned long *)lvl14_save[4];
71 obp_lvl14[0] = linux_lvl14[0];
72 obp_lvl14[1] = linux_lvl14[1];
73 obp_lvl14[2] = linux_lvl14[2];
74 obp_lvl14[3] = linux_lvl14[3];
75
76 if (!request_irq(irq_nr,
77 handler,
78 (SA_INTERRUPT | SA_STATIC_ALLOC),
79 "counter14",
80 NULL)) {
81 install_linux_ticker();
82 load_profile_irq(cpu, timeout);
83 enable_irq(irq_nr);
84 }
85}
diff --git a/arch/sparc/kernel/time.c b/arch/sparc/kernel/time.c
new file mode 100644
index 000000000000..6486cbf2efe9
--- /dev/null
+++ b/arch/sparc/kernel/time.c
@@ -0,0 +1,641 @@
1/* $Id: time.c,v 1.60 2002/01/23 14:33:55 davem Exp $
2 * linux/arch/sparc/kernel/time.c
3 *
4 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1996 Thomas K. Dyas (tdyas@eden.rutgers.edu)
6 *
7 * Chris Davis (cdavis@cois.on.ca) 03/27/1998
8 * Added support for the intersil on the sun4/4200
9 *
10 * Gleb Raiko (rajko@mech.math.msu.su) 08/18/1998
11 * Support for MicroSPARC-IIep, PCI CPU.
12 *
13 * This file handles the Sparc specific time handling details.
14 *
15 * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
16 * "A Kernel Model for Precision Timekeeping" by Dave Mills
17 */
18#include <linux/config.h>
19#include <linux/errno.h>
20#include <linux/module.h>
21#include <linux/sched.h>
22#include <linux/kernel.h>
23#include <linux/param.h>
24#include <linux/string.h>
25#include <linux/mm.h>
26#include <linux/interrupt.h>
27#include <linux/time.h>
28#include <linux/timex.h>
29#include <linux/init.h>
30#include <linux/pci.h>
31#include <linux/ioport.h>
32#include <linux/profile.h>
33
34#include <asm/oplib.h>
35#include <asm/segment.h>
36#include <asm/timer.h>
37#include <asm/mostek.h>
38#include <asm/system.h>
39#include <asm/irq.h>
40#include <asm/io.h>
41#include <asm/idprom.h>
42#include <asm/machines.h>
43#include <asm/sun4paddr.h>
44#include <asm/page.h>
45#include <asm/pcic.h>
46
47extern unsigned long wall_jiffies;
48
49u64 jiffies_64 = INITIAL_JIFFIES;
50
51EXPORT_SYMBOL(jiffies_64);
52
53DEFINE_SPINLOCK(rtc_lock);
54enum sparc_clock_type sp_clock_typ;
55DEFINE_SPINLOCK(mostek_lock);
56void __iomem *mstk48t02_regs = NULL;
57static struct mostek48t08 *mstk48t08_regs = NULL;
58static int set_rtc_mmss(unsigned long);
59static int sbus_do_settimeofday(struct timespec *tv);
60
61#ifdef CONFIG_SUN4
62struct intersil *intersil_clock;
63#define intersil_cmd(intersil_reg, intsil_cmd) intersil_reg->int_cmd_reg = \
64 (intsil_cmd)
65
66#define intersil_intr(intersil_reg, intsil_cmd) intersil_reg->int_intr_reg = \
67 (intsil_cmd)
68
69#define intersil_start(intersil_reg) intersil_cmd(intersil_reg, \
70 ( INTERSIL_START | INTERSIL_32K | INTERSIL_NORMAL | INTERSIL_24H |\
71 INTERSIL_INTR_ENABLE))
72
73#define intersil_stop(intersil_reg) intersil_cmd(intersil_reg, \
74 ( INTERSIL_STOP | INTERSIL_32K | INTERSIL_NORMAL | INTERSIL_24H |\
75 INTERSIL_INTR_ENABLE))
76
77#define intersil_read_intr(intersil_reg, towhere) towhere = \
78 intersil_reg->int_intr_reg
79
80#endif
81
82unsigned long profile_pc(struct pt_regs *regs)
83{
84 extern char __copy_user_begin[], __copy_user_end[];
85 extern char __atomic_begin[], __atomic_end[];
86 extern char __bzero_begin[], __bzero_end[];
87 extern char __bitops_begin[], __bitops_end[];
88
89 unsigned long pc = regs->pc;
90
91 if (in_lock_functions(pc) ||
92 (pc >= (unsigned long) __copy_user_begin &&
93 pc < (unsigned long) __copy_user_end) ||
94 (pc >= (unsigned long) __atomic_begin &&
95 pc < (unsigned long) __atomic_end) ||
96 (pc >= (unsigned long) __bzero_begin &&
97 pc < (unsigned long) __bzero_end) ||
98 (pc >= (unsigned long) __bitops_begin &&
99 pc < (unsigned long) __bitops_end))
100 pc = regs->u_regs[UREG_RETPC];
101 return pc;
102}
103
104__volatile__ unsigned int *master_l10_counter;
105__volatile__ unsigned int *master_l10_limit;
106
107/*
108 * timer_interrupt() needs to keep up the real-time clock,
109 * as well as call the "do_timer()" routine every clocktick
110 */
111
112#define TICK_SIZE (tick_nsec / 1000)
113
114irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs * regs)
115{
116 /* last time the cmos clock got updated */
117 static long last_rtc_update;
118
119#ifndef CONFIG_SMP
120 profile_tick(CPU_PROFILING, regs);
121#endif
122
123 /* Protect counter clear so that do_gettimeoffset works */
124 write_seqlock(&xtime_lock);
125#ifdef CONFIG_SUN4
126 if((idprom->id_machtype == (SM_SUN4 | SM_4_260)) ||
127 (idprom->id_machtype == (SM_SUN4 | SM_4_110))) {
128 int temp;
129 intersil_read_intr(intersil_clock, temp);
130 /* re-enable the irq */
131 enable_pil_irq(10);
132 }
133#endif
134 clear_clock_irq();
135
136 do_timer(regs);
137#ifndef CONFIG_SMP
138 update_process_times(user_mode(regs));
139#endif
140
141
142 /* Determine when to update the Mostek clock. */
143 if ((time_status & STA_UNSYNC) == 0 &&
144 xtime.tv_sec > last_rtc_update + 660 &&
145 (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
146 (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
147 if (set_rtc_mmss(xtime.tv_sec) == 0)
148 last_rtc_update = xtime.tv_sec;
149 else
150 last_rtc_update = xtime.tv_sec - 600; /* do it again in 60 s */
151 }
152 write_sequnlock(&xtime_lock);
153
154 return IRQ_HANDLED;
155}
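
The Mostek write-back above happens at most once per 660 seconds, only while NTP reports the clock synchronized, and only within half a tick of the 500 ms mark. A quick user-space evaluation of that window for HZ=100:

/* Illustrative only: the RTC update window used above, for HZ=100. */
#include <stdio.h>

int main(void)
{
        unsigned long tick_nsec = 10000000UL;        /* HZ = 100            */
        unsigned long tick_size = tick_nsec / 1000;  /* 10,000 usec per tick */

        printf("update window: %lu..%lu usec past the second\n",
               500000UL - tick_size / 2, 500000UL + tick_size / 2);
        printf("and only when xtime.tv_sec > last_rtc_update + 660\n");
        return 0;
}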
156
157/* Kick start a stopped clock (procedure from the Sun NVRAM/hostid FAQ). */
158static void __init kick_start_clock(void)
159{
160 struct mostek48t02 *regs = (struct mostek48t02 *)mstk48t02_regs;
161 unsigned char sec;
162 int i, count;
163
164 prom_printf("CLOCK: Clock was stopped. Kick start ");
165
166 spin_lock_irq(&mostek_lock);
167
168 /* Turn on the kick start bit to start the oscillator. */
169 regs->creg |= MSTK_CREG_WRITE;
170 regs->sec &= ~MSTK_STOP;
171 regs->hour |= MSTK_KICK_START;
172 regs->creg &= ~MSTK_CREG_WRITE;
173
174 spin_unlock_irq(&mostek_lock);
175
176 /* Delay to allow the clock oscillator to start. */
177 sec = MSTK_REG_SEC(regs);
178 for (i = 0; i < 3; i++) {
179 while (sec == MSTK_REG_SEC(regs))
180 for (count = 0; count < 100000; count++)
181 /* nothing */ ;
182 prom_printf(".");
183 sec = regs->sec;
184 }
185 prom_printf("\n");
186
187 spin_lock_irq(&mostek_lock);
188
189 /* Turn off kick start and set a "valid" time and date. */
190 regs->creg |= MSTK_CREG_WRITE;
191 regs->hour &= ~MSTK_KICK_START;
192 MSTK_SET_REG_SEC(regs,0);
193 MSTK_SET_REG_MIN(regs,0);
194 MSTK_SET_REG_HOUR(regs,0);
195 MSTK_SET_REG_DOW(regs,5);
196 MSTK_SET_REG_DOM(regs,1);
197 MSTK_SET_REG_MONTH(regs,8);
198 MSTK_SET_REG_YEAR(regs,1996 - MSTK_YEAR_ZERO);
199 regs->creg &= ~MSTK_CREG_WRITE;
200
201 spin_unlock_irq(&mostek_lock);
202
203 /* Ensure the kick start bit is off. If it isn't, turn it off. */
204 while (regs->hour & MSTK_KICK_START) {
205 prom_printf("CLOCK: Kick start still on!\n");
206
207 spin_lock_irq(&mostek_lock);
208 regs->creg |= MSTK_CREG_WRITE;
209 regs->hour &= ~MSTK_KICK_START;
210 regs->creg &= ~MSTK_CREG_WRITE;
211 spin_unlock_irq(&mostek_lock);
212 }
213
214 prom_printf("CLOCK: Kick start procedure successful.\n");
215}
216
217/* Return nonzero if the clock chip battery is low. */
218static __inline__ int has_low_battery(void)
219{
220 struct mostek48t02 *regs = (struct mostek48t02 *)mstk48t02_regs;
221 unsigned char data1, data2;
222
223 spin_lock_irq(&mostek_lock);
224 data1 = regs->eeprom[0]; /* Read some data. */
225 regs->eeprom[0] = ~data1; /* Write back the complement. */
226 data2 = regs->eeprom[0]; /* Read back the complement. */
227 regs->eeprom[0] = data1; /* Restore the original value. */
228 spin_unlock_irq(&mostek_lock);
229
230 return (data1 == data2); /* Was the write blocked? */
231}
232
233/* Probe for the real time clock chip on Sun4 */
234static __inline__ void sun4_clock_probe(void)
235{
236#ifdef CONFIG_SUN4
237 int temp;
238 struct resource r;
239
240 memset(&r, 0, sizeof(r));
241 if( idprom->id_machtype == (SM_SUN4 | SM_4_330) ) {
242 sp_clock_typ = MSTK48T02;
243 r.start = sun4_clock_physaddr;
244 mstk48t02_regs = sbus_ioremap(&r, 0,
245 sizeof(struct mostek48t02), NULL);
246 mstk48t08_regs = NULL; /* To catch weirdness */
247 intersil_clock = NULL; /* just in case */
248
249 /* Kick start the clock if it is completely stopped. */
250 if (mostek_read(mstk48t02_regs + MOSTEK_SEC) & MSTK_STOP)
251 kick_start_clock();
252 } else if( idprom->id_machtype == (SM_SUN4 | SM_4_260)) {
253 /* intersil setup code */
254 printk("Clock: INTERSIL at %8x ",sun4_clock_physaddr);
255 sp_clock_typ = INTERSIL;
256 r.start = sun4_clock_physaddr;
257 intersil_clock = (struct intersil *)
258 sbus_ioremap(&r, 0, sizeof(*intersil_clock), "intersil");
259 mstk48t02_regs = 0; /* just be sure */
260 mstk48t08_regs = NULL; /* ditto */
261 /* initialise the clock */
262
263 intersil_intr(intersil_clock,INTERSIL_INT_100HZ);
264
265 intersil_start(intersil_clock);
266
267 intersil_read_intr(intersil_clock, temp);
268 while (!(temp & 0x80))
269 intersil_read_intr(intersil_clock, temp);
270
271 intersil_read_intr(intersil_clock, temp);
272 while (!(temp & 0x80))
273 intersil_read_intr(intersil_clock, temp);
274
275 intersil_stop(intersil_clock);
276
277 }
278#endif
279}
280
281/* Probe for the mostek real time clock chip. */
282static __inline__ void clock_probe(void)
283{
284 struct linux_prom_registers clk_reg[2];
285 char model[128];
286 register int node, cpuunit, bootbus;
287 struct resource r;
288
289 cpuunit = bootbus = 0;
290 memset(&r, 0, sizeof(r));
291
292 /* Determine the correct starting PROM node for the probe. */
293 node = prom_getchild(prom_root_node);
294 switch (sparc_cpu_model) {
295 case sun4c:
296 break;
297 case sun4m:
298 node = prom_getchild(prom_searchsiblings(node, "obio"));
299 break;
300 case sun4d:
301 node = prom_getchild(bootbus = prom_searchsiblings(prom_getchild(cpuunit = prom_searchsiblings(node, "cpu-unit")), "bootbus"));
302 break;
303 default:
304 prom_printf("CLOCK: Unsupported architecture!\n");
305 prom_halt();
306 }
307
308 /* Find the PROM node describing the real time clock. */
309 sp_clock_typ = MSTK_INVALID;
310 node = prom_searchsiblings(node,"eeprom");
311 if (!node) {
312 prom_printf("CLOCK: No clock found!\n");
313 prom_halt();
314 }
315
316	/* Get the model name and set everything up. */
317 model[0] = '\0';
318 prom_getstring(node, "model", model, sizeof(model));
319 if (strcmp(model, "mk48t02") == 0) {
320 sp_clock_typ = MSTK48T02;
321 if (prom_getproperty(node, "reg", (char *) clk_reg, sizeof(clk_reg)) == -1) {
322 prom_printf("clock_probe: FAILED!\n");
323 prom_halt();
324 }
325 if (sparc_cpu_model == sun4d)
326 prom_apply_generic_ranges (bootbus, cpuunit, clk_reg, 1);
327 else
328 prom_apply_obio_ranges(clk_reg, 1);
329 /* Map the clock register io area read-only */
330 r.flags = clk_reg[0].which_io;
331 r.start = clk_reg[0].phys_addr;
332 mstk48t02_regs = sbus_ioremap(&r, 0,
333 sizeof(struct mostek48t02), "mk48t02");
334 mstk48t08_regs = NULL; /* To catch weirdness */
335 } else if (strcmp(model, "mk48t08") == 0) {
336 sp_clock_typ = MSTK48T08;
337 if(prom_getproperty(node, "reg", (char *) clk_reg,
338 sizeof(clk_reg)) == -1) {
339 prom_printf("clock_probe: FAILED!\n");
340 prom_halt();
341 }
342 if (sparc_cpu_model == sun4d)
343 prom_apply_generic_ranges (bootbus, cpuunit, clk_reg, 1);
344 else
345 prom_apply_obio_ranges(clk_reg, 1);
346 /* Map the clock register io area read-only */
347 /* XXX r/o attribute is somewhere in r.flags */
348 r.flags = clk_reg[0].which_io;
349 r.start = clk_reg[0].phys_addr;
350 mstk48t08_regs = (struct mostek48t08 *) sbus_ioremap(&r, 0,
351 sizeof(struct mostek48t08), "mk48t08");
352
353 mstk48t02_regs = &mstk48t08_regs->regs;
354 } else {
355 prom_printf("CLOCK: Unknown model name '%s'\n",model);
356 prom_halt();
357 }
358
359 /* Report a low battery voltage condition. */
360 if (has_low_battery())
361 printk(KERN_CRIT "NVRAM: Low battery voltage!\n");
362
363 /* Kick start the clock if it is completely stopped. */
364 if (mostek_read(mstk48t02_regs + MOSTEK_SEC) & MSTK_STOP)
365 kick_start_clock();
366}
367
368void __init sbus_time_init(void)
369{
370 unsigned int year, mon, day, hour, min, sec;
371 struct mostek48t02 *mregs;
372
373#ifdef CONFIG_SUN4
374 int temp;
375 struct intersil *iregs;
376#endif
377
378 BTFIXUPSET_CALL(bus_do_settimeofday, sbus_do_settimeofday, BTFIXUPCALL_NORM);
379 btfixup();
380
381 if (ARCH_SUN4)
382 sun4_clock_probe();
383 else
384 clock_probe();
385
386 sparc_init_timers(timer_interrupt);
387
388#ifdef CONFIG_SUN4
389 if(idprom->id_machtype == (SM_SUN4 | SM_4_330)) {
390#endif
391 mregs = (struct mostek48t02 *)mstk48t02_regs;
392 if(!mregs) {
393 prom_printf("Something wrong, clock regs not mapped yet.\n");
394 prom_halt();
395 }
396 spin_lock_irq(&mostek_lock);
397 mregs->creg |= MSTK_CREG_READ;
398 sec = MSTK_REG_SEC(mregs);
399 min = MSTK_REG_MIN(mregs);
400 hour = MSTK_REG_HOUR(mregs);
401 day = MSTK_REG_DOM(mregs);
402 mon = MSTK_REG_MONTH(mregs);
403 year = MSTK_CVT_YEAR( MSTK_REG_YEAR(mregs) );
404 xtime.tv_sec = mktime(year, mon, day, hour, min, sec);
405 xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
406 set_normalized_timespec(&wall_to_monotonic,
407 -xtime.tv_sec, -xtime.tv_nsec);
408 mregs->creg &= ~MSTK_CREG_READ;
409 spin_unlock_irq(&mostek_lock);
410#ifdef CONFIG_SUN4
411 } else if(idprom->id_machtype == (SM_SUN4 | SM_4_260) ) {
412 /* initialise the intersil on sun4 */
413
414 iregs=intersil_clock;
415 if(!iregs) {
416 prom_printf("Something wrong, clock regs not mapped yet.\n");
417 prom_halt();
418 }
419
420 intersil_intr(intersil_clock,INTERSIL_INT_100HZ);
421 disable_pil_irq(10);
422 intersil_stop(iregs);
423 intersil_read_intr(intersil_clock, temp);
424
425 temp = iregs->clk.int_csec;
426
427 sec = iregs->clk.int_sec;
428 min = iregs->clk.int_min;
429 hour = iregs->clk.int_hour;
430 day = iregs->clk.int_day;
431 mon = iregs->clk.int_month;
432 year = MSTK_CVT_YEAR(iregs->clk.int_year);
433
434 enable_pil_irq(10);
435 intersil_start(iregs);
436
437 xtime.tv_sec = mktime(year, mon, day, hour, min, sec);
438 xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
439 set_normalized_timespec(&wall_to_monotonic,
440 -xtime.tv_sec, -xtime.tv_nsec);
441 printk("%u/%u/%u %u:%u:%u\n",day,mon,year,hour,min,sec);
442 }
443#endif
444
445 /* Now that OBP ticker has been silenced, it is safe to enable IRQ. */
446 local_irq_enable();
447}
448
449void __init time_init(void)
450{
451#ifdef CONFIG_PCI
452 extern void pci_time_init(void);
453 if (pcic_present()) {
454 pci_time_init();
455 return;
456 }
457#endif
458 sbus_time_init();
459}
460
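/* Callers (do_gettimeofday, sbus_do_settimeofday) treat the value returned
 * here as the number of microseconds elapsed since the last timer tick,
 * derived from the level-10 counter shifted and masked below.
 */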
461extern __inline__ unsigned long do_gettimeoffset(void)
462{
463 return (*master_l10_counter >> 10) & 0x1fffff;
464}
465
466/*
467 * Returns nanoseconds
468 * XXX This is a suboptimal implementation.
469 */
470unsigned long long sched_clock(void)
471{
472 return (unsigned long long)jiffies * (1000000000 / HZ);
473}
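/* Because this is jiffies based, the returned timestamp only advances once
 * per tick (with HZ == 100, for example, that is a 10 ms step), which is
 * what the "suboptimal" note above refers to.
 */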
474
475/* Ok, my cute asm atomicity trick doesn't work anymore.
476 * There are just too many variables that need to be protected
477 * now (both members of xtime, wall_jiffies, et al.)
478 */
479void do_gettimeofday(struct timeval *tv)
480{
481 unsigned long flags;
482 unsigned long seq;
483 unsigned long usec, sec;
484 unsigned long max_ntp_tick = tick_usec - tickadj;
485
486 do {
487 unsigned long lost;
488
489 seq = read_seqbegin_irqsave(&xtime_lock, flags);
490 usec = do_gettimeoffset();
491 lost = jiffies - wall_jiffies;
492
493 /*
494 * If time_adjust is negative then NTP is slowing the clock
495			 * so make sure not to go into the next possible interval.
496			 * Better to lose some accuracy than have time go backwards.
497 */
498 if (unlikely(time_adjust < 0)) {
499 usec = min(usec, max_ntp_tick);
500
501 if (lost)
502 usec += lost * max_ntp_tick;
503 }
504 else if (unlikely(lost))
505 usec += lost * tick_usec;
506
507 sec = xtime.tv_sec;
508 usec += (xtime.tv_nsec / 1000);
509 } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
510
511 while (usec >= 1000000) {
512 usec -= 1000000;
513 sec++;
514 }
515
516 tv->tv_sec = sec;
517 tv->tv_usec = usec;
518}
519
520EXPORT_SYMBOL(do_gettimeofday);
521
522int do_settimeofday(struct timespec *tv)
523{
524 int ret;
525
526 write_seqlock_irq(&xtime_lock);
527 ret = bus_do_settimeofday(tv);
528 write_sequnlock_irq(&xtime_lock);
529 clock_was_set();
530 return ret;
531}
532
533EXPORT_SYMBOL(do_settimeofday);
534
535static int sbus_do_settimeofday(struct timespec *tv)
536{
537 time_t wtm_sec, sec = tv->tv_sec;
538 long wtm_nsec, nsec = tv->tv_nsec;
539
540 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
541 return -EINVAL;
542
543 /*
544 * This is revolting. We need to set "xtime" correctly. However, the
545 * value in this location is the value at the most recent update of
546 * wall time. Discover what correction gettimeofday() would have
547 * made, and then undo it!
548 */
549 nsec -= 1000 * (do_gettimeoffset() +
550 (jiffies - wall_jiffies) * (USEC_PER_SEC / HZ));
551
552 wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
553 wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
554
555 set_normalized_timespec(&xtime, sec, nsec);
556 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
557
558 time_adjust = 0; /* stop active adjtime() */
559 time_status |= STA_UNSYNC;
560 time_maxerror = NTP_PHASE_LIMIT;
561 time_esterror = NTP_PHASE_LIMIT;
562 return 0;
563}
564
565/*
566 * BUG: This routine does not handle hour overflow properly; it just
567 * sets the minutes. Usually you won't notice until after reboot!
568 */
569static int set_rtc_mmss(unsigned long nowtime)
570{
571 int real_seconds, real_minutes, mostek_minutes;
572 struct mostek48t02 *regs = (struct mostek48t02 *)mstk48t02_regs;
573 unsigned long flags;
574#ifdef CONFIG_SUN4
575 struct intersil *iregs = intersil_clock;
576 int temp;
577#endif
578
579 /* Not having a register set can lead to trouble. */
580 if (!regs) {
581#ifdef CONFIG_SUN4
582 if(!iregs)
583 return -1;
584 else {
585 temp = iregs->clk.int_csec;
586
587 mostek_minutes = iregs->clk.int_min;
588
589 real_seconds = nowtime % 60;
590 real_minutes = nowtime / 60;
591 if (((abs(real_minutes - mostek_minutes) + 15)/30) & 1)
592 real_minutes += 30; /* correct for half hour time zone */
593 real_minutes %= 60;
594
595 if (abs(real_minutes - mostek_minutes) < 30) {
596 intersil_stop(iregs);
597 iregs->clk.int_sec=real_seconds;
598 iregs->clk.int_min=real_minutes;
599 intersil_start(iregs);
600 } else {
601 printk(KERN_WARNING
602 "set_rtc_mmss: can't update from %d to %d\n",
603 mostek_minutes, real_minutes);
604 return -1;
605 }
606
607 return 0;
608 }
609#endif
610 }
611
612 spin_lock_irqsave(&mostek_lock, flags);
613 /* Read the current RTC minutes. */
614 regs->creg |= MSTK_CREG_READ;
615 mostek_minutes = MSTK_REG_MIN(regs);
616 regs->creg &= ~MSTK_CREG_READ;
617
618 /*
619 * since we're only adjusting minutes and seconds,
620 * don't interfere with hour overflow. This avoids
621 * messing with unknown time zones but requires your
622 * RTC not to be off by more than 15 minutes
623 */
624 real_seconds = nowtime % 60;
625 real_minutes = nowtime / 60;
626 if (((abs(real_minutes - mostek_minutes) + 15)/30) & 1)
627 real_minutes += 30; /* correct for half hour time zone */
628 real_minutes %= 60;
629
630 if (abs(real_minutes - mostek_minutes) < 30) {
631 regs->creg |= MSTK_CREG_WRITE;
632 MSTK_SET_REG_SEC(regs,real_seconds);
633 MSTK_SET_REG_MIN(regs,real_minutes);
634 regs->creg &= ~MSTK_CREG_WRITE;
635 spin_unlock_irqrestore(&mostek_lock, flags);
636 return 0;
637 } else {
638 spin_unlock_irqrestore(&mostek_lock, flags);
639 return -1;
640 }
641}
diff --git a/arch/sparc/kernel/trampoline.S b/arch/sparc/kernel/trampoline.S
new file mode 100644
index 000000000000..2dcdaa1fd8cd
--- /dev/null
+++ b/arch/sparc/kernel/trampoline.S
@@ -0,0 +1,162 @@
1/* $Id: trampoline.S,v 1.14 2002/01/11 08:45:38 davem Exp $
2 * trampoline.S: SMP cpu boot-up trampoline code.
3 *
4 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
6 */
7
8#include <linux/init.h>
9#include <asm/head.h>
10#include <asm/psr.h>
11#include <asm/page.h>
12#include <asm/asi.h>
13#include <asm/ptrace.h>
14#include <asm/vaddrs.h>
15#include <asm/contregs.h>
16#include <asm/thread_info.h>
17
18 .globl sun4m_cpu_startup, __smp4m_processor_id
19 .globl sun4d_cpu_startup, __smp4d_processor_id
20
21 __INIT
22 .align 4
23
24/* When we start up a cpu for the first time it enters this routine.
25 * This initializes the chip from whatever state the prom left it
26 * in and sets PIL in %psr to 15, no irqs.
27 */
28
29sun4m_cpu_startup:
30cpu1_startup:
31 sethi %hi(trapbase_cpu1), %g3
32 b 1f
33 or %g3, %lo(trapbase_cpu1), %g3
34
35cpu2_startup:
36 sethi %hi(trapbase_cpu2), %g3
37 b 1f
38 or %g3, %lo(trapbase_cpu2), %g3
39
40cpu3_startup:
41 sethi %hi(trapbase_cpu3), %g3
42 b 1f
43 or %g3, %lo(trapbase_cpu3), %g3
44
451:
46 /* Set up a sane %psr -- PIL<0xf> S<0x1> PS<0x1> CWP<0x0> */
47 set (PSR_PIL | PSR_S | PSR_PS), %g1
48 wr %g1, 0x0, %psr ! traps off though
49 WRITE_PAUSE
50
51 /* Our %wim is one behind CWP */
52 mov 2, %g1
53 wr %g1, 0x0, %wim
54 WRITE_PAUSE
55
56 /* This identifies "this cpu". */
57 wr %g3, 0x0, %tbr
58 WRITE_PAUSE
59
60 /* Give ourselves a stack and curptr. */
61 set current_set, %g5
62 srl %g3, 10, %g4
63 and %g4, 0xc, %g4
64 ld [%g5 + %g4], %g6
65
66 sethi %hi(THREAD_SIZE - STACKFRAME_SZ), %sp
67 or %sp, %lo(THREAD_SIZE - STACKFRAME_SZ), %sp
68 add %g6, %sp, %sp
69
70 /* Turn on traps (PSR_ET). */
71 rd %psr, %g1
72 wr %g1, PSR_ET, %psr ! traps on
73 WRITE_PAUSE
74
75 /* Init our caches, etc. */
76 set poke_srmmu, %g5
77 ld [%g5], %g5
78 call %g5
79 nop
80
81 /* Start this processor. */
82 call smp4m_callin
83 nop
84
85 b,a smp_do_cpu_idle
86
87 .text
88 .align 4
89
90smp_do_cpu_idle:
91 call cpu_idle
92 mov 0, %o0
93
94 call cpu_panic
95 nop
96
97__smp4m_processor_id:
98 rd %tbr, %g2
99 srl %g2, 12, %g2
100 and %g2, 3, %g2
101 retl
102 mov %g1, %o7
103
104__smp4d_processor_id:
105 lda [%g0] ASI_M_VIKING_TMP1, %g2
106 retl
107 mov %g1, %o7
108
109/* CPUID in bootbus can be found at PA 0xff0140000 */
110#define SUN4D_BOOTBUS_CPUID 0xf0140000
111
112 __INIT
113 .align 4
114
115sun4d_cpu_startup:
116 /* Set up a sane %psr -- PIL<0xf> S<0x1> PS<0x1> CWP<0x0> */
117 set (PSR_PIL | PSR_S | PSR_PS), %g1
118 wr %g1, 0x0, %psr ! traps off though
119 WRITE_PAUSE
120
121 /* Our %wim is one behind CWP */
122 mov 2, %g1
123 wr %g1, 0x0, %wim
124 WRITE_PAUSE
125
126 /* Set tbr - we use just one trap table. */
127 set trapbase, %g1
128 wr %g1, 0x0, %tbr
129 WRITE_PAUSE
130
131 /* Get our CPU id out of bootbus */
132 set SUN4D_BOOTBUS_CPUID, %g3
133 lduba [%g3] ASI_M_CTL, %g3
134 and %g3, 0xf8, %g3
135 srl %g3, 3, %g1
136 sta %g1, [%g0] ASI_M_VIKING_TMP1
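	/* Park the id in the Viking TMP1 register so that __smp4d_processor_id
	 * above can fetch it later with a single lda. */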
137
138 /* Give ourselves a stack and curptr. */
139 set current_set, %g5
140 srl %g3, 1, %g4
141 ld [%g5 + %g4], %g6
142
143 sethi %hi(THREAD_SIZE - STACKFRAME_SZ), %sp
144 or %sp, %lo(THREAD_SIZE - STACKFRAME_SZ), %sp
145 add %g6, %sp, %sp
146
147 /* Turn on traps (PSR_ET). */
148 rd %psr, %g1
149 wr %g1, PSR_ET, %psr ! traps on
150 WRITE_PAUSE
151
152 /* Init our caches, etc. */
153 set poke_srmmu, %g5
154 ld [%g5], %g5
155 call %g5
156 nop
157
158 /* Start this processor. */
159 call smp4d_callin
160 nop
161
162 b,a smp_do_cpu_idle
diff --git a/arch/sparc/kernel/traps.c b/arch/sparc/kernel/traps.c
new file mode 100644
index 000000000000..3f451ae66482
--- /dev/null
+++ b/arch/sparc/kernel/traps.c
@@ -0,0 +1,515 @@
1/* $Id: traps.c,v 1.64 2000/09/03 15:00:49 anton Exp $
2 * arch/sparc/kernel/traps.c
3 *
4 * Copyright 1995 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright 2000 Jakub Jelinek (jakub@redhat.com)
6 */
7
8/*
9 * I hate traps on the sparc, grrr...
10 */
11
12#include <linux/config.h>
13#include <linux/sched.h> /* for jiffies */
14#include <linux/kernel.h>
15#include <linux/kallsyms.h>
16#include <linux/signal.h>
17#include <linux/smp.h>
18#include <linux/smp_lock.h>
19
20#include <asm/delay.h>
21#include <asm/system.h>
22#include <asm/ptrace.h>
23#include <asm/oplib.h>
24#include <asm/page.h>
25#include <asm/pgtable.h>
26#include <asm/kdebug.h>
27#include <asm/unistd.h>
28#include <asm/traps.h>
29
30/* #define TRAP_DEBUG */
31
32struct trap_trace_entry {
33 unsigned long pc;
34 unsigned long type;
35};
36
37int trap_curbuf = 0;
38struct trap_trace_entry trapbuf[1024];
39
40void syscall_trace_entry(struct pt_regs *regs)
41{
42 printk("%s[%d]: ", current->comm, current->pid);
43 printk("scall<%d> (could be %d)\n", (int) regs->u_regs[UREG_G1],
44 (int) regs->u_regs[UREG_I0]);
45}
46
47void syscall_trace_exit(struct pt_regs *regs)
48{
49}
50
51void sun4m_nmi(struct pt_regs *regs)
52{
53 unsigned long afsr, afar;
54
55 printk("Aieee: sun4m NMI received!\n");
56 /* XXX HyperSparc hack XXX */
57 __asm__ __volatile__("mov 0x500, %%g1\n\t"
58 "lda [%%g1] 0x4, %0\n\t"
59 "mov 0x600, %%g1\n\t"
60 "lda [%%g1] 0x4, %1\n\t" :
61 "=r" (afsr), "=r" (afar));
62 printk("afsr=%08lx afar=%08lx\n", afsr, afar);
63 printk("you lose buddy boy...\n");
64 show_regs(regs);
65 prom_halt();
66}
67
68void sun4d_nmi(struct pt_regs *regs)
69{
70 printk("Aieee: sun4d NMI received!\n");
71 printk("you lose buddy boy...\n");
72 show_regs(regs);
73 prom_halt();
74}
75
76void instruction_dump (unsigned long *pc)
77{
78 int i;
79
80 if((((unsigned long) pc) & 3))
81 return;
82
83 for(i = -3; i < 6; i++)
84 printk("%c%08lx%c",i?' ':'<',pc[i],i?' ':'>');
85 printk("\n");
86}
87
88#define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
89#define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
90
91void die_if_kernel(char *str, struct pt_regs *regs)
92{
93 static int die_counter;
94 int count = 0;
95
96 /* Amuse the user. */
97 printk(
98" \\|/ ____ \\|/\n"
99" \"@'/ ,. \\`@\"\n"
100" /_| \\__/ |_\\\n"
101" \\__U_/\n");
102
103 printk("%s(%d): %s [#%d]\n", current->comm, current->pid, str, ++die_counter);
104 show_regs(regs);
105
106 __SAVE; __SAVE; __SAVE; __SAVE;
107 __SAVE; __SAVE; __SAVE; __SAVE;
108 __RESTORE; __RESTORE; __RESTORE; __RESTORE;
109 __RESTORE; __RESTORE; __RESTORE; __RESTORE;
110
111 {
112 struct reg_window *rw = (struct reg_window *)regs->u_regs[UREG_FP];
113
114 /* Stop the back trace when we hit userland or we
115 * find some badly aligned kernel stack. Set an upper
116 * bound in case our stack is trashed and we loop.
117 */
118 while(rw &&
119 count++ < 30 &&
120 (((unsigned long) rw) >= PAGE_OFFSET) &&
121 !(((unsigned long) rw) & 0x7)) {
122 printk("Caller[%08lx]", rw->ins[7]);
123 print_symbol(": %s\n", rw->ins[7]);
124 rw = (struct reg_window *)rw->ins[6];
125 }
126 }
127 printk("Instruction DUMP:");
128 instruction_dump ((unsigned long *) regs->pc);
129 if(regs->psr & PSR_PS)
130 do_exit(SIGKILL);
131 do_exit(SIGSEGV);
132}
133
134void do_hw_interrupt(struct pt_regs *regs, unsigned long type)
135{
136 siginfo_t info;
137
138 if(type < 0x80) {
139		/* SunOS pukes from bad traps, Linux survives! */
140 printk("Unimplemented Sparc TRAP, type = %02lx\n", type);
141 die_if_kernel("Whee... Hello Mr. Penguin", regs);
142 }
143
144 if(regs->psr & PSR_PS)
145 die_if_kernel("Kernel bad trap", regs);
146
147 info.si_signo = SIGILL;
148 info.si_errno = 0;
149 info.si_code = ILL_ILLTRP;
150 info.si_addr = (void __user *)regs->pc;
151 info.si_trapno = type - 0x80;
152 force_sig_info(SIGILL, &info, current);
153}
154
155void do_illegal_instruction(struct pt_regs *regs, unsigned long pc, unsigned long npc,
156 unsigned long psr)
157{
158 extern int do_user_muldiv (struct pt_regs *, unsigned long);
159 siginfo_t info;
160
161 if(psr & PSR_PS)
162 die_if_kernel("Kernel illegal instruction", regs);
163#ifdef TRAP_DEBUG
164 printk("Ill instr. at pc=%08lx instruction is %08lx\n",
165 regs->pc, *(unsigned long *)regs->pc);
166#endif
167 if (!do_user_muldiv (regs, pc))
168 return;
169
170 info.si_signo = SIGILL;
171 info.si_errno = 0;
172 info.si_code = ILL_ILLOPC;
173 info.si_addr = (void __user *)pc;
174 info.si_trapno = 0;
175 send_sig_info(SIGILL, &info, current);
176}
177
178void do_priv_instruction(struct pt_regs *regs, unsigned long pc, unsigned long npc,
179 unsigned long psr)
180{
181 siginfo_t info;
182
183 if(psr & PSR_PS)
184 die_if_kernel("Penguin instruction from Penguin mode??!?!", regs);
185 info.si_signo = SIGILL;
186 info.si_errno = 0;
187 info.si_code = ILL_PRVOPC;
188 info.si_addr = (void __user *)pc;
189 info.si_trapno = 0;
190 send_sig_info(SIGILL, &info, current);
191}
192
193/* XXX User may want to be allowed to do this. XXX */
194
195void do_memaccess_unaligned(struct pt_regs *regs, unsigned long pc, unsigned long npc,
196 unsigned long psr)
197{
198 siginfo_t info;
199
200 if(regs->psr & PSR_PS) {
201 printk("KERNEL MNA at pc %08lx npc %08lx called by %08lx\n", pc, npc,
202 regs->u_regs[UREG_RETPC]);
203 die_if_kernel("BOGUS", regs);
204 /* die_if_kernel("Kernel MNA access", regs); */
205 }
206#if 0
207 show_regs (regs);
208 instruction_dump ((unsigned long *) regs->pc);
209 printk ("do_MNA!\n");
210#endif
211 info.si_signo = SIGBUS;
212 info.si_errno = 0;
213 info.si_code = BUS_ADRALN;
214 info.si_addr = /* FIXME: Should dig out mna address */ (void *)0;
215 info.si_trapno = 0;
216 send_sig_info(SIGBUS, &info, current);
217}
218
219extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
220 void *fpqueue, unsigned long *fpqdepth);
221extern void fpload(unsigned long *fpregs, unsigned long *fsr);
222
223static unsigned long init_fsr = 0x0UL;
224static unsigned long init_fregs[32] __attribute__ ((aligned (8))) =
225 { ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL,
226 ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL,
227 ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL,
228 ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL };
229
230void do_fpd_trap(struct pt_regs *regs, unsigned long pc, unsigned long npc,
231 unsigned long psr)
232{
233 /* Sanity check... */
234 if(psr & PSR_PS)
235 die_if_kernel("Kernel gets FloatingPenguinUnit disabled trap", regs);
236
237 put_psr(get_psr() | PSR_EF); /* Allow FPU ops. */
238 regs->psr |= PSR_EF;
239#ifndef CONFIG_SMP
240 if(last_task_used_math == current)
241 return;
242 if(last_task_used_math) {
243		/* Another process's FPU state, save it away */
244 struct task_struct *fptask = last_task_used_math;
245 fpsave(&fptask->thread.float_regs[0], &fptask->thread.fsr,
246 &fptask->thread.fpqueue[0], &fptask->thread.fpqdepth);
247 }
248 last_task_used_math = current;
249 if(used_math()) {
250 fpload(&current->thread.float_regs[0], &current->thread.fsr);
251 } else {
252 /* Set initial sane state. */
253 fpload(&init_fregs[0], &init_fsr);
254 set_used_math();
255 }
256#else
257 if(!used_math()) {
258 fpload(&init_fregs[0], &init_fsr);
259 set_used_math();
260 } else {
261 fpload(&current->thread.float_regs[0], &current->thread.fsr);
262 }
263 current_thread_info()->flags |= _TIF_USEDFPU;
264#endif
265}
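/* On UP this implements lazy FPU switching: the unit is simply re-enabled,
 * and real register state is only saved/reloaded when a task other than
 * last_task_used_math owns it. On SMP the state always follows current,
 * flagged via _TIF_USEDFPU.
 */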
266
267static unsigned long fake_regs[32] __attribute__ ((aligned (8)));
268static unsigned long fake_fsr;
269static unsigned long fake_queue[32] __attribute__ ((aligned (8)));
270static unsigned long fake_depth;
271
272extern int do_mathemu(struct pt_regs *, struct task_struct *);
273
274void do_fpe_trap(struct pt_regs *regs, unsigned long pc, unsigned long npc,
275 unsigned long psr)
276{
277 static int calls;
278 siginfo_t info;
279 unsigned long fsr;
280 int ret = 0;
281#ifndef CONFIG_SMP
282 struct task_struct *fpt = last_task_used_math;
283#else
284 struct task_struct *fpt = current;
285#endif
286 put_psr(get_psr() | PSR_EF);
287 /* If nobody owns the fpu right now, just clear the
288 * error into our fake static buffer and hope it don't
289 * happen again. Thank you crashme...
290 */
291#ifndef CONFIG_SMP
292 if(!fpt) {
293#else
294 if(!(fpt->thread_info->flags & _TIF_USEDFPU)) {
295#endif
296 fpsave(&fake_regs[0], &fake_fsr, &fake_queue[0], &fake_depth);
297 regs->psr &= ~PSR_EF;
298 return;
299 }
300 fpsave(&fpt->thread.float_regs[0], &fpt->thread.fsr,
301 &fpt->thread.fpqueue[0], &fpt->thread.fpqdepth);
302#ifdef DEBUG_FPU
303 printk("Hmm, FP exception, fsr was %016lx\n", fpt->thread.fsr);
304#endif
305
306 switch ((fpt->thread.fsr & 0x1c000)) {
307 /* switch on the contents of the ftt [floating point trap type] field */
308#ifdef DEBUG_FPU
309 case (1 << 14):
310 printk("IEEE_754_exception\n");
311 break;
312#endif
313 case (2 << 14): /* unfinished_FPop (underflow & co) */
314 case (3 << 14): /* unimplemented_FPop (quad stuff, maybe sqrt) */
315 ret = do_mathemu(regs, fpt);
316 break;
317#ifdef DEBUG_FPU
318 case (4 << 14):
319 printk("sequence_error (OS bug...)\n");
320 break;
321 case (5 << 14):
322 printk("hardware_error (uhoh!)\n");
323 break;
324 case (6 << 14):
325 printk("invalid_fp_register (user error)\n");
326 break;
327#endif /* DEBUG_FPU */
328 }
329 /* If we successfully emulated the FPop, we pretend the trap never happened :-> */
330 if (ret) {
331 fpload(&current->thread.float_regs[0], &current->thread.fsr);
332 return;
333 }
334 /* nope, better SIGFPE the offending process... */
335
336#ifdef CONFIG_SMP
337 fpt->thread_info->flags &= ~_TIF_USEDFPU;
338#endif
339 if(psr & PSR_PS) {
340 /* The first fsr store/load we tried trapped,
341 * the second one will not (we hope).
342 */
343 printk("WARNING: FPU exception from kernel mode. at pc=%08lx\n",
344 regs->pc);
345 regs->pc = regs->npc;
346 regs->npc += 4;
347 calls++;
348 if(calls > 2)
349 die_if_kernel("Too many Penguin-FPU traps from kernel mode",
350 regs);
351 return;
352 }
353
354 fsr = fpt->thread.fsr;
355 info.si_signo = SIGFPE;
356 info.si_errno = 0;
357 info.si_addr = (void __user *)pc;
358 info.si_trapno = 0;
359 info.si_code = __SI_FAULT;
360 if ((fsr & 0x1c000) == (1 << 14)) {
361 if (fsr & 0x10)
362 info.si_code = FPE_FLTINV;
363 else if (fsr & 0x08)
364 info.si_code = FPE_FLTOVF;
365 else if (fsr & 0x04)
366 info.si_code = FPE_FLTUND;
367 else if (fsr & 0x02)
368 info.si_code = FPE_FLTDIV;
369 else if (fsr & 0x01)
370 info.si_code = FPE_FLTRES;
371 }
372 send_sig_info(SIGFPE, &info, fpt);
373#ifndef CONFIG_SMP
374 last_task_used_math = NULL;
375#endif
376 regs->psr &= ~PSR_EF;
377 if(calls > 0)
378 calls=0;
379}
380
381void handle_tag_overflow(struct pt_regs *regs, unsigned long pc, unsigned long npc,
382 unsigned long psr)
383{
384 siginfo_t info;
385
386 if(psr & PSR_PS)
387 die_if_kernel("Penguin overflow trap from kernel mode", regs);
388 info.si_signo = SIGEMT;
389 info.si_errno = 0;
390 info.si_code = EMT_TAGOVF;
391 info.si_addr = (void __user *)pc;
392 info.si_trapno = 0;
393 send_sig_info(SIGEMT, &info, current);
394}
395
396void handle_watchpoint(struct pt_regs *regs, unsigned long pc, unsigned long npc,
397 unsigned long psr)
398{
399#ifdef TRAP_DEBUG
400 printk("Watchpoint detected at PC %08lx NPC %08lx PSR %08lx\n",
401 pc, npc, psr);
402#endif
403 if(psr & PSR_PS)
404 panic("Tell me what a watchpoint trap is, and I'll then deal "
405 "with such a beast...");
406}
407
408void handle_reg_access(struct pt_regs *regs, unsigned long pc, unsigned long npc,
409 unsigned long psr)
410{
411 siginfo_t info;
412
413#ifdef TRAP_DEBUG
414 printk("Register Access Exception at PC %08lx NPC %08lx PSR %08lx\n",
415 pc, npc, psr);
416#endif
417 info.si_signo = SIGBUS;
418 info.si_errno = 0;
419 info.si_code = BUS_OBJERR;
420 info.si_addr = (void __user *)pc;
421 info.si_trapno = 0;
422 force_sig_info(SIGBUS, &info, current);
423}
424
425void handle_cp_disabled(struct pt_regs *regs, unsigned long pc, unsigned long npc,
426 unsigned long psr)
427{
428 siginfo_t info;
429
430 info.si_signo = SIGILL;
431 info.si_errno = 0;
432 info.si_code = ILL_COPROC;
433 info.si_addr = (void __user *)pc;
434 info.si_trapno = 0;
435 send_sig_info(SIGILL, &info, current);
436}
437
438void handle_cp_exception(struct pt_regs *regs, unsigned long pc, unsigned long npc,
439 unsigned long psr)
440{
441 siginfo_t info;
442
443#ifdef TRAP_DEBUG
444 printk("Co-Processor Exception at PC %08lx NPC %08lx PSR %08lx\n",
445 pc, npc, psr);
446#endif
447 info.si_signo = SIGILL;
448 info.si_errno = 0;
449 info.si_code = ILL_COPROC;
450 info.si_addr = (void __user *)pc;
451 info.si_trapno = 0;
452 send_sig_info(SIGILL, &info, current);
453}
454
455void handle_hw_divzero(struct pt_regs *regs, unsigned long pc, unsigned long npc,
456 unsigned long psr)
457{
458 siginfo_t info;
459
460 info.si_signo = SIGFPE;
461 info.si_errno = 0;
462 info.si_code = FPE_INTDIV;
463 info.si_addr = (void __user *)pc;
464 info.si_trapno = 0;
465 send_sig_info(SIGFPE, &info, current);
466}
467
468#ifdef CONFIG_DEBUG_BUGVERBOSE
469void do_BUG(const char *file, int line)
470{
471 // bust_spinlocks(1); XXX Not in our original BUG()
472 printk("kernel BUG at %s:%d!\n", file, line);
473}
474#endif
475
476/* Since we have our mappings set up, on multiprocessors we can spin them
477 * up here so that timer interrupts work during initialization.
478 */
479
480extern void sparc_cpu_startup(void);
481
482int linux_smp_still_initting;
483unsigned int thiscpus_tbr;
484int thiscpus_mid;
485
486void trap_init(void)
487{
488 extern void thread_info_offsets_are_bolixed_pete(void);
489
490 /* Force linker to barf if mismatched */
491 if (TI_UWINMASK != offsetof(struct thread_info, uwinmask) ||
492 TI_TASK != offsetof(struct thread_info, task) ||
493 TI_EXECDOMAIN != offsetof(struct thread_info, exec_domain) ||
494 TI_FLAGS != offsetof(struct thread_info, flags) ||
495 TI_CPU != offsetof(struct thread_info, cpu) ||
496 TI_PREEMPT != offsetof(struct thread_info, preempt_count) ||
497 TI_SOFTIRQ != offsetof(struct thread_info, softirq_count) ||
498 TI_HARDIRQ != offsetof(struct thread_info, hardirq_count) ||
499 TI_KSP != offsetof(struct thread_info, ksp) ||
500 TI_KPC != offsetof(struct thread_info, kpc) ||
501 TI_KPSR != offsetof(struct thread_info, kpsr) ||
502 TI_KWIM != offsetof(struct thread_info, kwim) ||
503 TI_REG_WINDOW != offsetof(struct thread_info, reg_window) ||
504 TI_RWIN_SPTRS != offsetof(struct thread_info, rwbuf_stkptrs) ||
505 TI_W_SAVED != offsetof(struct thread_info, w_saved))
506 thread_info_offsets_are_bolixed_pete();
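	/* The callee above is deliberately never defined, so any mismatch between
	 * the assembler TI_* offsets and the C struct layout surfaces as an
	 * unresolved symbol at link time rather than as silent corruption.
	 */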
507
508 /* Attach to the address space of init_task. */
509 atomic_inc(&init_mm.mm_count);
510 current->active_mm = &init_mm;
511
512 /* NOTE: Other cpus have this done as they are started
513 * up on SMP.
514 */
515}
diff --git a/arch/sparc/kernel/unaligned.c b/arch/sparc/kernel/unaligned.c
new file mode 100644
index 000000000000..a6330fbc9dd9
--- /dev/null
+++ b/arch/sparc/kernel/unaligned.c
@@ -0,0 +1,548 @@
1/* $Id: unaligned.c,v 1.23 2001/12/21 00:54:31 davem Exp $
2 * unaligned.c: Unaligned load/store trap handling with special
3 * cases for the kernel to do them more quickly.
4 *
5 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
6 * Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
7 */
8
9
10#include <linux/kernel.h>
11#include <linux/sched.h>
12#include <linux/mm.h>
13#include <linux/module.h>
14#include <asm/ptrace.h>
15#include <asm/processor.h>
16#include <asm/system.h>
17#include <asm/uaccess.h>
18#include <linux/smp.h>
19#include <linux/smp_lock.h>
20
21/* #define DEBUG_MNA */
22
23enum direction {
24 load, /* ld, ldd, ldh, ldsh */
25 store, /* st, std, sth, stsh */
26 both, /* Swap, ldstub, etc. */
27 fpload,
28 fpstore,
29 invalid,
30};
31
32#ifdef DEBUG_MNA
33static char *dirstrings[] = {
34 "load", "store", "both", "fpload", "fpstore", "invalid"
35};
36#endif
37
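/* Bit 21 of the instruction (bit 2 of the op3 field) is set for stores, and
 * an op3 of 0x0f (SWAP) is reported as 'both'; everything else that reaches
 * this handler is treated as a plain load or store.
 */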
38static inline enum direction decode_direction(unsigned int insn)
39{
40 unsigned long tmp = (insn >> 21) & 1;
41
42 if(!tmp)
43 return load;
44 else {
45 if(((insn>>19)&0x3f) == 15)
46 return both;
47 else
48 return store;
49 }
50}
51
52/* 8 = double-word, 4 = word, 2 = half-word */
53static inline int decode_access_size(unsigned int insn)
54{
55 insn = (insn >> 19) & 3;
56
57 if(!insn)
58 return 4;
59 else if(insn == 3)
60 return 8;
61 else if(insn == 2)
62 return 2;
63 else {
64 printk("Impossible unaligned trap. insn=%08x\n", insn);
65 die_if_kernel("Byte sized unaligned access?!?!", current->thread.kregs);
66 return 4; /* just to keep gcc happy. */
67 }
68}
69
70/* 0x400000 = signed, 0 = unsigned */
71static inline int decode_signedness(unsigned int insn)
72{
73 return (insn & 0x400000);
74}
75
76static inline void maybe_flush_windows(unsigned int rs1, unsigned int rs2,
77 unsigned int rd)
78{
79 if(rs2 >= 16 || rs1 >= 16 || rd >= 16) {
80 /* Wheee... */
81 __asm__ __volatile__("save %sp, -0x40, %sp\n\t"
82 "save %sp, -0x40, %sp\n\t"
83 "save %sp, -0x40, %sp\n\t"
84 "save %sp, -0x40, %sp\n\t"
85 "save %sp, -0x40, %sp\n\t"
86 "save %sp, -0x40, %sp\n\t"
87 "save %sp, -0x40, %sp\n\t"
88 "restore; restore; restore; restore;\n\t"
89 "restore; restore; restore;\n\t");
90 }
91}
92
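/* The signed 13-bit immediate occupies bits 12..0; shifting it left by 19
 * places its sign bit at bit 31 of the int, and the arithmetic shift back
 * replicates that sign, e.g. an imm13 of 0x1fff comes out as -1.
 */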
93static inline int sign_extend_imm13(int imm)
94{
95 return imm << 19 >> 19;
96}
97
98static inline unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs)
99{
100 struct reg_window *win;
101
102 if(reg < 16)
103 return (!reg ? 0 : regs->u_regs[reg]);
104
105 /* Ho hum, the slightly complicated case. */
106 win = (struct reg_window *) regs->u_regs[UREG_FP];
107 return win->locals[reg - 16]; /* yes, I know what this does... */
108}
109
110static inline unsigned long safe_fetch_reg(unsigned int reg, struct pt_regs *regs)
111{
112 struct reg_window __user *win;
113 unsigned long ret;
114
115 if (reg < 16)
116 return (!reg ? 0 : regs->u_regs[reg]);
117
118 /* Ho hum, the slightly complicated case. */
119 win = (struct reg_window __user *) regs->u_regs[UREG_FP];
120
121 if ((unsigned long)win & 3)
122 return -1;
123
124 if (get_user(ret, &win->locals[reg - 16]))
125 return -1;
126
127 return ret;
128}
129
130static inline unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs)
131{
132 struct reg_window *win;
133
134 if(reg < 16)
135 return &regs->u_regs[reg];
136 win = (struct reg_window *) regs->u_regs[UREG_FP];
137 return &win->locals[reg - 16];
138}
139
140static unsigned long compute_effective_address(struct pt_regs *regs,
141 unsigned int insn)
142{
143 unsigned int rs1 = (insn >> 14) & 0x1f;
144 unsigned int rs2 = insn & 0x1f;
145 unsigned int rd = (insn >> 25) & 0x1f;
146
147 if(insn & 0x2000) {
148 maybe_flush_windows(rs1, 0, rd);
149 return (fetch_reg(rs1, regs) + sign_extend_imm13(insn));
150 } else {
151 maybe_flush_windows(rs1, rs2, rd);
152 return (fetch_reg(rs1, regs) + fetch_reg(rs2, regs));
153 }
154}
155
156unsigned long safe_compute_effective_address(struct pt_regs *regs,
157 unsigned int insn)
158{
159 unsigned int rs1 = (insn >> 14) & 0x1f;
160 unsigned int rs2 = insn & 0x1f;
161 unsigned int rd = (insn >> 25) & 0x1f;
162
163 if(insn & 0x2000) {
164 maybe_flush_windows(rs1, 0, rd);
165 return (safe_fetch_reg(rs1, regs) + sign_extend_imm13(insn));
166 } else {
167 maybe_flush_windows(rs1, rs2, rd);
168 return (safe_fetch_reg(rs1, regs) + safe_fetch_reg(rs2, regs));
169 }
170}
171
172/* This is just to make gcc think panic does return... */
173static void unaligned_panic(char *str)
174{
175 panic(str);
176}
177
178#define do_integer_load(dest_reg, size, saddr, is_signed, errh) ({ \
179__asm__ __volatile__ ( \
180 "cmp %1, 8\n\t" \
181 "be 9f\n\t" \
182 " cmp %1, 4\n\t" \
183 "be 6f\n" \
184"4:\t" " ldub [%2], %%l1\n" \
185"5:\t" "ldub [%2 + 1], %%l2\n\t" \
186 "sll %%l1, 8, %%l1\n\t" \
187 "tst %3\n\t" \
188 "be 3f\n\t" \
189 " add %%l1, %%l2, %%l1\n\t" \
190 "sll %%l1, 16, %%l1\n\t" \
191 "sra %%l1, 16, %%l1\n" \
192"3:\t" "b 0f\n\t" \
193 " st %%l1, [%0]\n" \
194"6:\t" "ldub [%2 + 1], %%l2\n\t" \
195 "sll %%l1, 24, %%l1\n" \
196"7:\t" "ldub [%2 + 2], %%g7\n\t" \
197 "sll %%l2, 16, %%l2\n" \
198"8:\t" "ldub [%2 + 3], %%g1\n\t" \
199 "sll %%g7, 8, %%g7\n\t" \
200 "or %%l1, %%l2, %%l1\n\t" \
201 "or %%g7, %%g1, %%g7\n\t" \
202 "or %%l1, %%g7, %%l1\n\t" \
203 "b 0f\n\t" \
204 " st %%l1, [%0]\n" \
205"9:\t" "ldub [%2], %%l1\n" \
206"10:\t" "ldub [%2 + 1], %%l2\n\t" \
207 "sll %%l1, 24, %%l1\n" \
208"11:\t" "ldub [%2 + 2], %%g7\n\t" \
209 "sll %%l2, 16, %%l2\n" \
210"12:\t" "ldub [%2 + 3], %%g1\n\t" \
211 "sll %%g7, 8, %%g7\n\t" \
212 "or %%l1, %%l2, %%l1\n\t" \
213 "or %%g7, %%g1, %%g7\n\t" \
214 "or %%l1, %%g7, %%g7\n" \
215"13:\t" "ldub [%2 + 4], %%l1\n\t" \
216 "st %%g7, [%0]\n" \
217"14:\t" "ldub [%2 + 5], %%l2\n\t" \
218 "sll %%l1, 24, %%l1\n" \
219"15:\t" "ldub [%2 + 6], %%g7\n\t" \
220 "sll %%l2, 16, %%l2\n" \
221"16:\t" "ldub [%2 + 7], %%g1\n\t" \
222 "sll %%g7, 8, %%g7\n\t" \
223 "or %%l1, %%l2, %%l1\n\t" \
224 "or %%g7, %%g1, %%g7\n\t" \
225 "or %%l1, %%g7, %%g7\n\t" \
226 "st %%g7, [%0 + 4]\n" \
227"0:\n\n\t" \
228 ".section __ex_table,#alloc\n\t" \
229 ".word 4b, " #errh "\n\t" \
230 ".word 5b, " #errh "\n\t" \
231 ".word 6b, " #errh "\n\t" \
232 ".word 7b, " #errh "\n\t" \
233 ".word 8b, " #errh "\n\t" \
234 ".word 9b, " #errh "\n\t" \
235 ".word 10b, " #errh "\n\t" \
236 ".word 11b, " #errh "\n\t" \
237 ".word 12b, " #errh "\n\t" \
238 ".word 13b, " #errh "\n\t" \
239 ".word 14b, " #errh "\n\t" \
240 ".word 15b, " #errh "\n\t" \
241 ".word 16b, " #errh "\n\n\t" \
242 ".previous\n\t" \
243 : : "r" (dest_reg), "r" (size), "r" (saddr), "r" (is_signed) \
244 : "l1", "l2", "g7", "g1", "cc"); \
245})
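/* Each ldub above that may touch a bad address gets a matching __ex_table
 * entry pointing at the errh label supplied by the caller, so a fault inside
 * the macro is redirected there instead of oopsing.
 */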
246
247#define store_common(dst_addr, size, src_val, errh) ({ \
248__asm__ __volatile__ ( \
249 "ld [%2], %%l1\n" \
250 "cmp %1, 2\n\t" \
251 "be 2f\n\t" \
252 " cmp %1, 4\n\t" \
253 "be 1f\n\t" \
254 " srl %%l1, 24, %%l2\n\t" \
255 "srl %%l1, 16, %%g7\n" \
256"4:\t" "stb %%l2, [%0]\n\t" \
257 "srl %%l1, 8, %%l2\n" \
258"5:\t" "stb %%g7, [%0 + 1]\n\t" \
259 "ld [%2 + 4], %%g7\n" \
260"6:\t" "stb %%l2, [%0 + 2]\n\t" \
261 "srl %%g7, 24, %%l2\n" \
262"7:\t" "stb %%l1, [%0 + 3]\n\t" \
263 "srl %%g7, 16, %%l1\n" \
264"8:\t" "stb %%l2, [%0 + 4]\n\t" \
265 "srl %%g7, 8, %%l2\n" \
266"9:\t" "stb %%l1, [%0 + 5]\n" \
267"10:\t" "stb %%l2, [%0 + 6]\n\t" \
268 "b 0f\n" \
269"11:\t" " stb %%g7, [%0 + 7]\n" \
270"1:\t" "srl %%l1, 16, %%g7\n" \
271"12:\t" "stb %%l2, [%0]\n\t" \
272 "srl %%l1, 8, %%l2\n" \
273"13:\t" "stb %%g7, [%0 + 1]\n" \
274"14:\t" "stb %%l2, [%0 + 2]\n\t" \
275 "b 0f\n" \
276"15:\t" " stb %%l1, [%0 + 3]\n" \
277"2:\t" "srl %%l1, 8, %%l2\n" \
278"16:\t" "stb %%l2, [%0]\n" \
279"17:\t" "stb %%l1, [%0 + 1]\n" \
280"0:\n\n\t" \
281 ".section __ex_table,#alloc\n\t" \
282 ".word 4b, " #errh "\n\t" \
283 ".word 5b, " #errh "\n\t" \
284 ".word 6b, " #errh "\n\t" \
285 ".word 7b, " #errh "\n\t" \
286 ".word 8b, " #errh "\n\t" \
287 ".word 9b, " #errh "\n\t" \
288 ".word 10b, " #errh "\n\t" \
289 ".word 11b, " #errh "\n\t" \
290 ".word 12b, " #errh "\n\t" \
291 ".word 13b, " #errh "\n\t" \
292 ".word 14b, " #errh "\n\t" \
293 ".word 15b, " #errh "\n\t" \
294 ".word 16b, " #errh "\n\t" \
295 ".word 17b, " #errh "\n\n\t" \
296 ".previous\n\t" \
297 : : "r" (dst_addr), "r" (size), "r" (src_val) \
298 : "l1", "l2", "g7", "g1", "cc"); \
299})
300
301#define do_integer_store(reg_num, size, dst_addr, regs, errh) ({ \
302 unsigned long *src_val; \
303 static unsigned long zero[2] = { 0, }; \
304 \
305 if (reg_num) src_val = fetch_reg_addr(reg_num, regs); \
306 else { \
307 src_val = &zero[0]; \
308 if (size == 8) \
309 zero[1] = fetch_reg(1, regs); \
310 } \
311 store_common(dst_addr, size, src_val, errh); \
312})
313
314extern void smp_capture(void);
315extern void smp_release(void);
316
317#define do_atomic(srcdest_reg, mem, errh) ({ \
318 unsigned long flags, tmp; \
319 \
320 smp_capture(); \
321 local_irq_save(flags); \
322 tmp = *srcdest_reg; \
323 do_integer_load(srcdest_reg, 4, mem, 0, errh); \
324 store_common(mem, 4, &tmp, errh); \
325 local_irq_restore(flags); \
326 smp_release(); \
327})
328
329static inline void advance(struct pt_regs *regs)
330{
331 regs->pc = regs->npc;
332 regs->npc += 4;
333}
334
335static inline int floating_point_load_or_store_p(unsigned int insn)
336{
337 return (insn >> 24) & 1;
338}
339
340static inline int ok_for_kernel(unsigned int insn)
341{
342 return !floating_point_load_or_store_p(insn);
343}
344
345void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn) __asm__ ("kernel_mna_trap_fault");
346
347void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn)
348{
349 unsigned long g2 = regs->u_regs [UREG_G2];
350 unsigned long fixup = search_extables_range(regs->pc, &g2);
351
352 if (!fixup) {
353 unsigned long address = compute_effective_address(regs, insn);
354 if(address < PAGE_SIZE) {
355 printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference in mna handler");
356 } else
357 printk(KERN_ALERT "Unable to handle kernel paging request in mna handler");
358 printk(KERN_ALERT " at virtual address %08lx\n",address);
359 printk(KERN_ALERT "current->{mm,active_mm}->context = %08lx\n",
360 (current->mm ? current->mm->context :
361 current->active_mm->context));
362 printk(KERN_ALERT "current->{mm,active_mm}->pgd = %08lx\n",
363 (current->mm ? (unsigned long) current->mm->pgd :
364 (unsigned long) current->active_mm->pgd));
365 die_if_kernel("Oops", regs);
366 /* Not reached */
367 }
368 regs->pc = fixup;
369 regs->npc = regs->pc + 4;
370 regs->u_regs [UREG_G2] = g2;
371}
372
373asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
374{
375 enum direction dir = decode_direction(insn);
376 int size = decode_access_size(insn);
377
378 if(!ok_for_kernel(insn) || dir == both) {
379 printk("Unsupported unaligned load/store trap for kernel at <%08lx>.\n",
380 regs->pc);
381 unaligned_panic("Wheee. Kernel does fpu/atomic unaligned load/store.");
382
383 __asm__ __volatile__ ("\n"
384"kernel_unaligned_trap_fault:\n\t"
385 "mov %0, %%o0\n\t"
386 "call kernel_mna_trap_fault\n\t"
387 " mov %1, %%o1\n\t"
388 :
389 : "r" (regs), "r" (insn)
390 : "o0", "o1", "o2", "o3", "o4", "o5", "o7",
391 "g1", "g2", "g3", "g4", "g5", "g7", "cc");
392 } else {
393 unsigned long addr = compute_effective_address(regs, insn);
394
395#ifdef DEBUG_MNA
396 printk("KMNA: pc=%08lx [dir=%s addr=%08lx size=%d] retpc[%08lx]\n",
397 regs->pc, dirstrings[dir], addr, size, regs->u_regs[UREG_RETPC]);
398#endif
399 switch(dir) {
400 case load:
401 do_integer_load(fetch_reg_addr(((insn>>25)&0x1f), regs),
402 size, (unsigned long *) addr,
403 decode_signedness(insn),
404 kernel_unaligned_trap_fault);
405 break;
406
407 case store:
408 do_integer_store(((insn>>25)&0x1f), size,
409 (unsigned long *) addr, regs,
410 kernel_unaligned_trap_fault);
411 break;
412#if 0 /* unsupported */
413 case both:
414 do_atomic(fetch_reg_addr(((insn>>25)&0x1f), regs),
415 (unsigned long *) addr,
416 kernel_unaligned_trap_fault);
417 break;
418#endif
419 default:
420 panic("Impossible kernel unaligned trap.");
421 /* Not reached... */
422 }
423 advance(regs);
424 }
425}
426
427static inline int ok_for_user(struct pt_regs *regs, unsigned int insn,
428 enum direction dir)
429{
430 unsigned int reg;
431 int check = (dir == load) ? VERIFY_READ : VERIFY_WRITE;
432 int size = ((insn >> 19) & 3) == 3 ? 8 : 4;
433
434 if ((regs->pc | regs->npc) & 3)
435 return 0;
436
437 /* Must access_ok() in all the necessary places. */
438#define WINREG_ADDR(regnum) \
439 ((void __user *)(((unsigned long *)regs->u_regs[UREG_FP])+(regnum)))
440
441 reg = (insn >> 25) & 0x1f;
442 if (reg >= 16) {
443 if (!access_ok(check, WINREG_ADDR(reg - 16), size))
444 return -EFAULT;
445 }
446 reg = (insn >> 14) & 0x1f;
447 if (reg >= 16) {
448 if (!access_ok(check, WINREG_ADDR(reg - 16), size))
449 return -EFAULT;
450 }
451 if (!(insn & 0x2000)) {
452 reg = (insn & 0x1f);
453 if (reg >= 16) {
454 if (!access_ok(check, WINREG_ADDR(reg - 16), size))
455 return -EFAULT;
456 }
457 }
458#undef WINREG_ADDR
459 return 0;
460}
461
462void user_mna_trap_fault(struct pt_regs *regs, unsigned int insn) __asm__ ("user_mna_trap_fault");
463
464void user_mna_trap_fault(struct pt_regs *regs, unsigned int insn)
465{
466 siginfo_t info;
467
468 info.si_signo = SIGBUS;
469 info.si_errno = 0;
470 info.si_code = BUS_ADRALN;
471 info.si_addr = (void __user *)safe_compute_effective_address(regs, insn);
472 info.si_trapno = 0;
473 send_sig_info(SIGBUS, &info, current);
474}
475
476asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn)
477{
478 enum direction dir;
479
480 lock_kernel();
481 if(!(current->thread.flags & SPARC_FLAG_UNALIGNED) ||
482 (((insn >> 30) & 3) != 3))
483 goto kill_user;
484 dir = decode_direction(insn);
485 if(!ok_for_user(regs, insn, dir)) {
486 goto kill_user;
487 } else {
488 int size = decode_access_size(insn);
489 unsigned long addr;
490
491 if(floating_point_load_or_store_p(insn)) {
492 printk("User FPU load/store unaligned unsupported.\n");
493 goto kill_user;
494 }
495
496 addr = compute_effective_address(regs, insn);
497 switch(dir) {
498 case load:
499 do_integer_load(fetch_reg_addr(((insn>>25)&0x1f), regs),
500 size, (unsigned long *) addr,
501 decode_signedness(insn),
502 user_unaligned_trap_fault);
503 break;
504
505 case store:
506 do_integer_store(((insn>>25)&0x1f), size,
507 (unsigned long *) addr, regs,
508 user_unaligned_trap_fault);
509 break;
510
511 case both:
512#if 0 /* unsupported */
513 do_atomic(fetch_reg_addr(((insn>>25)&0x1f), regs),
514 (unsigned long *) addr,
515 user_unaligned_trap_fault);
516#else
517 /*
518 * This was supported in 2.4. However, we question
519			 * the value of the SWAP instruction across word boundaries.
520 */
521 printk("Unaligned SWAP unsupported.\n");
522 goto kill_user;
523#endif
524 break;
525
526 default:
527 unaligned_panic("Impossible user unaligned trap.");
528
529 __asm__ __volatile__ ("\n"
530"user_unaligned_trap_fault:\n\t"
531 "mov %0, %%o0\n\t"
532 "call user_mna_trap_fault\n\t"
533 " mov %1, %%o1\n\t"
534 :
535 : "r" (regs), "r" (insn)
536 : "o0", "o1", "o2", "o3", "o4", "o5", "o7",
537 "g1", "g2", "g3", "g4", "g5", "g7", "cc");
538 goto out;
539 }
540 advance(regs);
541 goto out;
542 }
543
544kill_user:
545 user_mna_trap_fault(regs, insn);
546out:
547 unlock_kernel();
548}
diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
new file mode 100644
index 000000000000..38938d2e63aa
--- /dev/null
+++ b/arch/sparc/kernel/vmlinux.lds.S
@@ -0,0 +1,103 @@
1/* ld script to make SparcLinux kernel */
2
3#include <asm-generic/vmlinux.lds.h>
4
5OUTPUT_FORMAT("elf32-sparc", "elf32-sparc", "elf32-sparc")
6OUTPUT_ARCH(sparc)
7ENTRY(_start)
8jiffies = jiffies_64 + 4;
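/* sparc is big-endian, so the low 32 bits of the 64-bit jiffies_64 counter
 * live 4 bytes into it; this alias lets 32-bit code keep using "jiffies". */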
9SECTIONS
10{
11 . = 0x10000 + SIZEOF_HEADERS;
12 .text 0xf0004000 :
13 {
14 *(.text)
15 SCHED_TEXT
16 LOCK_TEXT
17 *(.gnu.warning)
18 } =0
19 _etext = .;
20 PROVIDE (etext = .);
21 RODATA
22 .data :
23 {
24 *(.data)
25 CONSTRUCTORS
26 }
27 .data1 : { *(.data1) }
28 _edata = .;
29 PROVIDE (edata = .);
30 __start___fixup = .;
31 .fixup : { *(.fixup) }
32 __stop___fixup = .;
33 __start___ex_table = .;
34 __ex_table : { *(__ex_table) }
35 __stop___ex_table = .;
36
37 . = ALIGN(4096);
38 __init_begin = .;
39 .init.text : {
40 _sinittext = .;
41 *(.init.text)
42 _einittext = .;
43 }
44 __init_text_end = .;
45 .init.data : { *(.init.data) }
46 . = ALIGN(16);
47 __setup_start = .;
48 .init.setup : { *(.init.setup) }
49 __setup_end = .;
50 __initcall_start = .;
51 .initcall.init : {
52 *(.initcall1.init)
53 *(.initcall2.init)
54 *(.initcall3.init)
55 *(.initcall4.init)
56 *(.initcall5.init)
57 *(.initcall6.init)
58 *(.initcall7.init)
59 }
60 __initcall_end = .;
61 __con_initcall_start = .;
62 .con_initcall.init : { *(.con_initcall.init) }
63 __con_initcall_end = .;
64 SECURITY_INIT
65 . = ALIGN(4096);
66 __initramfs_start = .;
67 .init.ramfs : { *(.init.ramfs) }
68 __initramfs_end = .;
69 . = ALIGN(32);
70 __per_cpu_start = .;
71 .data.percpu : { *(.data.percpu) }
72 __per_cpu_end = .;
73 . = ALIGN(4096);
74 __init_end = .;
75 . = ALIGN(32);
76 .data.cacheline_aligned : { *(.data.cacheline_aligned) }
77
78 __bss_start = .;
79 .sbss : { *(.sbss) *(.scommon) }
80 .bss :
81 {
82 *(.dynbss)
83 *(.bss)
84 *(COMMON)
85 }
86 _end = . ;
87 PROVIDE (end = .);
88 /* Stabs debugging sections. */
89 .stab 0 : { *(.stab) }
90 .stabstr 0 : { *(.stabstr) }
91 .stab.excl 0 : { *(.stab.excl) }
92 .stab.exclstr 0 : { *(.stab.exclstr) }
93 .stab.index 0 : { *(.stab.index) }
94 .stab.indexstr 0 : { *(.stab.indexstr) }
95 .comment 0 : { *(.comment) }
96 .debug 0 : { *(.debug) }
97 .debug_srcinfo 0 : { *(.debug_srcinfo) }
98 .debug_aranges 0 : { *(.debug_aranges) }
99 .debug_pubnames 0 : { *(.debug_pubnames) }
100 .debug_sfnames 0 : { *(.debug_sfnames) }
101 .line 0 : { *(.line) }
102 /DISCARD/ : { *(.exit.text) *(.exit.data) *(.exitcall.exit) }
103}
diff --git a/arch/sparc/kernel/windows.c b/arch/sparc/kernel/windows.c
new file mode 100644
index 000000000000..9cc93eaa4abf
--- /dev/null
+++ b/arch/sparc/kernel/windows.c
@@ -0,0 +1,127 @@
1/* windows.c: Routines to deal with register window management
2 * at the C-code level.
3 *
4 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
5 */
6
7#include <linux/kernel.h>
8#include <linux/sched.h>
9#include <linux/string.h>
10#include <linux/mm.h>
11#include <linux/smp.h>
12#include <linux/smp_lock.h>
13
14#include <asm/uaccess.h>
15
16/* Do save's until all user register windows are out of the cpu. */
17void flush_user_windows(void)
18{
19 register int ctr asm("g5");
20
21 ctr = 0;
22 __asm__ __volatile__(
23 "\n1:\n\t"
24 "ld [%%g6 + %2], %%g4\n\t"
25 "orcc %%g0, %%g4, %%g0\n\t"
26 "add %0, 1, %0\n\t"
27 "bne 1b\n\t"
28 " save %%sp, -64, %%sp\n"
29 "2:\n\t"
30 "subcc %0, 1, %0\n\t"
31 "bne 2b\n\t"
32 " restore %%g0, %%g0, %%g0\n"
33 : "=&r" (ctr)
34 : "0" (ctr),
35 "i" ((const unsigned long)TI_UWINMASK)
36 : "g4", "cc");
37}
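/* Each save above that runs into an invalid window traps into the overflow
 * handler (wof.S), which spills the user window and clears its bit in
 * uwinmask; once the mask reads zero, the matching chain of restores simply
 * unwinds the saves done here.
 */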
38
39static inline void shift_window_buffer(int first_win, int last_win, struct thread_info *tp)
40{
41 int i;
42
43 for(i = first_win; i < last_win; i++) {
44 tp->rwbuf_stkptrs[i] = tp->rwbuf_stkptrs[i+1];
45 memcpy(&tp->reg_window[i], &tp->reg_window[i+1], sizeof(struct reg_window));
46 }
47}
48
49/* Place as many of the user's current register windows
50 * on the stack as we can. Even if the %sp is unaligned
51 * we still copy the window there; the only case in which we don't
52 * succeed is if the %sp points to a bum mapping altogether.
53 * setup_frame() and do_sigreturn() use this before shifting
54 * the user stack around. Future instruction and hardware
55 * bug workaround routines will need this functionality as
56 * well.
57 */
58void synchronize_user_stack(void)
59{
60 struct thread_info *tp = current_thread_info();
61 int window;
62
63 flush_user_windows();
64 if(!tp->w_saved)
65 return;
66
67 /* Ok, there is some dirty work to do. */
68 for(window = tp->w_saved - 1; window >= 0; window--) {
69 unsigned long sp = tp->rwbuf_stkptrs[window];
70
71 /* Ok, let it rip. */
72 if (copy_to_user((char __user *) sp, &tp->reg_window[window],
73 sizeof(struct reg_window)))
74 continue;
75
76 shift_window_buffer(window, tp->w_saved - 1, tp);
77 tp->w_saved--;
78 }
79}
80
81#if 0
82/* An optimization. */
83static inline void copy_aligned_window(void *dest, const void *src)
84{
85 __asm__ __volatile__("ldd [%1], %%g2\n\t"
86 "ldd [%1 + 0x8], %%g4\n\t"
87 "std %%g2, [%0]\n\t"
88 "std %%g4, [%0 + 0x8]\n\t"
89 "ldd [%1 + 0x10], %%g2\n\t"
90 "ldd [%1 + 0x18], %%g4\n\t"
91 "std %%g2, [%0 + 0x10]\n\t"
92 "std %%g4, [%0 + 0x18]\n\t"
93 "ldd [%1 + 0x20], %%g2\n\t"
94 "ldd [%1 + 0x28], %%g4\n\t"
95 "std %%g2, [%0 + 0x20]\n\t"
96 "std %%g4, [%0 + 0x28]\n\t"
97 "ldd [%1 + 0x30], %%g2\n\t"
98 "ldd [%1 + 0x38], %%g4\n\t"
99 "std %%g2, [%0 + 0x30]\n\t"
100 "std %%g4, [%0 + 0x38]\n\t" : :
101 "r" (dest), "r" (src) :
102 "g2", "g3", "g4", "g5");
103}
104#endif
105
106/* Try to push the windows in a threads window buffer to the
107 * user stack. Unaligned %sp's are not allowed here.
108 */
109
110void try_to_clear_window_buffer(struct pt_regs *regs, int who)
111{
112 struct thread_info *tp = current_thread_info();
113 int window;
114
115 lock_kernel();
116 flush_user_windows();
117 for(window = 0; window < tp->w_saved; window++) {
118 unsigned long sp = tp->rwbuf_stkptrs[window];
119
120 if ((sp & 7) ||
121 copy_to_user((char __user *) sp, &tp->reg_window[window],
122 sizeof(struct reg_window)))
123 do_exit(SIGILL);
124 }
125 tp->w_saved = 0;
126 unlock_kernel();
127}
diff --git a/arch/sparc/kernel/wof.S b/arch/sparc/kernel/wof.S
new file mode 100644
index 000000000000..083b1215d515
--- /dev/null
+++ b/arch/sparc/kernel/wof.S
@@ -0,0 +1,428 @@
1/* $Id: wof.S,v 1.40 2000/01/08 16:38:18 anton Exp $
2 * wof.S: Sparc window overflow handler.
3 *
4 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
5 */
6
7#include <asm/contregs.h>
8#include <asm/page.h>
9#include <asm/ptrace.h>
10#include <asm/psr.h>
11#include <asm/smp.h>
12#include <asm/asi.h>
13#include <asm/winmacro.h>
14#include <asm/asmmacro.h>
15#include <asm/thread_info.h>
16
17/* WARNING: This routine is hairy and _very_ complicated, but it
18 * must be as fast as possible as it handles the allocation
19 * of register windows to the user and kernel. If you touch
20 * this code be _very_ careful as many other pieces of the
21 * kernel depend upon how this code behaves. You have been
22 * duly warned...
23 */
24
25/* We define macro's for registers which have a fixed
26 * meaning throughout this entire routine. The 'T' in
27 * the comments mean that the register can only be
28 * accessed when in the 'trap' window, 'G' means
29 * accessible in any window. Do not change these registers
30 * after they have been set, until you are ready to return
31 * from the trap.
32 */
33#define t_psr l0 /* %psr at trap time T */
34#define t_pc l1 /* PC for trap return T */
35#define t_npc l2 /* NPC for trap return T */
36#define t_wim l3 /* %wim at trap time T */
37#define saved_g5 l5 /* Global save register T */
38#define saved_g6 l6 /* Global save register T */
39#define curptr g6 /* Gets set to 'current' then stays G */
40
41/* Now registers whose values can change within the handler. */
42#define twin_tmp l4 /* Temp reg, only usable in trap window T */
43#define glob_tmp g5 /* Global temporary reg, usable anywhere G */
44
45 .text
46 .align 4
47 /* BEGINNING OF PATCH INSTRUCTIONS */
48 /* On a 7-window Sparc the boot code patches spnwin_*
49 * instructions with the following ones.
50 */
51 .globl spnwin_patch1_7win, spnwin_patch2_7win, spnwin_patch3_7win
52spnwin_patch1_7win: sll %t_wim, 6, %glob_tmp
53spnwin_patch2_7win: and %glob_tmp, 0x7f, %glob_tmp
54spnwin_patch3_7win: and %twin_tmp, 0x7f, %twin_tmp
55 /* END OF PATCH INSTRUCTIONS */
56
57 /* The trap entry point has done the following:
58 *
59 * rd %psr, %l0
60 * rd %wim, %l3
61 * b spill_window_entry
62 * andcc %l0, PSR_PS, %g0
63 */
64
65 /* Datum current_thread_info->uwinmask contains at all times a bitmask
66 * where if any user windows are active, at least one bit will
67	 * be set in the mask. If no user windows are active, the bitmask
68 * will be all zeroes.
69 */
70 .globl spill_window_entry
71 .globl spnwin_patch1, spnwin_patch2, spnwin_patch3
72spill_window_entry:
73 /* LOCATION: Trap Window */
74
75 mov %g5, %saved_g5 ! save away global temp register
76 mov %g6, %saved_g6 ! save away 'current' ptr register
77
78 /* Compute what the new %wim will be if we save the
79 * window properly in this trap handler.
80 *
81 * newwim = ((%wim>>1) | (%wim<<(nwindows - 1)));
82 */
83 srl %t_wim, 0x1, %twin_tmp
84spnwin_patch1: sll %t_wim, 7, %glob_tmp
85 or %glob_tmp, %twin_tmp, %glob_tmp
86spnwin_patch2: and %glob_tmp, 0xff, %glob_tmp
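	/* Worked example: with 8 windows and %t_wim = 0x01, the sequence above
	 * yields (0x01 >> 1) | ((0x01 << 7) & 0xff) = 0x80, i.e. the invalid-window
	 * bit rotates down by one to follow the save performed below. */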
87
88 /* The trap entry point has set the condition codes
89 * up for us to see if this is from user or kernel.
90 * Get the load of 'curptr' out of the way.
91 */
92 LOAD_CURRENT(curptr, twin_tmp)
93
94 andcc %t_psr, PSR_PS, %g0
95 be,a spwin_fromuser ! all user wins, branch
96 save %g0, %g0, %g0 ! Go where saving will occur
97
98 /* See if any user windows are active in the set. */
99 ld [%curptr + TI_UWINMASK], %twin_tmp ! grab win mask
100 orcc %g0, %twin_tmp, %g0 ! check for set bits
101 bne spwin_exist_uwins ! yep, there are some
102 andn %twin_tmp, %glob_tmp, %twin_tmp ! compute new uwinmask
103
104 /* Save into the window which must be saved and do it.
105 * Basically if we are here, this means that we trapped
106 * from kernel mode with only kernel windows in the register
107 * file.
108 */
109 save %g0, %g0, %g0 ! save into the window to stash away
110 wr %glob_tmp, 0x0, %wim ! set new %wim, this is safe now
111
112spwin_no_userwins_from_kernel:
113 /* LOCATION: Window to be saved */
114
115 STORE_WINDOW(sp) ! stash the window
116 restore %g0, %g0, %g0 ! go back into trap window
117
118 /* LOCATION: Trap window */
119 mov %saved_g5, %g5 ! restore %glob_tmp
120 mov %saved_g6, %g6 ! restore %curptr
121 wr %t_psr, 0x0, %psr ! restore condition codes in %psr
122 WRITE_PAUSE ! waste some time
123 jmp %t_pc ! Return from trap
124 rett %t_npc ! we are done
125
126spwin_exist_uwins:
127 /* LOCATION: Trap window */
128
129 /* Wow, user windows have to be dealt with, this is dirty
130 * and messy as all hell. And difficult to follow if you
131 * are approaching the infamous register window trap handling
132 * problem for the first time. DON'T LOOK!
133 *
134	 * Note that, the way the execution path works out, the new %wim
135 * will be left for us in the global temporary register,
136 * %glob_tmp. We cannot set the new %wim first because we
137 * need to save into the appropriate window without inducing
138 * a trap (traps are off, we'd get a watchdog wheee)...
139 * But first, store the new user window mask calculated
140 * above.
141 */
142 st %twin_tmp, [%curptr + TI_UWINMASK]
143 save %g0, %g0, %g0 ! Go to where the saving will occur
144
145spwin_fromuser:
146 /* LOCATION: Window to be saved */
147 wr %glob_tmp, 0x0, %wim ! Now it is safe to set new %wim
148
149 /* LOCATION: Window to be saved */
150
151 /* This instruction branches to a routine which will check
152	 * the validity of the user's stack pointer by whatever means
153 * are necessary. This means that this is architecture
154 * specific and thus this branch instruction will need to
155 * be patched at boot time once the machine type is known.
156 * This routine _shall not_ touch %curptr under any
157 * circumstances whatsoever! It will branch back to the
158 * label 'spwin_good_ustack' if the stack is ok but still
159 * needs to be dumped (SRMMU for instance will not need to
160 * do this) or 'spwin_finish_up' if the stack is ok and the
161 * registers have already been saved. If the stack is found
162 * to be bogus for some reason the routine shall branch to
163 * the label 'spwin_user_stack_is_bolixed' which will take
164 * care of things at that point.
165 */
166 .globl spwin_mmu_patchme
167spwin_mmu_patchme: b spwin_sun4c_stackchk
168 andcc %sp, 0x7, %g0
169
170spwin_good_ustack:
171 /* LOCATION: Window to be saved */
172
173	/* The user's stack is ok and we can safely save the window at
174 * %sp.
175 */
176 STORE_WINDOW(sp)
177
178spwin_finish_up:
179 restore %g0, %g0, %g0 /* Back to trap window. */
180
181 /* LOCATION: Trap window */
182
183 /* We have spilled successfully, and we have properly stored
184 * the appropriate window onto the stack.
185 */
186
187 /* Restore saved globals */
188 mov %saved_g5, %g5
189 mov %saved_g6, %g6
190
191 wr %t_psr, 0x0, %psr
192 WRITE_PAUSE
193 jmp %t_pc
194 rett %t_npc
195
196spwin_user_stack_is_bolixed:
197 /* LOCATION: Window to be saved */
198
199 /* Wheee, user has trashed his/her stack. We have to decide
200 * how to proceed based upon whether we came from kernel mode
201 * or not. If we came from kernel mode, toss the window into
202 * a special buffer and proceed, the kernel _needs_ a window
203 * and we could be in an interrupt handler so timing is crucial.
204 * If we came from user land we build a full stack frame and call
205 * c-code to gun down the process.
206 */
207 rd %psr, %glob_tmp
208 andcc %glob_tmp, PSR_PS, %g0
209 bne spwin_bad_ustack_from_kernel
210 nop
211
212 /* Oh well, throw this one window into the per-task window
213 * buffer, the first one.
214 */
215 st %sp, [%curptr + TI_RWIN_SPTRS]
216 STORE_WINDOW(curptr + TI_REG_WINDOW)
217 restore %g0, %g0, %g0
218
219 /* LOCATION: Trap Window */
220
221 /* Back in the trap window, update winbuffer save count. */
222 mov 1, %twin_tmp
223 st %twin_tmp, [%curptr + TI_W_SAVED]
224
225 /* Compute new user window mask. What we are basically
226 * doing is taking two windows, the invalid one at trap
227	 * time and the one we attempted to throw onto the user's
228 * stack, and saying that everything else is an ok user
229 * window. umask = ((~(%t_wim | %wim)) & valid_wim_bits)
230 */
231 rd %wim, %twin_tmp
232 or %twin_tmp, %t_wim, %twin_tmp
233 not %twin_tmp
234spnwin_patch3: and %twin_tmp, 0xff, %twin_tmp ! patched on 7win Sparcs
235 st %twin_tmp, [%curptr + TI_UWINMASK]
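	/* Worked example, assuming 8 windows: if %t_wim was 0x08 and
	 * the new %wim read back above is 0x04, then
	 * umask = ~(0x08 | 0x04) & 0xff = 0xf3, i.e. every window
	 * except the trap-time invalid one and the one we just
	 * stashed in the buffer is counted as a user window.
	 */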
236
237#define STACK_OFFSET (THREAD_SIZE - TRACEREG_SZ - STACKFRAME_SZ)
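	/* %curptr points at the thread_info at the base of the kernel
	 * stack, so curptr + STACK_OFFSET lands near the top of that
	 * stack, leaving room for a pt_regs area (TRACEREG_SZ) plus
	 * one initial stack frame (STACKFRAME_SZ).
	 */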
238
239 sethi %hi(STACK_OFFSET), %sp
240 or %sp, %lo(STACK_OFFSET), %sp
241 add %curptr, %sp, %sp
242
243 /* Restore the saved globals and build a pt_regs frame. */
244 mov %saved_g5, %g5
245 mov %saved_g6, %g6
246 STORE_PT_ALL(sp, t_psr, t_pc, t_npc, g1)
247
248 sethi %hi(STACK_OFFSET), %g6
249 or %g6, %lo(STACK_OFFSET), %g6
250 sub %sp, %g6, %g6 ! curptr
251
252 /* Turn on traps and call c-code to deal with it. */
253 wr %t_psr, PSR_ET, %psr
254 nop
255 call window_overflow_fault
256 nop
257
258	/* Return from trap if C-code actually fixes things; if it
259 * doesn't then we never get this far as the process will
260 * be given the look of death from Commander Peanut.
261 */
262 b ret_trap_entry
263 clr %l6
264
265spwin_bad_ustack_from_kernel:
266 /* LOCATION: Window to be saved */
267
268 /* The kernel provoked a spill window trap, but the window we
269 * need to save is a user one and the process has trashed its
270 * stack pointer. We need to be quick, so we throw it into
271 * a per-process window buffer until we can properly handle
272 * this later on.
273 */
274 SAVE_BOLIXED_USER_STACK(curptr, glob_tmp)
275 restore %g0, %g0, %g0
276
277 /* LOCATION: Trap window */
278
279 /* Restore globals, condition codes in the %psr and
280 * return from trap. Note, restoring %g6 when returning
281	 * to kernel mode is not necessary these days. ;-)
282 */
283 mov %saved_g5, %g5
284 mov %saved_g6, %g6
285
286 wr %t_psr, 0x0, %psr
287 WRITE_PAUSE
288
289 jmp %t_pc
290 rett %t_npc
291
292/* Undefine the register macros which would only cause trouble
293 * if used below. This helps find 'stupid' coding errors that
294 * produce 'odd' behavior. The routines below are allowed to
295 * make use of glob_tmp and t_psr so we leave them defined.
296 */
297#undef twin_tmp
298#undef curptr
299#undef t_pc
300#undef t_npc
301#undef t_wim
302#undef saved_g5
303#undef saved_g6
304
305/* Now come the per-architecture window overflow stack checking routines.
306 * As noted above, %curptr cannot be touched by these routines at all.
307 */
308
309 .globl spwin_sun4c_stackchk
310spwin_sun4c_stackchk:
311 /* LOCATION: Window to be saved on the stack */
312
313	/* See if the stack is in the address space hole, but first
314	 * check the results of the caller's andcc %sp, 0x7, %g0
315 */
316 be 1f
317 sra %sp, 29, %glob_tmp
318
319 rd %psr, %glob_tmp
320 b spwin_user_stack_is_bolixed + 0x4
321 nop
322
3231:
324 add %glob_tmp, 0x1, %glob_tmp
325 andncc %glob_tmp, 0x1, %g0
326 be 1f
327 and %sp, 0xfff, %glob_tmp ! delay slot
328
329 rd %psr, %glob_tmp
330 b spwin_user_stack_is_bolixed + 0x4
331 nop
332
333 /* See if our dump area will be on more than one
334 * page.
335 */
3361:
337 add %glob_tmp, 0x38, %glob_tmp
338 andncc %glob_tmp, 0xff8, %g0
339 be spwin_sun4c_onepage ! only one page to check
340 lda [%sp] ASI_PTE, %glob_tmp ! have to check first page anyways
341
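	/* The lda from ASI_PTE above fetched the sun4c PTE for the
	 * first stack page. The srl by 29 below leaves the (valid,
	 * writable, privileged) bits, so comparing with 0x6 accepts
	 * only a valid, user-writable mapping - exactly what a spill
	 * onto the user's stack requires.
	 */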
342spwin_sun4c_twopages:
343	/* Is the first page ok permission-wise? */
344 srl %glob_tmp, 29, %glob_tmp
345 cmp %glob_tmp, 0x6
346 be 1f
347 add %sp, 0x38, %glob_tmp /* Is second page in vma hole? */
348
349 rd %psr, %glob_tmp
350 b spwin_user_stack_is_bolixed + 0x4
351 nop
352
3531:
354 sra %glob_tmp, 29, %glob_tmp
355 add %glob_tmp, 0x1, %glob_tmp
356 andncc %glob_tmp, 0x1, %g0
357 be 1f
358 add %sp, 0x38, %glob_tmp
359
360 rd %psr, %glob_tmp
361 b spwin_user_stack_is_bolixed + 0x4
362 nop
363
3641:
365 lda [%glob_tmp] ASI_PTE, %glob_tmp
366
367spwin_sun4c_onepage:
368 srl %glob_tmp, 29, %glob_tmp
369 cmp %glob_tmp, 0x6 ! can user write to it?
370 be spwin_good_ustack ! success
371 nop
372
373 rd %psr, %glob_tmp
374 b spwin_user_stack_is_bolixed + 0x4
375 nop
376
377 /* This is a generic SRMMU routine. As far as I know this
378 * works for all current v8/srmmu implementations, we'll
379	 * works for all current v8/srmmu implementations; we'll
380 */
381 .globl spwin_srmmu_stackchk
382spwin_srmmu_stackchk:
383 /* LOCATION: Window to be saved on the stack */
384
385 /* Because of SMP concerns and speed we play a trick.
386 * We disable fault traps in the MMU control register,
387	 * execute the stores, then check the fault registers
388	 * to see what happens. I can hear Linus now:
389	 * "disgusting... broken hardware...".
390 *
391	 * But first, check to see if the user's stack has ended
392	 * up in the kernel vma, in which case we would succeed for
393	 * the 'wrong' reason... ;( Note that the 'sethi' below assumes the
394 * kernel is page aligned, which should always be the case.
395 */
396	/* Check the results of the caller's andcc %sp, 0x7, %g0 */
397 bne spwin_user_stack_is_bolixed
398 sethi %hi(PAGE_OFFSET), %glob_tmp
399 cmp %glob_tmp, %sp
400 bleu spwin_user_stack_is_bolixed
401 mov AC_M_SFSR, %glob_tmp
402
403 /* Clear the fault status and turn on the no_fault bit. */
404 lda [%glob_tmp] ASI_M_MMUREGS, %g0 ! eat SFSR
405
406 lda [%g0] ASI_M_MMUREGS, %glob_tmp ! read MMU control
407 or %glob_tmp, 0x2, %glob_tmp ! or in no_fault bit
408 sta %glob_tmp, [%g0] ASI_M_MMUREGS ! set it
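	/* With the no-fault bit set, the stores below complete even
	 * if the stack page is unmapped; the MMU just latches the
	 * fault in the SFSR/SFAR instead of trapping, and we inspect
	 * those registers once the bit is cleared again.
	 */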
409
410 /* Dump the registers and cross fingers. */
411 STORE_WINDOW(sp)
412
413 /* Clear the no_fault bit and check the status. */
414 andn %glob_tmp, 0x2, %glob_tmp
415 sta %glob_tmp, [%g0] ASI_M_MMUREGS
416
417 mov AC_M_SFAR, %glob_tmp
418 lda [%glob_tmp] ASI_M_MMUREGS, %g0
419
420 mov AC_M_SFSR, %glob_tmp
421 lda [%glob_tmp] ASI_M_MMUREGS, %glob_tmp
422 andcc %glob_tmp, 0x2, %g0 ! did we fault?
423 be,a spwin_finish_up + 0x4 ! cool beans, success
424 restore %g0, %g0, %g0
425
426 rd %psr, %glob_tmp
427 b spwin_user_stack_is_bolixed + 0x4 ! we faulted, ugh
428 nop
diff --git a/arch/sparc/kernel/wuf.S b/arch/sparc/kernel/wuf.S
new file mode 100644
index 000000000000..d1a266bf103a
--- /dev/null
+++ b/arch/sparc/kernel/wuf.S
@@ -0,0 +1,360 @@
1/* $Id: wuf.S,v 1.39 2000/01/08 16:38:18 anton Exp $
2 * wuf.S: Window underflow trap handler for the Sparc.
3 *
4 * Copyright (C) 1995 David S. Miller
5 */
6
7#include <asm/contregs.h>
8#include <asm/page.h>
9#include <asm/ptrace.h>
10#include <asm/psr.h>
11#include <asm/smp.h>
12#include <asm/asi.h>
13#include <asm/winmacro.h>
14#include <asm/asmmacro.h>
15#include <asm/thread_info.h>
16
17/* Just like the overflow handler we define macros for registers
18 * with fixed meanings in this routine.
19 */
20#define t_psr l0
21#define t_pc l1
22#define t_npc l2
23#define t_wim l3
24/* Don't touch the above registers or else you die horribly... */
25
26/* Now macros for the available scratch registers in this routine. */
27#define twin_tmp1 l4
28#define twin_tmp2 l5
29
30#define curptr g6
31
32 .text
33 .align 4
34
35 /* The trap entry point has executed the following:
36 *
37 * rd %psr, %l0
38 * rd %wim, %l3
39 * b fill_window_entry
40 * andcc %l0, PSR_PS, %g0
41 */
42
43 /* Datum current_thread_info->uwinmask contains at all times a bitmask
44 * where, if any user windows are active, at least one bit will
45 * be set in the mask. If no user windows are active, the bitmask
46 * will be all zeroes.
47 */
48
49 /* To get an idea of what has just happened to cause this
50	 * trap, take a look at this diagram:
51 *
52 * 1 2 3 4 <-- Window number
53 * ----------
54 * T O W I <-- Symbolic name
55 *
56 * O == the window that execution was in when
57 * the restore was attempted
58 *
59 * T == the trap itself has save'd us into this
60 * window
61 *
62 * W == this window is the one which is now invalid
63 * and must be made valid plus loaded from the
64 * stack
65 *
66 * I == this window will be the invalid one when we
67 * are done and return from trap if successful
68 */
69
70 /* BEGINNING OF PATCH INSTRUCTIONS */
71
72	/* On a 7-window Sparc the boot code patches the fnwin_patch*
73	 * instructions with the following ones.
74 */
75 .globl fnwin_patch1_7win, fnwin_patch2_7win
76fnwin_patch1_7win: srl %t_wim, 6, %twin_tmp2
77fnwin_patch2_7win: and %twin_tmp1, 0x7f, %twin_tmp1
78 /* END OF PATCH INSTRUCTIONS */
79
80 .globl fill_window_entry, fnwin_patch1, fnwin_patch2
81fill_window_entry:
82 /* LOCATION: Window 'T' */
83
84 /* Compute what the new %wim is going to be if we retrieve
85 * the proper window off of the stack.
86 */
87 sll %t_wim, 1, %twin_tmp1
88fnwin_patch1: srl %t_wim, 7, %twin_tmp2
89 or %twin_tmp1, %twin_tmp2, %twin_tmp1
90fnwin_patch2: and %twin_tmp1, 0xff, %twin_tmp1
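	/* Worked example, assuming 8 windows and no 7-window patch:
	 * if %wim was 0x04, then (0x04 << 1) | (0x04 >> 7) = 0x08,
	 * so the invalid marker moves from window 'W' to window 'I'
	 * in the diagram above, matching the restore we will perform.
	 */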
91
92 wr %twin_tmp1, 0x0, %wim /* Make window 'I' invalid */
93
94 andcc %t_psr, PSR_PS, %g0
95 be fwin_from_user
96 restore %g0, %g0, %g0 /* Restore to window 'O' */
97
98 /* Trapped from kernel, we trust that the kernel does not
99	 * 'over restore', so to speak, and just grab the window
100 * from the stack and return. Easy enough.
101 */
102fwin_from_kernel:
103 /* LOCATION: Window 'O' */
104
105 restore %g0, %g0, %g0
106
107 /* LOCATION: Window 'W' */
108
109 LOAD_WINDOW(sp) /* Load it up */
110
111 /* Spin the wheel... */
112 save %g0, %g0, %g0
113 save %g0, %g0, %g0
114 /* I'd like to buy a vowel please... */
115
116 /* LOCATION: Window 'T' */
117
118 /* Now preserve the condition codes in %psr, pause, and
119 * return from trap. This is the simplest case of all.
120 */
121 wr %t_psr, 0x0, %psr
122 WRITE_PAUSE
123
124 jmp %t_pc
125 rett %t_npc
126
127fwin_from_user:
128 /* LOCATION: Window 'O' */
129
130 restore %g0, %g0, %g0 /* Restore to window 'W' */
131
132 /* LOCATION: Window 'W' */
133
134 /* Branch to the architecture specific stack validation
135	 * routine. Both versions can be found below...
136 */
137 .globl fwin_mmu_patchme
138fwin_mmu_patchme: b sun4c_fwin_stackchk
139 andcc %sp, 0x7, %g0
140
141#define STACK_OFFSET (THREAD_SIZE - TRACEREG_SZ - STACKFRAME_SZ)
142
143fwin_user_stack_is_bolixed:
144 /* LOCATION: Window 'W' */
145
146 /* Place a pt_regs frame on the kernel stack, save back
147 * to the trap window and call c-code to deal with this.
148 */
149 LOAD_CURRENT(l4, l5)
150
151 sethi %hi(STACK_OFFSET), %l5
152 or %l5, %lo(STACK_OFFSET), %l5
153 add %l4, %l5, %l5
154
155 /* Store globals into pt_regs frame. */
156 STORE_PT_GLOBALS(l5)
157 STORE_PT_YREG(l5, g3)
158
159 /* Save current in a global while we change windows. */
160 mov %l4, %curptr
161
162 save %g0, %g0, %g0
163
164 /* LOCATION: Window 'O' */
165
166 rd %psr, %g3 /* Read %psr in live user window */
167 mov %fp, %g4 /* Save bogus frame pointer. */
168
169 save %g0, %g0, %g0
170
171 /* LOCATION: Window 'T' */
172
173 sethi %hi(STACK_OFFSET), %l5
174 or %l5, %lo(STACK_OFFSET), %l5
175 add %curptr, %l5, %sp
176
177 /* Build rest of pt_regs. */
178 STORE_PT_INS(sp)
179 STORE_PT_PRIV(sp, t_psr, t_pc, t_npc)
180
181 /* re-set trap time %wim value */
182 wr %t_wim, 0x0, %wim
183
184	/* Fix the user's window mask and buffer save count. */
185 mov 0x1, %g5
186 sll %g5, %g3, %g5
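	/* %g3 still holds the %psr read in window 'O'; a shift by a
	 * register only uses its low five bits, i.e. the CWP field,
	 * so %g5 becomes the uwinmask bit for that one remaining
	 * live user window.
	 */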
187 st %g5, [%curptr + TI_UWINMASK] ! one live user window still
188 st %g0, [%curptr + TI_W_SAVED] ! no windows in the buffer
189
190 wr %t_psr, PSR_ET, %psr ! enable traps
191 nop
192 call window_underflow_fault
193 mov %g4, %o0
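	/* %g4 is the bogus user frame pointer saved back in window
	 * 'O'; it is the user stack address the fill failed on and
	 * is passed as the argument to window_underflow_fault.
	 */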
194
195 b ret_trap_entry
196 clr %l6
197
198fwin_user_stack_is_ok:
199 /* LOCATION: Window 'W' */
200
201	/* The user's stack area is kosher and mapped, so load the
202 * window and fall through to the finish up routine.
203 */
204 LOAD_WINDOW(sp)
205
206 /* Round and round she goes... */
207 save %g0, %g0, %g0 /* Save to window 'O' */
208 save %g0, %g0, %g0 /* Save to window 'T' */
209 /* Where she'll trap nobody knows... */
210
211 /* LOCATION: Window 'T' */
212
213fwin_user_finish_up:
214 /* LOCATION: Window 'T' */
215
216 wr %t_psr, 0x0, %psr
217 WRITE_PAUSE
218
219 jmp %t_pc
220 rett %t_npc
221
222	/* Here come the architecture specific checks for stack
223 * mappings. Note that unlike the window overflow handler
224 * we only need to check whether the user can read from
225 * the appropriate addresses. Also note that we are in
226 * an invalid window which will be loaded, and this means
227 * that until we actually load the window up we are free
228 * to use any of the local registers contained within.
229 *
230	 * On success these routines branch to fwin_user_stack_is_ok
231 * if the area at %sp is user readable and the window still
232 * needs to be loaded, else fwin_user_finish_up if the
233 * routine has done the loading itself. On failure (bogus
234 * user stack) the routine shall branch to the label called
235 * fwin_user_stack_is_bolixed.
236 *
237 * Contrary to the arch-specific window overflow stack
238 * check routines in wof.S, these routines are free to use
239 * any of the local registers they want to as this window
240	 * does not belong to anyone at this point; however, the
241	 * outs and ins are still verboten as they may be part of
242	 * 'someone else's' window.
243 */
244
245 .align 4
246 .globl sun4c_fwin_stackchk
247sun4c_fwin_stackchk:
248 /* LOCATION: Window 'W' */
249
250 /* Caller did 'andcc %sp, 0x7, %g0' */
251 be 1f
252 and %sp, 0xfff, %l0 ! delay slot
253
254 b,a fwin_user_stack_is_bolixed
255
256 /* See if we have to check the sanity of one page or two */
2571:
258 add %l0, 0x38, %l0
259 sra %sp, 29, %l5
260 add %l5, 0x1, %l5
261 andncc %l5, 0x1, %g0
262 be 1f
263 andncc %l0, 0xff8, %g0
264
265 b,a fwin_user_stack_is_bolixed /* %sp is in vma hole, yuck */
266
2671:
268 be sun4c_fwin_onepage /* Only one page to check */
269 lda [%sp] ASI_PTE, %l1
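	/* Unlike the overflow check, only the valid bit of the sun4c
	 * PTE is tested here (the andcc with 0x4 after the srl by 29);
	 * the page merely has to be present for the LOAD_WINDOW below
	 * to succeed.
	 */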
270sun4c_fwin_twopages:
271 add %sp, 0x38, %l0
272 sra %l0, 29, %l5
273 add %l5, 0x1, %l5
274 andncc %l5, 0x1, %g0
275 be 1f
276 lda [%l0] ASI_PTE, %l1
277
278 b,a fwin_user_stack_is_bolixed /* Second page in vma hole */
279
2801:
281 srl %l1, 29, %l1
282 andcc %l1, 0x4, %g0
283 bne sun4c_fwin_onepage
284 lda [%sp] ASI_PTE, %l1
285
286 b,a fwin_user_stack_is_bolixed /* Second page has bad perms */
287
288sun4c_fwin_onepage:
289 srl %l1, 29, %l1
290 andcc %l1, 0x4, %g0
291 bne fwin_user_stack_is_ok
292 nop
293
294 /* A page had bad page permissions, losing... */
295 b,a fwin_user_stack_is_bolixed
296
297 .globl srmmu_fwin_stackchk
298srmmu_fwin_stackchk:
299 /* LOCATION: Window 'W' */
300
301 /* Caller did 'andcc %sp, 0x7, %g0' */
302 bne fwin_user_stack_is_bolixed
303 sethi %hi(PAGE_OFFSET), %l5
304
305	/* Check if the user's stack is in the kernel vma, in which
306	 * case our trial and error technique below would succeed
307	 * for the 'wrong' reason.
308 */
309 mov AC_M_SFSR, %l4
310 cmp %l5, %sp
311 bleu fwin_user_stack_is_bolixed
312 lda [%l4] ASI_M_MMUREGS, %g0 ! clear fault status
313
314	/* The technique is: turn off faults on this processor,
315 * just let the load rip, then check the sfsr to see if
316 * a fault did occur. Then we turn on fault traps again
317 * and branch conditionally based upon what happened.
318 */
319 lda [%g0] ASI_M_MMUREGS, %l5 ! read mmu-ctrl reg
320 or %l5, 0x2, %l5 ! turn on no-fault bit
321 sta %l5, [%g0] ASI_M_MMUREGS ! store it
322
323 /* Cross fingers and go for it. */
324 LOAD_WINDOW(sp)
325
326 /* A penny 'saved'... */
327 save %g0, %g0, %g0
328 save %g0, %g0, %g0
329 /* Is a BADTRAP earned... */
330
331 /* LOCATION: Window 'T' */
332
333 lda [%g0] ASI_M_MMUREGS, %twin_tmp1 ! load mmu-ctrl again
334 andn %twin_tmp1, 0x2, %twin_tmp1 ! clear no-fault bit
335 sta %twin_tmp1, [%g0] ASI_M_MMUREGS ! store it
336
337 mov AC_M_SFAR, %twin_tmp2
338 lda [%twin_tmp2] ASI_M_MMUREGS, %g0 ! read fault address
339
340 mov AC_M_SFSR, %twin_tmp2
341 lda [%twin_tmp2] ASI_M_MMUREGS, %twin_tmp2 ! read fault status
342 andcc %twin_tmp2, 0x2, %g0 ! did fault occur?
343
344 bne 1f ! yep, cleanup
345 nop
346
347 wr %t_psr, 0x0, %psr
348 nop
349 b fwin_user_finish_up + 0x4
350 nop
351
352 /* Did I ever tell you about my window lobotomy?
353 * anyways... fwin_user_stack_is_bolixed expects
354 * to be in window 'W' so make it happy or else
355 * we watchdog badly.
356 */
3571:
358 restore %g0, %g0, %g0
359 b fwin_user_stack_is_bolixed ! oh well
360 restore %g0, %g0, %g0