path: root/arch/mips/kernel
author    Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
committer Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
commit    1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree      0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/mips/kernel
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'arch/mips/kernel')
-rw-r--r-- arch/mips/kernel/Makefile | 65
-rw-r--r-- arch/mips/kernel/binfmt_elfn32.c | 119
-rw-r--r-- arch/mips/kernel/binfmt_elfo32.c | 139
-rw-r--r-- arch/mips/kernel/branch.c | 199
-rw-r--r-- arch/mips/kernel/cpu-bugs64.c | 321
-rw-r--r-- arch/mips/kernel/cpu-probe.c | 598
-rw-r--r-- arch/mips/kernel/entry.S | 155
-rw-r--r-- arch/mips/kernel/gdb-low.S | 370
-rw-r--r-- arch/mips/kernel/gdb-stub.c | 1091
-rw-r--r-- arch/mips/kernel/genex.S | 302
-rw-r--r-- arch/mips/kernel/genrtc.c | 64
-rw-r--r-- arch/mips/kernel/head.S | 221
-rw-r--r-- arch/mips/kernel/i8259.c | 331
-rw-r--r-- arch/mips/kernel/init_task.c | 42
-rw-r--r-- arch/mips/kernel/ioctl32.c | 58
-rw-r--r-- arch/mips/kernel/irix5sys.S | 1041
-rw-r--r-- arch/mips/kernel/irixelf.c | 1326
-rw-r--r-- arch/mips/kernel/irixinv.c | 77
-rw-r--r-- arch/mips/kernel/irixioctl.c | 261
-rw-r--r-- arch/mips/kernel/irixsig.c | 853
-rw-r--r-- arch/mips/kernel/irq-msc01.c | 189
-rw-r--r-- arch/mips/kernel/irq-mv6434x.c | 161
-rw-r--r-- arch/mips/kernel/irq-rm7000.c | 98
-rw-r--r-- arch/mips/kernel/irq-rm9000.c | 149
-rw-r--r-- arch/mips/kernel/irq.c | 140
-rw-r--r-- arch/mips/kernel/irq_cpu.c | 118
-rw-r--r-- arch/mips/kernel/linux32.c | 1469
-rw-r--r-- arch/mips/kernel/mips_ksyms.c | 67
-rw-r--r-- arch/mips/kernel/module-elf32.c | 250
-rw-r--r-- arch/mips/kernel/module-elf64.c | 274
-rw-r--r-- arch/mips/kernel/module.c | 53
-rw-r--r-- arch/mips/kernel/offset.c | 314
-rw-r--r-- arch/mips/kernel/proc.c | 149
-rw-r--r-- arch/mips/kernel/process.c | 364
-rw-r--r-- arch/mips/kernel/ptrace.c | 338
-rw-r--r-- arch/mips/kernel/ptrace32.c | 285
-rw-r--r-- arch/mips/kernel/r2300_fpu.S | 126
-rw-r--r-- arch/mips/kernel/r2300_switch.S | 174
-rw-r--r-- arch/mips/kernel/r4k_fpu.S | 191
-rw-r--r-- arch/mips/kernel/r4k_switch.S | 221
-rw-r--r-- arch/mips/kernel/r6000_fpu.S | 87
-rw-r--r-- arch/mips/kernel/reset.c | 43
-rw-r--r-- arch/mips/kernel/scall32-o32.S | 641
-rw-r--r-- arch/mips/kernel/scall64-64.S | 451
-rw-r--r-- arch/mips/kernel/scall64-n32.S | 365
-rw-r--r-- arch/mips/kernel/scall64-o32.S | 488
-rw-r--r-- arch/mips/kernel/semaphore.c | 164
-rw-r--r-- arch/mips/kernel/setup.c | 571
-rw-r--r-- arch/mips/kernel/signal-common.h | 137
-rw-r--r-- arch/mips/kernel/signal.c | 517
-rw-r--r-- arch/mips/kernel/signal32.c | 905
-rw-r--r-- arch/mips/kernel/signal_n32.c | 197
-rw-r--r-- arch/mips/kernel/smp.c | 425
-rw-r--r-- arch/mips/kernel/syscall.c | 407
-rw-r--r-- arch/mips/kernel/sysirix.c | 2179
-rw-r--r-- arch/mips/kernel/time.c | 755
-rw-r--r-- arch/mips/kernel/traps.c | 1062
-rw-r--r-- arch/mips/kernel/unaligned.c | 550
-rw-r--r-- arch/mips/kernel/vmlinux.lds.S | 183
59 files changed, 22890 insertions, 0 deletions
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
new file mode 100644
index 000000000000..a0230ee0f7f4
--- /dev/null
+++ b/arch/mips/kernel/Makefile
@@ -0,0 +1,65 @@
1#
2# Makefile for the Linux/MIPS kernel.
3#
4
5extra-y := head.o init_task.o vmlinux.lds
6
7obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \
8 ptrace.o reset.o semaphore.o setup.o signal.o syscall.o \
9 time.o traps.o unaligned.o
10
11binfmt_irix-objs := irixelf.o irixinv.o irixioctl.o irixsig.o \
12 irix5sys.o sysirix.o
13
14ifdef CONFIG_MODULES
15obj-y += mips_ksyms.o module.o
16obj-$(CONFIG_MIPS32) += module-elf32.o
17obj-$(CONFIG_MIPS64) += module-elf64.o
18endif
19
20obj-$(CONFIG_CPU_R3000) += r2300_fpu.o r2300_switch.o
21obj-$(CONFIG_CPU_TX39XX) += r2300_fpu.o r2300_switch.o
22obj-$(CONFIG_CPU_TX49XX) += r4k_fpu.o r4k_switch.o
23obj-$(CONFIG_CPU_R4000) += r4k_fpu.o r4k_switch.o
24obj-$(CONFIG_CPU_VR41XX) += r4k_fpu.o r4k_switch.o
25obj-$(CONFIG_CPU_R4300) += r4k_fpu.o r4k_switch.o
26obj-$(CONFIG_CPU_R4X00) += r4k_fpu.o r4k_switch.o
27obj-$(CONFIG_CPU_R5000) += r4k_fpu.o r4k_switch.o
28obj-$(CONFIG_CPU_R5432) += r4k_fpu.o r4k_switch.o
29obj-$(CONFIG_CPU_R8000) += r4k_fpu.o r4k_switch.o
30obj-$(CONFIG_CPU_RM7000) += r4k_fpu.o r4k_switch.o
31obj-$(CONFIG_CPU_RM9000) += r4k_fpu.o r4k_switch.o
32obj-$(CONFIG_CPU_NEVADA) += r4k_fpu.o r4k_switch.o
33obj-$(CONFIG_CPU_R10000) += r4k_fpu.o r4k_switch.o
34obj-$(CONFIG_CPU_SB1) += r4k_fpu.o r4k_switch.o
35obj-$(CONFIG_CPU_MIPS32) += r4k_fpu.o r4k_switch.o
36obj-$(CONFIG_CPU_MIPS64) += r4k_fpu.o r4k_switch.o
37obj-$(CONFIG_CPU_R6000) += r6000_fpu.o r4k_switch.o
38
39obj-$(CONFIG_SMP) += smp.o
40
41obj-$(CONFIG_NO_ISA) += dma-no-isa.o
42obj-$(CONFIG_I8259) += i8259.o
43obj-$(CONFIG_IRQ_CPU) += irq_cpu.o
44obj-$(CONFIG_IRQ_CPU_RM7K) += irq-rm7000.o
45obj-$(CONFIG_IRQ_CPU_RM9K) += irq-rm9000.o
46obj-$(CONFIG_IRQ_MV64340) += irq-mv6434x.o
47
48obj-$(CONFIG_MIPS32) += scall32-o32.o
49obj-$(CONFIG_MIPS64) += scall64-64.o
50obj-$(CONFIG_BINFMT_IRIX) += binfmt_irix.o
51obj-$(CONFIG_MIPS32_COMPAT) += ioctl32.o linux32.o signal32.o
52obj-$(CONFIG_MIPS32_N32) += binfmt_elfn32.o scall64-n32.o signal_n32.o
53obj-$(CONFIG_MIPS32_O32) += binfmt_elfo32.o scall64-o32.o ptrace32.o
54
55obj-$(CONFIG_KGDB) += gdb-low.o gdb-stub.o
56obj-$(CONFIG_PROC_FS) += proc.o
57
58obj-$(CONFIG_MIPS64) += cpu-bugs64.o
59
60obj-$(CONFIG_GEN_RTC) += genrtc.o
61
62CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(CFLAGS) -Wa,-mdaddi -c -o /dev/null -xc /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)
63CFLAGS_ioctl32.o += -Ifs/
64
65EXTRA_AFLAGS := $(CFLAGS)
diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
new file mode 100644
index 000000000000..ed47041f3030
--- /dev/null
+++ b/arch/mips/kernel/binfmt_elfn32.c
@@ -0,0 +1,119 @@
1/*
2 * Support for n32 Linux/MIPS ELF binaries.
3 *
4 * Copyright (C) 1999, 2001 Ralf Baechle
5 * Copyright (C) 1999, 2001 Silicon Graphics, Inc.
6 *
7 * Heavily inspired by the 32-bit Sparc compat code which is
8 * Copyright (C) 1995, 1996, 1997, 1998 David S. Miller (davem@redhat.com)
9 * Copyright (C) 1995, 1996, 1997, 1998 Jakub Jelinek (jj@ultra.linux.cz)
10 */
11
12#define ELF_ARCH EM_MIPS
13#define ELF_CLASS ELFCLASS32
14#ifdef __MIPSEB__
15#define ELF_DATA ELFDATA2MSB;
16#else /* __MIPSEL__ */
17#define ELF_DATA ELFDATA2LSB;
18#endif
19
20/* ELF register definitions */
21#define ELF_NGREG 45
22#define ELF_NFPREG 33
23
24typedef unsigned long elf_greg_t;
25typedef elf_greg_t elf_gregset_t[ELF_NGREG];
26
27typedef double elf_fpreg_t;
28typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
29
30/*
31 * This is used to ensure we don't load something for the wrong architecture.
32 */
33#define elf_check_arch(hdr) \
34({ \
35 int __res = 1; \
36 struct elfhdr *__h = (hdr); \
37 \
38 if (__h->e_machine != EM_MIPS) \
39 __res = 0; \
40 if (__h->e_ident[EI_CLASS] != ELFCLASS32) \
41 __res = 0; \
42 if (((__h->e_flags & EF_MIPS_ABI2) == 0) || \
43 ((__h->e_flags & EF_MIPS_ABI) != 0)) \
44 __res = 0; \
45 \
46 __res; \
47})
48
49#define TASK32_SIZE 0x7fff8000UL
50#undef ELF_ET_DYN_BASE
51#define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
52
53#include <asm/processor.h>
54#include <linux/module.h>
55#include <linux/config.h>
56#include <linux/elfcore.h>
57#include <linux/compat.h>
58
59#define elf_prstatus elf_prstatus32
60struct elf_prstatus32
61{
62 struct elf_siginfo pr_info; /* Info associated with signal */
63 short pr_cursig; /* Current signal */
64 unsigned int pr_sigpend; /* Set of pending signals */
65 unsigned int pr_sighold; /* Set of held signals */
66 pid_t pr_pid;
67 pid_t pr_ppid;
68 pid_t pr_pgrp;
69 pid_t pr_sid;
70 struct compat_timeval pr_utime; /* User time */
71 struct compat_timeval pr_stime; /* System time */
72 struct compat_timeval pr_cutime;/* Cumulative user time */
73 struct compat_timeval pr_cstime;/* Cumulative system time */
74 elf_gregset_t pr_reg; /* GP registers */
75 int pr_fpvalid; /* True if math co-processor being used. */
76};
77
78#define elf_prpsinfo elf_prpsinfo32
79struct elf_prpsinfo32
80{
81 char pr_state; /* numeric process state */
82 char pr_sname; /* char for pr_state */
83 char pr_zomb; /* zombie */
84 char pr_nice; /* nice val */
85 unsigned int pr_flag; /* flags */
86 __kernel_uid_t pr_uid;
87 __kernel_gid_t pr_gid;
88 pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
89 /* Lots missing */
90 char pr_fname[16]; /* filename of executable */
91 char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
92};
93
94#define elf_addr_t u32
95#define elf_caddr_t u32
96#define init_elf_binfmt init_elfn32_binfmt
97
98#define jiffies_to_timeval jiffies_to_compat_timeval
99static __inline__ void
100jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value)
101{
102 /*
103	 * Convert jiffies to nanoseconds and separate with
104 * one divide.
105 */
106 u64 nsec = (u64)jiffies * TICK_NSEC;
107 value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &value->tv_usec);
108 value->tv_usec /= NSEC_PER_USEC;
109}
110
111#define ELF_CORE_EFLAGS EF_MIPS_ABI2
112
113MODULE_DESCRIPTION("Binary format loader for compatibility with n32 Linux/MIPS binaries");
114MODULE_AUTHOR("Ralf Baechle (ralf@linux-mips.org)");
115
116#undef MODULE_DESCRIPTION
117#undef MODULE_AUTHOR
118
119#include "../../../fs/binfmt_elf.c"
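
Both compat loaders above redefine jiffies_to_timeval so that core-dump timestamps come out as compat_timeval values: jiffies are scaled to nanoseconds, then split into seconds and microseconds around a single division. The sketch below is an illustrative user-space restatement only, not part of the commit; the HZ value is an assumption and the kernel helper div_long_long_rem is replaced by plain / and % here.

/* Illustrative sketch: scale jiffies to nanoseconds, then split sec/usec. */
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_HZ     100ULL                         /* assumed tick rate */
#define NSEC_PER_SEC   1000000000ULL
#define NSEC_PER_USEC  1000ULL
#define TICK_NSEC      (NSEC_PER_SEC / EXAMPLE_HZ)

struct example_timeval {
	long tv_sec;
	long tv_usec;
};

static void example_jiffies_to_timeval(unsigned long jiffies,
				       struct example_timeval *value)
{
	uint64_t nsec = (uint64_t)jiffies * TICK_NSEC;

	value->tv_sec  = nsec / NSEC_PER_SEC;                      /* whole seconds */
	value->tv_usec = (nsec % NSEC_PER_SEC) / NSEC_PER_USEC;    /* remainder, us */
}

int main(void)
{
	struct example_timeval tv;

	example_jiffies_to_timeval(123456, &tv);   /* 1234.56 s at HZ=100 */
	printf("%ld s %ld us\n", tv.tv_sec, tv.tv_usec);
	return 0;
}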
diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
new file mode 100644
index 000000000000..ee21b18c37a8
--- /dev/null
+++ b/arch/mips/kernel/binfmt_elfo32.c
@@ -0,0 +1,139 @@
1/*
2 * Support for o32 Linux/MIPS ELF binaries.
3 *
4 * Copyright (C) 1999, 2001 Ralf Baechle
5 * Copyright (C) 1999, 2001 Silicon Graphics, Inc.
6 *
7 * Heavily inspired by the 32-bit Sparc compat code which is
8 * Copyright (C) 1995, 1996, 1997, 1998 David S. Miller (davem@redhat.com)
9 * Copyright (C) 1995, 1996, 1997, 1998 Jakub Jelinek (jj@ultra.linux.cz)
10 */
11
12#define ELF_ARCH EM_MIPS
13#define ELF_CLASS ELFCLASS32
14#ifdef __MIPSEB__
15#define ELF_DATA ELFDATA2MSB;
16#else /* __MIPSEL__ */
17#define ELF_DATA ELFDATA2LSB;
18#endif
19
20/* ELF register definitions */
21#define ELF_NGREG 45
22#define ELF_NFPREG 33
23
24typedef unsigned int elf_greg_t;
25typedef elf_greg_t elf_gregset_t[ELF_NGREG];
26
27typedef double elf_fpreg_t;
28typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
29
30/*
31 * This is used to ensure we don't load something for the wrong architecture.
32 */
33#define elf_check_arch(hdr) \
34({ \
35 int __res = 1; \
36 struct elfhdr *__h = (hdr); \
37 \
38 if (__h->e_machine != EM_MIPS) \
39 __res = 0; \
40 if (__h->e_ident[EI_CLASS] != ELFCLASS32) \
41 __res = 0; \
42 if ((__h->e_flags & EF_MIPS_ABI2) != 0) \
43 __res = 0; \
44 if (((__h->e_flags & EF_MIPS_ABI) != 0) && \
45 ((__h->e_flags & EF_MIPS_ABI) != EF_MIPS_ABI_O32)) \
46 __res = 0; \
47 \
48 __res; \
49})
50
51#define TASK32_SIZE 0x7fff8000UL
52#undef ELF_ET_DYN_BASE
53#define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
54
55#include <asm/processor.h>
56#include <linux/module.h>
57#include <linux/config.h>
58#include <linux/elfcore.h>
59#include <linux/compat.h>
60
61#define elf_prstatus elf_prstatus32
62struct elf_prstatus32
63{
64 struct elf_siginfo pr_info; /* Info associated with signal */
65 short pr_cursig; /* Current signal */
66 unsigned int pr_sigpend; /* Set of pending signals */
67 unsigned int pr_sighold; /* Set of held signals */
68 pid_t pr_pid;
69 pid_t pr_ppid;
70 pid_t pr_pgrp;
71 pid_t pr_sid;
72 struct compat_timeval pr_utime; /* User time */
73 struct compat_timeval pr_stime; /* System time */
74 struct compat_timeval pr_cutime;/* Cumulative user time */
75 struct compat_timeval pr_cstime;/* Cumulative system time */
76 elf_gregset_t pr_reg; /* GP registers */
77 int pr_fpvalid; /* True if math co-processor being used. */
78};
79
80#define elf_prpsinfo elf_prpsinfo32
81struct elf_prpsinfo32
82{
83 char pr_state; /* numeric process state */
84 char pr_sname; /* char for pr_state */
85 char pr_zomb; /* zombie */
86 char pr_nice; /* nice val */
87 unsigned int pr_flag; /* flags */
88 __kernel_uid_t pr_uid;
89 __kernel_gid_t pr_gid;
90 pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
91 /* Lots missing */
92 char pr_fname[16]; /* filename of executable */
93 char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
94};
95
96#define elf_addr_t u32
97#define elf_caddr_t u32
98#define init_elf_binfmt init_elf32_binfmt
99
100#define jiffies_to_timeval jiffies_to_compat_timeval
101static __inline__ void
102jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value)
103{
104 /*
105	 * Convert jiffies to nanoseconds and separate with
106 * one divide.
107 */
108 u64 nsec = (u64)jiffies * TICK_NSEC;
109 value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &value->tv_usec);
110 value->tv_usec /= NSEC_PER_USEC;
111}
112
113#undef ELF_CORE_COPY_REGS
114#define ELF_CORE_COPY_REGS(_dest,_regs) elf32_core_copy_regs(_dest,_regs);
115
116void elf32_core_copy_regs(elf_gregset_t _dest, struct pt_regs *_regs)
117{
118 int i;
119
120 memset(_dest, 0, sizeof(elf_gregset_t));
121
122 /* XXXKW the 6 is from EF_REG0 in gdb/gdb/mips-linux-tdep.c, include/asm-mips/reg.h */
123 for (i=6; i<38; i++)
124 _dest[i] = (elf_greg_t) _regs->regs[i-6];
125 _dest[i++] = (elf_greg_t) _regs->lo;
126 _dest[i++] = (elf_greg_t) _regs->hi;
127 _dest[i++] = (elf_greg_t) _regs->cp0_epc;
128 _dest[i++] = (elf_greg_t) _regs->cp0_badvaddr;
129 _dest[i++] = (elf_greg_t) _regs->cp0_status;
130 _dest[i++] = (elf_greg_t) _regs->cp0_cause;
131}
132
133MODULE_DESCRIPTION("Binary format loader for compatibility with o32 Linux/MIPS binaries");
134MODULE_AUTHOR("Ralf Baechle (ralf@linux-mips.org)");
135
136#undef MODULE_DESCRIPTION
137#undef MODULE_AUTHOR
138
139#include "../../../fs/binfmt_elf.c"
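
The two elf_check_arch() macros above gate acceptance purely on e_flags bits: n32 requires EF_MIPS_ABI2 to be set with the old-ABI field empty, while o32 requires EF_MIPS_ABI2 clear and the old-ABI field either empty or explicitly o32. The stand-alone restatement below is an illustrative sketch only, not part of the commit; the flag constants are defined locally and mirror the usual MIPS ELF values, which is an assumption for the example.

/* Illustrative sketch: distinguish n32 and o32 MIPS ELF objects by e_flags. */
#include <stdio.h>
#include <stdint.h>

#define EX_EF_MIPS_ABI2    0x00000020u  /* set for n32 objects             */
#define EX_EF_MIPS_ABI     0x0000f000u  /* mask of the old o32/o64/EABI field */
#define EX_EF_MIPS_ABI_O32 0x00001000u  /* value meaning "o32"             */

static int looks_like_n32(uint32_t e_flags)
{
	/* n32: ABI2 flag set, and no old-ABI value in the mask */
	return (e_flags & EX_EF_MIPS_ABI2) && !(e_flags & EX_EF_MIPS_ABI);
}

static int looks_like_o32(uint32_t e_flags)
{
	uint32_t abi = e_flags & EX_EF_MIPS_ABI;

	/* o32: ABI2 flag clear, and the ABI field absent or explicitly o32 */
	return !(e_flags & EX_EF_MIPS_ABI2) &&
	       (abi == 0 || abi == EX_EF_MIPS_ABI_O32);
}

int main(void)
{
	uint32_t n32_flags = EX_EF_MIPS_ABI2;
	uint32_t o32_flags = EX_EF_MIPS_ABI_O32;

	printf("n32? %d %d\n", looks_like_n32(n32_flags), looks_like_n32(o32_flags));
	printf("o32? %d %d\n", looks_like_o32(n32_flags), looks_like_o32(o32_flags));
	return 0;
}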
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
new file mode 100644
index 000000000000..01117e977a7f
--- /dev/null
+++ b/arch/mips/kernel/branch.c
@@ -0,0 +1,199 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1996, 97, 2000, 2001 by Ralf Baechle
7 * Copyright (C) 2001 MIPS Technologies, Inc.
8 */
9#include <linux/kernel.h>
10#include <linux/sched.h>
11#include <linux/signal.h>
12#include <asm/branch.h>
13#include <asm/cpu.h>
14#include <asm/cpu-features.h>
15#include <asm/inst.h>
16#include <asm/ptrace.h>
17#include <asm/uaccess.h>
18
19/*
20 * Compute the return address and do emulate branch simulation, if required.
21 */
22int __compute_return_epc(struct pt_regs *regs)
23{
24 unsigned int *addr, bit, fcr31;
25 long epc;
26 union mips_instruction insn;
27
28 epc = regs->cp0_epc;
29 if (epc & 3)
30 goto unaligned;
31
32 /*
33 * Read the instruction
34 */
35 addr = (unsigned int *) epc;
36 if (__get_user(insn.word, addr)) {
37 force_sig(SIGSEGV, current);
38 return -EFAULT;
39 }
40
41 regs->regs[0] = 0;
42 switch (insn.i_format.opcode) {
43 /*
44 * jr and jalr are in r_format format.
45 */
46 case spec_op:
47 switch (insn.r_format.func) {
48 case jalr_op:
49 regs->regs[insn.r_format.rd] = epc + 8;
50 /* Fall through */
51 case jr_op:
52 regs->cp0_epc = regs->regs[insn.r_format.rs];
53 break;
54 }
55 break;
56
57 /*
58 * This group contains:
59 * bltz_op, bgez_op, bltzl_op, bgezl_op,
60 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
61 */
62 case bcond_op:
63 switch (insn.i_format.rt) {
64 case bltz_op:
65 case bltzl_op:
66 if ((long)regs->regs[insn.i_format.rs] < 0)
67 epc = epc + 4 + (insn.i_format.simmediate << 2);
68 else
69 epc += 8;
70 regs->cp0_epc = epc;
71 break;
72
73 case bgez_op:
74 case bgezl_op:
75 if ((long)regs->regs[insn.i_format.rs] >= 0)
76 epc = epc + 4 + (insn.i_format.simmediate << 2);
77 else
78 epc += 8;
79 regs->cp0_epc = epc;
80 break;
81
82 case bltzal_op:
83 case bltzall_op:
84 regs->regs[31] = epc + 8;
85 if ((long)regs->regs[insn.i_format.rs] < 0)
86 epc = epc + 4 + (insn.i_format.simmediate << 2);
87 else
88 epc += 8;
89 regs->cp0_epc = epc;
90 break;
91
92 case bgezal_op:
93 case bgezall_op:
94 regs->regs[31] = epc + 8;
95 if ((long)regs->regs[insn.i_format.rs] >= 0)
96 epc = epc + 4 + (insn.i_format.simmediate << 2);
97 else
98 epc += 8;
99 regs->cp0_epc = epc;
100 break;
101 }
102 break;
103
104 /*
105 * These are unconditional and in j_format.
106 */
107 case jal_op:
108 regs->regs[31] = regs->cp0_epc + 8;
109 case j_op:
110 epc += 4;
111 epc >>= 28;
112 epc <<= 28;
113 epc |= (insn.j_format.target << 2);
114 regs->cp0_epc = epc;
115 break;
116
117 /*
118 * These are conditional and in i_format.
119 */
120 case beq_op:
121 case beql_op:
122 if (regs->regs[insn.i_format.rs] ==
123 regs->regs[insn.i_format.rt])
124 epc = epc + 4 + (insn.i_format.simmediate << 2);
125 else
126 epc += 8;
127 regs->cp0_epc = epc;
128 break;
129
130 case bne_op:
131 case bnel_op:
132 if (regs->regs[insn.i_format.rs] !=
133 regs->regs[insn.i_format.rt])
134 epc = epc + 4 + (insn.i_format.simmediate << 2);
135 else
136 epc += 8;
137 regs->cp0_epc = epc;
138 break;
139
140 case blez_op: /* not really i_format */
141 case blezl_op:
142 /* rt field assumed to be zero */
143 if ((long)regs->regs[insn.i_format.rs] <= 0)
144 epc = epc + 4 + (insn.i_format.simmediate << 2);
145 else
146 epc += 8;
147 regs->cp0_epc = epc;
148 break;
149
150 case bgtz_op:
151 case bgtzl_op:
152 /* rt field assumed to be zero */
153 if ((long)regs->regs[insn.i_format.rs] > 0)
154 epc = epc + 4 + (insn.i_format.simmediate << 2);
155 else
156 epc += 8;
157 regs->cp0_epc = epc;
158 break;
159
160 /*
161 * And now the FPA/cp1 branch instructions.
162 */
163 case cop1_op:
164 if (!cpu_has_fpu)
165 fcr31 = current->thread.fpu.soft.fcr31;
166 else
167 asm volatile("cfc1\t%0,$31" : "=r" (fcr31));
168 bit = (insn.i_format.rt >> 2);
169 bit += (bit != 0);
170 bit += 23;
171 switch (insn.i_format.rt) {
172 case 0: /* bc1f */
173 case 2: /* bc1fl */
174 if (~fcr31 & (1 << bit))
175 epc = epc + 4 + (insn.i_format.simmediate << 2);
176 else
177 epc += 8;
178 regs->cp0_epc = epc;
179 break;
180
181 case 1: /* bc1t */
182 case 3: /* bc1tl */
183 if (fcr31 & (1 << bit))
184 epc = epc + 4 + (insn.i_format.simmediate << 2);
185 else
186 epc += 8;
187 regs->cp0_epc = epc;
188 break;
189 }
190 break;
191 }
192
193 return 0;
194
195unaligned:
196 printk("%s: unaligned epc - sending SIGBUS.\n", current->comm);
197 force_sig(SIGBUS, current);
198 return -EFAULT;
199}
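
The branch emulation above always works relative to the delay slot: a taken branch resumes at epc + 4 + (simmediate << 2), while a not-taken (or annulled likely) branch skips the delay slot and resumes at epc + 8. The arithmetic sketch below is illustrative only, not part of the commit, and uses a hypothetical address; the shift is written as a multiply to keep the C well defined for negative displacements.

/* Illustrative sketch: return-EPC arithmetic for a MIPS conditional branch. */
#include <stdio.h>
#include <stdint.h>

static uint32_t branch_return_epc(uint32_t epc, int16_t simmediate, int taken)
{
	if (taken)
		return epc + 4 + (int32_t)simmediate * 4;  /* target, relative to delay slot */
	return epc + 8;                                    /* skip branch and delay slot     */
}

int main(void)
{
	uint32_t epc = 0x80001000u;   /* hypothetical branch address */

	/* branch with a displacement of -2 instructions (simm16 = -2) */
	printf("taken:     0x%08x\n", branch_return_epc(epc, -2, 1)); /* 0x80000ffc */
	printf("not taken: 0x%08x\n", branch_return_epc(epc, -2, 0)); /* 0x80001008 */
	return 0;
}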
diff --git a/arch/mips/kernel/cpu-bugs64.c b/arch/mips/kernel/cpu-bugs64.c
new file mode 100644
index 000000000000..11ebe5d4c446
--- /dev/null
+++ b/arch/mips/kernel/cpu-bugs64.c
@@ -0,0 +1,321 @@
1/*
2 * Copyright (C) 2003, 2004 Maciej W. Rozycki
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9#include <linux/config.h>
10#include <linux/init.h>
11#include <linux/kernel.h>
12#include <linux/ptrace.h>
13#include <linux/stddef.h>
14
15#include <asm/bugs.h>
16#include <asm/compiler.h>
17#include <asm/cpu.h>
18#include <asm/fpu.h>
19#include <asm/mipsregs.h>
20#include <asm/system.h>
21
22static inline void align_mod(const int align, const int mod)
23{
24 asm volatile(
25 ".set push\n\t"
26 ".set noreorder\n\t"
27 ".balign %0\n\t"
28 ".rept %1\n\t"
29 "nop\n\t"
30 ".endr\n\t"
31 ".set pop"
32 :
33 : "n" (align), "n" (mod));
34}
35
36static inline void mult_sh_align_mod(long *v1, long *v2, long *w,
37 const int align, const int mod)
38{
39 unsigned long flags;
40 int m1, m2;
41 long p, s, lv1, lv2, lw;
42
43 /*
44 * We want the multiply and the shift to be isolated from the
45 * rest of the code to disable gcc optimizations. Hence the
46 * asm statements that execute nothing, but make gcc not know
47 * what the values of m1, m2 and s are and what lv2 and p are
48 * used for.
49 */
50
51 local_irq_save(flags);
52 /*
53 * The following code leads to a wrong result of the first
54 * dsll32 when executed on R4000 rev. 2.2 or 3.0 (PRId
55 * 00000422 or 00000430, respectively).
56 *
57 * See "MIPS R4000PC/SC Errata, Processor Revision 2.2 and
58 * 3.0" by MIPS Technologies, Inc., errata #16 and #28 for
59 * details. I got no permission to duplicate them here,
60 * sigh... --macro
61 */
62 asm volatile(
63 ""
64 : "=r" (m1), "=r" (m2), "=r" (s)
65 : "0" (5), "1" (8), "2" (5));
66 align_mod(align, mod);
67 /*
68	 * The trailing nop is needed to fulfill the two-instruction
69	 * requirement between reading hi/lo and starting a mult/div.
70	 * Leaving it out may cause gas to insert a nop itself, breaking
71 * the desired alignment of the next chunk.
72 */
73 asm volatile(
74 ".set push\n\t"
75 ".set noat\n\t"
76 ".set noreorder\n\t"
77 ".set nomacro\n\t"
78 "mult %2, %3\n\t"
79 "dsll32 %0, %4, %5\n\t"
80 "mflo $0\n\t"
81 "dsll32 %1, %4, %5\n\t"
82 "nop\n\t"
83 ".set pop"
84 : "=&r" (lv1), "=r" (lw)
85 : "r" (m1), "r" (m2), "r" (s), "I" (0)
86 : "hi", "lo", GCC_REG_ACCUM);
87 /* We have to use single integers for m1 and m2 and a double
88	 * one for p to be sure gcc's mulsidi3 RTL multiplication
89	 * instruction has the workaround applied.  Older versions of
90 * gcc have correct umulsi3 and mulsi3, but other
91 * multiplication variants lack the workaround.
92 */
93 asm volatile(
94 ""
95 : "=r" (m1), "=r" (m2), "=r" (s)
96 : "0" (m1), "1" (m2), "2" (s));
97 align_mod(align, mod);
98 p = m1 * m2;
99 lv2 = s << 32;
100 asm volatile(
101 ""
102 : "=r" (lv2)
103 : "0" (lv2), "r" (p));
104 local_irq_restore(flags);
105
106 *v1 = lv1;
107 *v2 = lv2;
108 *w = lw;
109}
110
111static inline void check_mult_sh(void)
112{
113 long v1[8], v2[8], w[8];
114 int bug, fix, i;
115
116 printk("Checking for the multiply/shift bug... ");
117
118 /*
119 * Testing discovered false negatives for certain code offsets
120 * into cache lines. Hence we test all possible offsets for
121 * the worst assumption of an R4000 I-cache line width of 32
122 * bytes.
123 *
124 * We can't use a loop as alignment directives need to be
125 * immediates.
126 */
127 mult_sh_align_mod(&v1[0], &v2[0], &w[0], 32, 0);
128 mult_sh_align_mod(&v1[1], &v2[1], &w[1], 32, 1);
129 mult_sh_align_mod(&v1[2], &v2[2], &w[2], 32, 2);
130 mult_sh_align_mod(&v1[3], &v2[3], &w[3], 32, 3);
131 mult_sh_align_mod(&v1[4], &v2[4], &w[4], 32, 4);
132 mult_sh_align_mod(&v1[5], &v2[5], &w[5], 32, 5);
133 mult_sh_align_mod(&v1[6], &v2[6], &w[6], 32, 6);
134 mult_sh_align_mod(&v1[7], &v2[7], &w[7], 32, 7);
135
136 bug = 0;
137 for (i = 0; i < 8; i++)
138 if (v1[i] != w[i])
139 bug = 1;
140
141 if (bug == 0) {
142 printk("no.\n");
143 return;
144 }
145
146 printk("yes, workaround... ");
147
148 fix = 1;
149 for (i = 0; i < 8; i++)
150 if (v2[i] != w[i])
151 fix = 0;
152
153 if (fix == 1) {
154 printk("yes.\n");
155 return;
156 }
157
158 printk("no.\n");
159 panic("Reliable operation impossible!\n"
160#ifndef CONFIG_CPU_R4000
161 "Configure for R4000 to enable the workaround."
162#else
163 "Please report to <linux-mips@linux-mips.org>."
164#endif
165 );
166}
167
168static volatile int daddi_ov __initdata = 0;
169
170asmlinkage void __init do_daddi_ov(struct pt_regs *regs)
171{
172 daddi_ov = 1;
173 regs->cp0_epc += 4;
174}
175
176static inline void check_daddi(void)
177{
178 extern asmlinkage void handle_daddi_ov(void);
179 unsigned long flags;
180 void *handler;
181 long v, tmp;
182
183 printk("Checking for the daddi bug... ");
184
185 local_irq_save(flags);
186 handler = set_except_vector(12, handle_daddi_ov);
187 /*
188 * The following code fails to trigger an overflow exception
189 * when executed on R4000 rev. 2.2 or 3.0 (PRId 00000422 or
190 * 00000430, respectively).
191 *
192 * See "MIPS R4000PC/SC Errata, Processor Revision 2.2 and
193 * 3.0" by MIPS Technologies, Inc., erratum #23 for details.
194 * I got no permission to duplicate it here, sigh... --macro
195 */
196 asm volatile(
197 ".set push\n\t"
198 ".set noat\n\t"
199 ".set noreorder\n\t"
200 ".set nomacro\n\t"
201 "addiu %1, $0, %2\n\t"
202 "dsrl %1, %1, 1\n\t"
203#ifdef HAVE_AS_SET_DADDI
204 ".set daddi\n\t"
205#endif
206 "daddi %0, %1, %3\n\t"
207 ".set pop"
208 : "=r" (v), "=&r" (tmp)
209 : "I" (0xffffffffffffdb9a), "I" (0x1234));
210 set_except_vector(12, handler);
211 local_irq_restore(flags);
212
213 if (daddi_ov) {
214 printk("no.\n");
215 return;
216 }
217
218 printk("yes, workaround... ");
219
220 local_irq_save(flags);
221 handler = set_except_vector(12, handle_daddi_ov);
222 asm volatile(
223 "addiu %1, $0, %2\n\t"
224 "dsrl %1, %1, 1\n\t"
225 "daddi %0, %1, %3"
226 : "=r" (v), "=&r" (tmp)
227 : "I" (0xffffffffffffdb9a), "I" (0x1234));
228 set_except_vector(12, handler);
229 local_irq_restore(flags);
230
231 if (daddi_ov) {
232 printk("yes.\n");
233 return;
234 }
235
236 printk("no.\n");
237 panic("Reliable operation impossible!\n"
238#if !defined(CONFIG_CPU_R4000) && !defined(CONFIG_CPU_R4400)
239 "Configure for R4000 or R4400 to enable the workaround."
240#else
241 "Please report to <linux-mips@linux-mips.org>."
242#endif
243 );
244}
245
246static inline void check_daddiu(void)
247{
248 long v, w, tmp;
249
250 printk("Checking for the daddiu bug... ");
251
252 /*
253 * The following code leads to a wrong result of daddiu when
254 * executed on R4400 rev. 1.0 (PRId 00000440).
255 *
256 * See "MIPS R4400PC/SC Errata, Processor Revision 1.0" by
257 * MIPS Technologies, Inc., erratum #7 for details.
258 *
259 * According to "MIPS R4000PC/SC Errata, Processor Revision
260 * 2.2 and 3.0" by MIPS Technologies, Inc., erratum #41 this
261 * problem affects R4000 rev. 2.2 and 3.0 (PRId 00000422 and
262 * 00000430, respectively), too. Testing failed to trigger it
263 * so far.
264 *
265 * I got no permission to duplicate the errata here, sigh...
266 * --macro
267 */
268 asm volatile(
269 ".set push\n\t"
270 ".set noat\n\t"
271 ".set noreorder\n\t"
272 ".set nomacro\n\t"
273 "addiu %2, $0, %3\n\t"
274 "dsrl %2, %2, 1\n\t"
275#ifdef HAVE_AS_SET_DADDI
276 ".set daddi\n\t"
277#endif
278 "daddiu %0, %2, %4\n\t"
279 "addiu %1, $0, %4\n\t"
280 "daddu %1, %2\n\t"
281 ".set pop"
282 : "=&r" (v), "=&r" (w), "=&r" (tmp)
283 : "I" (0xffffffffffffdb9a), "I" (0x1234));
284
285 if (v == w) {
286 printk("no.\n");
287 return;
288 }
289
290 printk("yes, workaround... ");
291
292 asm volatile(
293 "addiu %2, $0, %3\n\t"
294 "dsrl %2, %2, 1\n\t"
295 "daddiu %0, %2, %4\n\t"
296 "addiu %1, $0, %4\n\t"
297 "daddu %1, %2"
298 : "=&r" (v), "=&r" (w), "=&r" (tmp)
299 : "I" (0xffffffffffffdb9a), "I" (0x1234));
300
301 if (v == w) {
302 printk("yes.\n");
303 return;
304 }
305
306 printk("no.\n");
307 panic("Reliable operation impossible!\n"
308#if !defined(CONFIG_CPU_R4000) && !defined(CONFIG_CPU_R4400)
309 "Configure for R4000 or R4400 to enable the workaround."
310#else
311 "Please report to <linux-mips@linux-mips.org>."
312#endif
313 );
314}
315
316void __init check_bugs64(void)
317{
318 check_mult_sh();
319 check_daddi();
320 check_daddiu();
321}
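
Each of the three checks above follows the same probe-then-verify shape: execute the erratum-sensitive sequence, compare it against a reference result, and if the erratum fires, confirm that the workaround build produces the right answer before allowing the boot to continue; otherwise panic. The sketch below reduces that control flow to stand-in probe functions and is illustrative only, not part of the commit and not the actual inline-assembly probes.

/* Illustrative sketch: the probe / verify-workaround / panic decision flow. */
#include <stdio.h>
#include <stdlib.h>

static long reference_result(void)    { return 40; }   /* what the CPU should compute     */
static long probe_fast_sequence(void) { return 40; }   /* possibly-buggy code sequence    */
static long probe_workaround(void)    { return 40; }   /* sequence with workaround applied */

static void check_erratum(const char *name)
{
	printf("Checking for the %s bug... ", name);

	if (probe_fast_sequence() == reference_result()) {
		printf("no.\n");
		return;                          /* CPU not affected */
	}

	printf("yes, workaround... ");
	if (probe_workaround() == reference_result()) {
		printf("yes.\n");
		return;                          /* workaround effective */
	}

	printf("no.\n");
	fprintf(stderr, "Reliable operation impossible!\n");
	exit(1);
}

int main(void)
{
	check_erratum("multiply/shift");
	return 0;
}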
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
new file mode 100644
index 000000000000..4bb849582314
--- /dev/null
+++ b/arch/mips/kernel/cpu-probe.c
@@ -0,0 +1,598 @@
1/*
2 * Processor capabilities determination functions.
3 *
4 * Copyright (C) xxxx the Anonymous
5 * Copyright (C) 2003 Maciej W. Rozycki
6 * Copyright (C) 1994 - 2003 Ralf Baechle
7 * Copyright (C) 2001 MIPS Inc.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14#include <linux/config.h>
15#include <linux/init.h>
16#include <linux/kernel.h>
17#include <linux/ptrace.h>
18#include <linux/stddef.h>
19
20#include <asm/bugs.h>
21#include <asm/cpu.h>
22#include <asm/fpu.h>
23#include <asm/mipsregs.h>
24#include <asm/system.h>
25
26/*
27 * Not all of the MIPS CPUs have the "wait" instruction available. Moreover,
28 * the implementation of the "wait" feature differs between CPU families.  This
29 * pointer selects the function that implements the CPU-specific wait.
30 * The wait instruction stops the pipeline and significantly reduces the
31 * power consumption of the CPU.
32 */
33void (*cpu_wait)(void) = NULL;
34
35static void r3081_wait(void)
36{
37 unsigned long cfg = read_c0_conf();
38 write_c0_conf(cfg | R30XX_CONF_HALT);
39}
40
41static void r39xx_wait(void)
42{
43 unsigned long cfg = read_c0_conf();
44 write_c0_conf(cfg | TX39_CONF_HALT);
45}
46
47static void r4k_wait(void)
48{
49 __asm__(".set\tmips3\n\t"
50 "wait\n\t"
51 ".set\tmips0");
52}
53
54/*
55 * The Au1xxx wait is available only if CONFIG_PM is enabled and
56 * the timer setup found a 32KHz counter available.
57 * There are still problems with functions that may call au1k_wait
58 * directly, but that will be discovered pretty quickly.
59 */
60extern void (*au1k_wait_ptr)(void);
61
62void au1k_wait(void)
63{
64#ifdef CONFIG_PM
65 /* using the wait instruction makes CP0 counter unusable */
66 __asm__(".set\tmips3\n\t"
67 "wait\n\t"
68 "nop\n\t"
69 "nop\n\t"
70 "nop\n\t"
71 "nop\n\t"
72 ".set\tmips0");
73#else
74 __asm__("nop\n\t"
75 "nop");
76#endif
77}
78
79static inline void check_wait(void)
80{
81 struct cpuinfo_mips *c = &current_cpu_data;
82
83 printk("Checking for 'wait' instruction... ");
84 switch (c->cputype) {
85 case CPU_R3081:
86 case CPU_R3081E:
87 cpu_wait = r3081_wait;
88 printk(" available.\n");
89 break;
90 case CPU_TX3927:
91 cpu_wait = r39xx_wait;
92 printk(" available.\n");
93 break;
94 case CPU_R4200:
95/* case CPU_R4300: */
96 case CPU_R4600:
97 case CPU_R4640:
98 case CPU_R4650:
99 case CPU_R4700:
100 case CPU_R5000:
101 case CPU_NEVADA:
102 case CPU_RM7000:
103 case CPU_RM9000:
104 case CPU_TX49XX:
105 case CPU_4KC:
106 case CPU_4KEC:
107 case CPU_4KSC:
108 case CPU_5KC:
109/* case CPU_20KC:*/
110 case CPU_24K:
111 case CPU_25KF:
112 cpu_wait = r4k_wait;
113 printk(" available.\n");
114 break;
115#ifdef CONFIG_PM
116 case CPU_AU1000:
117 case CPU_AU1100:
118 case CPU_AU1500:
119 if (au1k_wait_ptr != NULL) {
120 cpu_wait = au1k_wait_ptr;
121 printk(" available.\n");
122 }
123 else {
124 printk(" unavailable.\n");
125 }
126 break;
127#endif
128 default:
129 printk(" unavailable.\n");
130 break;
131 }
132}
133
134void __init check_bugs32(void)
135{
136 check_wait();
137}
138
139/*
140 * Probe whether the cpu has a config register by toggling the
141 * alternate cache bit and seeing whether it matters.
142 * It's used by cpu_probe to distinguish between R3000A and R3081.
143 */
144static inline int cpu_has_confreg(void)
145{
146#ifdef CONFIG_CPU_R3000
147 extern unsigned long r3k_cache_size(unsigned long);
148 unsigned long size1, size2;
149 unsigned long cfg = read_c0_conf();
150
151 size1 = r3k_cache_size(ST0_ISC);
152 write_c0_conf(cfg ^ R30XX_CONF_AC);
153 size2 = r3k_cache_size(ST0_ISC);
154 write_c0_conf(cfg);
155 return size1 != size2;
156#else
157 return 0;
158#endif
159}
160
161/*
162 * Get the FPU Implementation/Revision.
163 */
164static inline unsigned long cpu_get_fpu_id(void)
165{
166 unsigned long tmp, fpu_id;
167
168 tmp = read_c0_status();
169 __enable_fpu();
170 fpu_id = read_32bit_cp1_register(CP1_REVISION);
171 write_c0_status(tmp);
172 return fpu_id;
173}
174
175/*
176 * Check the CPU has an FPU the official way.
177 */
178static inline int __cpu_has_fpu(void)
179{
180 return ((cpu_get_fpu_id() & 0xff00) != FPIR_IMP_NONE);
181}
182
183#define R4K_OPTS (MIPS_CPU_TLB | MIPS_CPU_4KEX | MIPS_CPU_4KTLB \
184 | MIPS_CPU_COUNTER)
185
186static inline void cpu_probe_legacy(struct cpuinfo_mips *c)
187{
188 switch (c->processor_id & 0xff00) {
189 case PRID_IMP_R2000:
190 c->cputype = CPU_R2000;
191 c->isa_level = MIPS_CPU_ISA_I;
192 c->options = MIPS_CPU_TLB | MIPS_CPU_NOFPUEX;
193 if (__cpu_has_fpu())
194 c->options |= MIPS_CPU_FPU;
195 c->tlbsize = 64;
196 break;
197 case PRID_IMP_R3000:
198 if ((c->processor_id & 0xff) == PRID_REV_R3000A)
199 if (cpu_has_confreg())
200 c->cputype = CPU_R3081E;
201 else
202 c->cputype = CPU_R3000A;
203 else
204 c->cputype = CPU_R3000;
205 c->isa_level = MIPS_CPU_ISA_I;
206 c->options = MIPS_CPU_TLB | MIPS_CPU_NOFPUEX;
207 if (__cpu_has_fpu())
208 c->options |= MIPS_CPU_FPU;
209 c->tlbsize = 64;
210 break;
211 case PRID_IMP_R4000:
212 if (read_c0_config() & CONF_SC) {
213 if ((c->processor_id & 0xff) >= PRID_REV_R4400)
214 c->cputype = CPU_R4400PC;
215 else
216 c->cputype = CPU_R4000PC;
217 } else {
218 if ((c->processor_id & 0xff) >= PRID_REV_R4400)
219 c->cputype = CPU_R4400SC;
220 else
221 c->cputype = CPU_R4000SC;
222 }
223
224 c->isa_level = MIPS_CPU_ISA_III;
225 c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
226 MIPS_CPU_WATCH | MIPS_CPU_VCE |
227 MIPS_CPU_LLSC;
228 c->tlbsize = 48;
229 break;
230 case PRID_IMP_VR41XX:
231 switch (c->processor_id & 0xf0) {
232#ifndef CONFIG_VR4181
233 case PRID_REV_VR4111:
234 c->cputype = CPU_VR4111;
235 break;
236#else
237 case PRID_REV_VR4181:
238 c->cputype = CPU_VR4181;
239 break;
240#endif
241 case PRID_REV_VR4121:
242 c->cputype = CPU_VR4121;
243 break;
244 case PRID_REV_VR4122:
245 if ((c->processor_id & 0xf) < 0x3)
246 c->cputype = CPU_VR4122;
247 else
248 c->cputype = CPU_VR4181A;
249 break;
250 case PRID_REV_VR4130:
251 if ((c->processor_id & 0xf) < 0x4)
252 c->cputype = CPU_VR4131;
253 else
254 c->cputype = CPU_VR4133;
255 break;
256 default:
257 printk(KERN_INFO "Unexpected CPU of NEC VR4100 series\n");
258 c->cputype = CPU_VR41XX;
259 break;
260 }
261 c->isa_level = MIPS_CPU_ISA_III;
262 c->options = R4K_OPTS;
263 c->tlbsize = 32;
264 break;
265 case PRID_IMP_R4300:
266 c->cputype = CPU_R4300;
267 c->isa_level = MIPS_CPU_ISA_III;
268 c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
269 MIPS_CPU_LLSC;
270 c->tlbsize = 32;
271 break;
272 case PRID_IMP_R4600:
273 c->cputype = CPU_R4600;
274 c->isa_level = MIPS_CPU_ISA_III;
275 c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_LLSC;
276 c->tlbsize = 48;
277 break;
278 #if 0
279 case PRID_IMP_R4650:
280 /*
281 * This processor doesn't have an MMU, so it's not
282 * "real easy" to run Linux on it. It is left purely
283 * for documentation. Commented out because it shares
284 * it's c0_prid id number with the TX3900.
285 */
286 c->cputype = CPU_R4650;
287 c->isa_level = MIPS_CPU_ISA_III;
288 c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_LLSC;
289 c->tlbsize = 48;
290 break;
291 #endif
292 case PRID_IMP_TX39:
293 c->isa_level = MIPS_CPU_ISA_I;
294 c->options = MIPS_CPU_TLB;
295
296 if ((c->processor_id & 0xf0) == (PRID_REV_TX3927 & 0xf0)) {
297 c->cputype = CPU_TX3927;
298 c->tlbsize = 64;
299 } else {
300 switch (c->processor_id & 0xff) {
301 case PRID_REV_TX3912:
302 c->cputype = CPU_TX3912;
303 c->tlbsize = 32;
304 break;
305 case PRID_REV_TX3922:
306 c->cputype = CPU_TX3922;
307 c->tlbsize = 64;
308 break;
309 default:
310 c->cputype = CPU_UNKNOWN;
311 break;
312 }
313 }
314 break;
315 case PRID_IMP_R4700:
316 c->cputype = CPU_R4700;
317 c->isa_level = MIPS_CPU_ISA_III;
318 c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
319 MIPS_CPU_LLSC;
320 c->tlbsize = 48;
321 break;
322 case PRID_IMP_TX49:
323 c->cputype = CPU_TX49XX;
324 c->isa_level = MIPS_CPU_ISA_III;
325 c->options = R4K_OPTS | MIPS_CPU_LLSC;
326 if (!(c->processor_id & 0x08))
327 c->options |= MIPS_CPU_FPU | MIPS_CPU_32FPR;
328 c->tlbsize = 48;
329 break;
330 case PRID_IMP_R5000:
331 c->cputype = CPU_R5000;
332 c->isa_level = MIPS_CPU_ISA_IV;
333 c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
334 MIPS_CPU_LLSC;
335 c->tlbsize = 48;
336 break;
337 case PRID_IMP_R5432:
338 c->cputype = CPU_R5432;
339 c->isa_level = MIPS_CPU_ISA_IV;
340 c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
341 MIPS_CPU_WATCH | MIPS_CPU_LLSC;
342 c->tlbsize = 48;
343 break;
344 case PRID_IMP_R5500:
345 c->cputype = CPU_R5500;
346 c->isa_level = MIPS_CPU_ISA_IV;
347 c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
348 MIPS_CPU_WATCH | MIPS_CPU_LLSC;
349 c->tlbsize = 48;
350 break;
351 case PRID_IMP_NEVADA:
352 c->cputype = CPU_NEVADA;
353 c->isa_level = MIPS_CPU_ISA_IV;
354 c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
355 MIPS_CPU_DIVEC | MIPS_CPU_LLSC;
356 c->tlbsize = 48;
357 break;
358 case PRID_IMP_R6000:
359 c->cputype = CPU_R6000;
360 c->isa_level = MIPS_CPU_ISA_II;
361 c->options = MIPS_CPU_TLB | MIPS_CPU_FPU |
362 MIPS_CPU_LLSC;
363 c->tlbsize = 32;
364 break;
365 case PRID_IMP_R6000A:
366 c->cputype = CPU_R6000A;
367 c->isa_level = MIPS_CPU_ISA_II;
368 c->options = MIPS_CPU_TLB | MIPS_CPU_FPU |
369 MIPS_CPU_LLSC;
370 c->tlbsize = 32;
371 break;
372 case PRID_IMP_RM7000:
373 c->cputype = CPU_RM7000;
374 c->isa_level = MIPS_CPU_ISA_IV;
375 c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
376 MIPS_CPU_LLSC;
377 /*
378 * Undocumented RM7000: Bit 29 in the info register of
379 * the RM7000 v2.0 indicates if the TLB has 48 or 64
380 * entries.
381 *
382 * 29 1 => 64 entry JTLB
383 * 0 => 48 entry JTLB
384 */
385 c->tlbsize = (read_c0_info() & (1 << 29)) ? 64 : 48;
386 break;
387 case PRID_IMP_RM9000:
388 c->cputype = CPU_RM9000;
389 c->isa_level = MIPS_CPU_ISA_IV;
390 c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
391 MIPS_CPU_LLSC;
392 /*
393 * Bit 29 in the info register of the RM9000
394 * indicates if the TLB has 48 or 64 entries.
395 *
396 * 29 1 => 64 entry JTLB
397 * 0 => 48 entry JTLB
398 */
399 c->tlbsize = (read_c0_info() & (1 << 29)) ? 64 : 48;
400 break;
401 case PRID_IMP_R8000:
402 c->cputype = CPU_R8000;
403 c->isa_level = MIPS_CPU_ISA_IV;
404 c->options = MIPS_CPU_TLB | MIPS_CPU_4KEX |
405 MIPS_CPU_FPU | MIPS_CPU_32FPR |
406 MIPS_CPU_LLSC;
407 c->tlbsize = 384; /* has weird TLB: 3-way x 128 */
408 break;
409 case PRID_IMP_R10000:
410 c->cputype = CPU_R10000;
411 c->isa_level = MIPS_CPU_ISA_IV;
412 c->options = MIPS_CPU_TLB | MIPS_CPU_4KEX |
413 MIPS_CPU_FPU | MIPS_CPU_32FPR |
414 MIPS_CPU_COUNTER | MIPS_CPU_WATCH |
415 MIPS_CPU_LLSC;
416 c->tlbsize = 64;
417 break;
418 case PRID_IMP_R12000:
419 c->cputype = CPU_R12000;
420 c->isa_level = MIPS_CPU_ISA_IV;
421 c->options = MIPS_CPU_TLB | MIPS_CPU_4KEX |
422 MIPS_CPU_FPU | MIPS_CPU_32FPR |
423 MIPS_CPU_COUNTER | MIPS_CPU_WATCH |
424 MIPS_CPU_LLSC;
425 c->tlbsize = 64;
426 break;
427 }
428}
429
430static inline void decode_config1(struct cpuinfo_mips *c)
431{
432 unsigned long config0 = read_c0_config();
433 unsigned long config1;
434
435 if ((config0 & (1 << 31)) == 0)
436		return;			/* actually worth a panic() */
437
438 /* MIPS32 or MIPS64 compliant CPU. Read Config 1 register. */
439 c->options = MIPS_CPU_TLB | MIPS_CPU_4KEX |
440 MIPS_CPU_4KTLB | MIPS_CPU_COUNTER | MIPS_CPU_DIVEC |
441 MIPS_CPU_LLSC | MIPS_CPU_MCHECK;
442 config1 = read_c0_config1();
443 if (config1 & (1 << 3))
444 c->options |= MIPS_CPU_WATCH;
445 if (config1 & (1 << 2))
446 c->options |= MIPS_CPU_MIPS16;
447 if (config1 & (1 << 1))
448 c->options |= MIPS_CPU_EJTAG;
449 if (config1 & 1) {
450 c->options |= MIPS_CPU_FPU;
451 c->options |= MIPS_CPU_32FPR;
452 }
453 c->scache.flags = MIPS_CACHE_NOT_PRESENT;
454
455 c->tlbsize = ((config1 >> 25) & 0x3f) + 1;
456}
457
458static inline void cpu_probe_mips(struct cpuinfo_mips *c)
459{
460 decode_config1(c);
461 switch (c->processor_id & 0xff00) {
462 case PRID_IMP_4KC:
463 c->cputype = CPU_4KC;
464 c->isa_level = MIPS_CPU_ISA_M32;
465 break;
466 case PRID_IMP_4KEC:
467 c->cputype = CPU_4KEC;
468 c->isa_level = MIPS_CPU_ISA_M32;
469 break;
470 case PRID_IMP_4KSC:
471 c->cputype = CPU_4KSC;
472 c->isa_level = MIPS_CPU_ISA_M32;
473 break;
474 case PRID_IMP_5KC:
475 c->cputype = CPU_5KC;
476 c->isa_level = MIPS_CPU_ISA_M64;
477 break;
478 case PRID_IMP_20KC:
479 c->cputype = CPU_20KC;
480 c->isa_level = MIPS_CPU_ISA_M64;
481 break;
482 case PRID_IMP_24K:
483 c->cputype = CPU_24K;
484 c->isa_level = MIPS_CPU_ISA_M32;
485 break;
486 case PRID_IMP_25KF:
487 c->cputype = CPU_25KF;
488 c->isa_level = MIPS_CPU_ISA_M64;
489 /* Probe for L2 cache */
490 c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT;
491 break;
492 }
493}
494
495static inline void cpu_probe_alchemy(struct cpuinfo_mips *c)
496{
497 decode_config1(c);
498 switch (c->processor_id & 0xff00) {
499 case PRID_IMP_AU1_REV1:
500 case PRID_IMP_AU1_REV2:
501 switch ((c->processor_id >> 24) & 0xff) {
502 case 0:
503 c->cputype = CPU_AU1000;
504 break;
505 case 1:
506 c->cputype = CPU_AU1500;
507 break;
508 case 2:
509 c->cputype = CPU_AU1100;
510 break;
511 case 3:
512 c->cputype = CPU_AU1550;
513 break;
514 default:
515 panic("Unknown Au Core!");
516 break;
517 }
518 c->isa_level = MIPS_CPU_ISA_M32;
519 break;
520 }
521}
522
523static inline void cpu_probe_sibyte(struct cpuinfo_mips *c)
524{
525 decode_config1(c);
526 switch (c->processor_id & 0xff00) {
527 case PRID_IMP_SB1:
528 c->cputype = CPU_SB1;
529 c->isa_level = MIPS_CPU_ISA_M64;
530 c->options = MIPS_CPU_TLB | MIPS_CPU_4KEX |
531 MIPS_CPU_COUNTER | MIPS_CPU_DIVEC |
532 MIPS_CPU_MCHECK | MIPS_CPU_EJTAG |
533 MIPS_CPU_WATCH | MIPS_CPU_LLSC;
534#ifndef CONFIG_SB1_PASS_1_WORKAROUNDS
535 /* FPU in pass1 is known to have issues. */
536 c->options |= MIPS_CPU_FPU | MIPS_CPU_32FPR;
537#endif
538 break;
539 }
540}
541
542static inline void cpu_probe_sandcraft(struct cpuinfo_mips *c)
543{
544 decode_config1(c);
545 switch (c->processor_id & 0xff00) {
546 case PRID_IMP_SR71000:
547 c->cputype = CPU_SR71000;
548 c->isa_level = MIPS_CPU_ISA_M64;
549 c->options = MIPS_CPU_TLB | MIPS_CPU_4KEX |
550 MIPS_CPU_4KTLB | MIPS_CPU_FPU |
551 MIPS_CPU_COUNTER | MIPS_CPU_MCHECK;
552 c->scache.ways = 8;
553 c->tlbsize = 64;
554 break;
555 }
556}
557
558__init void cpu_probe(void)
559{
560 struct cpuinfo_mips *c = &current_cpu_data;
561
562 c->processor_id = PRID_IMP_UNKNOWN;
563 c->fpu_id = FPIR_IMP_NONE;
564 c->cputype = CPU_UNKNOWN;
565
566 c->processor_id = read_c0_prid();
567 switch (c->processor_id & 0xff0000) {
568 case PRID_COMP_LEGACY:
569 cpu_probe_legacy(c);
570 break;
571 case PRID_COMP_MIPS:
572 cpu_probe_mips(c);
573 break;
574 case PRID_COMP_ALCHEMY:
575 cpu_probe_alchemy(c);
576 break;
577 case PRID_COMP_SIBYTE:
578 cpu_probe_sibyte(c);
579 break;
580
581 case PRID_COMP_SANDCRAFT:
582 cpu_probe_sandcraft(c);
583 break;
584 default:
585 c->cputype = CPU_UNKNOWN;
586 }
587 if (c->options & MIPS_CPU_FPU)
588 c->fpu_id = cpu_get_fpu_id();
589}
590
591__init void cpu_report(void)
592{
593 struct cpuinfo_mips *c = &current_cpu_data;
594
595 printk("CPU revision is: %08x\n", c->processor_id);
596 if (c->options & MIPS_CPU_FPU)
597 printk("FPU revision is: %08x\n", c->fpu_id);
598}
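
cpu_probe() above keys first on processor_id & 0xff0000 to pick a vendor-specific probe routine, which then switches on the & 0xff00 (implementation) and & 0xff (revision) fields. The decoder below is an illustrative stand-alone sketch only, not part of the commit; it just splits those fields, using the R4000 rev. 2.2 PRId value 0x00000422 quoted in the cpu-bugs64.c comments above as the sample input.

/* Illustrative sketch: split a MIPS PRId value into its dispatch fields. */
#include <stdio.h>
#include <stdint.h>

struct prid_fields {
	unsigned company;        /* processor_id & 0xff0000, shifted down */
	unsigned implementation; /* processor_id & 0x00ff00, shifted down */
	unsigned revision;       /* processor_id & 0x0000ff               */
};

static struct prid_fields decode_prid(uint32_t prid)
{
	struct prid_fields f = {
		.company        = (prid >> 16) & 0xff,
		.implementation = (prid >>  8) & 0xff,
		.revision       =  prid        & 0xff,
	};
	return f;
}

int main(void)
{
	/* the R4000 rev. 2.2 PRId quoted in the cpu-bugs64.c comments */
	struct prid_fields f = decode_prid(0x00000422);

	printf("company %#x  implementation %#x  revision %#x\n",
	       f.company, f.implementation, f.revision);
	return 0;
}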
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
new file mode 100644
index 000000000000..5eb429137e06
--- /dev/null
+++ b/arch/mips/kernel/entry.S
@@ -0,0 +1,155 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
7 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
8 * Copyright (C) 2001 MIPS Technologies, Inc.
9 */
10#include <linux/config.h>
11
12#include <asm/asm.h>
13#include <asm/asmmacro.h>
14#include <asm/regdef.h>
15#include <asm/mipsregs.h>
16#include <asm/stackframe.h>
17#include <asm/isadep.h>
18#include <asm/thread_info.h>
19#include <asm/war.h>
20
21#ifdef CONFIG_PREEMPT
22 .macro preempt_stop reg=t0
23 .endm
24#else
25 .macro preempt_stop reg=t0
26 local_irq_disable \reg
27 .endm
28#define resume_kernel restore_all
29#endif
30
31 .text
32 .align 5
33FEXPORT(ret_from_exception)
34 preempt_stop
35FEXPORT(ret_from_irq)
36 LONG_L t0, PT_STATUS(sp) # returning to kernel mode?
37 andi t0, t0, KU_USER
38 beqz t0, resume_kernel
39
40FEXPORT(resume_userspace)
41 local_irq_disable t0 # make sure we dont miss an
42 # interrupt setting need_resched
43 # between sampling and return
44 LONG_L a2, TI_FLAGS($28) # current->work
45 andi a2, _TIF_WORK_MASK # (ignoring syscall_trace)
46 bnez a2, work_pending
47 j restore_all
48
49#ifdef CONFIG_PREEMPT
50ENTRY(resume_kernel)
51 lw t0, TI_PRE_COUNT($28)
52 bnez t0, restore_all
53need_resched:
54 LONG_L t0, TI_FLAGS($28)
55 andi t1, t0, _TIF_NEED_RESCHED
56 beqz t1, restore_all
57 LONG_L t0, PT_STATUS(sp) # Interrupts off?
58 andi t0, 1
59 beqz t0, restore_all
60 li t0, PREEMPT_ACTIVE
61 sw t0, TI_PRE_COUNT($28)
62 local_irq_enable t0
63 jal schedule
64 sw zero, TI_PRE_COUNT($28)
65 local_irq_disable t0
66 b need_resched
67#endif
68
69FEXPORT(ret_from_fork)
70 jal schedule_tail # a0 = task_t *prev
71
72FEXPORT(syscall_exit)
73 local_irq_disable # make sure need_resched and
74 # signals dont change between
75 # sampling and return
76 LONG_L a2, TI_FLAGS($28) # current->work
77 li t0, _TIF_ALLWORK_MASK
78 and t0, a2, t0
79 bnez t0, syscall_exit_work
80
81FEXPORT(restore_all) # restore full frame
82 .set noat
83 RESTORE_TEMP
84 RESTORE_AT
85 RESTORE_STATIC
86FEXPORT(restore_partial) # restore partial frame
87 RESTORE_SOME
88 RESTORE_SP_AND_RET
89 .set at
90
91FEXPORT(work_pending)
92 andi t0, a2, _TIF_NEED_RESCHED
93 beqz t0, work_notifysig
94work_resched:
95 jal schedule
96
97 local_irq_disable t0 # make sure need_resched and
98 # signals dont change between
99 # sampling and return
100 LONG_L a2, TI_FLAGS($28)
101 andi t0, a2, _TIF_WORK_MASK # is there any work to be done
102 # other than syscall tracing?
103 beqz t0, restore_all
104 andi t0, a2, _TIF_NEED_RESCHED
105 bnez t0, work_resched
106
107work_notifysig: # deal with pending signals and
108 # notify-resume requests
109 move a0, sp
110 li a1, 0
111 jal do_notify_resume # a2 already loaded
112 j restore_all
113
114FEXPORT(syscall_exit_work_partial)
115 SAVE_STATIC
116FEXPORT(syscall_exit_work)
117 LONG_L t0, TI_FLAGS($28)
118 li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
119 and t0, t1
120 beqz t0, work_pending # trace bit is set
121 local_irq_enable # could let do_syscall_trace()
122 # call schedule() instead
123 move a0, sp
124 li a1, 1
125 jal do_syscall_trace
126 b resume_userspace
127
128/*
129 * Common spurious interrupt handler.
130 */
131 .text
132 .align 5
133LEAF(spurious_interrupt)
134 /*
135 * Someone tried to fool us by sending an interrupt but we
136 * couldn't find a cause for it.
137 */
138#ifdef CONFIG_SMP
139 lui t1, %hi(irq_err_count)
1401: ll t0, %lo(irq_err_count)(t1)
141 addiu t0, 1
142 sc t0, %lo(irq_err_count)(t1)
143#if R10000_LLSC_WAR
144 beqzl t0, 1b
145#else
146 beqz t0, 1b
147#endif
148#else
149 lui t1, %hi(irq_err_count)
150 lw t0, %lo(irq_err_count)(t1)
151 addiu t0, 1
152 sw t0, %lo(irq_err_count)(t1)
153#endif
154 j ret_from_irq
155 END(spurious_interrupt)
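
The assembler path above is effectively a loop: resume_userspace re-samples TI_FLAGS with interrupts disabled so the check cannot race with an interrupt setting need_resched, branches to work_pending if anything is set, prefers rescheduling over signal delivery, and only reaches restore_all once the work mask is clear. The C restatement below is an illustrative sketch only, not part of the commit; the flag names and helpers are stand-ins, not the kernel's.

/* Illustrative sketch: the return-to-userspace work loop from entry.S. */
#include <stdio.h>

#define EX_TIF_NEED_RESCHED  0x01u
#define EX_TIF_SIGPENDING    0x02u
#define EX_TIF_WORK_MASK     (EX_TIF_NEED_RESCHED | EX_TIF_SIGPENDING)

static unsigned thread_flags = EX_TIF_NEED_RESCHED | EX_TIF_SIGPENDING;

static void local_irq_disable(void) { /* stub for the example */ }
static void schedule(void)          { puts("schedule()");         thread_flags &= ~EX_TIF_NEED_RESCHED; }
static void do_notify_resume(void)  { puts("do_notify_resume()"); thread_flags &= ~EX_TIF_SIGPENDING; }

static void resume_userspace(void)
{
	for (;;) {
		local_irq_disable();             /* sample flags with irqs off      */
		unsigned work = thread_flags & EX_TIF_WORK_MASK;

		if (!work)
			break;                   /* restore_all: return to userspace */
		if (work & EX_TIF_NEED_RESCHED)
			schedule();              /* work_resched                     */
		else
			do_notify_resume();      /* work_notifysig                   */
	}
	puts("restore_all");
}

int main(void)
{
	resume_userspace();
	return 0;
}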
diff --git a/arch/mips/kernel/gdb-low.S b/arch/mips/kernel/gdb-low.S
new file mode 100644
index 000000000000..ece6ddaf7011
--- /dev/null
+++ b/arch/mips/kernel/gdb-low.S
@@ -0,0 +1,370 @@
1/*
2 * gdb-low.S contains the low-level trap handler for the GDB stub.
3 *
4 * Copyright (C) 1995 Andreas Busse
5 */
6#include <linux/config.h>
7#include <linux/sys.h>
8
9#include <asm/asm.h>
10#include <asm/errno.h>
11#include <asm/mipsregs.h>
12#include <asm/regdef.h>
13#include <asm/stackframe.h>
14#include <asm/gdb-stub.h>
15
16#ifdef CONFIG_MIPS32
17#define DMFC0 mfc0
18#define DMTC0 mtc0
19#define LDC1 lwc1
20#define SDC1 lwc1
21#endif
22#ifdef CONFIG_MIPS64
23#define DMFC0 dmfc0
24#define DMTC0 dmtc0
25#define LDC1 ldc1
26#define SDC1 ldc1
27#endif
28
29/*
30 * [jsun] We reserve about 2x GDB_FR_SIZE on the stack.  The lower (addressed)
31 * part is used to store registers and is passed to the exception handler.
32 * The upper part is reserved for the "call func" feature, where the gdb client
33 * saves some of the regs, sets up a call frame and passes args.
34 *
35 * A trace shows about 200 bytes are used to store about half of all regs.
36 * The rest should be big enough for frame setup and passing args.
37 */
38
39/*
40 * The low level trap handler
41 */
42 .align 5
43 NESTED(trap_low, GDB_FR_SIZE, sp)
44 .set noat
45 .set noreorder
46
47 mfc0 k0, CP0_STATUS
48 sll k0, 3 /* extract cu0 bit */
49 bltz k0, 1f
50 move k1, sp
51
52 /*
53 * Called from user mode, go somewhere else.
54 */
55 lui k1, %hi(saved_vectors)
56 mfc0 k0, CP0_CAUSE
57 andi k0, k0, 0x7c
58 add k1, k1, k0
59 lw k0, %lo(saved_vectors)(k1)
60 jr k0
61 nop
621:
63 move k0, sp
64 subu sp, k1, GDB_FR_SIZE*2 # see comment above
65 LONG_S k0, GDB_FR_REG29(sp)
66 LONG_S $2, GDB_FR_REG2(sp)
67
68/*
69 * First save the CP0 and special registers
70 */
71
72 mfc0 v0, CP0_STATUS
73 LONG_S v0, GDB_FR_STATUS(sp)
74 mfc0 v0, CP0_CAUSE
75 LONG_S v0, GDB_FR_CAUSE(sp)
76 DMFC0 v0, CP0_EPC
77 LONG_S v0, GDB_FR_EPC(sp)
78 DMFC0 v0, CP0_BADVADDR
79 LONG_S v0, GDB_FR_BADVADDR(sp)
80 mfhi v0
81 LONG_S v0, GDB_FR_HI(sp)
82 mflo v0
83 LONG_S v0, GDB_FR_LO(sp)
84
85/*
86 * Now the integer registers
87 */
88
89 LONG_S zero, GDB_FR_REG0(sp) /* I know... */
90 LONG_S $1, GDB_FR_REG1(sp)
91 /* v0 already saved */
92 LONG_S $3, GDB_FR_REG3(sp)
93 LONG_S $4, GDB_FR_REG4(sp)
94 LONG_S $5, GDB_FR_REG5(sp)
95 LONG_S $6, GDB_FR_REG6(sp)
96 LONG_S $7, GDB_FR_REG7(sp)
97 LONG_S $8, GDB_FR_REG8(sp)
98 LONG_S $9, GDB_FR_REG9(sp)
99 LONG_S $10, GDB_FR_REG10(sp)
100 LONG_S $11, GDB_FR_REG11(sp)
101 LONG_S $12, GDB_FR_REG12(sp)
102 LONG_S $13, GDB_FR_REG13(sp)
103 LONG_S $14, GDB_FR_REG14(sp)
104 LONG_S $15, GDB_FR_REG15(sp)
105 LONG_S $16, GDB_FR_REG16(sp)
106 LONG_S $17, GDB_FR_REG17(sp)
107 LONG_S $18, GDB_FR_REG18(sp)
108 LONG_S $19, GDB_FR_REG19(sp)
109 LONG_S $20, GDB_FR_REG20(sp)
110 LONG_S $21, GDB_FR_REG21(sp)
111 LONG_S $22, GDB_FR_REG22(sp)
112 LONG_S $23, GDB_FR_REG23(sp)
113 LONG_S $24, GDB_FR_REG24(sp)
114 LONG_S $25, GDB_FR_REG25(sp)
115 LONG_S $26, GDB_FR_REG26(sp)
116 LONG_S $27, GDB_FR_REG27(sp)
117 LONG_S $28, GDB_FR_REG28(sp)
118 /* sp already saved */
119 LONG_S $30, GDB_FR_REG30(sp)
120 LONG_S $31, GDB_FR_REG31(sp)
121
122 CLI /* disable interrupts */
123
124/*
125 * Followed by the floating point registers
126 */
127 mfc0 v0, CP0_STATUS /* FPU enabled? */
128 srl v0, v0, 16
129 andi v0, v0, (ST0_CU1 >> 16)
130
131 beqz v0,2f /* disabled, skip */
132 nop
133
134 SDC1 $0, GDB_FR_FPR0(sp)
135 SDC1 $1, GDB_FR_FPR1(sp)
136 SDC1 $2, GDB_FR_FPR2(sp)
137 SDC1 $3, GDB_FR_FPR3(sp)
138 SDC1 $4, GDB_FR_FPR4(sp)
139 SDC1 $5, GDB_FR_FPR5(sp)
140 SDC1 $6, GDB_FR_FPR6(sp)
141 SDC1 $7, GDB_FR_FPR7(sp)
142 SDC1 $8, GDB_FR_FPR8(sp)
143 SDC1 $9, GDB_FR_FPR9(sp)
144 SDC1 $10, GDB_FR_FPR10(sp)
145 SDC1 $11, GDB_FR_FPR11(sp)
146 SDC1 $12, GDB_FR_FPR12(sp)
147 SDC1 $13, GDB_FR_FPR13(sp)
148 SDC1 $14, GDB_FR_FPR14(sp)
149 SDC1 $15, GDB_FR_FPR15(sp)
150 SDC1 $16, GDB_FR_FPR16(sp)
151 SDC1 $17, GDB_FR_FPR17(sp)
152 SDC1 $18, GDB_FR_FPR18(sp)
153 SDC1 $19, GDB_FR_FPR19(sp)
154 SDC1 $20, GDB_FR_FPR20(sp)
155 SDC1 $21, GDB_FR_FPR21(sp)
156 SDC1 $22, GDB_FR_FPR22(sp)
157 SDC1 $23, GDB_FR_FPR23(sp)
158 SDC1 $24, GDB_FR_FPR24(sp)
159 SDC1 $25, GDB_FR_FPR25(sp)
160 SDC1 $26, GDB_FR_FPR26(sp)
161 SDC1 $27, GDB_FR_FPR27(sp)
162 SDC1 $28, GDB_FR_FPR28(sp)
163 SDC1 $29, GDB_FR_FPR29(sp)
164 SDC1 $30, GDB_FR_FPR30(sp)
165 SDC1 $31, GDB_FR_FPR31(sp)
166
167/*
168 * FPU control registers
169 */
170
171 cfc1 v0, CP1_STATUS
172 LONG_S v0, GDB_FR_FSR(sp)
173 cfc1 v0, CP1_REVISION
174 LONG_S v0, GDB_FR_FIR(sp)
175
176/*
177 * Current stack frame ptr
178 */
179
1802:
181 LONG_S sp, GDB_FR_FRP(sp)
182
183/*
184 * CP0 registers (R4000/R4400 unused registers skipped)
185 */
186
187 mfc0 v0, CP0_INDEX
188 LONG_S v0, GDB_FR_CP0_INDEX(sp)
189 mfc0 v0, CP0_RANDOM
190 LONG_S v0, GDB_FR_CP0_RANDOM(sp)
191 DMFC0 v0, CP0_ENTRYLO0
192 LONG_S v0, GDB_FR_CP0_ENTRYLO0(sp)
193 DMFC0 v0, CP0_ENTRYLO1
194 LONG_S v0, GDB_FR_CP0_ENTRYLO1(sp)
195 DMFC0 v0, CP0_CONTEXT
196 LONG_S v0, GDB_FR_CP0_CONTEXT(sp)
197 mfc0 v0, CP0_PAGEMASK
198 LONG_S v0, GDB_FR_CP0_PAGEMASK(sp)
199 mfc0 v0, CP0_WIRED
200 LONG_S v0, GDB_FR_CP0_WIRED(sp)
201 DMFC0 v0, CP0_ENTRYHI
202 LONG_S v0, GDB_FR_CP0_ENTRYHI(sp)
203 mfc0 v0, CP0_PRID
204 LONG_S v0, GDB_FR_CP0_PRID(sp)
205
206 .set at
207
208/*
209 * Continue with the higher level handler
210 */
211
212 move a0,sp
213
214 jal handle_exception
215 nop
216
217/*
218 * Restore all writable registers, in reverse order
219 */
220
221 .set noat
222
223 LONG_L v0, GDB_FR_CP0_ENTRYHI(sp)
224 LONG_L v1, GDB_FR_CP0_WIRED(sp)
225 DMTC0 v0, CP0_ENTRYHI
226 mtc0 v1, CP0_WIRED
227 LONG_L v0, GDB_FR_CP0_PAGEMASK(sp)
228 LONG_L v1, GDB_FR_CP0_ENTRYLO1(sp)
229 mtc0 v0, CP0_PAGEMASK
230 DMTC0 v1, CP0_ENTRYLO1
231 LONG_L v0, GDB_FR_CP0_ENTRYLO0(sp)
232 LONG_L v1, GDB_FR_CP0_INDEX(sp)
233 DMTC0 v0, CP0_ENTRYLO0
234 LONG_L v0, GDB_FR_CP0_CONTEXT(sp)
235 mtc0 v1, CP0_INDEX
236 DMTC0 v0, CP0_CONTEXT
237
238
239/*
240 * Next, the floating point registers
241 */
242 mfc0 v0, CP0_STATUS /* check if the FPU is enabled */
243 srl v0, v0, 16
244 andi v0, v0, (ST0_CU1 >> 16)
245
246 beqz v0, 3f /* disabled, skip */
247 nop
248
249 LDC1 $31, GDB_FR_FPR31(sp)
250 LDC1 $30, GDB_FR_FPR30(sp)
251 LDC1 $29, GDB_FR_FPR29(sp)
252 LDC1 $28, GDB_FR_FPR28(sp)
253 LDC1 $27, GDB_FR_FPR27(sp)
254 LDC1 $26, GDB_FR_FPR26(sp)
255 LDC1 $25, GDB_FR_FPR25(sp)
256 LDC1 $24, GDB_FR_FPR24(sp)
257 LDC1 $23, GDB_FR_FPR23(sp)
258 LDC1 $22, GDB_FR_FPR22(sp)
259 LDC1 $21, GDB_FR_FPR21(sp)
260 LDC1 $20, GDB_FR_FPR20(sp)
261 LDC1 $19, GDB_FR_FPR19(sp)
262 LDC1 $18, GDB_FR_FPR18(sp)
263 LDC1 $17, GDB_FR_FPR17(sp)
264 LDC1 $16, GDB_FR_FPR16(sp)
265 LDC1 $15, GDB_FR_FPR15(sp)
266 LDC1 $14, GDB_FR_FPR14(sp)
267 LDC1 $13, GDB_FR_FPR13(sp)
268 LDC1 $12, GDB_FR_FPR12(sp)
269 LDC1 $11, GDB_FR_FPR11(sp)
270 LDC1 $10, GDB_FR_FPR10(sp)
271 LDC1 $9, GDB_FR_FPR9(sp)
272 LDC1 $8, GDB_FR_FPR8(sp)
273 LDC1 $7, GDB_FR_FPR7(sp)
274 LDC1 $6, GDB_FR_FPR6(sp)
275 LDC1 $5, GDB_FR_FPR5(sp)
276 LDC1 $4, GDB_FR_FPR4(sp)
277 LDC1 $3, GDB_FR_FPR3(sp)
278 LDC1 $2, GDB_FR_FPR2(sp)
279 LDC1 $1, GDB_FR_FPR1(sp)
280 LDC1 $0, GDB_FR_FPR0(sp)
281
282/*
283 * Now the CP0 and integer registers
284 */
285
2863:
287 mfc0 t0, CP0_STATUS
288 ori t0, 0x1f
289 xori t0, 0x1f
290 mtc0 t0, CP0_STATUS
291
292 LONG_L v0, GDB_FR_STATUS(sp)
293 LONG_L v1, GDB_FR_EPC(sp)
294 mtc0 v0, CP0_STATUS
295 DMTC0 v1, CP0_EPC
296 LONG_L v0, GDB_FR_HI(sp)
297 LONG_L v1, GDB_FR_LO(sp)
298 mthi v0
299 mtlo v1
300 LONG_L $31, GDB_FR_REG31(sp)
301 LONG_L $30, GDB_FR_REG30(sp)
302 LONG_L $28, GDB_FR_REG28(sp)
303 LONG_L $27, GDB_FR_REG27(sp)
304 LONG_L $26, GDB_FR_REG26(sp)
305 LONG_L $25, GDB_FR_REG25(sp)
306 LONG_L $24, GDB_FR_REG24(sp)
307 LONG_L $23, GDB_FR_REG23(sp)
308 LONG_L $22, GDB_FR_REG22(sp)
309 LONG_L $21, GDB_FR_REG21(sp)
310 LONG_L $20, GDB_FR_REG20(sp)
311 LONG_L $19, GDB_FR_REG19(sp)
312 LONG_L $18, GDB_FR_REG18(sp)
313 LONG_L $17, GDB_FR_REG17(sp)
314 LONG_L $16, GDB_FR_REG16(sp)
315 LONG_L $15, GDB_FR_REG15(sp)
316 LONG_L $14, GDB_FR_REG14(sp)
317 LONG_L $13, GDB_FR_REG13(sp)
318 LONG_L $12, GDB_FR_REG12(sp)
319 LONG_L $11, GDB_FR_REG11(sp)
320 LONG_L $10, GDB_FR_REG10(sp)
321 LONG_L $9, GDB_FR_REG9(sp)
322 LONG_L $8, GDB_FR_REG8(sp)
323 LONG_L $7, GDB_FR_REG7(sp)
324 LONG_L $6, GDB_FR_REG6(sp)
325 LONG_L $5, GDB_FR_REG5(sp)
326 LONG_L $4, GDB_FR_REG4(sp)
327 LONG_L $3, GDB_FR_REG3(sp)
328 LONG_L $2, GDB_FR_REG2(sp)
329 LONG_L $1, GDB_FR_REG1(sp)
330#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
331 LONG_L k0, GDB_FR_EPC(sp)
332 LONG_L $29, GDB_FR_REG29(sp) /* Deallocate stack */
333 jr k0
334 rfe
335#else
336 LONG_L sp, GDB_FR_REG29(sp) /* Deallocate stack */
337
338 .set mips3
339 eret
340 .set mips0
341#endif
342 .set at
343 .set reorder
344 END(trap_low)
345
346LEAF(kgdb_read_byte)
3474: lb t0, (a0)
348 sb t0, (a1)
349 li v0, 0
350 jr ra
351 .section __ex_table,"a"
352 PTR 4b, kgdbfault
353 .previous
354 END(kgdb_read_byte)
355
356LEAF(kgdb_write_byte)
3575: sb a0, (a1)
358 li v0, 0
359 jr ra
360 .section __ex_table,"a"
361 PTR 5b, kgdbfault
362 .previous
363 END(kgdb_write_byte)
364
365 .type kgdbfault@function
366 .ent kgdbfault
367
368kgdbfault: li v0, -EFAULT
369 jr ra
370 .end kgdbfault
diff --git a/arch/mips/kernel/gdb-stub.c b/arch/mips/kernel/gdb-stub.c
new file mode 100644
index 000000000000..269889302a27
--- /dev/null
+++ b/arch/mips/kernel/gdb-stub.c
@@ -0,0 +1,1091 @@
1/*
2 * arch/mips/kernel/gdb-stub.c
3 *
4 * Originally written by Glenn Engel, Lake Stevens Instrument Division
5 *
6 * Contributed by HP Systems
7 *
8 * Modified for SPARC by Stu Grossman, Cygnus Support.
9 *
10 * Modified for Linux/MIPS (and MIPS in general) by Andreas Busse
11 * Send complaints, suggestions etc. to <andy@waldorf-gmbh.de>
12 *
13 * Copyright (C) 1995 Andreas Busse
14 *
15 * Copyright (C) 2003 MontaVista Software Inc.
16 * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
17 */
18
19/*
20 * To enable debugger support, two things need to happen. One, a
21 * call to set_debug_traps() is necessary in order to allow any breakpoints
22 * or error conditions to be properly intercepted and reported to gdb.
23 * Two, a breakpoint needs to be generated to begin communication. This
24 * is most easily accomplished by a call to breakpoint(). Breakpoint()
25 * simulates a breakpoint by executing a BREAK instruction.
26 *
27 *
28 * The following gdb commands are supported:
29 *
30 * command function Return value
31 *
32 * g return the value of the CPU registers hex data or ENN
33 * G set the value of the CPU registers OK or ENN
34 *
35 * mAA..AA,LLLL Read LLLL bytes at address AA..AA hex data or ENN
36 * MAA..AA,LLLL: Write LLLL bytes at address AA.AA OK or ENN
37 *
38 * c Resume at current address SNN ( signal NN)
39 * cAA..AA Continue at address AA..AA SNN
40 *
41 * s Step one instruction SNN
42 * sAA..AA Step one instruction from AA..AA SNN
43 *
44 * k kill
45 *
46 * ? What was the last sigval ? SNN (signal NN)
47 *
48 * bBB..BB Set baud rate to BB..BB OK or BNN, then sets
49 * baud rate
50 *
51 * All commands and responses are sent with a packet which includes a
52 * checksum. A packet consists of
53 *
54 * $<packet info>#<checksum>.
55 *
56 * where
57 * <packet info> :: <characters representing the command or response>
58 * <checksum> :: < two hex digits computed as modulo 256 sum of <packetinfo>>
59 *
60 * When a packet is received, it is first acknowledged with either '+' or '-'.
61 * '+' indicates a successful transfer. '-' indicates a failed transfer.
62 *
63 * Example:
64 *
65 * Host: Reply:
66 * $m0,10#2a +$00010203040506070809101112131415#42
67 *
68 *
69 * ==============
70 * MORE EXAMPLES:
71 * ==============
72 *
73 * For reference -- the following are the steps that one
74 * company took (RidgeRun Inc) to get remote gdb debugging
75 * going. In this scenario the host machine was a PC and the
76 * target platform was a Galileo EVB64120A MIPS evaluation
77 * board.
78 *
79 * Step 1:
80 * First download gdb-5.0.tar.gz from the internet.
81 * and then build/install the package.
82 *
83 * Example:
84 * $ tar zxf gdb-5.0.tar.gz
85 * $ cd gdb-5.0
86 * $ ./configure --target=mips-linux-elf
87 * $ make
 88 *          $ make install
89 * $ which mips-linux-elf-gdb
90 * /usr/local/bin/mips-linux-elf-gdb
91 *
92 * Step 2:
93 * Configure linux for remote debugging and build it.
94 *
95 * Example:
96 * $ cd ~/linux
97 * $ make menuconfig <go to "Kernel Hacking" and turn on remote debugging>
98 * $ make
99 *
100 * Step 3:
101 * Download the kernel to the remote target and start
102 * the kernel running. It will promptly halt and wait
103 * for the host gdb session to connect. It does this
104 * since the "Kernel Hacking" option has defined
105 * CONFIG_KGDB which in turn enables your calls
106 * to:
107 * set_debug_traps();
108 * breakpoint();
109 *
110 * Step 4:
111 * Start the gdb session on the host.
112 *
113 * Example:
114 * $ mips-linux-elf-gdb vmlinux
115 * (gdb) set remotebaud 115200
116 * (gdb) target remote /dev/ttyS1
117 * ...at this point you are connected to
118 * the remote target and can use gdb
119 *        in the normal fashion: setting
120 * breakpoints, single stepping,
121 * printing variables, etc.
122 */
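/*
 * Editorial illustration (not part of the original file): the packet
 * framing described above can be reproduced with a few lines of C.  The
 * checksum is simply the modulo-256 sum of the payload bytes, appended
 * as two lowercase hex digits after '#'.  frame_packet() is a
 * hypothetical helper, not a stub function.
 */
#include <stdio.h>
#include <string.h>

static void frame_packet(const char *payload, char *out)
{
	unsigned char csum = 0;
	size_t i, n = strlen(payload);

	for (i = 0; i < n; i++)
		csum += (unsigned char)payload[i];

	/* e.g. "m0,10" becomes "$m0,10#2a", matching the example above */
	sprintf(out, "$%s#%02x", payload, csum);
}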
123#include <linux/config.h>
124#include <linux/string.h>
125#include <linux/kernel.h>
126#include <linux/signal.h>
127#include <linux/sched.h>
128#include <linux/mm.h>
129#include <linux/console.h>
130#include <linux/init.h>
131#include <linux/smp.h>
132#include <linux/spinlock.h>
133#include <linux/slab.h>
134#include <linux/reboot.h>
135
136#include <asm/asm.h>
137#include <asm/cacheflush.h>
138#include <asm/mipsregs.h>
139#include <asm/pgtable.h>
140#include <asm/system.h>
141#include <asm/gdb-stub.h>
142#include <asm/inst.h>
143
144/*
145 * external low-level support routines
146 */
147
148extern int putDebugChar(char c); /* write a single character */
149extern char getDebugChar(void); /* read and return a single char */
150extern void trap_low(void);
151
152/*
153 * breakpoint and test functions
154 */
155extern void breakpoint(void);
156extern void breakinst(void);
157extern void async_breakpoint(void);
158extern void async_breakinst(void);
159extern void adel(void);
160
161/*
162 * local prototypes
163 */
164
165static void getpacket(char *buffer);
166static void putpacket(char *buffer);
167static int computeSignal(int tt);
168static int hex(unsigned char ch);
169static int hexToInt(char **ptr, int *intValue);
170static int hexToLong(char **ptr, long *longValue);
171static unsigned char *mem2hex(char *mem, char *buf, int count, int may_fault);
172void handle_exception(struct gdb_regs *regs);
173
174int kgdb_enabled;
175
176/*
177 * spin locks for smp case
178 */
179static spinlock_t kgdb_lock = SPIN_LOCK_UNLOCKED;
180static spinlock_t kgdb_cpulock[NR_CPUS] = { [0 ... NR_CPUS-1] = SPIN_LOCK_UNLOCKED};
181
182/*
183 * BUFMAX defines the maximum number of characters in inbound/outbound buffers
184 * at least NUMREGBYTES*2 are needed for register packets
185 */
186#define BUFMAX 2048
187
188static char input_buffer[BUFMAX];
189static char output_buffer[BUFMAX];
190static int initialized; /* !0 means we've been initialized */
191static int kgdb_started;
192static const char hexchars[]="0123456789abcdef";
193
194/* Used to prevent crashes in memory access. Note that they'll crash anyway if
195 we haven't set up fault handlers yet... */
196int kgdb_read_byte(unsigned char *address, unsigned char *dest);
197int kgdb_write_byte(unsigned char val, unsigned char *dest);
198
199/*
200 * Convert ch from a hex digit to an int
201 */
202static int hex(unsigned char ch)
203{
204 if (ch >= 'a' && ch <= 'f')
205 return ch-'a'+10;
206 if (ch >= '0' && ch <= '9')
207 return ch-'0';
208 if (ch >= 'A' && ch <= 'F')
209 return ch-'A'+10;
210 return -1;
211}
212
213/*
214 * scan for the sequence $<data>#<checksum>
215 */
216static void getpacket(char *buffer)
217{
218 unsigned char checksum;
219 unsigned char xmitcsum;
220 int i;
221 int count;
222 unsigned char ch;
223
224 do {
225 /*
226 * wait around for the start character,
227 * ignore all other characters
228 */
229 while ((ch = (getDebugChar() & 0x7f)) != '$') ;
230
231 checksum = 0;
232 xmitcsum = -1;
233 count = 0;
234
235 /*
236 * now, read until a # or end of buffer is found
237 */
238 while (count < BUFMAX) {
239 ch = getDebugChar();
240 if (ch == '#')
241 break;
242 checksum = checksum + ch;
243 buffer[count] = ch;
244 count = count + 1;
245 }
246
247 if (count >= BUFMAX)
248 continue;
249
250 buffer[count] = 0;
251
252 if (ch == '#') {
253 xmitcsum = hex(getDebugChar() & 0x7f) << 4;
254 xmitcsum |= hex(getDebugChar() & 0x7f);
255
256 if (checksum != xmitcsum)
257 putDebugChar('-'); /* failed checksum */
258 else {
259 putDebugChar('+'); /* successful transfer */
260
261 /*
262 * if a sequence char is present,
263 * reply the sequence ID
264 */
265 if (buffer[2] == ':') {
266 putDebugChar(buffer[0]);
267 putDebugChar(buffer[1]);
268
269 /*
270 * remove sequence chars from buffer
271 */
272 count = strlen(buffer);
273 for (i=3; i <= count; i++)
274 buffer[i-3] = buffer[i];
275 }
276 }
277 }
278 }
279 while (checksum != xmitcsum);
280}
281
282/*
283 * send the packet in buffer.
284 */
285static void putpacket(char *buffer)
286{
287 unsigned char checksum;
288 int count;
289 unsigned char ch;
290
291 /*
292 * $<packet info>#<checksum>.
293 */
294
295 do {
296 putDebugChar('$');
297 checksum = 0;
298 count = 0;
299
300 while ((ch = buffer[count]) != 0) {
301 if (!(putDebugChar(ch)))
302 return;
303 checksum += ch;
304 count += 1;
305 }
306
307 putDebugChar('#');
308 putDebugChar(hexchars[checksum >> 4]);
309 putDebugChar(hexchars[checksum & 0xf]);
310
311 }
312 while ((getDebugChar() & 0x7f) != '+');
313}
314
315
316/*
317 * Convert the memory pointed to by mem into hex, placing result in buf.
318 * Return a pointer to the last char put in buf (null); in case of mem fault,
319 * return 0.
320 * may_fault is non-zero if we are reading from arbitrary memory, but is currently
321 * not used.
322 */
323static unsigned char *mem2hex(char *mem, char *buf, int count, int may_fault)
324{
325 unsigned char ch;
326
327 while (count-- > 0) {
328 if (kgdb_read_byte(mem++, &ch) != 0)
329 return 0;
330 *buf++ = hexchars[ch >> 4];
331 *buf++ = hexchars[ch & 0xf];
332 }
333
334 *buf = 0;
335
336 return buf;
337}
338
339/*
340 * convert the hex array pointed to by buf into binary to be placed in mem
341 * return a pointer to the character AFTER the last byte written
342 * may_fault is non-zero if we are writing to arbitrary memory, but is currently
343 * not used.
344 */
345static char *hex2mem(char *buf, char *mem, int count, int binary, int may_fault)
346{
347 int i;
348 unsigned char ch;
349
350 for (i=0; i<count; i++)
351 {
352 if (binary) {
353 ch = *buf++;
354 if (ch == 0x7d)
355 ch = 0x20 ^ *buf++;
356 }
357 else {
358 ch = hex(*buf++) << 4;
359 ch |= hex(*buf++);
360 }
361 if (kgdb_write_byte(ch, mem++) != 0)
362 return 0;
363 }
364
365 return mem;
366}
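/*
 * Editorial sketch (hypothetical helper, not stub code): the binary 'X'
 * write command handled by hex2mem() above escapes the byte 0x7d; the
 * byte that follows it must be XORed with 0x20 to recover the original
 * value, which is what this small decoder does.
 */
static unsigned char unescape_byte(const unsigned char **p)
{
	unsigned char ch = *(*p)++;		/* next escaped byte */

	if (ch == 0x7d)				/* escape marker */
		ch = 0x20 ^ *(*p)++;		/* restore escaped value */
	return ch;
}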
367
368/*
369 * This table contains the mapping between MIPS hardware trap types and
370 * signals, which are primarily what GDB understands. It also indicates
371 * which hardware traps we need to commandeer when initializing the stub.
372 */
373static struct hard_trap_info {
374 unsigned char tt; /* Trap type code for MIPS R3xxx and R4xxx */
375 unsigned char signo; /* Signal that we map this trap into */
376} hard_trap_info[] = {
377 { 6, SIGBUS }, /* instruction bus error */
378 { 7, SIGBUS }, /* data bus error */
379 { 9, SIGTRAP }, /* break */
380 { 10, SIGILL }, /* reserved instruction */
381/* { 11, SIGILL }, */ /* CPU unusable */
382 { 12, SIGFPE }, /* overflow */
383 { 13, SIGTRAP }, /* trap */
384 { 14, SIGSEGV }, /* virtual instruction cache coherency */
385 { 15, SIGFPE }, /* floating point exception */
386 { 23, SIGSEGV }, /* watch */
387 { 31, SIGSEGV }, /* virtual data cache coherency */
388 { 0, 0} /* Must be last */
389};
390
391/* Save the normal trap handlers for user-mode traps. */
392void *saved_vectors[32];
393
394/*
395 * Set up exception handlers for tracing and breakpoints
396 */
397void set_debug_traps(void)
398{
399 struct hard_trap_info *ht;
400 unsigned long flags;
401 unsigned char c;
402
403 local_irq_save(flags);
404 for (ht = hard_trap_info; ht->tt && ht->signo; ht++)
405 saved_vectors[ht->tt] = set_except_vector(ht->tt, trap_low);
406
407 putDebugChar('+'); /* 'hello world' */
408 /*
409 * In case GDB is started before us, ack any packets
410 * (presumably "$?#xx") sitting there.
411 */
412 while((c = getDebugChar()) != '$');
413 while((c = getDebugChar()) != '#');
414 c = getDebugChar(); /* eat first csum byte */
415 c = getDebugChar(); /* eat second csum byte */
416 putDebugChar('+'); /* ack it */
417
418 initialized = 1;
419 local_irq_restore(flags);
420}
421
422void restore_debug_traps(void)
423{
424 struct hard_trap_info *ht;
425 unsigned long flags;
426
427 local_irq_save(flags);
428 for (ht = hard_trap_info; ht->tt && ht->signo; ht++)
429 set_except_vector(ht->tt, saved_vectors[ht->tt]);
430 local_irq_restore(flags);
431}
432
433/*
434 * Convert the MIPS hardware trap type code to a Unix signal number.
435 */
436static int computeSignal(int tt)
437{
438 struct hard_trap_info *ht;
439
440 for (ht = hard_trap_info; ht->tt && ht->signo; ht++)
441 if (ht->tt == tt)
442 return ht->signo;
443
444 return SIGHUP; /* default for things we don't know about */
445}
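/*
 * Editorial sketch (illustration only): the trap number fed into
 * computeSignal() is the ExcCode field of the CP0 Cause register,
 * bits 6..2, which handle_exception() below extracts as
 * (cause & 0x7c) >> 2.
 */
static int cause_to_exccode(unsigned long cause)
{
	return (cause & 0x7c) >> 2;	/* ExcCode field */
}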
446
447/*
448 * While we find nice hex chars, build an int.
449 * Return number of chars processed.
450 */
451static int hexToInt(char **ptr, int *intValue)
452{
453 int numChars = 0;
454 int hexValue;
455
456 *intValue = 0;
457
458 while (**ptr) {
459 hexValue = hex(**ptr);
460 if (hexValue < 0)
461 break;
462
463 *intValue = (*intValue << 4) | hexValue;
464 numChars ++;
465
466 (*ptr)++;
467 }
468
469 return (numChars);
470}
471
472static int hexToLong(char **ptr, long *longValue)
473{
474 int numChars = 0;
475 int hexValue;
476
477 *longValue = 0;
478
479 while (**ptr) {
480 hexValue = hex(**ptr);
481 if (hexValue < 0)
482 break;
483
484 *longValue = (*longValue << 4) | hexValue;
485 numChars ++;
486
487 (*ptr)++;
488 }
489
490 return numChars;
491}
492
493
494#if 0
495/*
496 * Print registers (on target console)
497 * Used only to debug the stub...
498 */
499void show_gdbregs(struct gdb_regs * regs)
500{
501 /*
502 * Saved main processor registers
503 */
504 printk("$0 : %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
505 regs->reg0, regs->reg1, regs->reg2, regs->reg3,
506 regs->reg4, regs->reg5, regs->reg6, regs->reg7);
507 printk("$8 : %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
508 regs->reg8, regs->reg9, regs->reg10, regs->reg11,
509 regs->reg12, regs->reg13, regs->reg14, regs->reg15);
510 printk("$16: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
511 regs->reg16, regs->reg17, regs->reg18, regs->reg19,
512 regs->reg20, regs->reg21, regs->reg22, regs->reg23);
513 printk("$24: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
514 regs->reg24, regs->reg25, regs->reg26, regs->reg27,
515 regs->reg28, regs->reg29, regs->reg30, regs->reg31);
516
517 /*
518 * Saved cp0 registers
519 */
520 printk("epc : %08lx\nStatus: %08lx\nCause : %08lx\n",
521 regs->cp0_epc, regs->cp0_status, regs->cp0_cause);
522}
523#endif /* dead code */
524
525/*
526 * We single-step by setting breakpoints. When an exception
527 * is handled, we need to restore the instructions hoisted
528 * when the breakpoints were set.
529 *
530 * This is where we save the original instructions.
531 */
532static struct gdb_bp_save {
533 unsigned long addr;
534 unsigned int val;
535} step_bp[2];
536
537#define BP 0x0000000d /* break opcode */
538
539/*
540 * Set breakpoint instructions for single stepping.
541 */
542static void single_step(struct gdb_regs *regs)
543{
544 union mips_instruction insn;
545 unsigned long targ;
546 int is_branch, is_cond, i;
547
548 targ = regs->cp0_epc;
549 insn.word = *(unsigned int *)targ;
550 is_branch = is_cond = 0;
551
552 switch (insn.i_format.opcode) {
553 /*
554 * jr and jalr are in r_format format.
555 */
556 case spec_op:
557 switch (insn.r_format.func) {
558 case jalr_op:
559 case jr_op:
560 targ = *(&regs->reg0 + insn.r_format.rs);
561 is_branch = 1;
562 break;
563 }
564 break;
565
566 /*
567 * This group contains:
568 * bltz_op, bgez_op, bltzl_op, bgezl_op,
569 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
570 */
571 case bcond_op:
572 is_branch = is_cond = 1;
573 targ += 4 + (insn.i_format.simmediate << 2);
574 break;
575
576 /*
577 * These are unconditional and in j_format.
578 */
579 case jal_op:
580 case j_op:
581 is_branch = 1;
582 targ += 4;
583 targ >>= 28;
584 targ <<= 28;
585 targ |= (insn.j_format.target << 2);
586 break;
587
588 /*
589 * These are conditional.
590 */
591 case beq_op:
592 case beql_op:
593 case bne_op:
594 case bnel_op:
595 case blez_op:
596 case blezl_op:
597 case bgtz_op:
598 case bgtzl_op:
599 case cop0_op:
600 case cop1_op:
601 case cop2_op:
602 case cop1x_op:
603 is_branch = is_cond = 1;
604 targ += 4 + (insn.i_format.simmediate << 2);
605 break;
606 }
607
608 if (is_branch) {
609 i = 0;
610 if (is_cond && targ != (regs->cp0_epc + 8)) {
611 step_bp[i].addr = regs->cp0_epc + 8;
612 step_bp[i++].val = *(unsigned *)(regs->cp0_epc + 8);
613 *(unsigned *)(regs->cp0_epc + 8) = BP;
614 }
615 step_bp[i].addr = targ;
616 step_bp[i].val = *(unsigned *)targ;
617 *(unsigned *)targ = BP;
618 } else {
619 step_bp[0].addr = regs->cp0_epc + 4;
620 step_bp[0].val = *(unsigned *)(regs->cp0_epc + 4);
621 *(unsigned *)(regs->cp0_epc + 4) = BP;
622 }
623}
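/*
 * Editorial sketch (not kernel code): for a conditional branch the logic
 * above plants a break at both possible next instructions -- the
 * fall-through address epc + 8 (past the delay slot) and the branch
 * target -- so execution stops wherever the branch goes.  The helper
 * below, with made-up names, just prints that plan for a given epc.
 */
#include <stdio.h>

static void show_step_plan(unsigned long epc, unsigned long target,
			   int is_branch, int is_cond)
{
	if (!is_branch) {
		printf("break at %#lx\n", epc + 4);	/* next instruction */
		return;
	}
	if (is_cond && target != epc + 8)
		printf("break at fall-through %#lx\n", epc + 8);
	printf("break at target %#lx\n", target);
}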
624
625/*
626 * If asynchronously interrupted by gdb, then we need to set a breakpoint
627 * at the interrupted instruction so that we wind up stopped with a
628 * reasonable stack frame.
629 */
630static struct gdb_bp_save async_bp;
631
632/*
633 * Swap the interrupted EPC with our asynchronous breakpoint routine.
634 * This is safer than stuffing the breakpoint in-place, since no cache
635 * flushes (or resulting smp_call_functions) are required. The
636 * assumption is that only one CPU will be handling asynchronous bp's,
637 * and only one can be active at a time.
638 */
639extern spinlock_t smp_call_lock;
640void set_async_breakpoint(unsigned long *epc)
641{
642 /* skip breaking into userland */
643 if ((*epc & 0x80000000) == 0)
644 return;
645
646	/* avoid deadlock if another CPU is in smp_call_function (IPI) */
647 if (spin_is_locked(&smp_call_lock))
648 return;
649
650 async_bp.addr = *epc;
651 *epc = (unsigned long)async_breakpoint;
652}
653
654void kgdb_wait(void *arg)
655{
656	unsigned long flags;
657 int cpu = smp_processor_id();
658
659 local_irq_save(flags);
660
661 spin_lock(&kgdb_cpulock[cpu]);
662 spin_unlock(&kgdb_cpulock[cpu]);
663
664 local_irq_restore(flags);
665}
666
667
668/*
669 * This function does all command processing for interfacing to gdb.
670 * It loops reading and handling packets from the remote gdb until gdb
671 * resumes execution or detaches.
672 */
673void handle_exception (struct gdb_regs *regs)
674{
675 int trap; /* Trap type */
676 int sigval;
677 long addr;
678 int length;
679 char *ptr;
680 unsigned long *stack;
681 int i;
682 int bflag = 0;
683
684 kgdb_started = 1;
685
686 /*
687 * acquire the big kgdb spinlock
688 */
689 if (!spin_trylock(&kgdb_lock)) {
690 /*
691		 * some other CPU has the lock; go back and service the
692		 * kgdb_wait IPI instead
693 */
694 return;
695 }
696
697 /*
698 * If we're in async_breakpoint(), restore the real EPC from
699 * the breakpoint.
700 */
701 if (regs->cp0_epc == (unsigned long)async_breakinst) {
702 regs->cp0_epc = async_bp.addr;
703 async_bp.addr = 0;
704 }
705
706 /*
707 * acquire the CPU spinlocks
708 */
709 for (i = num_online_cpus()-1; i >= 0; i--)
710 if (spin_trylock(&kgdb_cpulock[i]) == 0)
711 panic("kgdb: couldn't get cpulock %d\n", i);
712
713 /*
714 * force other cpus to enter kgdb
715 */
716 smp_call_function(kgdb_wait, NULL, 0, 0);
717
718 /*
719 * If we're in breakpoint() increment the PC
720 */
721 trap = (regs->cp0_cause & 0x7c) >> 2;
722 if (trap == 9 && regs->cp0_epc == (unsigned long)breakinst)
723 regs->cp0_epc += 4;
724
725 /*
726 * If we were single_stepping, restore the opcodes hoisted
727 * for the breakpoint[s].
728 */
729 if (step_bp[0].addr) {
730 *(unsigned *)step_bp[0].addr = step_bp[0].val;
731 step_bp[0].addr = 0;
732
733 if (step_bp[1].addr) {
734 *(unsigned *)step_bp[1].addr = step_bp[1].val;
735 step_bp[1].addr = 0;
736 }
737 }
738
739 stack = (long *)regs->reg29; /* stack ptr */
740 sigval = computeSignal(trap);
741
742 /*
743 * reply to host that an exception has occurred
744 */
745 ptr = output_buffer;
746
747 /*
748 * Send trap type (converted to signal)
749 */
750 *ptr++ = 'T';
751 *ptr++ = hexchars[sigval >> 4];
752 *ptr++ = hexchars[sigval & 0xf];
753
754 /*
755 * Send Error PC
756 */
757 *ptr++ = hexchars[REG_EPC >> 4];
758 *ptr++ = hexchars[REG_EPC & 0xf];
759 *ptr++ = ':';
760 ptr = mem2hex((char *)&regs->cp0_epc, ptr, sizeof(long), 0);
761 *ptr++ = ';';
762
763 /*
764 * Send frame pointer
765 */
766 *ptr++ = hexchars[REG_FP >> 4];
767 *ptr++ = hexchars[REG_FP & 0xf];
768 *ptr++ = ':';
769 ptr = mem2hex((char *)&regs->reg30, ptr, sizeof(long), 0);
770 *ptr++ = ';';
771
772 /*
773 * Send stack pointer
774 */
775 *ptr++ = hexchars[REG_SP >> 4];
776 *ptr++ = hexchars[REG_SP & 0xf];
777 *ptr++ = ':';
778 ptr = mem2hex((char *)&regs->reg29, ptr, sizeof(long), 0);
779 *ptr++ = ';';
780
781 *ptr++ = 0;
782 putpacket(output_buffer); /* send it off... */
783
784 /*
785 * Wait for input from remote GDB
786 */
787 while (1) {
788 output_buffer[0] = 0;
789 getpacket(input_buffer);
790
791 switch (input_buffer[0])
792 {
793 case '?':
794 output_buffer[0] = 'S';
795 output_buffer[1] = hexchars[sigval >> 4];
796 output_buffer[2] = hexchars[sigval & 0xf];
797 output_buffer[3] = 0;
798 break;
799
800 /*
801 * Detach debugger; let CPU run
802 */
803 case 'D':
804 putpacket(output_buffer);
805 goto finish_kgdb;
806 break;
807
808 case 'd':
809 /* toggle debug flag */
810 break;
811
812 /*
813 * Return the value of the CPU registers
814 */
815 case 'g':
816 ptr = output_buffer;
817 ptr = mem2hex((char *)&regs->reg0, ptr, 32*sizeof(long), 0); /* r0...r31 */
818 ptr = mem2hex((char *)&regs->cp0_status, ptr, 6*sizeof(long), 0); /* cp0 */
819 ptr = mem2hex((char *)&regs->fpr0, ptr, 32*sizeof(long), 0); /* f0...31 */
820 ptr = mem2hex((char *)&regs->cp1_fsr, ptr, 2*sizeof(long), 0); /* cp1 */
821 ptr = mem2hex((char *)&regs->frame_ptr, ptr, 2*sizeof(long), 0); /* frp */
822 ptr = mem2hex((char *)&regs->cp0_index, ptr, 16*sizeof(long), 0); /* cp0 */
823 break;
824
825 /*
826 * set the value of the CPU registers - return OK
827 */
828 case 'G':
829 {
830 ptr = &input_buffer[1];
831 hex2mem(ptr, (char *)&regs->reg0, 32*sizeof(long), 0, 0);
832 ptr += 32*(2*sizeof(long));
833 hex2mem(ptr, (char *)&regs->cp0_status, 6*sizeof(long), 0, 0);
834 ptr += 6*(2*sizeof(long));
835 hex2mem(ptr, (char *)&regs->fpr0, 32*sizeof(long), 0, 0);
836 ptr += 32*(2*sizeof(long));
837 hex2mem(ptr, (char *)&regs->cp1_fsr, 2*sizeof(long), 0, 0);
838 ptr += 2*(2*sizeof(long));
839 hex2mem(ptr, (char *)&regs->frame_ptr, 2*sizeof(long), 0, 0);
840 ptr += 2*(2*sizeof(long));
841 hex2mem(ptr, (char *)&regs->cp0_index, 16*sizeof(long), 0, 0);
842 strcpy(output_buffer,"OK");
843 }
844 break;
845
846 /*
847 * mAA..AA,LLLL Read LLLL bytes at address AA..AA
848 */
849 case 'm':
850 ptr = &input_buffer[1];
851
852 if (hexToLong(&ptr, &addr)
853 && *ptr++ == ','
854 && hexToInt(&ptr, &length)) {
855 if (mem2hex((char *)addr, output_buffer, length, 1))
856 break;
857 strcpy (output_buffer, "E03");
858 } else
859 strcpy(output_buffer,"E01");
860 break;
861
862 /*
863 * XAA..AA,LLLL: Write LLLL escaped binary bytes at address AA.AA
864 */
865 case 'X':
866 bflag = 1;
867 /* fall through */
868
869 /*
870 * MAA..AA,LLLL: Write LLLL bytes at address AA.AA return OK
871 */
872 case 'M':
873 ptr = &input_buffer[1];
874
875 if (hexToLong(&ptr, &addr)
876 && *ptr++ == ','
877 && hexToInt(&ptr, &length)
878 && *ptr++ == ':') {
879 if (hex2mem(ptr, (char *)addr, length, bflag, 1))
880 strcpy(output_buffer, "OK");
881 else
882 strcpy(output_buffer, "E03");
883 }
884 else
885 strcpy(output_buffer, "E02");
886 break;
887
888 /*
889 * cAA..AA Continue at address AA..AA(optional)
890 */
891 case 'c':
892 /* try to read optional parameter, pc unchanged if no parm */
893
894 ptr = &input_buffer[1];
895 if (hexToLong(&ptr, &addr))
896 regs->cp0_epc = addr;
897
898 goto exit_kgdb_exception;
899 break;
900
901 /*
902 * kill the program; let us try to restart the machine
903 * Reset the whole machine.
904 */
905 case 'k':
906 case 'r':
907 machine_restart("kgdb restarts machine");
908 break;
909
910 /*
911 * Step to next instruction
912 */
913 case 's':
914 /*
915 * There is no single step insn in the MIPS ISA, so we
916 * use breakpoints and continue, instead.
917 */
918 single_step(regs);
919 goto exit_kgdb_exception;
920 /* NOTREACHED */
921 break;
922
923 /*
924 * Set baud rate (bBB)
925 * FIXME: Needs to be written
926 */
927 case 'b':
928 {
929#if 0
930 int baudrate;
931 extern void set_timer_3();
932
933 ptr = &input_buffer[1];
934 if (!hexToInt(&ptr, &baudrate))
935 {
936 strcpy(output_buffer,"B01");
937 break;
938 }
939
940 /* Convert baud rate to uart clock divider */
941
942 switch (baudrate)
943 {
944 case 38400:
945 baudrate = 16;
946 break;
947 case 19200:
948 baudrate = 33;
949 break;
950 case 9600:
951 baudrate = 65;
952 break;
953 default:
954 baudrate = 0;
955 strcpy(output_buffer,"B02");
956 goto x1;
957 }
958
959 if (baudrate) {
960 putpacket("OK"); /* Ack before changing speed */
961 set_timer_3(baudrate); /* Set it */
962 }
963#endif
964 }
965 break;
966
967 } /* switch */
968
969 /*
970 * reply to the request
971 */
972
973 putpacket(output_buffer);
974
975 } /* while */
976
977 return;
978
979finish_kgdb:
980 restore_debug_traps();
981
982exit_kgdb_exception:
983 /* release locks so other CPUs can go */
984 for (i = num_online_cpus()-1; i >= 0; i--)
985 spin_unlock(&kgdb_cpulock[i]);
986 spin_unlock(&kgdb_lock);
987
988 __flush_cache_all();
989 return;
990}
991
992/*
993 * This function will generate a breakpoint exception. It is used at the
994 * beginning of a program to sync up with a debugger and can be used
995 * otherwise as a quick means to stop program execution and "break" into
996 * the debugger.
997 */
998void breakpoint(void)
999{
1000 if (!initialized)
1001 return;
1002
1003 __asm__ __volatile__(
1004 ".globl breakinst\n\t"
1005 ".set\tnoreorder\n\t"
1006 "nop\n"
1007 "breakinst:\tbreak\n\t"
1008 "nop\n\t"
1009 ".set\treorder"
1010 );
1011}
1012
1013/* Nothing but the break; don't pollute any registers */
1014void async_breakpoint(void)
1015{
1016 __asm__ __volatile__(
1017 ".globl async_breakinst\n\t"
1018 ".set\tnoreorder\n\t"
1019 "nop\n"
1020 "async_breakinst:\tbreak\n\t"
1021 "nop\n\t"
1022 ".set\treorder"
1023 );
1024}
1025
1026void adel(void)
1027{
1028 __asm__ __volatile__(
1029 ".globl\tadel\n\t"
1030 "lui\t$8,0x8000\n\t"
1031 "lw\t$9,1($8)\n\t"
1032 );
1033}
1034
1035/*
1036 * malloc is needed by the gdb client for "call func()"; even a private one
1037 * will make gdb happy
1038 */
1039static void *malloc(size_t size)
1040{
1041 return kmalloc(size, GFP_ATOMIC);
1042}
1043
1044static void free(void *where)
1045{
1046 kfree(where);
1047}
1048
1049#ifdef CONFIG_GDB_CONSOLE
1050
1051void gdb_putsn(const char *str, int l)
1052{
1053 char outbuf[18];
1054
1055 if (!kgdb_started)
1056 return;
1057
1058 outbuf[0]='O';
1059
1060 while(l) {
1061 int i = (l>8)?8:l;
1062 mem2hex((char *)str, &outbuf[1], i, 0);
1063 outbuf[(i*2)+1]=0;
1064 putpacket(outbuf);
1065 str += i;
1066 l -= i;
1067 }
1068}
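/*
 * Editorial sketch (hypothetical helper): gdb console output is sent as
 * 'O' packets of at most 8 source bytes per packet, each byte
 * hex-encoded, which is why outbuf above is sized
 * 1 ('O') + 16 (hex) + 1 (NUL) = 18 bytes.
 */
static int build_o_payload(const char *s, int n, char out[18])
{
	static const char hexdig[] = "0123456789abcdef";
	int i, len = n > 8 ? 8 : n;

	out[0] = 'O';
	for (i = 0; i < len; i++) {
		out[1 + 2 * i] = hexdig[(unsigned char)s[i] >> 4];
		out[2 + 2 * i] = hexdig[(unsigned char)s[i] & 0xf];
	}
	out[1 + 2 * len] = '\0';
	return len;			/* bytes consumed from s */
}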
1069
1070static void gdb_console_write(struct console *con, const char *s, unsigned n)
1071{
1072 gdb_putsn(s, n);
1073}
1074
1075static struct console gdb_console = {
1076 .name = "gdb",
1077 .write = gdb_console_write,
1078 .flags = CON_PRINTBUFFER,
1079 .index = -1
1080};
1081
1082static int __init register_gdb_console(void)
1083{
1084 register_console(&gdb_console);
1085
1086 return 0;
1087}
1088
1089console_initcall(register_gdb_console);
1090
1091#endif
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
new file mode 100644
index 000000000000..a5b0a389b063
--- /dev/null
+++ b/arch/mips/kernel/genex.S
@@ -0,0 +1,302 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
7 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
8 * Copyright (C) 2001 MIPS Technologies, Inc.
9 * Copyright (C) 2002 Maciej W. Rozycki
10 */
11#include <linux/config.h>
12#include <linux/init.h>
13
14#include <asm/asm.h>
15#include <asm/cacheops.h>
16#include <asm/regdef.h>
17#include <asm/fpregdef.h>
18#include <asm/mipsregs.h>
19#include <asm/stackframe.h>
20#include <asm/war.h>
21
22#define PANIC_PIC(msg) \
23 .set push; \
24 .set reorder; \
25 PTR_LA a0,8f; \
26 .set noat; \
27 PTR_LA AT, panic; \
28 jr AT; \
299: b 9b; \
30 .set pop; \
31 TEXT(msg)
32
33 __INIT
34
35NESTED(except_vec0_generic, 0, sp)
36 PANIC_PIC("Exception vector 0 called")
37 END(except_vec0_generic)
38
39NESTED(except_vec1_generic, 0, sp)
40 PANIC_PIC("Exception vector 1 called")
41 END(except_vec1_generic)
42
43/*
44 * General exception vector for all other CPUs.
45 *
46 * Be careful when changing this, it has to be at most 128 bytes
47 * to fit into space reserved for the exception handler.
48 */
49NESTED(except_vec3_generic, 0, sp)
50 .set push
51 .set noat
52#if R5432_CP0_INTERRUPT_WAR
53 mfc0 k0, CP0_INDEX
54#endif
55 mfc0 k1, CP0_CAUSE
56 andi k1, k1, 0x7c
57#ifdef CONFIG_MIPS64
58 dsll k1, k1, 1
59#endif
60 PTR_L k0, exception_handlers(k1)
61 jr k0
62 .set pop
63 END(except_vec3_generic)
64
65/*
66 * General exception handler for CPUs with virtual coherency exception.
67 *
68 * Be careful when changing this, it has to be at most 256 (as a special
69 * exception) bytes to fit into space reserved for the exception handler.
70 */
71NESTED(except_vec3_r4000, 0, sp)
72 .set push
73 .set mips3
74 .set noat
75 mfc0 k1, CP0_CAUSE
76 li k0, 31<<2
77 andi k1, k1, 0x7c
78 .set push
79 .set noreorder
80 .set nomacro
81 beq k1, k0, handle_vced
82 li k0, 14<<2
83 beq k1, k0, handle_vcei
84#ifdef CONFIG_MIPS64
85 dsll k1, k1, 1
86#endif
87 .set pop
88 PTR_L k0, exception_handlers(k1)
89 jr k0
90
91 /*
92 * Big shit, we now may have two dirty primary cache lines for the same
 93	 * physical address.  We can safely invalidate the line pointed to by
94 * c0_badvaddr because after return from this exception handler the
95 * load / store will be re-executed.
96 */
97handle_vced:
98 DMFC0 k0, CP0_BADVADDR
99 li k1, -4 # Is this ...
100 and k0, k1 # ... really needed?
101 mtc0 zero, CP0_TAGLO
102 cache Index_Store_Tag_D,(k0)
103 cache Hit_Writeback_Inv_SD,(k0)
104#ifdef CONFIG_PROC_FS
105 PTR_LA k0, vced_count
106 lw k1, (k0)
107 addiu k1, 1
108 sw k1, (k0)
109#endif
110 eret
111
112handle_vcei:
113 MFC0 k0, CP0_BADVADDR
114 cache Hit_Writeback_Inv_SD, (k0) # also cleans pi
115#ifdef CONFIG_PROC_FS
116 PTR_LA k0, vcei_count
117 lw k1, (k0)
118 addiu k1, 1
119 sw k1, (k0)
120#endif
121 eret
122 .set pop
123 END(except_vec3_r4000)
124
125/*
126 * Special interrupt vector for MIPS64 ISA & embedded MIPS processors.
127 * This is a dedicated interrupt exception vector which reduces the
128 * interrupt processing overhead. The jump instruction will be replaced
129 * at the initialization time.
130 *
131 * Be careful when changing this, it has to be at most 128 bytes
132 * to fit into space reserved for the exception handler.
133 */
134NESTED(except_vec4, 0, sp)
1351: j 1b /* Dummy, will be replaced */
136 END(except_vec4)
137
138/*
139 * EJTAG debug exception handler.
140 * The EJTAG debug exception entry point is 0xbfc00480, which
141 * normally is in the boot PROM, so the boot PROM must do an
142 * unconditional jump to this vector.
143 */
144NESTED(except_vec_ejtag_debug, 0, sp)
145 j ejtag_debug_handler
146 END(except_vec_ejtag_debug)
147
148 __FINIT
149
150/*
151 * EJTAG debug exception handler.
152 */
153NESTED(ejtag_debug_handler, PT_SIZE, sp)
154 .set push
155 .set noat
156 MTC0 k0, CP0_DESAVE
157 mfc0 k0, CP0_DEBUG
158
159 sll k0, k0, 30 # Check for SDBBP.
160 bgez k0, ejtag_return
161
162 PTR_LA k0, ejtag_debug_buffer
163 LONG_S k1, 0(k0)
164 SAVE_ALL
165 move a0, sp
166 jal ejtag_exception_handler
167 RESTORE_ALL
168 PTR_LA k0, ejtag_debug_buffer
169 LONG_L k1, 0(k0)
170
171ejtag_return:
172 MFC0 k0, CP0_DESAVE
173 .set mips32
174 deret
175 .set pop
176 END(ejtag_debug_handler)
177
178/*
179 * This buffer is reserved for the use of the EJTAG debug
180 * handler.
181 */
182 .data
183EXPORT(ejtag_debug_buffer)
184 .fill LONGSIZE
185 .previous
186
187 __INIT
188
189/*
190 * NMI debug exception handler for MIPS reference boards.
191 * The NMI debug exception entry point is 0xbfc00000, which
192 * normally is in the boot PROM, so the boot PROM must do an
193 * unconditional jump to this vector.
194 */
195NESTED(except_vec_nmi, 0, sp)
196 j nmi_handler
197 END(except_vec_nmi)
198
199 __FINIT
200
201NESTED(nmi_handler, PT_SIZE, sp)
202 .set push
203 .set noat
204 .set mips3
205 SAVE_ALL
206 move a0, sp
207 jal nmi_exception_handler
208 RESTORE_ALL
209 eret
210 .set pop
211 END(nmi_handler)
212
213 .macro __build_clear_none
214 .endm
215
216 .macro __build_clear_sti
217 STI
218 .endm
219
220 .macro __build_clear_cli
221 CLI
222 .endm
223
224 .macro __build_clear_fpe
225 cfc1 a1, fcr31
226 li a2, ~(0x3f << 12)
227 and a2, a1
228 ctc1 a2, fcr31
229 STI
230 .endm
231
232 .macro __build_clear_ade
233 MFC0 t0, CP0_BADVADDR
234 PTR_S t0, PT_BVADDR(sp)
235 KMODE
236 .endm
237
238 .macro __BUILD_silent exception
239 .endm
240
241 /* Gas tries to parse the PRINT argument as a string containing
242	   string escapes and emits bogus warnings if it believes it
243	   recognizes an unknown escape code.  So make the arguments
244 start with an n and gas will believe \n is ok ... */
245 .macro __BUILD_verbose nexception
246 LONG_L a1, PT_EPC(sp)
247#if CONFIG_MIPS32
248 PRINT("Got \nexception at %08lx\012")
249#endif
250#if CONFIG_MIPS64
251 PRINT("Got \nexception at %016lx\012")
252#endif
253 .endm
254
255 .macro __BUILD_count exception
256 LONG_L t0,exception_count_\exception
257 LONG_ADDIU t0, 1
258 LONG_S t0,exception_count_\exception
259	.comm	exception_count_\exception, 8, 8
260 .endm
261
262 .macro __BUILD_HANDLER exception handler clear verbose ext
263 .align 5
264 NESTED(handle_\exception, PT_SIZE, sp)
265 .set noat
266 SAVE_ALL
267 FEXPORT(handle_\exception\ext)
268 __BUILD_clear_\clear
269 .set at
270 __BUILD_\verbose \exception
271 move a0, sp
272 jal do_\handler
273 j ret_from_exception
274 END(handle_\exception)
275 .endm
276
277 .macro BUILD_HANDLER exception handler clear verbose
278 __BUILD_HANDLER \exception \handler \clear \verbose _int
279 .endm
280
281 BUILD_HANDLER adel ade ade silent /* #4 */
282 BUILD_HANDLER ades ade ade silent /* #5 */
283 BUILD_HANDLER ibe be cli silent /* #6 */
284 BUILD_HANDLER dbe be cli silent /* #7 */
285 BUILD_HANDLER bp bp sti silent /* #9 */
286 BUILD_HANDLER ri ri sti silent /* #10 */
287 BUILD_HANDLER cpu cpu sti silent /* #11 */
288 BUILD_HANDLER ov ov sti silent /* #12 */
289 BUILD_HANDLER tr tr sti silent /* #13 */
290 BUILD_HANDLER fpe fpe fpe silent /* #15 */
291 BUILD_HANDLER mdmx mdmx sti silent /* #22 */
292 BUILD_HANDLER watch watch sti verbose /* #23 */
293 BUILD_HANDLER mcheck mcheck cli verbose /* #24 */
294 BUILD_HANDLER reserved reserved sti verbose /* others */
295
296#ifdef CONFIG_MIPS64
297/* A temporary overflow handler used by check_daddi(). */
298
299 __INIT
300
301 BUILD_HANDLER daddi_ov daddi_ov none silent /* #12 */
302#endif
diff --git a/arch/mips/kernel/genrtc.c b/arch/mips/kernel/genrtc.c
new file mode 100644
index 000000000000..288bf51ad4ec
--- /dev/null
+++ b/arch/mips/kernel/genrtc.c
@@ -0,0 +1,64 @@
1/*
  2 * A glue layer that provides RTC read/write for the drivers/char/genrtc.c
  3 * driver, based on the MIPS internal RTC routines.  It takes care of
  4 * locking so that we are SMP/preemption safe.
5 *
6 * Copyright (C) 2004 MontaVista Software Inc.
7 * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
8 *
9 * Please read the COPYING file for all license details.
10 */
11
12#include <linux/spinlock.h>
13
14#include <asm/rtc.h>
15#include <asm/time.h>
16
17static spinlock_t mips_rtc_lock = SPIN_LOCK_UNLOCKED;
18
19unsigned int get_rtc_time(struct rtc_time *time)
20{
21 unsigned long nowtime;
22
23 spin_lock(&mips_rtc_lock);
24 nowtime = rtc_get_time();
25 to_tm(nowtime, time);
26 time->tm_year -= 1900;
27 spin_unlock(&mips_rtc_lock);
28
29 return RTC_24H;
30}
31
32int set_rtc_time(struct rtc_time *time)
33{
34 unsigned long nowtime;
35 int ret;
36
37 spin_lock(&mips_rtc_lock);
38 nowtime = mktime(time->tm_year+1900, time->tm_mon+1,
39 time->tm_mday, time->tm_hour, time->tm_min,
40 time->tm_sec);
41 ret = rtc_set_time(nowtime);
42 spin_unlock(&mips_rtc_lock);
43
44 return ret;
45}
46
47unsigned int get_rtc_ss(void)
48{
49 struct rtc_time h;
50
51 get_rtc_time(&h);
52 return h.tm_sec;
53}
54
55int get_rtc_pll(struct rtc_pll_info *pll)
56{
57 return -EINVAL;
58}
59
60int set_rtc_pll(struct rtc_pll_info *pll)
61{
62 return -EINVAL;
63}
64
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
new file mode 100644
index 000000000000..a64e87d22014
--- /dev/null
+++ b/arch/mips/kernel/head.S
@@ -0,0 +1,221 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1994, 1995 Waldorf Electronics
7 * Written by Ralf Baechle and Andreas Busse
8 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 Ralf Baechle
9 * Copyright (C) 1996 Paul M. Antoine
10 * Modified for DECStation and hence R3000 support by Paul M. Antoine
11 * Further modifications by David S. Miller and Harald Koerfgen
12 * Copyright (C) 1999 Silicon Graphics, Inc.
13 * Kevin Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
14 * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
15 */
16#include <linux/config.h>
17#include <linux/init.h>
18#include <linux/threads.h>
19
20#include <asm/asm.h>
21#include <asm/regdef.h>
22#include <asm/page.h>
23#include <asm/mipsregs.h>
24#include <asm/stackframe.h>
25#ifdef CONFIG_SGI_IP27
26#include <asm/sn/addrs.h>
27#include <asm/sn/sn0/hubni.h>
28#include <asm/sn/klkernvars.h>
29#endif
30
31 .macro ARC64_TWIDDLE_PC
32#if defined(CONFIG_ARC64) || defined(CONFIG_MAPPED_KERNEL)
 33	/* We get launched at an XKPHYS address but the kernel is linked to
34 run at a KSEG0 address, so jump there. */
35 PTR_LA t0, \@f
36 jr t0
37\@:
38#endif
39 .endm
40
41#ifdef CONFIG_SGI_IP27
42 /*
43 * outputs the local nasid into res. IP27 stuff.
44 */
45 .macro GET_NASID_ASM res
46 dli \res, LOCAL_HUB_ADDR(NI_STATUS_REV_ID)
47 ld \res, (\res)
48 and \res, NSRI_NODEID_MASK
49 dsrl \res, NSRI_NODEID_SHFT
50 .endm
51#endif /* CONFIG_SGI_IP27 */
52
53 /*
54 * inputs are the text nasid in t1, data nasid in t2.
55 */
56 .macro MAPPED_KERNEL_SETUP_TLB
57#ifdef CONFIG_MAPPED_KERNEL
58 /*
59 * This needs to read the nasid - assume 0 for now.
60 * Drop in 0xffffffffc0000000 in tlbhi, 0+VG in tlblo_0,
61 * 0+DVG in tlblo_1.
62 */
63 dli t0, 0xffffffffc0000000
64 dmtc0 t0, CP0_ENTRYHI
65 li t0, 0x1c000 # Offset of text into node memory
66 dsll t1, NASID_SHFT # Shift text nasid into place
67 dsll t2, NASID_SHFT # Same for data nasid
68 or t1, t1, t0 # Physical load address of kernel text
69 or t2, t2, t0 # Physical load address of kernel data
70 dsrl t1, 12 # 4K pfn
71 dsrl t2, 12 # 4K pfn
72 dsll t1, 6 # Get pfn into place
73 dsll t2, 6 # Get pfn into place
74 li t0, ((_PAGE_GLOBAL|_PAGE_VALID| _CACHE_CACHABLE_COW) >> 6)
75 or t0, t0, t1
76 mtc0 t0, CP0_ENTRYLO0 # physaddr, VG, cach exlwr
77 li t0, ((_PAGE_GLOBAL|_PAGE_VALID| _PAGE_DIRTY|_CACHE_CACHABLE_COW) >> 6)
78 or t0, t0, t2
79 mtc0 t0, CP0_ENTRYLO1 # physaddr, DVG, cach exlwr
80 li t0, 0x1ffe000 # MAPPED_KERN_TLBMASK, TLBPGMASK_16M
81 mtc0 t0, CP0_PAGEMASK
82 li t0, 0 # KMAP_INX
83 mtc0 t0, CP0_INDEX
84 li t0, 1
85 mtc0 t0, CP0_WIRED
86 tlbwi
87#else
88 mtc0 zero, CP0_WIRED
89#endif
90 .endm
91
92 /*
93 * For the moment disable interrupts, mark the kernel mode and
94 * set ST0_KX so that the CPU does not spit fire when using
95 * 64-bit addresses. A full initialization of the CPU's status
96 * register is done later in per_cpu_trap_init().
97 */
98 .macro setup_c0_status set clr
99 .set push
100 mfc0 t0, CP0_STATUS
101 or t0, ST0_CU0|\set|0x1f|\clr
102 xor t0, 0x1f|\clr
103 mtc0 t0, CP0_STATUS
104 .set noreorder
105 sll zero,3 # ehb
106 .set pop
107 .endm
108
109 .macro setup_c0_status_pri
110#ifdef CONFIG_MIPS64
111 setup_c0_status ST0_KX 0
112#else
113 setup_c0_status 0 0
114#endif
115 .endm
116
117 .macro setup_c0_status_sec
118#ifdef CONFIG_MIPS64
119 setup_c0_status ST0_KX ST0_BEV
120#else
121 setup_c0_status 0 ST0_BEV
122#endif
123 .endm
124
125 /*
126 * Reserved space for exception handlers.
127 * Necessary for machines which link their kernels at KSEG0.
128 */
129 .fill 0x400
130
131EXPORT(stext) # used for profiling
132EXPORT(_stext)
133
134 __INIT
135
136NESTED(kernel_entry, 16, sp) # kernel entry point
137 setup_c0_status_pri
138
139#ifdef CONFIG_SGI_IP27
140 GET_NASID_ASM t1
141 move t2, t1 # text and data are here
142 MAPPED_KERNEL_SETUP_TLB
143#endif /* IP27 */
144
145 ARC64_TWIDDLE_PC
146
147 PTR_LA t0, __bss_start # clear .bss
148 LONG_S zero, (t0)
149 PTR_LA t1, __bss_stop - LONGSIZE
1501:
151 PTR_ADDIU t0, LONGSIZE
152 LONG_S zero, (t0)
153 bne t0, t1, 1b
154
155 LONG_S a0, fw_arg0 # firmware arguments
156 LONG_S a1, fw_arg1
157 LONG_S a2, fw_arg2
158 LONG_S a3, fw_arg3
159
160 PTR_LA $28, init_thread_union
161 PTR_ADDIU sp, $28, _THREAD_SIZE - 32
162 set_saved_sp sp, t0, t1
163 PTR_SUBU sp, 4 * SZREG # init stack pointer
164
165 j start_kernel
166 END(kernel_entry)
167
168#ifdef CONFIG_SMP
169/*
170 * SMP slave cpus entry point. Board specific code for bootstrap calls this
171 * function after setting up the stack and gp registers.
172 */
173NESTED(smp_bootstrap, 16, sp)
174 setup_c0_status_sec
175
176#ifdef CONFIG_SGI_IP27
177 GET_NASID_ASM t1
178 dli t0, KLDIR_OFFSET + (KLI_KERN_VARS * KLDIR_ENT_SIZE) + \
179 KLDIR_OFF_POINTER + CAC_BASE
180 dsll t1, NASID_SHFT
181 or t0, t0, t1
182 ld t0, 0(t0) # t0 points to kern_vars struct
183 lh t1, KV_RO_NASID_OFFSET(t0)
184 lh t2, KV_RW_NASID_OFFSET(t0)
185 MAPPED_KERNEL_SETUP_TLB
186 ARC64_TWIDDLE_PC
187#endif /* CONFIG_SGI_IP27 */
188
189 j start_secondary
190 END(smp_bootstrap)
191#endif /* CONFIG_SMP */
192
193 __FINIT
194
195 .comm kernelsp, NR_CPUS * 8, 8
196 .comm pgd_current, NR_CPUS * 8, 8
197
198 .comm fw_arg0, SZREG, SZREG # firmware arguments
199 .comm fw_arg1, SZREG, SZREG
200 .comm fw_arg2, SZREG, SZREG
201 .comm fw_arg3, SZREG, SZREG
202
203 .macro page name, order=0
204 .globl \name
205\name: .size \name, (_PAGE_SIZE << \order)
206 .org . + (_PAGE_SIZE << \order)
207 .type \name, @object
208 .endm
209
210 .data
211 .align PAGE_SHIFT
212
213 /*
214 * ... but on 64-bit we've got three-level pagetables with a
215 * slightly different layout ...
216 */
217 page swapper_pg_dir, _PGD_ORDER
218#ifdef CONFIG_MIPS64
219 page invalid_pmd_table, _PMD_ORDER
220#endif
221 page invalid_pte_table, _PTE_ORDER
diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
new file mode 100644
index 000000000000..7eec7568bfea
--- /dev/null
+++ b/arch/mips/kernel/i8259.c
@@ -0,0 +1,331 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Code to handle x86 style IRQs plus some generic interrupt stuff.
7 *
8 * Copyright (C) 1992 Linus Torvalds
9 * Copyright (C) 1994 - 2000 Ralf Baechle
10 */
11#include <linux/delay.h>
12#include <linux/init.h>
13#include <linux/ioport.h>
14#include <linux/interrupt.h>
15#include <linux/kernel.h>
16#include <linux/spinlock.h>
17#include <linux/sysdev.h>
18
19#include <asm/i8259.h>
20#include <asm/io.h>
21
22void enable_8259A_irq(unsigned int irq);
23void disable_8259A_irq(unsigned int irq);
24
25/*
26 * This is the 'legacy' 8259A Programmable Interrupt Controller,
 27 * present in the majority of PC/AT boxes, plus some generic
 28 * x86 specific things, if generic specifics make any
 29 * sense at all.
 30 * This file should become arch/i386/kernel/irq.c when the old irq.c
 31 * moves to arch-independent land.
32 */
33
34spinlock_t i8259A_lock = SPIN_LOCK_UNLOCKED;
35
36static void end_8259A_irq (unsigned int irq)
37{
38 if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)) &&
39 irq_desc[irq].action)
40 enable_8259A_irq(irq);
41}
42
43#define shutdown_8259A_irq disable_8259A_irq
44
45void mask_and_ack_8259A(unsigned int);
46
47static unsigned int startup_8259A_irq(unsigned int irq)
48{
49 enable_8259A_irq(irq);
50
51 return 0; /* never anything pending */
52}
53
54static struct hw_interrupt_type i8259A_irq_type = {
55 "XT-PIC",
56 startup_8259A_irq,
57 shutdown_8259A_irq,
58 enable_8259A_irq,
59 disable_8259A_irq,
60 mask_and_ack_8259A,
61 end_8259A_irq,
62 NULL
63};
64
65/*
66 * 8259A PIC functions to handle ISA devices:
67 */
68
69/*
70 * This contains the irq mask for both 8259A irq controllers,
71 */
72static unsigned int cached_irq_mask = 0xffff;
73
74#define cached_21 (cached_irq_mask)
75#define cached_A1 (cached_irq_mask >> 8)
76
77void disable_8259A_irq(unsigned int irq)
78{
79 unsigned int mask = 1 << irq;
80 unsigned long flags;
81
82 spin_lock_irqsave(&i8259A_lock, flags);
83 cached_irq_mask |= mask;
84 if (irq & 8)
85 outb(cached_A1,0xA1);
86 else
87 outb(cached_21,0x21);
88 spin_unlock_irqrestore(&i8259A_lock, flags);
89}
90
91void enable_8259A_irq(unsigned int irq)
92{
93 unsigned int mask = ~(1 << irq);
94 unsigned long flags;
95
96 spin_lock_irqsave(&i8259A_lock, flags);
97 cached_irq_mask &= mask;
98 if (irq & 8)
99 outb(cached_A1,0xA1);
100 else
101 outb(cached_21,0x21);
102 spin_unlock_irqrestore(&i8259A_lock, flags);
103}
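/*
 * Editorial sketch (illustration only, made-up helper): the enable/disable
 * paths above keep one 16-bit cached mask for both PICs; IRQs 0-7 map to
 * the master mask register at 0x21 and IRQs 8-15 to the slave at 0xA1,
 * using bit (irq & 7) within that byte.
 */
#include <stdio.h>

static void show_pic_mapping(unsigned int irq)
{
	unsigned int port = (irq & 8) ? 0xA1 : 0x21;	/* slave : master */
	unsigned int bit  = irq & 7;			/* bit within byte */

	printf("IRQ%u -> port 0x%02X, mask bit %u\n", irq, port, bit);
}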
104
105int i8259A_irq_pending(unsigned int irq)
106{
107 unsigned int mask = 1 << irq;
108 unsigned long flags;
109 int ret;
110
111 spin_lock_irqsave(&i8259A_lock, flags);
112 if (irq < 8)
113 ret = inb(0x20) & mask;
114 else
115 ret = inb(0xA0) & (mask >> 8);
116 spin_unlock_irqrestore(&i8259A_lock, flags);
117
118 return ret;
119}
120
121void make_8259A_irq(unsigned int irq)
122{
123 disable_irq_nosync(irq);
124 irq_desc[irq].handler = &i8259A_irq_type;
125 enable_irq(irq);
126}
127
128/*
129 * This function is expected to be called rarely.  Switching between
130 * 8259A registers is slow.
131 * Callers must hold the irq controller spinlock
132 * before calling it.
133 */
134static inline int i8259A_irq_real(unsigned int irq)
135{
136 int value;
137 int irqmask = 1 << irq;
138
139 if (irq < 8) {
140 outb(0x0B,0x20); /* ISR register */
141 value = inb(0x20) & irqmask;
142 outb(0x0A,0x20); /* back to the IRR register */
143 return value;
144 }
145 outb(0x0B,0xA0); /* ISR register */
146 value = inb(0xA0) & (irqmask >> 8);
147 outb(0x0A,0xA0); /* back to the IRR register */
148 return value;
149}
150
151/*
152 * Careful! The 8259A is a fragile beast, it pretty
153 * much _has_ to be done exactly like this (mask it
154 * first, _then_ send the EOI), and the order of EOI
155 * to the two 8259s is important!
156 */
157void mask_and_ack_8259A(unsigned int irq)
158{
159 unsigned int irqmask = 1 << irq;
160 unsigned long flags;
161
162 spin_lock_irqsave(&i8259A_lock, flags);
163 /*
164 * Lightweight spurious IRQ detection. We do not want to overdo
165 * spurious IRQ handling - it's usually a sign of hardware problems, so
166 * we only do the checks we can do without slowing down good hardware
167	 * unnecessarily.
168 *
169 * Note that IRQ7 and IRQ15 (the two spurious IRQs usually resulting
170	 * from the 8259A-1|2 PICs) occur even if the IRQ is masked in the 8259A.
171 * Thus we can check spurious 8259A IRQs without doing the quite slow
172 * i8259A_irq_real() call for every IRQ. This does not cover 100% of
173 * spurious interrupts, but should be enough to warn the user that
174 * there is something bad going on ...
175 */
176 if (cached_irq_mask & irqmask)
177 goto spurious_8259A_irq;
178 cached_irq_mask |= irqmask;
179
180handle_real_irq:
181 if (irq & 8) {
182 inb(0xA1); /* DUMMY - (do we need this?) */
183 outb(cached_A1,0xA1);
184 outb(0x60+(irq&7),0xA0);/* 'Specific EOI' to slave */
185 outb(0x62,0x20); /* 'Specific EOI' to master-IRQ2 */
186 } else {
187 inb(0x21); /* DUMMY - (do we need this?) */
188 outb(cached_21,0x21);
189 outb(0x60+irq,0x20); /* 'Specific EOI' to master */
190 }
191 spin_unlock_irqrestore(&i8259A_lock, flags);
192 return;
193
194spurious_8259A_irq:
195 /*
196 * this is the slow path - should happen rarely.
197 */
198 if (i8259A_irq_real(irq))
199 /*
200 * oops, the IRQ _is_ in service according to the
201 * 8259A - not spurious, go handle it.
202 */
203 goto handle_real_irq;
204
205 {
206 static int spurious_irq_mask = 0;
207 /*
208 * At this point we can be sure the IRQ is spurious,
209		 * let's ACK and report it. [once per IRQ]
210 */
211 if (!(spurious_irq_mask & irqmask)) {
212 printk(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq);
213 spurious_irq_mask |= irqmask;
214 }
215 atomic_inc(&irq_err_count);
216 /*
217 * Theoretically we do not have to handle this IRQ,
218 * but in Linux this does not cause problems and is
219 * simpler for us.
220 */
221 goto handle_real_irq;
222 }
223}
224
225static int i8259A_resume(struct sys_device *dev)
226{
227 init_8259A(0);
228 return 0;
229}
230
231static struct sysdev_class i8259_sysdev_class = {
232 set_kset_name("i8259"),
233 .resume = i8259A_resume,
234};
235
236static struct sys_device device_i8259A = {
237 .id = 0,
238 .cls = &i8259_sysdev_class,
239};
240
241static int __init i8259A_init_sysfs(void)
242{
243 int error = sysdev_class_register(&i8259_sysdev_class);
244 if (!error)
245 error = sysdev_register(&device_i8259A);
246 return error;
247}
248
249device_initcall(i8259A_init_sysfs);
250
251void __init init_8259A(int auto_eoi)
252{
253 unsigned long flags;
254
255 spin_lock_irqsave(&i8259A_lock, flags);
256
257 outb(0xff, 0x21); /* mask all of 8259A-1 */
258 outb(0xff, 0xA1); /* mask all of 8259A-2 */
259
260 /*
261 * outb_p - this has to work on a wide range of PC hardware.
262 */
263 outb_p(0x11, 0x20); /* ICW1: select 8259A-1 init */
264 outb_p(0x00, 0x21); /* ICW2: 8259A-1 IR0-7 mapped to 0x00-0x07 */
265 outb_p(0x04, 0x21); /* 8259A-1 (the master) has a slave on IR2 */
266 if (auto_eoi)
267 outb_p(0x03, 0x21); /* master does Auto EOI */
268 else
269 outb_p(0x01, 0x21); /* master expects normal EOI */
270
271 outb_p(0x11, 0xA0); /* ICW1: select 8259A-2 init */
272 outb_p(0x08, 0xA1); /* ICW2: 8259A-2 IR0-7 mapped to 0x08-0x0f */
273 outb_p(0x02, 0xA1); /* 8259A-2 is a slave on master's IR2 */
274 outb_p(0x01, 0xA1); /* (slave's support for AEOI in flat mode
275 is to be investigated) */
276
277 if (auto_eoi)
278 /*
279 * in AEOI mode we just have to mask the interrupt
280 * when acking.
281 */
282 i8259A_irq_type.ack = disable_8259A_irq;
283 else
284 i8259A_irq_type.ack = mask_and_ack_8259A;
285
286 udelay(100); /* wait for 8259A to initialize */
287
288 outb(cached_21, 0x21); /* restore master IRQ mask */
289 outb(cached_A1, 0xA1); /* restore slave IRQ mask */
290
291 spin_unlock_irqrestore(&i8259A_lock, flags);
292}
293
294/*
295 * IRQ2 is cascade interrupt to second interrupt controller
296 */
297static struct irqaction irq2 = {
298 no_action, 0, CPU_MASK_NONE, "cascade", NULL, NULL
299};
300
301static struct resource pic1_io_resource = {
302 "pic1", 0x20, 0x3f, IORESOURCE_BUSY
303};
304
305static struct resource pic2_io_resource = {
306 "pic2", 0xa0, 0xbf, IORESOURCE_BUSY
307};
308
309/*
310 * On systems with i8259-style interrupt controllers we assume, for
311 * driver compatibility reasons, interrupts 0 - 15 to be the i8259
312 * interrupts even if the hardware uses a different interrupt numbering.
313 */
314void __init init_i8259_irqs (void)
315{
316 int i;
317
318 request_resource(&ioport_resource, &pic1_io_resource);
319 request_resource(&ioport_resource, &pic2_io_resource);
320
321 init_8259A(0);
322
323 for (i = 0; i < 16; i++) {
324 irq_desc[i].status = IRQ_DISABLED;
325 irq_desc[i].action = 0;
326 irq_desc[i].depth = 1;
327 irq_desc[i].handler = &i8259A_irq_type;
328 }
329
330 setup_irq(2, &irq2);
331}
diff --git a/arch/mips/kernel/init_task.c b/arch/mips/kernel/init_task.c
new file mode 100644
index 000000000000..aeda7f58391b
--- /dev/null
+++ b/arch/mips/kernel/init_task.c
@@ -0,0 +1,42 @@
1#include <linux/mm.h>
2#include <linux/module.h>
3#include <linux/sched.h>
4#include <linux/init_task.h>
5#include <linux/fs.h>
6#include <linux/mqueue.h>
7
8#include <asm/thread_info.h>
9#include <asm/uaccess.h>
10#include <asm/pgtable.h>
11
12static struct fs_struct init_fs = INIT_FS;
13static struct files_struct init_files = INIT_FILES;
14static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
15static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
16struct mm_struct init_mm = INIT_MM(init_mm);
17
18EXPORT_SYMBOL(init_mm);
19
20/*
21 * Initial thread structure.
22 *
23 * We need to make sure that this is 8192-byte aligned due to the
24 * way process stacks are handled. This is done by making sure
25 * the linker maps this in the .text segment right after head.S,
26 * and making head.S ensure the proper alignment.
27 *
28 * The things we do for performance..
29 */
30union thread_union init_thread_union
31 __attribute__((__section__(".data.init_task"),
32 __aligned__(THREAD_SIZE))) =
33 { INIT_THREAD_INFO(init_task) };
34
35/*
36 * Initial task structure.
37 *
38 * All other task structs will be allocated on slabs in fork.c
39 */
40struct task_struct init_task = INIT_TASK(init_task);
41
42EXPORT_SYMBOL(init_task);
diff --git a/arch/mips/kernel/ioctl32.c b/arch/mips/kernel/ioctl32.c
new file mode 100644
index 000000000000..519cd5d0aebb
--- /dev/null
+++ b/arch/mips/kernel/ioctl32.c
@@ -0,0 +1,58 @@
1/*
2 * ioctl32.c: Conversion between 32bit and 64bit native ioctls.
3 *
4 * Copyright (C) 2000 Silicon Graphics, Inc.
5 * Written by Ulf Carlsson (ulfc@engr.sgi.com)
6 * Copyright (C) 2000, 2004 Ralf Baechle
7 * Copyright (C) 2002, 2003 Maciej W. Rozycki
8 */
9#define INCLUDES
10#include "compat_ioctl.c"
11
12#include <linux/config.h>
13#include <linux/types.h>
14#include <linux/compat.h>
15#include <linux/ioctl32.h>
16#include <linux/syscalls.h>
17
18#ifdef CONFIG_SIBYTE_TBPROF
19#include <asm/sibyte/trace_prof.h>
20#endif
21
22#define A(__x) ((unsigned long)(__x))
23
24long sys_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg);
25
26#define CODE
27#include "compat_ioctl.c"
28
29typedef int (* ioctl32_handler_t)(unsigned int, unsigned int, unsigned long, struct file *);
30
31#define COMPATIBLE_IOCTL(cmd) HANDLE_IOCTL((cmd),sys_ioctl)
32#define HANDLE_IOCTL(cmd,handler) { (cmd), (ioctl32_handler_t)(handler), NULL },
33#define IOCTL_TABLE_START \
34 struct ioctl_trans ioctl_start[] = {
35#define IOCTL_TABLE_END \
36 };
37
38IOCTL_TABLE_START
39
40#include <linux/compat_ioctl.h>
41#define DECLARES
42#include "compat_ioctl.c"
43
44#ifdef CONFIG_SIBYTE_TBPROF
45COMPATIBLE_IOCTL(SBPROF_ZBSTART)
46COMPATIBLE_IOCTL(SBPROF_ZBSTOP)
47COMPATIBLE_IOCTL(SBPROF_ZBWAITFULL)
48#endif /* CONFIG_SIBYTE_TBPROF */
49
50/*HANDLE_IOCTL(RTC_IRQP_READ, w_long)
51COMPATIBLE_IOCTL(RTC_IRQP_SET)
52HANDLE_IOCTL(RTC_EPOCH_READ, w_long)
53COMPATIBLE_IOCTL(RTC_EPOCH_SET)
54*/
55
56IOCTL_TABLE_END
57
58int ioctl_table_size = ARRAY_SIZE(ioctl_start);
diff --git a/arch/mips/kernel/irix5sys.S b/arch/mips/kernel/irix5sys.S
new file mode 100644
index 000000000000..eeef891093ed
--- /dev/null
+++ b/arch/mips/kernel/irix5sys.S
@@ -0,0 +1,1041 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * 32-bit IRIX5 ABI system call table derived from original file 'irix5sys.h'
7 * created by David S. Miller.
8 *
9 * Copyright (C) 1996 - 2004 David S. Miller <dm@engr.sgi.com>
10 * Copyright (C) 2004 Steven J. Hill <sjhill@realitydiluted.com>
11 */
12#include <asm/asm.h>
13
14 /*
15 * Key:
16 * V == Valid and should work as expected for most cases.
17 * HV == Half Valid, some things will work, some likely will not
18 * IV == InValid, certainly will not work at all yet
19 * ?V == ?'ably Valid, I have not done enough looking into it
20 * DC == Don't Care, a rats ass we couldn't give
21 */
22
23 .macro irix5syscalltable
24
25 sys sys_syscall 0 /* 1000 sysindir() V*/
26 sys sys_exit 1 /* 1001 exit() V*/
27 sys sys_fork 0 /* 1002 fork() V*/
28 sys sys_read 3 /* 1003 read() V*/
29 sys sys_write 3 /* 1004 write() V*/
30 sys sys_open 3 /* 1005 open() V*/
31 sys sys_close 1 /* 1006 close() V*/
32 sys irix_unimp 0 /* 1007 (XXX IRIX 4 wait) V*/
33 sys sys_creat 2 /* 1008 creat() V*/
34 sys sys_link 2 /* 1009 link() V*/
35 sys sys_unlink 1 /* 1010 unlink() V*/
36 sys irix_exec 0 /* 1011 exec() V*/
37 sys sys_chdir 1 /* 1012 chdir() V*/
38 sys irix_gtime 0 /* 1013 time() V*/
39 sys irix_unimp 0 /* 1014 (XXX IRIX 4 mknod) V*/
40 sys sys_chmod 2 /* 1015 chmod() V*/
41 sys sys_chown 3 /* 1016 chown() V*/
42 sys irix_brk 1 /* 1017 break() V*/
43 sys irix_unimp 0 /* 1018 (XXX IRIX 4 stat) V*/
44 sys sys_lseek 3 /* 1019 lseek() XXX64bit HV*/
45 sys irix_getpid 0 /* 1020 getpid() V*/
46 sys irix_mount 6 /* 1021 mount() IV*/
47 sys sys_umount 1 /* 1022 umount() V*/
48 sys sys_setuid 1 /* 1023 setuid() V*/
49 sys irix_getuid 0 /* 1024 getuid() V*/
50 sys irix_stime 1 /* 1025 stime() V*/
51 sys irix_unimp 4 /* 1026 XXX ptrace() IV*/
52 sys irix_alarm 1 /* 1027 alarm() V*/
53 sys irix_unimp 0 /* 1028 (XXX IRIX 4 fstat) V*/
54 sys irix_pause 0 /* 1029 pause() V*/
55 sys sys_utime 2 /* 1030 utime() V*/
56 sys irix_unimp 0 /* 1031 nuthin' V*/
57 sys irix_unimp 0 /* 1032 nobody home man... V*/
58 sys sys_access 2 /* 1033 access() V*/
59 sys sys_nice 1 /* 1034 nice() V*/
60 sys irix_statfs 2 /* 1035 statfs() V*/
61 sys sys_sync 0 /* 1036 sync() V*/
62 sys sys_kill 2 /* 1037 kill() V*/
63 sys irix_fstatfs 2 /* 1038 fstatfs() V*/
64 sys irix_setpgrp 1 /* 1039 setpgrp() V*/
65 sys irix_syssgi 0 /* 1040 syssgi() HV*/
66 sys sys_dup 1 /* 1041 dup() V*/
67 sys sys_pipe 0 /* 1042 pipe() V*/
68 sys irix_times 1 /* 1043 times() V*/
69 sys irix_unimp 0 /* 1044 XXX profil() IV*/
70 sys irix_unimp 0 /* 1045 XXX lock() IV*/
71 sys sys_setgid 1 /* 1046 setgid() V*/
72 sys irix_getgid 0 /* 1047 getgid() V*/
73 sys irix_unimp 0 /* 1048 (XXX IRIX 4 ssig) V*/
74 sys irix_msgsys 6 /* 1049 sys_msgsys V*/
75 sys sys_sysmips 4 /* 1050 sysmips() HV*/
76 sys irix_unimp 0 /* 1051 XXX sysacct() IV*/
77 sys irix_shmsys 5 /* 1052 sys_shmsys V*/
78 sys irix_semsys 0 /* 1053 sys_semsys V*/
79 sys irix_ioctl 3 /* 1054 ioctl() HV*/
80 sys irix_uadmin 0 /* 1055 XXX sys_uadmin() HV*/
81 sys irix_sysmp 0 /* 1056 sysmp() HV*/
82 sys irix_utssys 4 /* 1057 sys_utssys() HV*/
83 sys irix_unimp 0 /* 1058 nada enchilada V*/
84 sys irix_exece 0 /* 1059 exece() V*/
85 sys sys_umask 1 /* 1060 umask() V*/
86 sys sys_chroot 1 /* 1061 chroot() V*/
87 sys irix_fcntl 3 /* 1062 fcntl() ?V*/
88 sys irix_ulimit 2 /* 1063 ulimit() HV*/
89 sys irix_unimp 0 /* 1064 XXX AFS shit DC*/
90 sys irix_unimp 0 /* 1065 XXX AFS shit DC*/
91 sys irix_unimp 0 /* 1066 XXX AFS shit DC*/
92 sys irix_unimp 0 /* 1067 XXX AFS shit DC*/
93 sys irix_unimp 0 /* 1068 XXX AFS shit DC*/
94 sys irix_unimp 0 /* 1069 XXX AFS shit DC*/
95 sys irix_unimp 0 /* 1070 XXX AFS shit DC*/
96 sys irix_unimp 0 /* 1071 XXX AFS shit DC*/
97 sys irix_unimp 0 /* 1072 XXX AFS shit DC*/
98 sys irix_unimp 0 /* 1073 XXX AFS shit DC*/
99 sys irix_unimp 0 /* 1074 nuttin' V*/
100 sys irix_unimp 0 /* 1075 XXX sys_getrlimit64()IV*/
101 sys irix_unimp 0 /* 1076 XXX sys_setrlimit64()IV*/
102 sys sys_nanosleep 2 /* 1077 nanosleep() V*/
103 sys irix_lseek64 5 /* 1078 lseek64() ?V*/
104 sys sys_rmdir 1 /* 1079 rmdir() V*/
105 sys sys_mkdir 2 /* 1080 mkdir() V*/
106 sys sys_getdents 3 /* 1081 getdents() V*/
107 sys irix_sginap 1 /* 1082 sys_sginap() V*/
108 sys irix_sgikopt 3 /* 1083 sys_sgikopt() DC*/
109 sys sys_sysfs 3 /* 1084 sysfs() ?V*/
110 sys irix_unimp 0 /* 1085 XXX sys_getmsg() DC*/
111 sys irix_unimp 0 /* 1086 XXX sys_putmsg() DC*/
112 sys sys_poll 3 /* 1087 poll() V*/
113 sys irix_sigreturn 0 /* 1088 sigreturn() ?V*/
114 sys sys_accept 3 /* 1089 accept() V*/
115 sys sys_bind 3 /* 1090 bind() V*/
116 sys sys_connect 3 /* 1091 connect() V*/
117 sys irix_gethostid 0 /* 1092 sys_gethostid() ?V*/
118 sys sys_getpeername 3 /* 1093 getpeername() V*/
119 sys sys_getsockname 3 /* 1094 getsockname() V*/
120 sys sys_getsockopt 5 /* 1095 getsockopt() V*/
121 sys sys_listen 2 /* 1096 listen() V*/
122 sys sys_recv 4 /* 1097 recv() V*/
123 sys sys_recvfrom 6 /* 1098 recvfrom() V*/
124 sys sys_recvmsg 3 /* 1099 recvmsg() V*/
125 sys sys_select 5 /* 1100 select() V*/
126 sys sys_send 4 /* 1101 send() V*/
127 sys sys_sendmsg 3 /* 1102 sendmsg() V*/
128 sys sys_sendto 6 /* 1103 sendto() V*/
129 sys irix_sethostid 1 /* 1104 sys_sethostid() ?V*/
130 sys sys_setsockopt 5 /* 1105 setsockopt() V*/
131 sys sys_shutdown 2 /* 1106 shutdown() ?V*/
132 sys irix_socket 3 /* 1107 socket() V*/
133 sys sys_gethostname 2 /* 1108 sys_gethostname() ?V*/
134 sys sys_sethostname 2 /* 1109 sethostname() ?V*/
135 sys irix_getdomainname 2 /* 1110 sys_getdomainname() ?V*/
136 sys sys_setdomainname 2 /* 1111 setdomainname() ?V*/
137 sys sys_truncate 2 /* 1112 truncate() V*/
138 sys sys_ftruncate 2 /* 1113 ftruncate() V*/
139 sys sys_rename 2 /* 1114 rename() V*/
140 sys sys_symlink 2 /* 1115 symlink() V*/
141 sys sys_readlink 3 /* 1116 readlink() V*/
142 sys irix_unimp 0 /* 1117 XXX IRIX 4 lstat() DC*/
143 sys irix_unimp 0 /* 1118 nothin' V*/
144 sys irix_unimp 0 /* 1119 XXX nfs_svc() DC*/
145 sys irix_unimp 0 /* 1120 XXX nfs_getfh() DC*/
146 sys irix_unimp 0 /* 1121 XXX async_daemon() DC*/
147 sys irix_unimp 0 /* 1122 XXX exportfs() DC*/
148 sys sys_setregid 2 /* 1123 setregid() V*/
149 sys sys_setreuid 2 /* 1124 setreuid() V*/
150 sys sys_getitimer 2 /* 1125 getitimer() V*/
151 sys sys_setitimer 3 /* 1126 setitimer() V*/
152 sys irix_unimp 1 /* 1127 XXX adjtime() IV*/
153 sys irix_gettimeofday 1 /* 1128 gettimeofday() V*/
154 sys irix_unimp 0 /* 1129 XXX sproc() IV*/
155 sys irix_prctl 0 /* 1130 prctl() HV*/
156 sys irix_unimp 0 /* 1131 XXX procblk() IV*/
157 sys irix_unimp 0 /* 1132 XXX sprocsp() IV*/
158 sys irix_unimp 0 /* 1133 XXX sgigsc() IV*/
159 sys irix_mmap32 6 /* 1134 mmap() XXXflags? ?V*/
160 sys sys_munmap 2 /* 1135 munmap() V*/
161 sys sys_mprotect 3 /* 1136 mprotect() V*/
162 sys sys_msync 4 /* 1137 msync() V*/
163 sys irix_madvise 3 /* 1138 madvise() DC*/
164 sys irix_pagelock 3 /* 1139 pagelock() IV*/
165 sys irix_getpagesize 0 /* 1140 getpagesize() V*/
166 sys irix_quotactl 0 /* 1141 quotactl() V*/
167 sys irix_unimp 0 /* 1142 nobody home man V*/
168 sys sys_getpgid 1 /* 1143 BSD getpgrp() V*/
169 sys irix_BSDsetpgrp 2 /* 1144 BSD setpgrp() V*/
170 sys sys_vhangup 0 /* 1145 vhangup() V*/
171 sys sys_fsync 1 /* 1146 fsync() V*/
172 sys sys_fchdir 1 /* 1147 fchdir() V*/
173 sys sys_getrlimit 2 /* 1148 getrlimit() ?V*/
174 sys sys_setrlimit 2 /* 1149 setrlimit() ?V*/
175 sys sys_cacheflush 3 /* 1150 cacheflush() HV*/
176 sys sys_cachectl 3 /* 1151 cachectl() HV*/
177 sys sys_fchown 3 /* 1152 fchown() ?V*/
178 sys sys_fchmod 2 /* 1153 fchmod() ?V*/
179 sys irix_unimp 0 /* 1154 XXX IRIX 4 wait3() V*/
180 sys sys_socketpair 4 /* 1155 socketpair() V*/
181 sys irix_systeminfo 3 /* 1156 systeminfo() IV*/
182 sys irix_uname 1 /* 1157 uname() IV*/
183 sys irix_xstat 3 /* 1158 xstat() V*/
184 sys irix_lxstat 3 /* 1159 lxstat() V*/
185 sys irix_fxstat 3 /* 1160 fxstat() V*/
186 sys irix_xmknod 0 /* 1161 xmknod() ?V*/
187 sys irix_sigaction 4 /* 1162 sigaction() ?V*/
188 sys irix_sigpending 1 /* 1163 sigpending() ?V*/
189 sys irix_sigprocmask 3 /* 1164 sigprocmask() ?V*/
190 sys irix_sigsuspend 0 /* 1165 sigsuspend() ?V*/
191 sys irix_sigpoll_sys 3 /* 1166 sigpoll_sys() IV*/
192 sys irix_swapctl 2 /* 1167 swapctl() IV*/
193 sys irix_getcontext 0 /* 1168 getcontext() HV*/
194 sys irix_setcontext 0 /* 1169 setcontext() HV*/
195 sys irix_waitsys 5 /* 1170 waitsys() IV*/
196 sys irix_sigstack 2 /* 1171 sigstack() HV*/
197 sys irix_sigaltstack 2 /* 1172 sigaltstack() HV*/
198 sys irix_sigsendset 2 /* 1173 sigsendset() IV*/
199 sys irix_statvfs 2 /* 1174 statvfs() V*/
200 sys irix_fstatvfs 2 /* 1175 fstatvfs() V*/
201 sys irix_unimp 0 /* 1176 XXX getpmsg() DC*/
202 sys irix_unimp 0 /* 1177 XXX putpmsg() DC*/
203 sys sys_lchown 3 /* 1178 lchown() V*/
204 sys irix_priocntl 0 /* 1179 priocntl() DC*/
205 sys irix_sigqueue 4 /* 1180 sigqueue() IV*/
206 sys sys_readv 3 /* 1181 readv() V*/
207 sys sys_writev 3 /* 1182 writev() V*/
208 sys irix_truncate64 4 /* 1183 truncate64() XX32bit HV*/
209 sys irix_ftruncate64 4 /* 1184 ftruncate64()XX32bit HV*/
210 sys irix_mmap64 0 /* 1185 mmap64() XX32bit HV*/
211 sys irix_dmi 0 /* 1186 dmi() DC*/
212 sys irix_pread 6 /* 1187 pread() IV*/
213 sys irix_pwrite 6 /* 1188 pwrite() IV*/
214 sys sys_fsync 1 /* 1189 fdatasync() XXPOSIX HV*/
215 sys irix_sgifastpath 7 /* 1190 sgifastpath() WHEEE IV*/
216 sys irix_unimp 0 /* 1191 XXX attr_get() DC*/
217 sys irix_unimp 0 /* 1192 XXX attr_getf() DC*/
218 sys irix_unimp 0 /* 1193 XXX attr_set() DC*/
219 sys irix_unimp 0 /* 1194 XXX attr_setf() DC*/
220 sys irix_unimp 0 /* 1195 XXX attr_remove() DC*/
221 sys irix_unimp 0 /* 1196 XXX attr_removef() DC*/
222 sys irix_unimp 0 /* 1197 XXX attr_list() DC*/
223 sys irix_unimp 0 /* 1198 XXX attr_listf() DC*/
224 sys irix_unimp 0 /* 1199 XXX attr_multi() DC*/
225 sys irix_unimp 0 /* 1200 XXX attr_multif() DC*/
226 sys irix_statvfs64 2 /* 1201 statvfs64() V*/
227 sys irix_fstatvfs64 2 /* 1202 fstatvfs64() V*/
228 sys irix_getmountid 2 /* 1203 getmountid()XXXfsids HV*/
229 sys irix_nsproc 5 /* 1204 nsproc() IV*/
230 sys irix_getdents64 3 /* 1205 getdents64() HV*/
231 sys irix_unimp 0 /* 1206 XXX DFS garbage DC*/
232 sys irix_ngetdents 4 /* 1207 ngetdents() XXXeop HV*/
233 sys irix_ngetdents64 4 /* 1208 ngetdents64() XXXeop HV*/
234 sys irix_unimp 0 /* 1209 nothin' V*/
235 sys irix_unimp 0 /* 1210 XXX pidsprocsp() */
236 sys irix_unimp 0 /* 1211 XXX rexec() */
237 sys irix_unimp 0 /* 1212 XXX timer_create() */
238 sys irix_unimp 0 /* 1213 XXX timer_delete() */
239 sys irix_unimp 0 /* 1214 XXX timer_settime() */
240 sys irix_unimp 0 /* 1215 XXX timer_gettime() */
241 sys irix_unimp 0 /* 1216 XXX timer_setoverrun() */
242 sys sys_sched_rr_get_interval 2 /* 1217 sched_rr_get_interval()V*/
243 sys sys_sched_yield 0 /* 1218 sched_yield() V*/
244 sys sys_sched_getscheduler 1 /* 1219 sched_getscheduler() V*/
245 sys sys_sched_setscheduler 3 /* 1220 sched_setscheduler() V*/
246 sys sys_sched_getparam 2 /* 1221 sched_getparam() V*/
247 sys sys_sched_setparam 2 /* 1222 sched_setparam() V*/
248 sys irix_unimp 0 /* 1223 XXX usync_cntl() */
249 sys irix_unimp 0 /* 1224 XXX psema_cntl() */
250 sys irix_unimp 0 /* 1225 XXX restartreturn() */
251
252 /* Just to pad things out nicely. */
253 sys irix_unimp 0
254 sys irix_unimp 0
255 sys irix_unimp 0
256 sys irix_unimp 0
257 sys irix_unimp 0
258 sys irix_unimp 0
259 sys irix_unimp 0
260 sys irix_unimp 0
261 sys irix_unimp 0
262 sys irix_unimp 0
263 sys irix_unimp 0
264 sys irix_unimp 0
265 sys irix_unimp 0
266 sys irix_unimp 0
267 sys irix_unimp 0
268 sys irix_unimp 0
269 sys irix_unimp 0
270 sys irix_unimp 0
271 sys irix_unimp 0
272 sys irix_unimp 0
273 sys irix_unimp 0
274 sys irix_unimp 0
275 sys irix_unimp 0
276 sys irix_unimp 0
277 sys irix_unimp 0
278 sys irix_unimp 0
279 sys irix_unimp 0
280 sys irix_unimp 0
281 sys irix_unimp 0
282 sys irix_unimp 0
283 sys irix_unimp 0
284 sys irix_unimp 0
285 sys irix_unimp 0
286 sys irix_unimp 0
287 sys irix_unimp 0
288 sys irix_unimp 0
289 sys irix_unimp 0
290 sys irix_unimp 0
291 sys irix_unimp 0
292 sys irix_unimp 0
293 sys irix_unimp 0
294 sys irix_unimp 0
295 sys irix_unimp 0
296 sys irix_unimp 0
297 sys irix_unimp 0
298 sys irix_unimp 0
299 sys irix_unimp 0
300 sys irix_unimp 0
301 sys irix_unimp 0
302 sys irix_unimp 0
303 sys irix_unimp 0
304 sys irix_unimp 0
305 sys irix_unimp 0
306 sys irix_unimp 0
307 sys irix_unimp 0
308 sys irix_unimp 0
309 sys irix_unimp 0
310 sys irix_unimp 0
311 sys irix_unimp 0
312 sys irix_unimp 0
313 sys irix_unimp 0
314 sys irix_unimp 0
315 sys irix_unimp 0
316 sys irix_unimp 0
317 sys irix_unimp 0
318 sys irix_unimp 0
319 sys irix_unimp 0
320 sys irix_unimp 0
321 sys irix_unimp 0
322 sys irix_unimp 0
323 sys irix_unimp 0
324 sys irix_unimp 0
325 sys irix_unimp 0
326 sys irix_unimp 0
327 sys irix_unimp 0
328 sys irix_unimp 0
329 sys irix_unimp 0
330 sys irix_unimp 0
331 sys irix_unimp 0
332 sys irix_unimp 0
333 sys irix_unimp 0
334 sys irix_unimp 0
335 sys irix_unimp 0
336 sys irix_unimp 0
337 sys irix_unimp 0
338 sys irix_unimp 0
339 sys irix_unimp 0
340 sys irix_unimp 0
341 sys irix_unimp 0
342 sys irix_unimp 0
343 sys irix_unimp 0
344 sys irix_unimp 0
345 sys irix_unimp 0
346 sys irix_unimp 0
347 sys irix_unimp 0
348 sys irix_unimp 0
349 sys irix_unimp 0
350 sys irix_unimp 0
351 sys irix_unimp 0
352 sys irix_unimp 0
353 sys irix_unimp 0
354 sys irix_unimp 0
355 sys irix_unimp 0
356 sys irix_unimp 0
357 sys irix_unimp 0
358 sys irix_unimp 0
359 sys irix_unimp 0
360 sys irix_unimp 0
361 sys irix_unimp 0
362 sys irix_unimp 0
363 sys irix_unimp 0
364 sys irix_unimp 0
365 sys irix_unimp 0
366 sys irix_unimp 0
367 sys irix_unimp 0
368 sys irix_unimp 0
369 sys irix_unimp 0
370 sys irix_unimp 0
371 sys irix_unimp 0
372 sys irix_unimp 0
373 sys irix_unimp 0
374 sys irix_unimp 0
375 sys irix_unimp 0
376 sys irix_unimp 0
377 sys irix_unimp 0
378 sys irix_unimp 0
379 sys irix_unimp 0
380 sys irix_unimp 0
381 sys irix_unimp 0
382 sys irix_unimp 0
383 sys irix_unimp 0
384 sys irix_unimp 0
385 sys irix_unimp 0
386 sys irix_unimp 0
387 sys irix_unimp 0
388 sys irix_unimp 0
389 sys irix_unimp 0
390 sys irix_unimp 0
391 sys irix_unimp 0
392 sys irix_unimp 0
393 sys irix_unimp 0
394 sys irix_unimp 0
395 sys irix_unimp 0
396 sys irix_unimp 0
397 sys irix_unimp 0
398 sys irix_unimp 0
399 sys irix_unimp 0
400 sys irix_unimp 0
401 sys irix_unimp 0
402 sys irix_unimp 0
403 sys irix_unimp 0
404 sys irix_unimp 0
405 sys irix_unimp 0
406 sys irix_unimp 0
407 sys irix_unimp 0
408 sys irix_unimp 0
409 sys irix_unimp 0
410 sys irix_unimp 0
411 sys irix_unimp 0
412 sys irix_unimp 0
413 sys irix_unimp 0
414 sys irix_unimp 0
415 sys irix_unimp 0
416 sys irix_unimp 0
417 sys irix_unimp 0
418 sys irix_unimp 0
419 sys irix_unimp 0
420 sys irix_unimp 0
421 sys irix_unimp 0
422 sys irix_unimp 0
423 sys irix_unimp 0
424 sys irix_unimp 0
425 sys irix_unimp 0
426 sys irix_unimp 0
427 sys irix_unimp 0
428 sys irix_unimp 0
429 sys irix_unimp 0
430 sys irix_unimp 0
431 sys irix_unimp 0
432 sys irix_unimp 0
433 sys irix_unimp 0
434 sys irix_unimp 0
435 sys irix_unimp 0
436 sys irix_unimp 0
437 sys irix_unimp 0
438 sys irix_unimp 0
439 sys irix_unimp 0
440 sys irix_unimp 0
441 sys irix_unimp 0
442 sys irix_unimp 0
443 sys irix_unimp 0
444 sys irix_unimp 0
445 sys irix_unimp 0
446 sys irix_unimp 0
447 sys irix_unimp 0
448 sys irix_unimp 0
449 sys irix_unimp 0
450 sys irix_unimp 0
451 sys irix_unimp 0
452 sys irix_unimp 0
453 sys irix_unimp 0
454 sys irix_unimp 0
455 sys irix_unimp 0
456 sys irix_unimp 0
457 sys irix_unimp 0
458 sys irix_unimp 0
459 sys irix_unimp 0
460 sys irix_unimp 0
461 sys irix_unimp 0
462 sys irix_unimp 0
463 sys irix_unimp 0
464 sys irix_unimp 0
465 sys irix_unimp 0
466 sys irix_unimp 0
467 sys irix_unimp 0
468 sys irix_unimp 0
469 sys irix_unimp 0
470 sys irix_unimp 0
471 sys irix_unimp 0
472 sys irix_unimp 0
473 sys irix_unimp 0
474 sys irix_unimp 0
475 sys irix_unimp 0
476 sys irix_unimp 0
477 sys irix_unimp 0
478 sys irix_unimp 0
479 sys irix_unimp 0
480 sys irix_unimp 0
481 sys irix_unimp 0
482 sys irix_unimp 0
483 sys irix_unimp 0
484 sys irix_unimp 0
485 sys irix_unimp 0
486 sys irix_unimp 0
487 sys irix_unimp 0
488 sys irix_unimp 0
489 sys irix_unimp 0
490 sys irix_unimp 0
491 sys irix_unimp 0
492 sys irix_unimp 0
493 sys irix_unimp 0
494 sys irix_unimp 0
495 sys irix_unimp 0
496 sys irix_unimp 0
497 sys irix_unimp 0
498 sys irix_unimp 0
499 sys irix_unimp 0
500 sys irix_unimp 0
501 sys irix_unimp 0
502 sys irix_unimp 0
503 sys irix_unimp 0
504 sys irix_unimp 0
505 sys irix_unimp 0
506 sys irix_unimp 0
507 sys irix_unimp 0
508 sys irix_unimp 0
509 sys irix_unimp 0
510 sys irix_unimp 0
511 sys irix_unimp 0
512 sys irix_unimp 0
513 sys irix_unimp 0
514 sys irix_unimp 0
515 sys irix_unimp 0
516 sys irix_unimp 0
517 sys irix_unimp 0
518 sys irix_unimp 0
519 sys irix_unimp 0
520 sys irix_unimp 0
521 sys irix_unimp 0
522 sys irix_unimp 0
523 sys irix_unimp 0
524 sys irix_unimp 0
525 sys irix_unimp 0
526 sys irix_unimp 0
527 sys irix_unimp 0
528 sys irix_unimp 0
529 sys irix_unimp 0
530 sys irix_unimp 0
531 sys irix_unimp 0
532 sys irix_unimp 0
533 sys irix_unimp 0
534 sys irix_unimp 0
535 sys irix_unimp 0
536 sys irix_unimp 0
537 sys irix_unimp 0
538 sys irix_unimp 0
539 sys irix_unimp 0
540 sys irix_unimp 0
541 sys irix_unimp 0
542 sys irix_unimp 0
543 sys irix_unimp 0
544 sys irix_unimp 0
545 sys irix_unimp 0
546 sys irix_unimp 0
547 sys irix_unimp 0
548 sys irix_unimp 0
549 sys irix_unimp 0
550 sys irix_unimp 0
551 sys irix_unimp 0
552 sys irix_unimp 0
553 sys irix_unimp 0
554 sys irix_unimp 0
555 sys irix_unimp 0
556 sys irix_unimp 0
557 sys irix_unimp 0
558 sys irix_unimp 0
559 sys irix_unimp 0
560 sys irix_unimp 0
561 sys irix_unimp 0
562 sys irix_unimp 0
563 sys irix_unimp 0
564 sys irix_unimp 0
565 sys irix_unimp 0
566 sys irix_unimp 0
567 sys irix_unimp 0
568 sys irix_unimp 0
569 sys irix_unimp 0
570 sys irix_unimp 0
571 sys irix_unimp 0
572 sys irix_unimp 0
573 sys irix_unimp 0
574 sys irix_unimp 0
575 sys irix_unimp 0
576 sys irix_unimp 0
577 sys irix_unimp 0
578 sys irix_unimp 0
579 sys irix_unimp 0
580 sys irix_unimp 0
581 sys irix_unimp 0
582 sys irix_unimp 0
583 sys irix_unimp 0
584 sys irix_unimp 0
585 sys irix_unimp 0
586 sys irix_unimp 0
587 sys irix_unimp 0
588 sys irix_unimp 0
589 sys irix_unimp 0
590 sys irix_unimp 0
591 sys irix_unimp 0
592 sys irix_unimp 0
593 sys irix_unimp 0
594 sys irix_unimp 0
595 sys irix_unimp 0
596 sys irix_unimp 0
597 sys irix_unimp 0
598 sys irix_unimp 0
599 sys irix_unimp 0
600 sys irix_unimp 0
601 sys irix_unimp 0
602 sys irix_unimp 0
603 sys irix_unimp 0
604 sys irix_unimp 0
605 sys irix_unimp 0
606 sys irix_unimp 0
607 sys irix_unimp 0
608 sys irix_unimp 0
609 sys irix_unimp 0
610 sys irix_unimp 0
611 sys irix_unimp 0
612 sys irix_unimp 0
613 sys irix_unimp 0
614 sys irix_unimp 0
615 sys irix_unimp 0
616 sys irix_unimp 0
617 sys irix_unimp 0
618 sys irix_unimp 0
619 sys irix_unimp 0
620 sys irix_unimp 0
621 sys irix_unimp 0
622 sys irix_unimp 0
623 sys irix_unimp 0
624 sys irix_unimp 0
625 sys irix_unimp 0
626 sys irix_unimp 0
627 sys irix_unimp 0
628 sys irix_unimp 0
629 sys irix_unimp 0
630 sys irix_unimp 0
631 sys irix_unimp 0
632 sys irix_unimp 0
633 sys irix_unimp 0
634 sys irix_unimp 0
635 sys irix_unimp 0
636 sys irix_unimp 0
637 sys irix_unimp 0
638 sys irix_unimp 0
639 sys irix_unimp 0
640 sys irix_unimp 0
641 sys irix_unimp 0
642 sys irix_unimp 0
643 sys irix_unimp 0
644 sys irix_unimp 0
645 sys irix_unimp 0
646 sys irix_unimp 0
647 sys irix_unimp 0
648 sys irix_unimp 0
649 sys irix_unimp 0
650 sys irix_unimp 0
651 sys irix_unimp 0
652 sys irix_unimp 0
653 sys irix_unimp 0
654 sys irix_unimp 0
655 sys irix_unimp 0
656 sys irix_unimp 0
657 sys irix_unimp 0
658 sys irix_unimp 0
659 sys irix_unimp 0
660 sys irix_unimp 0
661 sys irix_unimp 0
662 sys irix_unimp 0
663 sys irix_unimp 0
664 sys irix_unimp 0
665 sys irix_unimp 0
666 sys irix_unimp 0
667 sys irix_unimp 0
668 sys irix_unimp 0
669 sys irix_unimp 0
670 sys irix_unimp 0
671 sys irix_unimp 0
672 sys irix_unimp 0
673 sys irix_unimp 0
674 sys irix_unimp 0
675 sys irix_unimp 0
676 sys irix_unimp 0
677 sys irix_unimp 0
678 sys irix_unimp 0
679 sys irix_unimp 0
680 sys irix_unimp 0
681 sys irix_unimp 0
682 sys irix_unimp 0
683 sys irix_unimp 0
684 sys irix_unimp 0
685 sys irix_unimp 0
686 sys irix_unimp 0
687 sys irix_unimp 0
688 sys irix_unimp 0
689 sys irix_unimp 0
690 sys irix_unimp 0
691 sys irix_unimp 0
692 sys irix_unimp 0
693 sys irix_unimp 0
694 sys irix_unimp 0
695 sys irix_unimp 0
696 sys irix_unimp 0
697 sys irix_unimp 0
698 sys irix_unimp 0
699 sys irix_unimp 0
700 sys irix_unimp 0
701 sys irix_unimp 0
702 sys irix_unimp 0
703 sys irix_unimp 0
704 sys irix_unimp 0
705 sys irix_unimp 0
706 sys irix_unimp 0
707 sys irix_unimp 0
708 sys irix_unimp 0
709 sys irix_unimp 0
710 sys irix_unimp 0
711 sys irix_unimp 0
712 sys irix_unimp 0
713 sys irix_unimp 0
714 sys irix_unimp 0
715 sys irix_unimp 0
716 sys irix_unimp 0
717 sys irix_unimp 0
718 sys irix_unimp 0
719 sys irix_unimp 0
720 sys irix_unimp 0
721 sys irix_unimp 0
722 sys irix_unimp 0
723 sys irix_unimp 0
724 sys irix_unimp 0
725 sys irix_unimp 0
726 sys irix_unimp 0
727 sys irix_unimp 0
728 sys irix_unimp 0
729 sys irix_unimp 0
730 sys irix_unimp 0
731 sys irix_unimp 0
732 sys irix_unimp 0
733 sys irix_unimp 0
734 sys irix_unimp 0
735 sys irix_unimp 0
736 sys irix_unimp 0
737 sys irix_unimp 0
738 sys irix_unimp 0
739 sys irix_unimp 0
740 sys irix_unimp 0
741 sys irix_unimp 0
742 sys irix_unimp 0
743 sys irix_unimp 0
744 sys irix_unimp 0
745 sys irix_unimp 0
746 sys irix_unimp 0
747 sys irix_unimp 0
748 sys irix_unimp 0
749 sys irix_unimp 0
750 sys irix_unimp 0
751 sys irix_unimp 0
752 sys irix_unimp 0
753 sys irix_unimp 0
754 sys irix_unimp 0
755 sys irix_unimp 0
756 sys irix_unimp 0
757 sys irix_unimp 0
758 sys irix_unimp 0
759 sys irix_unimp 0
760 sys irix_unimp 0
761 sys irix_unimp 0
762 sys irix_unimp 0
763 sys irix_unimp 0
764 sys irix_unimp 0
765 sys irix_unimp 0
766 sys irix_unimp 0
767 sys irix_unimp 0
768 sys irix_unimp 0
769 sys irix_unimp 0
770 sys irix_unimp 0
771 sys irix_unimp 0
772 sys irix_unimp 0
773 sys irix_unimp 0
774 sys irix_unimp 0
775 sys irix_unimp 0
776 sys irix_unimp 0
777 sys irix_unimp 0
778 sys irix_unimp 0
779 sys irix_unimp 0
780 sys irix_unimp 0
781 sys irix_unimp 0
782 sys irix_unimp 0
783 sys irix_unimp 0
784 sys irix_unimp 0
785 sys irix_unimp 0
786 sys irix_unimp 0
787 sys irix_unimp 0
788 sys irix_unimp 0
789 sys irix_unimp 0
790 sys irix_unimp 0
791 sys irix_unimp 0
792 sys irix_unimp 0
793 sys irix_unimp 0
794 sys irix_unimp 0
795 sys irix_unimp 0
796 sys irix_unimp 0
797 sys irix_unimp 0
798 sys irix_unimp 0
799 sys irix_unimp 0
800 sys irix_unimp 0
801 sys irix_unimp 0
802 sys irix_unimp 0
803 sys irix_unimp 0
804 sys irix_unimp 0
805 sys irix_unimp 0
806 sys irix_unimp 0
807 sys irix_unimp 0
808 sys irix_unimp 0
809 sys irix_unimp 0
810 sys irix_unimp 0
811 sys irix_unimp 0
812 sys irix_unimp 0
813 sys irix_unimp 0
814 sys irix_unimp 0
815 sys irix_unimp 0
816 sys irix_unimp 0
817 sys irix_unimp 0
818 sys irix_unimp 0
819 sys irix_unimp 0
820 sys irix_unimp 0
821 sys irix_unimp 0
822 sys irix_unimp 0
823 sys irix_unimp 0
824 sys irix_unimp 0
825 sys irix_unimp 0
826 sys irix_unimp 0
827 sys irix_unimp 0
828 sys irix_unimp 0
829 sys irix_unimp 0
830 sys irix_unimp 0
831 sys irix_unimp 0
832 sys irix_unimp 0
833 sys irix_unimp 0
834 sys irix_unimp 0
835 sys irix_unimp 0
836 sys irix_unimp 0
837 sys irix_unimp 0
838 sys irix_unimp 0
839 sys irix_unimp 0
840 sys irix_unimp 0
841 sys irix_unimp 0
842 sys irix_unimp 0
843 sys irix_unimp 0
844 sys irix_unimp 0
845 sys irix_unimp 0
846 sys irix_unimp 0
847 sys irix_unimp 0
848 sys irix_unimp 0
849 sys irix_unimp 0
850 sys irix_unimp 0
851 sys irix_unimp 0
852 sys irix_unimp 0
853 sys irix_unimp 0
854 sys irix_unimp 0
855 sys irix_unimp 0
856 sys irix_unimp 0
857 sys irix_unimp 0
858 sys irix_unimp 0
859 sys irix_unimp 0
860 sys irix_unimp 0
861 sys irix_unimp 0
862 sys irix_unimp 0
863 sys irix_unimp 0
864 sys irix_unimp 0
865 sys irix_unimp 0
866 sys irix_unimp 0
867 sys irix_unimp 0
868 sys irix_unimp 0
869 sys irix_unimp 0
870 sys irix_unimp 0
871 sys irix_unimp 0
872 sys irix_unimp 0
873 sys irix_unimp 0
874 sys irix_unimp 0
875 sys irix_unimp 0
876 sys irix_unimp 0
877 sys irix_unimp 0
878 sys irix_unimp 0
879 sys irix_unimp 0
880 sys irix_unimp 0
881 sys irix_unimp 0
882 sys irix_unimp 0
883 sys irix_unimp 0
884 sys irix_unimp 0
885 sys irix_unimp 0
886 sys irix_unimp 0
887 sys irix_unimp 0
888 sys irix_unimp 0
889 sys irix_unimp 0
890 sys irix_unimp 0
891 sys irix_unimp 0
892 sys irix_unimp 0
893 sys irix_unimp 0
894 sys irix_unimp 0
895 sys irix_unimp 0
896 sys irix_unimp 0
897 sys irix_unimp 0
898 sys irix_unimp 0
899 sys irix_unimp 0
900 sys irix_unimp 0
901 sys irix_unimp 0
902 sys irix_unimp 0
903 sys irix_unimp 0
904 sys irix_unimp 0
905 sys irix_unimp 0
906 sys irix_unimp 0
907 sys irix_unimp 0
908 sys irix_unimp 0
909 sys irix_unimp 0
910 sys irix_unimp 0
911 sys irix_unimp 0
912 sys irix_unimp 0
913 sys irix_unimp 0
914 sys irix_unimp 0
915 sys irix_unimp 0
916 sys irix_unimp 0
917 sys irix_unimp 0
918 sys irix_unimp 0
919 sys irix_unimp 0
920 sys irix_unimp 0
921 sys irix_unimp 0
922 sys irix_unimp 0
923 sys irix_unimp 0
924 sys irix_unimp 0
925 sys irix_unimp 0
926 sys irix_unimp 0
927 sys irix_unimp 0
928 sys irix_unimp 0
929 sys irix_unimp 0
930 sys irix_unimp 0
931 sys irix_unimp 0
932 sys irix_unimp 0
933 sys irix_unimp 0
934 sys irix_unimp 0
935 sys irix_unimp 0
936 sys irix_unimp 0
937 sys irix_unimp 0
938 sys irix_unimp 0
939 sys irix_unimp 0
940 sys irix_unimp 0
941 sys irix_unimp 0
942 sys irix_unimp 0
943 sys irix_unimp 0
944 sys irix_unimp 0
945 sys irix_unimp 0
946 sys irix_unimp 0
947 sys irix_unimp 0
948 sys irix_unimp 0
949 sys irix_unimp 0
950 sys irix_unimp 0
951 sys irix_unimp 0
952 sys irix_unimp 0
953 sys irix_unimp 0
954 sys irix_unimp 0
955 sys irix_unimp 0
956 sys irix_unimp 0
957 sys irix_unimp 0
958 sys irix_unimp 0
959 sys irix_unimp 0
960 sys irix_unimp 0
961 sys irix_unimp 0
962 sys irix_unimp 0
963 sys irix_unimp 0
964 sys irix_unimp 0
965 sys irix_unimp 0
966 sys irix_unimp 0
967 sys irix_unimp 0
968 sys irix_unimp 0
969 sys irix_unimp 0
970 sys irix_unimp 0
971 sys irix_unimp 0
972 sys irix_unimp 0
973 sys irix_unimp 0
974 sys irix_unimp 0
975 sys irix_unimp 0
976 sys irix_unimp 0
977 sys irix_unimp 0
978 sys irix_unimp 0
979 sys irix_unimp 0
980 sys irix_unimp 0
981 sys irix_unimp 0
982 sys irix_unimp 0
983 sys irix_unimp 0
984 sys irix_unimp 0
985 sys irix_unimp 0
986 sys irix_unimp 0
987 sys irix_unimp 0
988 sys irix_unimp 0
989 sys irix_unimp 0
990 sys irix_unimp 0
991 sys irix_unimp 0
992 sys irix_unimp 0
993 sys irix_unimp 0
994 sys irix_unimp 0
995 sys irix_unimp 0
996 sys irix_unimp 0
997 sys irix_unimp 0
998 sys irix_unimp 0
999 sys irix_unimp 0
1000 sys irix_unimp 0
1001 sys irix_unimp 0
1002 sys irix_unimp 0
1003 sys irix_unimp 0
1004 sys irix_unimp 0
1005 sys irix_unimp 0
1006 sys irix_unimp 0
1007 sys irix_unimp 0
1008 sys irix_unimp 0
1009 sys irix_unimp 0
1010 sys irix_unimp 0
1011 sys irix_unimp 0
1012 sys irix_unimp 0
1013 sys irix_unimp 0
1014 sys irix_unimp 0
1015 sys irix_unimp 0
1016 sys irix_unimp 0
1017 sys irix_unimp 0
1018 sys irix_unimp 0
1019 sys irix_unimp 0
1020 sys irix_unimp 0
1021 sys irix_unimp 0
1022 sys irix_unimp 0
1023 sys irix_unimp 0
1024 sys irix_unimp 0
1025 sys irix_unimp 0
1026 sys irix_unimp 0
1027
1028 .endm
1029
1030 /*
1031 * Pre-compute the number of _instruction_ bytes needed to load
1032 * or store the arguments 6-8. Negative values are ignored.
1033 */
1034 .macro sys function, nargs
1035 PTR \function
1036 LONG (\nargs << 2) - (5 << 2)
1037 .endm
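	/*
	 * For example, "sys sys_read 3" stores (3 << 2) - (5 << 2) = -8,
	 * a negative value that is simply ignored, while the seven-argument
	 * "sys irix_sgifastpath 7" stores (7 << 2) - (5 << 2) = 8, i.e.
	 * two 4-byte instructions, one per argument beyond the fifth.
	 */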
1038
1039 .align 4
1040EXPORT(sys_call_table_irix5)
1041 irix5syscalltable
diff --git a/arch/mips/kernel/irixelf.c b/arch/mips/kernel/irixelf.c
new file mode 100644
index 000000000000..4af20cd91f9f
--- /dev/null
+++ b/arch/mips/kernel/irixelf.c
@@ -0,0 +1,1326 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * irixelf.c: Code to load IRIX ELF executables conforming to the MIPS ABI.
7 * Based off of work by Eric Youngdale.
8 *
9 * Copyright (C) 1993 - 1994 Eric Youngdale <ericy@cais.com>
10 * Copyright (C) 1996 - 2004 David S. Miller <dm@engr.sgi.com>
11 * Copyright (C) 2004 Steven J. Hill <sjhill@realitydiluted.com>
12 */
13#include <linux/module.h>
14#include <linux/fs.h>
15#include <linux/stat.h>
16#include <linux/sched.h>
17#include <linux/mm.h>
18#include <linux/mman.h>
19#include <linux/a.out.h>
20#include <linux/errno.h>
21#include <linux/init.h>
22#include <linux/signal.h>
23#include <linux/binfmts.h>
24#include <linux/string.h>
25#include <linux/file.h>
26#include <linux/fcntl.h>
27#include <linux/ptrace.h>
28#include <linux/slab.h>
29#include <linux/shm.h>
30#include <linux/personality.h>
31#include <linux/elfcore.h>
32#include <linux/smp_lock.h>
33
34#include <asm/uaccess.h>
35#include <asm/mipsregs.h>
36#include <asm/prctl.h>
37
38#define DLINFO_ITEMS 12
39
40#include <linux/elf.h>
41
42#undef DEBUG_ELF
43
44static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs);
45static int load_irix_library(struct file *);
46static int irix_core_dump(long signr, struct pt_regs * regs,
47 struct file *file);
48
49static struct linux_binfmt irix_format = {
50 NULL, THIS_MODULE, load_irix_binary, load_irix_library,
51 irix_core_dump, PAGE_SIZE
52};
53
54#ifndef elf_addr_t
55#define elf_addr_t unsigned long
56#endif
57
58#ifdef DEBUG_ELF
59/* Debugging routines. */
60static char *get_elf_p_type(Elf32_Word p_type)
61{
62 int i = (int) p_type;
63
64 switch(i) {
65 case PT_NULL: return("PT_NULL"); break;
66 case PT_LOAD: return("PT_LOAD"); break;
67 case PT_DYNAMIC: return("PT_DYNAMIC"); break;
68 case PT_INTERP: return("PT_INTERP"); break;
69 case PT_NOTE: return("PT_NOTE"); break;
70 case PT_SHLIB: return("PT_SHLIB"); break;
71 case PT_PHDR: return("PT_PHDR"); break;
72 case PT_LOPROC: return("PT_LOPROC/REGINFO"); break;
73 case PT_HIPROC: return("PT_HIPROC"); break;
74 default: return("PT_BOGUS"); break;
75 }
76}
77
78static void print_elfhdr(struct elfhdr *ehp)
79{
80 int i;
81
82 printk("ELFHDR: e_ident<");
83 for(i = 0; i < (EI_NIDENT - 1); i++) printk("%x ", ehp->e_ident[i]);
84 printk("%x>\n", ehp->e_ident[i]);
85 printk(" e_type[%04x] e_machine[%04x] e_version[%08lx]\n",
86 (unsigned short) ehp->e_type, (unsigned short) ehp->e_machine,
87 (unsigned long) ehp->e_version);
88 printk(" e_entry[%08lx] e_phoff[%08lx] e_shoff[%08lx] "
89 "e_flags[%08lx]\n",
90 (unsigned long) ehp->e_entry, (unsigned long) ehp->e_phoff,
91 (unsigned long) ehp->e_shoff, (unsigned long) ehp->e_flags);
92 printk(" e_ehsize[%04x] e_phentsize[%04x] e_phnum[%04x]\n",
93 (unsigned short) ehp->e_ehsize, (unsigned short) ehp->e_phentsize,
94 (unsigned short) ehp->e_phnum);
95 printk(" e_shentsize[%04x] e_shnum[%04x] e_shstrndx[%04x]\n",
96 (unsigned short) ehp->e_shentsize, (unsigned short) ehp->e_shnum,
97 (unsigned short) ehp->e_shstrndx);
98}
99
100static void print_phdr(int i, struct elf_phdr *ep)
101{
102 printk("PHDR[%d]: p_type[%s] p_offset[%08lx] p_vaddr[%08lx] "
103 "p_paddr[%08lx]\n", i, get_elf_p_type(ep->p_type),
104 (unsigned long) ep->p_offset, (unsigned long) ep->p_vaddr,
105 (unsigned long) ep->p_paddr);
106 printk(" p_filesz[%08lx] p_memsz[%08lx] p_flags[%08lx] "
107 "p_align[%08lx]\n", (unsigned long) ep->p_filesz,
108 (unsigned long) ep->p_memsz, (unsigned long) ep->p_flags,
109 (unsigned long) ep->p_align);
110}
111
112static void dump_phdrs(struct elf_phdr *ep, int pnum)
113{
114 int i;
115
116 for(i = 0; i < pnum; i++, ep++) {
117 if((ep->p_type == PT_LOAD) ||
118 (ep->p_type == PT_INTERP) ||
119 (ep->p_type == PT_PHDR))
120 print_phdr(i, ep);
121 }
122}
123#endif /* (DEBUG_ELF) */
124
125static void set_brk(unsigned long start, unsigned long end)
126{
127 start = PAGE_ALIGN(start);
128 end = PAGE_ALIGN(end);
129 if (end <= start)
130 return;
131 down_write(&current->mm->mmap_sem);
132 do_brk(start, end - start);
133 up_write(&current->mm->mmap_sem);
134}
135
136
137/* We need to explicitly zero any fractional pages
138 * after the data section (i.e. bss). This would
139 * contain the junk from the file that should not
140 * be in memory.
141 */
142static void padzero(unsigned long elf_bss)
143{
144 unsigned long nbyte;
145
146 nbyte = elf_bss & (PAGE_SIZE-1);
147 if (nbyte) {
148 nbyte = PAGE_SIZE - nbyte;
149 clear_user((void *) elf_bss, nbyte);
150 }
151}
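/*
 * For instance, with 4 kB pages (an assumption; the page size is
 * configuration dependent) an elf_bss of 0x0804a123 leaves
 * 0x1000 - 0x123 = 0xedd junk bytes in its final page, and padzero()
 * clears exactly those.
 */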
152
153unsigned long * create_irix_tables(char * p, int argc, int envc,
154 struct elfhdr * exec, unsigned int load_addr,
155 unsigned int interp_load_addr,
156 struct pt_regs *regs, struct elf_phdr *ephdr)
157{
158 elf_addr_t *argv;
159 elf_addr_t *envp;
160 elf_addr_t *sp, *csp;
161
162#ifdef DEBUG_ELF
163 printk("create_irix_tables: p[%p] argc[%d] envc[%d] "
164 "load_addr[%08x] interp_load_addr[%08x]\n",
165 p, argc, envc, load_addr, interp_load_addr);
166#endif
167 sp = (elf_addr_t *) (~15UL & (unsigned long) p);
168 csp = sp;
169 csp -= exec ? DLINFO_ITEMS*2 : 2;
170 csp -= envc+1;
171 csp -= argc+1;
172 csp -= 1; /* argc itself */
173 if ((unsigned long)csp & 15UL) {
174 sp -= (16UL - ((unsigned long)csp & 15UL)) / sizeof(*sp);
175 }
176
177 /*
178 * Put the ELF interpreter info on the stack
179 */
180#define NEW_AUX_ENT(nr, id, val) \
181 __put_user ((id), sp+(nr*2)); \
182 __put_user ((val), sp+(nr*2+1)); \
183
184 sp -= 2;
185 NEW_AUX_ENT(0, AT_NULL, 0);
186
187 if(exec) {
188 sp -= 11*2;
189
190 NEW_AUX_ENT (0, AT_PHDR, load_addr + exec->e_phoff);
191 NEW_AUX_ENT (1, AT_PHENT, sizeof (struct elf_phdr));
192 NEW_AUX_ENT (2, AT_PHNUM, exec->e_phnum);
193 NEW_AUX_ENT (3, AT_PAGESZ, ELF_EXEC_PAGESIZE);
194 NEW_AUX_ENT (4, AT_BASE, interp_load_addr);
195 NEW_AUX_ENT (5, AT_FLAGS, 0);
196 NEW_AUX_ENT (6, AT_ENTRY, (elf_addr_t) exec->e_entry);
197 NEW_AUX_ENT (7, AT_UID, (elf_addr_t) current->uid);
198 NEW_AUX_ENT (8, AT_EUID, (elf_addr_t) current->euid);
199 NEW_AUX_ENT (9, AT_GID, (elf_addr_t) current->gid);
200 NEW_AUX_ENT (10, AT_EGID, (elf_addr_t) current->egid);
201 }
202#undef NEW_AUX_ENT
203
204 sp -= envc+1;
205 envp = sp;
206 sp -= argc+1;
207 argv = sp;
208
209 __put_user((elf_addr_t)argc,--sp);
210 current->mm->arg_start = (unsigned long) p;
211 while (argc-->0) {
212 __put_user((unsigned long)p,argv++);
213 p += strlen_user(p);
214 }
215 __put_user((unsigned long) NULL, argv);
216 current->mm->arg_end = current->mm->env_start = (unsigned long) p;
217 while (envc-->0) {
218 __put_user((unsigned long)p,envp++);
219 p += strlen_user(p);
220 }
221 __put_user((unsigned long) NULL, envp);
222 current->mm->env_end = (unsigned long) p;
223 return sp;
224}
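/*
 * The layout built above, reading upwards from the returned stack
 * pointer, is: argc | argv[0..argc-1], NULL | envp[0..envc-1], NULL |
 * the auxiliary vector (eleven (id, value) pairs from AT_PHDR to
 * AT_EGID when an ELF header was supplied) | the terminating AT_NULL
 * pair | the argument and environment strings that p points into.
 */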
225
226
227/* This is much more generalized than the library routine read function,
228 * so we keep this separate. Technically the library read function
229 * is only provided so that we can read a.out libraries that have
230 * an ELF header.
231 */
232static unsigned int load_irix_interp(struct elfhdr * interp_elf_ex,
233 struct file * interpreter,
234 unsigned int *interp_load_addr)
235{
236 struct elf_phdr *elf_phdata = NULL;
237 struct elf_phdr *eppnt;
238 unsigned int len;
239 unsigned int load_addr;
240 int elf_bss;
241 int retval;
242 unsigned int last_bss;
243 int error;
244 int i;
245 unsigned int k;
246
247 elf_bss = 0;
248 last_bss = 0;
249 error = load_addr = 0;
250
251#ifdef DEBUG_ELF
252 print_elfhdr(interp_elf_ex);
253#endif
254
255 /* First of all, some simple consistency checks */
256 if ((interp_elf_ex->e_type != ET_EXEC &&
257 interp_elf_ex->e_type != ET_DYN) ||
258 !irix_elf_check_arch(interp_elf_ex) ||
259 !interpreter->f_op->mmap) {
260 printk("IRIX interp has bad e_type %d\n", interp_elf_ex->e_type);
261 return 0xffffffff;
262 }
263
264 /* Now read in all of the header information */
265 if(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > PAGE_SIZE) {
266 printk("IRIX interp header bigger than a page (%d)\n",
267 (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum));
268 return 0xffffffff;
269 }
270
271 elf_phdata = kmalloc(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum,
272 GFP_KERNEL);
273
274 if(!elf_phdata) {
275 printk("Cannot kmalloc phdata for IRIX interp.\n");
276 return 0xffffffff;
277 }
278
279 /* If the size of this structure has changed, then punt, since
280 * we will be doing the wrong thing.
281 */
282 if(interp_elf_ex->e_phentsize != 32) {
283 printk("IRIX interp e_phentsize == %d != 32 ",
284 interp_elf_ex->e_phentsize);
285 kfree(elf_phdata);
286 return 0xffffffff;
287 }
288
289 retval = kernel_read(interpreter, interp_elf_ex->e_phoff,
290 (char *) elf_phdata,
291 sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
292
293#ifdef DEBUG_ELF
294 dump_phdrs(elf_phdata, interp_elf_ex->e_phnum);
295#endif
296
297 eppnt = elf_phdata;
298 for(i=0; i<interp_elf_ex->e_phnum; i++, eppnt++) {
299 if(eppnt->p_type == PT_LOAD) {
300 int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
301 int elf_prot = 0;
302 unsigned long vaddr = 0;
303 if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
304 if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
305 if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
306 elf_type |= MAP_FIXED;
307 vaddr = eppnt->p_vaddr;
308
309#ifdef DEBUG_ELF
310 printk("INTERP do_mmap(%p, %08lx, %08lx, %08lx, %08lx, %08lx) ",
311 interpreter, vaddr,
312 (unsigned long) (eppnt->p_filesz + (eppnt->p_vaddr & 0xfff)),
313 (unsigned long) elf_prot, (unsigned long) elf_type,
314 (unsigned long) (eppnt->p_offset & 0xfffff000));
315#endif
316 down_write(&current->mm->mmap_sem);
317 error = do_mmap(interpreter, vaddr,
318 eppnt->p_filesz + (eppnt->p_vaddr & 0xfff),
319 elf_prot, elf_type,
320 eppnt->p_offset & 0xfffff000);
321 up_write(&current->mm->mmap_sem);
322
323 if(error < 0 && error > -1024) {
324 printk("Aieee IRIX interp mmap error=%d\n", error);
325 break; /* Real error */
326 }
327#ifdef DEBUG_ELF
328 printk("error=%08lx ", (unsigned long) error);
329#endif
330 if(!load_addr && interp_elf_ex->e_type == ET_DYN) {
331 load_addr = error;
332#ifdef DEBUG_ELF
333 printk("load_addr = error ");
334#endif
335 }
336
337 /* Find the end of the file mapping for this phdr, and keep
338 * track of the largest address we see for this.
339 */
340 k = eppnt->p_vaddr + eppnt->p_filesz;
341 if(k > elf_bss) elf_bss = k;
342
343 /* Do the same thing for the memory mapping - between
344 * elf_bss and last_bss is the bss section.
345 */
346 k = eppnt->p_memsz + eppnt->p_vaddr;
347 if(k > last_bss) last_bss = k;
348#ifdef DEBUG_ELF
349 printk("\n");
350#endif
351 }
352 }
353
354 /* Now use mmap to map the library into memory. */
355 if(error < 0 && error > -1024) {
356#ifdef DEBUG_ELF
357 printk("got error %d\n", error);
358#endif
359 kfree(elf_phdata);
360 return 0xffffffff;
361 }
362
363 /* Now fill out the bss section. First pad the last page up
364 * to the page boundary, and then perform a mmap to make sure
365 * that there are zero-mapped pages up to and including the
366 * last bss page.
367 */
368#ifdef DEBUG_ELF
369 printk("padzero(%08lx) ", (unsigned long) (elf_bss));
370#endif
371 padzero(elf_bss);
372 len = (elf_bss + 0xfff) & 0xfffff000; /* What we have mapped so far */
373
374#ifdef DEBUG_ELF
375 printk("last_bss[%08lx] len[%08lx]\n", (unsigned long) last_bss,
376 (unsigned long) len);
377#endif
378
379 /* Map the last of the bss segment */
380 if (last_bss > len) {
381 down_write(&current->mm->mmap_sem);
382 do_brk(len, (last_bss - len));
383 up_write(&current->mm->mmap_sem);
384 }
385 kfree(elf_phdata);
386
387 *interp_load_addr = load_addr;
388 return ((unsigned int) interp_elf_ex->e_entry);
389}
390
391/* Check sanity of IRIX elf executable header. */
392static int verify_binary(struct elfhdr *ehp, struct linux_binprm *bprm)
393{
394 if (memcmp(ehp->e_ident, ELFMAG, SELFMAG) != 0)
395 return -ENOEXEC;
396
397 /* First of all, some simple consistency checks */
398 if((ehp->e_type != ET_EXEC && ehp->e_type != ET_DYN) ||
399 !irix_elf_check_arch(ehp) || !bprm->file->f_op->mmap) {
400 return -ENOEXEC;
401 }
402
403 /* Only support MIPS ARCH2 or greater IRIX binaries for now. */
404 if(!(ehp->e_flags & EF_MIPS_ARCH) && !(ehp->e_flags & 0x04)) {
405 return -ENOEXEC;
406 }
407
408 /* XXX Don't support N32 or 64bit binaries yet because they can
409 * XXX and do execute 64 bit instructions and expect all registers
410 * XXX to be 64 bit as well. We need to make the kernel save
411 * XXX all registers as 64bits on cpu's capable of this at
412 * XXX exception time plus frob the XTLB exception vector.
413 */
414 if((ehp->e_flags & 0x20)) {
415 return -ENOEXEC;
416 }
417
418 return 0; /* It's ok. */
419}
420
421#define IRIX_INTERP_PREFIX "/usr/gnemul/irix"
422
423/* Look for an IRIX ELF interpreter. */
424static inline int look_for_irix_interpreter(char **name,
425 struct file **interpreter,
426 struct elfhdr *interp_elf_ex,
427 struct elf_phdr *epp,
428 struct linux_binprm *bprm, int pnum)
429{
430 int i;
431 int retval = -EINVAL;
432 struct file *file = NULL;
433
434 *name = NULL;
435 for(i = 0; i < pnum; i++, epp++) {
436 if (epp->p_type != PT_INTERP)
437 continue;
438
439 /* It is illegal to have two interpreters for one executable. */
440 if (*name != NULL)
441 goto out;
442
443 *name = kmalloc((epp->p_filesz + strlen(IRIX_INTERP_PREFIX)),
444 GFP_KERNEL);
445 if (!*name)
446 return -ENOMEM;
447
448 strcpy(*name, IRIX_INTERP_PREFIX);
449 retval = kernel_read(bprm->file, epp->p_offset, (*name + 16),
450 epp->p_filesz);
451 if (retval < 0)
452 goto out;
453
454 file = open_exec(*name);
455 if (IS_ERR(file)) {
456 retval = PTR_ERR(file);
457 goto out;
458 }
459 retval = kernel_read(file, 0, bprm->buf, 128);
460 if (retval < 0)
461 goto dput_and_out;
462
463 *interp_elf_ex = *(struct elfhdr *) bprm->buf;
464 }
465 *interpreter = file;
466 return 0;
467
468dput_and_out:
469 fput(file);
470out:
471 kfree(*name);
472 return retval;
473}
474
475static inline int verify_irix_interpreter(struct elfhdr *ihp)
476{
477 if (memcmp(ihp->e_ident, ELFMAG, SELFMAG) != 0)
478 return -ELIBBAD;
479 return 0;
480}
481
482#define EXEC_MAP_FLAGS (MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE)
483
484static inline void map_executable(struct file *fp, struct elf_phdr *epp, int pnum,
485 unsigned int *estack, unsigned int *laddr,
486 unsigned int *scode, unsigned int *ebss,
487 unsigned int *ecode, unsigned int *edata,
488 unsigned int *ebrk)
489{
490 unsigned int tmp;
491 int i, prot;
492
493 for(i = 0; i < pnum; i++, epp++) {
494 if(epp->p_type != PT_LOAD)
495 continue;
496
497 /* Map it. */
498 prot = (epp->p_flags & PF_R) ? PROT_READ : 0;
499 prot |= (epp->p_flags & PF_W) ? PROT_WRITE : 0;
500 prot |= (epp->p_flags & PF_X) ? PROT_EXEC : 0;
501 down_write(&current->mm->mmap_sem);
502 (void) do_mmap(fp, (epp->p_vaddr & 0xfffff000),
503 (epp->p_filesz + (epp->p_vaddr & 0xfff)),
504 prot, EXEC_MAP_FLAGS,
505 (epp->p_offset & 0xfffff000));
506 up_write(&current->mm->mmap_sem);
507
508 /* Fixup location tracking vars. */
509 if((epp->p_vaddr & 0xfffff000) < *estack)
510 *estack = (epp->p_vaddr & 0xfffff000);
511 if(!*laddr)
512 *laddr = epp->p_vaddr - epp->p_offset;
513 if(epp->p_vaddr < *scode)
514 *scode = epp->p_vaddr;
515
516 tmp = epp->p_vaddr + epp->p_filesz;
517 if(tmp > *ebss)
518 *ebss = tmp;
519 if((epp->p_flags & PF_X) && *ecode < tmp)
520 *ecode = tmp;
521 if(*edata < tmp)
522 *edata = tmp;
523
524 tmp = epp->p_vaddr + epp->p_memsz;
525 if(tmp > *ebrk)
526 *ebrk = tmp;
527 }
528
529}
530
531static inline int map_interpreter(struct elf_phdr *epp, struct elfhdr *ihp,
532 struct file *interp, unsigned int *iladdr,
533 int pnum, mm_segment_t old_fs,
534 unsigned int *eentry)
535{
536 int i;
537
538 *eentry = 0xffffffff;
539 for(i = 0; i < pnum; i++, epp++) {
540 if(epp->p_type != PT_INTERP)
541 continue;
542
543 /* We should have fielded this error elsewhere... */
544 if(*eentry != 0xffffffff)
545 return -1;
546
547 set_fs(old_fs);
548 *eentry = load_irix_interp(ihp, interp, iladdr);
549 old_fs = get_fs();
550 set_fs(get_ds());
551
552 fput(interp);
553
554 if (*eentry == 0xffffffff)
555 return -1;
556 }
557 return 0;
558}
559
560/*
561 * IRIX maps a page at 0x200000 that holds information about the
562 * process and the system, here we map the page and fill the
563 * structure
564 */
565void irix_map_prda_page (void)
566{
567 unsigned long v;
568 struct prda *pp;
569
570 down_write(&current->mm->mmap_sem);
571 v = do_brk (PRDA_ADDRESS, PAGE_SIZE);
572 up_write(&current->mm->mmap_sem);
573
574 if (v < 0)
575 return;
576
577 pp = (struct prda *) v;
578 pp->prda_sys.t_pid = current->pid;
579 pp->prda_sys.t_prid = read_c0_prid();
580 pp->prda_sys.t_rpid = current->pid;
581
582 /* We leave the rest set to zero */
583}
584
585
586
587/* These are the functions used to load ELF style executables and shared
588 * libraries. There is no binary dependent code anywhere else.
589 */
590static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs)
591{
592 struct elfhdr elf_ex, interp_elf_ex;
593 struct file *interpreter;
594 struct elf_phdr *elf_phdata, *elf_ihdr, *elf_ephdr;
595 unsigned int load_addr, elf_bss, elf_brk;
596 unsigned int elf_entry, interp_load_addr = 0;
597 unsigned int start_code, end_code, end_data, elf_stack;
598 int retval, has_interp, has_ephdr, size, i;
599 char *elf_interpreter;
600 mm_segment_t old_fs;
601
602 load_addr = 0;
603 has_interp = has_ephdr = 0;
604 elf_ihdr = elf_ephdr = 0;
605 elf_ex = *((struct elfhdr *) bprm->buf);
606 retval = -ENOEXEC;
607
608 if (verify_binary(&elf_ex, bprm))
609 goto out;
610
611#ifdef DEBUG_ELF
612 print_elfhdr(&elf_ex);
613#endif
614
615 /* Now read in all of the header information */
616 size = elf_ex.e_phentsize * elf_ex.e_phnum;
617 if (size > 65536)
618 goto out;
619 elf_phdata = kmalloc(size, GFP_KERNEL);
620 if (elf_phdata == NULL) {
621 retval = -ENOMEM;
622 goto out;
623 }
624
625 retval = kernel_read(bprm->file, elf_ex.e_phoff, (char *)elf_phdata, size);
626
627 if (retval < 0)
628 goto out_free_ph;
629
630#ifdef DEBUG_ELF
631 dump_phdrs(elf_phdata, elf_ex.e_phnum);
632#endif
633
634 /* Set some things for later. */
635 for(i = 0; i < elf_ex.e_phnum; i++) {
636 switch(elf_phdata[i].p_type) {
637 case PT_INTERP:
638 has_interp = 1;
639 elf_ihdr = &elf_phdata[i];
640 break;
641 case PT_PHDR:
642 has_ephdr = 1;
643 elf_ephdr = &elf_phdata[i];
644 break;
645 };
646 }
647#ifdef DEBUG_ELF
648 printk("\n");
649#endif
650
651 elf_bss = 0;
652 elf_brk = 0;
653
654 elf_stack = 0xffffffff;
655 elf_interpreter = NULL;
656 start_code = 0xffffffff;
657 end_code = 0;
658 end_data = 0;
659
660 retval = look_for_irix_interpreter(&elf_interpreter,
661 &interpreter,
662 &interp_elf_ex, elf_phdata, bprm,
663 elf_ex.e_phnum);
664 if (retval)
665 goto out_free_file;
666
667 if (elf_interpreter) {
668 retval = verify_irix_interpreter(&interp_elf_ex);
669 if(retval)
670 goto out_free_interp;
671 }
672
673 /* OK, we are done with that, now set up the arg stuff,
674 * and then start this sucker up.
675 */
676 retval = -E2BIG;
677 if (!bprm->sh_bang && !bprm->p)
678 goto out_free_interp;
679
680 /* Flush all traces of the currently running executable */
681 retval = flush_old_exec(bprm);
682 if (retval)
683 goto out_free_dentry;
684
685 /* OK, This is the point of no return */
686 current->mm->end_data = 0;
687 current->mm->end_code = 0;
688 current->mm->mmap = NULL;
689 current->flags &= ~PF_FORKNOEXEC;
690 elf_entry = (unsigned int) elf_ex.e_entry;
691
692 /* Do this so that we can load the interpreter, if need be. We will
693 * change some of these later.
694 */
695 set_mm_counter(current->mm, rss, 0);
696 setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT);
697 current->mm->start_stack = bprm->p;
698
699 /* At this point, we assume that the image should be loaded at
700 * fixed address, not at a variable address.
701 */
702 old_fs = get_fs();
703 set_fs(get_ds());
704
705 map_executable(bprm->file, elf_phdata, elf_ex.e_phnum, &elf_stack,
706 &load_addr, &start_code, &elf_bss, &end_code,
707 &end_data, &elf_brk);
708
709 if(elf_interpreter) {
710 retval = map_interpreter(elf_phdata, &interp_elf_ex,
711 interpreter, &interp_load_addr,
712 elf_ex.e_phnum, old_fs, &elf_entry);
713 kfree(elf_interpreter);
714 if(retval) {
715 set_fs(old_fs);
716 printk("Unable to load IRIX ELF interpreter\n");
717 send_sig(SIGSEGV, current, 0);
718 retval = 0;
719 goto out_free_file;
720 }
721 }
722
723 set_fs(old_fs);
724
725 kfree(elf_phdata);
726 set_personality(PER_IRIX32);
727 set_binfmt(&irix_format);
728 compute_creds(bprm);
729 current->flags &= ~PF_FORKNOEXEC;
730 bprm->p = (unsigned long)
731 create_irix_tables((char *)bprm->p, bprm->argc, bprm->envc,
732 (elf_interpreter ? &elf_ex : NULL),
733 load_addr, interp_load_addr, regs, elf_ephdr);
734 current->mm->start_brk = current->mm->brk = elf_brk;
735 current->mm->end_code = end_code;
736 current->mm->start_code = start_code;
737 current->mm->end_data = end_data;
738 current->mm->start_stack = bprm->p;
739
740 /* Calling set_brk effectively mmaps the pages that we need for the
741 * bss and break sections.
742 */
743 set_brk(elf_bss, elf_brk);
744
745 /*
746 * IRIX maps a page at 0x200000 which holds some system
747 * information. Programs depend on this.
748 */
749 irix_map_prda_page ();
750
751 padzero(elf_bss);
752
753#ifdef DEBUG_ELF
754 printk("(start_brk) %lx\n" , (long) current->mm->start_brk);
755 printk("(end_code) %lx\n" , (long) current->mm->end_code);
756 printk("(start_code) %lx\n" , (long) current->mm->start_code);
757 printk("(end_data) %lx\n" , (long) current->mm->end_data);
758 printk("(start_stack) %lx\n" , (long) current->mm->start_stack);
759 printk("(brk) %lx\n" , (long) current->mm->brk);
760#endif
761
762#if 0 /* XXX No fucking way dude... */
763 /* Why this, you ask??? Well SVr4 maps page 0 as read-only,
764 * and some applications "depend" upon this behavior.
765 * Since we do not have the power to recompile these, we
766 * emulate the SVr4 behavior. Sigh.
767 */
768 down_write(&current->mm->mmap_sem);
769 (void) do_mmap(NULL, 0, 4096, PROT_READ | PROT_EXEC,
770 MAP_FIXED | MAP_PRIVATE, 0);
771 up_write(&current->mm->mmap_sem);
772#endif
773
774 start_thread(regs, elf_entry, bprm->p);
775 if (current->ptrace & PT_PTRACED)
776 send_sig(SIGTRAP, current, 0);
777 return 0;
778out:
779 return retval;
780
781out_free_dentry:
782 allow_write_access(interpreter);
783 fput(interpreter);
784out_free_interp:
785 if (elf_interpreter)
786 kfree(elf_interpreter);
787out_free_file:
788out_free_ph:
789 kfree (elf_phdata);
790 goto out;
791}
792
793/* This is really simpleminded and specialized - we are loading an
794 * a.out library that is given an ELF header.
795 */
796static int load_irix_library(struct file *file)
797{
798 struct elfhdr elf_ex;
799 struct elf_phdr *elf_phdata = NULL;
800 unsigned int len = 0;
801 int elf_bss = 0;
802 int retval;
803 unsigned int bss;
804 int error;
806 int i, j, k;
806
807 error = kernel_read(file, 0, (char *) &elf_ex, sizeof(elf_ex));
808 if (error != sizeof(elf_ex))
809 return -ENOEXEC;
810
811 if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
812 return -ENOEXEC;
813
814 /* First of all, some simple consistency checks. */
815 if(elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
816 !irix_elf_check_arch(&elf_ex) || !file->f_op->mmap)
817 return -ENOEXEC;
818
819 /* Now read in all of the header information. */
820 if(sizeof(struct elf_phdr) * elf_ex.e_phnum > PAGE_SIZE)
821 return -ENOEXEC;
822
823 elf_phdata = kmalloc(sizeof(struct elf_phdr) * elf_ex.e_phnum, GFP_KERNEL);
824 if (elf_phdata == NULL)
825 return -ENOMEM;
826
827 retval = kernel_read(file, elf_ex.e_phoff, (char *) elf_phdata,
828 sizeof(struct elf_phdr) * elf_ex.e_phnum);
829
830 j = 0;
831 for(i=0; i<elf_ex.e_phnum; i++)
832 if((elf_phdata + i)->p_type == PT_LOAD) j++;
833
834 if(j != 1) {
835 kfree(elf_phdata);
836 return -ENOEXEC;
837 }
838
839 while(elf_phdata->p_type != PT_LOAD) elf_phdata++;
840
841 /* Now use mmap to map the library into memory. */
842 down_write(&current->mm->mmap_sem);
843 error = do_mmap(file,
844 elf_phdata->p_vaddr & 0xfffff000,
845 elf_phdata->p_filesz + (elf_phdata->p_vaddr & 0xfff),
846 PROT_READ | PROT_WRITE | PROT_EXEC,
847 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
848 elf_phdata->p_offset & 0xfffff000);
849 up_write(&current->mm->mmap_sem);
850
851 k = elf_phdata->p_vaddr + elf_phdata->p_filesz;
852 if (k > elf_bss) elf_bss = k;
853
854 if (error != (elf_phdata->p_vaddr & 0xfffff000)) {
855 kfree(elf_phdata);
856 return error;
857 }
858
859 padzero(elf_bss);
860
861 len = (elf_phdata->p_filesz + elf_phdata->p_vaddr+ 0xfff) & 0xfffff000;
862 bss = elf_phdata->p_memsz + elf_phdata->p_vaddr;
863 if (bss > len) {
864 down_write(&current->mm->mmap_sem);
865 do_brk(len, bss-len);
866 up_write(&current->mm->mmap_sem);
867 }
868 kfree(elf_phdata);
869 return 0;
870}
871
872/* Called through irix_syssgi() to map an elf image given an FD,
873 * a phdr ptr USER_PHDRP in userspace, and a count CNT telling how many
874 * phdrs there are in the USER_PHDRP array. We return the vaddr the
875 * first phdr was successfully mapped to.
876 */
877unsigned long irix_mapelf(int fd, struct elf_phdr *user_phdrp, int cnt)
878{
879 struct elf_phdr *hp;
880 struct file *filp;
881 int i, retval;
882
883#ifdef DEBUG_ELF
884 printk("irix_mapelf: fd[%d] user_phdrp[%p] cnt[%d]\n",
885 fd, user_phdrp, cnt);
886#endif
887
888 /* First get the verification out of the way. */
889 hp = user_phdrp;
890 if (!access_ok(VERIFY_READ, hp, (sizeof(struct elf_phdr) * cnt))) {
891#ifdef DEBUG_ELF
892 printk("irix_mapelf: access_ok fails!\n");
893#endif
894 return -EFAULT;
895 }
896
897#ifdef DEBUG_ELF
898 dump_phdrs(user_phdrp, cnt);
899#endif
900
901 for(i = 0; i < cnt; i++, hp++)
902 if(hp->p_type != PT_LOAD) {
903 printk("irix_mapelf: One section is not PT_LOAD!\n");
904 return -ENOEXEC;
905 }
906
907 filp = fget(fd);
908 if (!filp)
909 return -EACCES;
910 if(!filp->f_op) {
911 printk("irix_mapelf: Bogon filp!\n");
912 fput(filp);
913 return -EACCES;
914 }
915
916 hp = user_phdrp;
917 for(i = 0; i < cnt; i++, hp++) {
918 int prot;
919
920 prot = (hp->p_flags & PF_R) ? PROT_READ : 0;
921 prot |= (hp->p_flags & PF_W) ? PROT_WRITE : 0;
922 prot |= (hp->p_flags & PF_X) ? PROT_EXEC : 0;
923 down_write(&current->mm->mmap_sem);
924 retval = do_mmap(filp, (hp->p_vaddr & 0xfffff000),
925 (hp->p_filesz + (hp->p_vaddr & 0xfff)),
926 prot, (MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE),
927 (hp->p_offset & 0xfffff000));
928 up_write(&current->mm->mmap_sem);
929
930 if(retval != (hp->p_vaddr & 0xfffff000)) {
931 printk("irix_mapelf: do_mmap fails with %d!\n", retval);
932 fput(filp);
933 return retval;
934 }
935 }
936
937#ifdef DEBUG_ELF
938 printk("irix_mapelf: Success, returning %08lx\n",
939 (unsigned long) user_phdrp->p_vaddr);
940#endif
941 fput(filp);
942 return user_phdrp->p_vaddr;
943}
944
945/*
946 * ELF core dumper
947 *
948 * Modelled on fs/exec.c:aout_core_dump()
949 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
950 */
951
952/* These are the only things you should do on a core-file: use only these
953 * functions to write out all the necessary info.
954 */
955static int dump_write(struct file *file, const void *addr, int nr)
956{
957 return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
958}
959
960static int dump_seek(struct file *file, off_t off)
961{
962 if (file->f_op->llseek) {
963 if (file->f_op->llseek(file, off, 0) != off)
964 return 0;
965 } else
966 file->f_pos = off;
967 return 1;
968}
969
970/* Decide whether a segment is worth dumping; default is yes to be
971 * sure (missing info is worse than too much; etc).
972 * Personally I'd include everything, and use the coredump limit...
973 *
974 * I think we should skip something. But I am not sure how. H.J.
975 */
976static inline int maydump(struct vm_area_struct *vma)
977{
978 if (!(vma->vm_flags & (VM_READ|VM_WRITE|VM_EXEC)))
979 return 0;
980#if 1
981 if (vma->vm_flags & (VM_WRITE|VM_GROWSUP|VM_GROWSDOWN))
982 return 1;
983 if (vma->vm_flags & (VM_READ|VM_EXEC|VM_EXECUTABLE|VM_SHARED))
984 return 0;
985#endif
986 return 1;
987}
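/*
 * In short: mappings with no access at all are never dumped, anything
 * writable or stack-like (VM_GROWSUP/VM_GROWSDOWN) always is, and the
 * remaining read-only, executable or shared file mappings are skipped.
 */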
988
989#define roundup(x, y) ((((x)+((y)-1))/(y))*(y))
990
991/* An ELF note in memory. */
992struct memelfnote
993{
994 const char *name;
995 int type;
996 unsigned int datasz;
997 void *data;
998};
999
1000static int notesize(struct memelfnote *en)
1001{
1002 int sz;
1003
1004 sz = sizeof(struct elf_note);
1005 sz += roundup(strlen(en->name), 4);
1006 sz += roundup(en->datasz, 4);
1007
1008 return sz;
1009}
1010
1011/* #define DEBUG */
1012
1013#define DUMP_WRITE(addr, nr) \
1014 if (!dump_write(file, (addr), (nr))) \
1015 goto end_coredump;
1016#define DUMP_SEEK(off) \
1017 if (!dump_seek(file, (off))) \
1018 goto end_coredump;
1019
1020static int writenote(struct memelfnote *men, struct file *file)
1021{
1022 struct elf_note en;
1023
1024 en.n_namesz = strlen(men->name);
1025 en.n_descsz = men->datasz;
1026 en.n_type = men->type;
1027
1028 DUMP_WRITE(&en, sizeof(en));
1029 DUMP_WRITE(men->name, en.n_namesz);
1030 /* XXX - cast from long long to long to avoid need for libgcc.a */
1031 DUMP_SEEK(roundup((unsigned long)file->f_pos, 4)); /* XXX */
1032 DUMP_WRITE(men->data, men->datasz);
1033 DUMP_SEEK(roundup((unsigned long)file->f_pos, 4)); /* XXX */
1034
1035 return 1;
1036
1037end_coredump:
1038 return 0;
1039}
1040#undef DUMP_WRITE
1041#undef DUMP_SEEK
1042
1043#define DUMP_WRITE(addr, nr) \
1044 if (!dump_write(file, (addr), (nr))) \
1045 goto end_coredump;
1046#define DUMP_SEEK(off) \
1047 if (!dump_seek(file, (off))) \
1048 goto end_coredump;
1049
1050/* Actual dumper.
1051 *
1052 * This is a two-pass process; first we find the offsets of the bits,
1053 * and then they are actually written out. If we run out of core limit
1054 * we just truncate.
1055 */
1056static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file)
1057{
1058 int has_dumped = 0;
1059 mm_segment_t fs;
1060 int segs;
1061 int i;
1062 size_t size;
1063 struct vm_area_struct *vma;
1064 struct elfhdr elf;
1065 off_t offset = 0, dataoff;
1066 int limit = current->signal->rlim[RLIMIT_CORE].rlim_cur;
1067 int numnote = 4;
1068 struct memelfnote notes[4];
1069 struct elf_prstatus prstatus; /* NT_PRSTATUS */
1070 elf_fpregset_t fpu; /* NT_PRFPREG */
1071 struct elf_prpsinfo psinfo; /* NT_PRPSINFO */
1072
1073 /* Count what's needed to dump, up to the limit of coredump size. */
1074 segs = 0;
1075 size = 0;
1076 for(vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
1077 if (maydump(vma))
1078 {
1079 int sz = vma->vm_end-vma->vm_start;
1080
1081 if (size+sz >= limit)
1082 break;
1083 else
1084 size += sz;
1085 }
1086
1087 segs++;
1088 }
1089#ifdef DEBUG
1090 printk("irix_core_dump: %d segs taking %d bytes\n", segs, size);
1091#endif
1092
1093 /* Set up header. */
1094 memcpy(elf.e_ident, ELFMAG, SELFMAG);
1095 elf.e_ident[EI_CLASS] = ELFCLASS32;
1096 elf.e_ident[EI_DATA] = ELFDATA2LSB;
1097 elf.e_ident[EI_VERSION] = EV_CURRENT;
1098 elf.e_ident[EI_OSABI] = ELF_OSABI;
1099 memset(elf.e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);
1100
1101 elf.e_type = ET_CORE;
1102 elf.e_machine = ELF_ARCH;
1103 elf.e_version = EV_CURRENT;
1104 elf.e_entry = 0;
1105 elf.e_phoff = sizeof(elf);
1106 elf.e_shoff = 0;
1107 elf.e_flags = 0;
1108 elf.e_ehsize = sizeof(elf);
1109 elf.e_phentsize = sizeof(struct elf_phdr);
1110 elf.e_phnum = segs+1; /* Include notes. */
1111 elf.e_shentsize = 0;
1112 elf.e_shnum = 0;
1113 elf.e_shstrndx = 0;
1114
1115 fs = get_fs();
1116 set_fs(KERNEL_DS);
1117
1118 has_dumped = 1;
1119 current->flags |= PF_DUMPCORE;
1120
1121 DUMP_WRITE(&elf, sizeof(elf));
1122 offset += sizeof(elf); /* Elf header. */
1123 offset += (segs+1) * sizeof(struct elf_phdr); /* Program headers. */
1124
1125 /* Set up the notes in similar form to SVR4 core dumps made
1126 * with info from their /proc.
1127 */
1128 memset(&psinfo, 0, sizeof(psinfo));
1129 memset(&prstatus, 0, sizeof(prstatus));
1130
1131 notes[0].name = "CORE";
1132 notes[0].type = NT_PRSTATUS;
1133 notes[0].datasz = sizeof(prstatus);
1134 notes[0].data = &prstatus;
1135 prstatus.pr_info.si_signo = prstatus.pr_cursig = signr;
1136 prstatus.pr_sigpend = current->pending.signal.sig[0];
1137 prstatus.pr_sighold = current->blocked.sig[0];
1138 psinfo.pr_pid = prstatus.pr_pid = current->pid;
1139 psinfo.pr_ppid = prstatus.pr_ppid = current->parent->pid;
1140 psinfo.pr_pgrp = prstatus.pr_pgrp = process_group(current);
1141 psinfo.pr_sid = prstatus.pr_sid = current->signal->session;
1142 if (current->pid == current->tgid) {
1143 /*
1144 * This is the record for the group leader. Add in the
1145 * cumulative times of previous dead threads. This total
1146 * won't include the time of each live thread whose state
1147 * is included in the core dump. The final total reported
1148 * to our parent process when it calls wait4 will include
1149 * those sums as well as the little bit more time it takes
1150 * this and each other thread to finish dying after the
1151 * core dump synchronization phase.
1152 */
1153 jiffies_to_timeval(current->utime + current->signal->utime,
1154 &prstatus.pr_utime);
1155 jiffies_to_timeval(current->stime + current->signal->stime,
1156 &prstatus.pr_stime);
1157 } else {
1158 jiffies_to_timeval(current->utime, &prstatus.pr_utime);
1159 jiffies_to_timeval(current->stime, &prstatus.pr_stime);
1160 }
1161 jiffies_to_timeval(current->signal->cutime, &prstatus.pr_cutime);
1162 jiffies_to_timeval(current->signal->cstime, &prstatus.pr_cstime);
1163
1164 if (sizeof(elf_gregset_t) != sizeof(struct pt_regs)) {
 1165		printk("sizeof(elf_gregset_t) (%d) != sizeof(struct pt_regs) "
 1166		       "(%d)\n", (int) sizeof(elf_gregset_t), (int) sizeof(struct pt_regs));
1167 } else {
1168 *(struct pt_regs *)&prstatus.pr_reg = *regs;
1169 }
1170
1171 notes[1].name = "CORE";
1172 notes[1].type = NT_PRPSINFO;
1173 notes[1].datasz = sizeof(psinfo);
1174 notes[1].data = &psinfo;
1175 i = current->state ? ffz(~current->state) + 1 : 0;
1176 psinfo.pr_state = i;
1177 psinfo.pr_sname = (i < 0 || i > 5) ? '.' : "RSDZTD"[i];
1178 psinfo.pr_zomb = psinfo.pr_sname == 'Z';
1179 psinfo.pr_nice = task_nice(current);
1180 psinfo.pr_flag = current->flags;
1181 psinfo.pr_uid = current->uid;
1182 psinfo.pr_gid = current->gid;
1183 {
1184 int i, len;
1185
1186 set_fs(fs);
1187
1188 len = current->mm->arg_end - current->mm->arg_start;
1189 len = len >= ELF_PRARGSZ ? ELF_PRARGSZ : len;
1190 copy_from_user(&psinfo.pr_psargs,
1191 (const char *)current->mm->arg_start, len);
1192 for(i = 0; i < len; i++)
1193 if (psinfo.pr_psargs[i] == 0)
1194 psinfo.pr_psargs[i] = ' ';
1195 psinfo.pr_psargs[len] = 0;
1196
1197 set_fs(KERNEL_DS);
1198 }
1199 strlcpy(psinfo.pr_fname, current->comm, sizeof(psinfo.pr_fname));
1200
1201 notes[2].name = "CORE";
1202 notes[2].type = NT_TASKSTRUCT;
1203 notes[2].datasz = sizeof(*current);
1204 notes[2].data = current;
1205
1206 /* Try to dump the FPU. */
1207 prstatus.pr_fpvalid = dump_fpu (regs, &fpu);
1208 if (!prstatus.pr_fpvalid) {
1209 numnote--;
1210 } else {
1211 notes[3].name = "CORE";
1212 notes[3].type = NT_PRFPREG;
1213 notes[3].datasz = sizeof(fpu);
1214 notes[3].data = &fpu;
1215 }
1216
1217 /* Write notes phdr entry. */
1218 {
1219 struct elf_phdr phdr;
1220 int sz = 0;
1221
1222 for(i = 0; i < numnote; i++)
1223 sz += notesize(&notes[i]);
1224
1225 phdr.p_type = PT_NOTE;
1226 phdr.p_offset = offset;
1227 phdr.p_vaddr = 0;
1228 phdr.p_paddr = 0;
1229 phdr.p_filesz = sz;
1230 phdr.p_memsz = 0;
1231 phdr.p_flags = 0;
1232 phdr.p_align = 0;
1233
1234 offset += phdr.p_filesz;
1235 DUMP_WRITE(&phdr, sizeof(phdr));
1236 }
1237
1238 /* Page-align dumped data. */
1239 dataoff = offset = roundup(offset, PAGE_SIZE);
1240
1241 /* Write program headers for segments dump. */
1242 for(vma = current->mm->mmap, i = 0;
1243 i < segs && vma != NULL; vma = vma->vm_next) {
1244 struct elf_phdr phdr;
1245 size_t sz;
1246
1247 i++;
1248
1249 sz = vma->vm_end - vma->vm_start;
1250
1251 phdr.p_type = PT_LOAD;
1252 phdr.p_offset = offset;
1253 phdr.p_vaddr = vma->vm_start;
1254 phdr.p_paddr = 0;
1255 phdr.p_filesz = maydump(vma) ? sz : 0;
1256 phdr.p_memsz = sz;
1257 offset += phdr.p_filesz;
1258 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
1259 if (vma->vm_flags & VM_WRITE) phdr.p_flags |= PF_W;
1260 if (vma->vm_flags & VM_EXEC) phdr.p_flags |= PF_X;
1261 phdr.p_align = PAGE_SIZE;
1262
1263 DUMP_WRITE(&phdr, sizeof(phdr));
1264 }
1265
1266 for(i = 0; i < numnote; i++)
1267 if (!writenote(&notes[i], file))
1268 goto end_coredump;
1269
1270 set_fs(fs);
1271
1272 DUMP_SEEK(dataoff);
1273
1274 for(i = 0, vma = current->mm->mmap;
1275 i < segs && vma != NULL;
1276 vma = vma->vm_next) {
1277 unsigned long addr = vma->vm_start;
1278 unsigned long len = vma->vm_end - vma->vm_start;
1279
1280 if (!maydump(vma))
1281 continue;
1282 i++;
1283#ifdef DEBUG
1284 printk("elf_core_dump: writing %08lx %lx\n", addr, len);
1285#endif
1286 DUMP_WRITE((void *)addr, len);
1287 }
1288
1289 if ((off_t) file->f_pos != offset) {
1290 /* Sanity check. */
1291 printk("elf_core_dump: file->f_pos (%ld) != offset (%ld)\n",
1292 (off_t) file->f_pos, offset);
1293 }
1294
1295end_coredump:
1296 set_fs(fs);
1297 return has_dumped;
1298}
1299
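/*
 * Resulting core file layout, roughly as produced by the two passes
 * described above:
 *
 *	offset 0		ELF header (ET_CORE)
 *	sizeof(elf)		segs+1 program headers
 *	...			PT_NOTE payload: prstatus, prpsinfo,
 *				task_struct and (if valid) the FPU note
 *	roundup(.., PAGE_SIZE)	page-aligned PT_LOAD data for every vma
 *				that maydump() accepted
 *
 * The offsets are accounted while the headers are emitted; the notes
 * and segment contents are then written at those recorded offsets.
 */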
1300static int __init init_irix_binfmt(void)
1301{
1302 int init_inventory(void);
1303 extern asmlinkage unsigned long sys_call_table;
1304 extern asmlinkage unsigned long sys_call_table_irix5;
1305
1306 init_inventory();
1307
1308 /*
1309 * Copy the IRIX5 syscall table (8000 bytes) into the main syscall
1310 * table. The IRIX5 calls are located by an offset of 8000 bytes
1311 * from the beginning of the main table.
1312 */
1313 memcpy((void *) ((unsigned long) &sys_call_table + 8000),
1314 &sys_call_table_irix5, 8000);
1315
1316 return register_binfmt(&irix_format);
1317}
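/*
 * Size check on the copy above, assuming pointer-sized table entries:
 * on a 64-bit kernel the 8000-byte window corresponds to 8000 / 8 =
 * 1000 syscall slots, so the IRIX5 entries start 1000 slots into
 * sys_call_table (2000 slots with 4-byte pointers on a 32-bit kernel).
 */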
1318
1319static void __exit exit_irix_binfmt(void)
1320{
1321 /* Remove the IRIX ELF loaders. */
1322 unregister_binfmt(&irix_format);
1323}
1324
1325module_init(init_irix_binfmt)
1326module_exit(exit_irix_binfmt)
diff --git a/arch/mips/kernel/irixinv.c b/arch/mips/kernel/irixinv.c
new file mode 100644
index 000000000000..60aa98cd1791
--- /dev/null
+++ b/arch/mips/kernel/irixinv.c
@@ -0,0 +1,77 @@
1/*
2 * Support the inventory interface for IRIX binaries
3 * This is invoked before the mm layer is working, so we do not
4 * use the linked lists for the inventory yet.
5 *
6 * Miguel de Icaza, 1997.
7 */
8#include <linux/mm.h>
9#include <asm/inventory.h>
10#include <asm/uaccess.h>
11
12#define MAX_INVENTORY 50
13int inventory_items = 0;
14
15static inventory_t inventory [MAX_INVENTORY];
16
17void add_to_inventory (int class, int type, int controller, int unit, int state)
18{
19 inventory_t *ni = &inventory [inventory_items];
20
21 if (inventory_items == MAX_INVENTORY)
22 return;
23
24 ni->inv_class = class;
25 ni->inv_type = type;
26 ni->inv_controller = controller;
27 ni->inv_unit = unit;
28 ni->inv_state = state;
29 ni->inv_next = ni;
30 inventory_items++;
31}
32
33int dump_inventory_to_user (void *userbuf, int size)
34{
35 inventory_t *inv = &inventory [0];
36 inventory_t *user = userbuf;
37 int v;
38
39 if (!access_ok(VERIFY_WRITE, userbuf, size))
40 return -EFAULT;
41
42 for (v = 0; v < inventory_items; v++){
43 inv = &inventory [v];
44 copy_to_user (user, inv, sizeof (inventory_t));
45 user++;
46 }
47 return inventory_items * sizeof (inventory_t);
48}
49
50int __init init_inventory(void)
51{
52 /*
53 * gross hack while we put the right bits all over the kernel
54 * most likely this will not let just anyone run the X server
55 * until we put the right values all over the place
56 */
57 add_to_inventory (10, 3, 0, 0, 16400);
58 add_to_inventory (1, 1, 150, -1, 12);
59 add_to_inventory (1, 3, 0, 0, 8976);
60 add_to_inventory (1, 2, 0, 0, 8976);
61 add_to_inventory (4, 8, 0, 0, 2);
62 add_to_inventory (5, 5, 0, 0, 1);
63 add_to_inventory (3, 3, 0, 0, 32768);
64 add_to_inventory (3, 4, 0, 0, 32768);
65 add_to_inventory (3, 8, 0, 0, 524288);
66 add_to_inventory (3, 9, 0, 0, 64);
67 add_to_inventory (3, 1, 0, 0, 67108864);
68 add_to_inventory (12, 3, 0, 0, 16);
69 add_to_inventory (8, 7, 17, 0, 16777472);
70 add_to_inventory (8, 0, 0, 0, 1);
71 add_to_inventory (2, 1, 0, 13, 2);
72 add_to_inventory (2, 2, 0, 2, 0);
73 add_to_inventory (2, 2, 0, 1, 0);
74 add_to_inventory (7, 14, 0, 0, 6);
75
76 return 0;
77}
diff --git a/arch/mips/kernel/irixioctl.c b/arch/mips/kernel/irixioctl.c
new file mode 100644
index 000000000000..4cd3d38a22c2
--- /dev/null
+++ b/arch/mips/kernel/irixioctl.c
@@ -0,0 +1,261 @@
1/*
2 * irixioctl.c: A fucking mess...
3 *
4 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
5 */
6
7#include <linux/kernel.h>
8#include <linux/sched.h>
9#include <linux/fs.h>
10#include <linux/mm.h>
11#include <linux/smp.h>
12#include <linux/smp_lock.h>
13#include <linux/sockios.h>
14#include <linux/syscalls.h>
15#include <linux/tty.h>
16#include <linux/file.h>
17
18#include <asm/uaccess.h>
19#include <asm/ioctl.h>
20#include <asm/ioctls.h>
21
22#undef DEBUG_IOCTLS
23#undef DEBUG_MISSING_IOCTL
24
25struct irix_termios {
26 tcflag_t c_iflag, c_oflag, c_cflag, c_lflag;
27 cc_t c_cc[NCCS];
28};
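/*
 * Note the layout difference from the kernel's struct termios: the
 * IRIX variant carries only the four flag words and the control
 * characters (no c_line byte), which is why the TCGETS/TCSETS cases
 * below fetch or build a full kernel termios under KERNEL_DS via
 * sys_ioctl() and then translate it field by field with
 * __get_user()/__put_user() instead of copying the struct wholesale.
 */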
29
30extern void start_tty(struct tty_struct *tty);
31static struct tty_struct *get_tty(int fd)
32{
33 struct file *filp;
34 struct tty_struct *ttyp = NULL;
35
36 spin_lock(&current->files->file_lock);
37 filp = fcheck(fd);
38 if(filp && filp->private_data) {
39 ttyp = (struct tty_struct *) filp->private_data;
40
41 if(ttyp->magic != TTY_MAGIC)
 42			ttyp = NULL;
43 }
44 spin_unlock(&current->files->file_lock);
45 return ttyp;
46}
47
48static struct tty_struct *get_real_tty(struct tty_struct *tp)
49{
50 if (tp->driver->type == TTY_DRIVER_TYPE_PTY &&
51 tp->driver->subtype == PTY_TYPE_MASTER)
52 return tp->link;
53 else
54 return tp;
55}
56
57asmlinkage int irix_ioctl(int fd, unsigned long cmd, unsigned long arg)
58{
59 struct tty_struct *tp, *rtp;
60 mm_segment_t old_fs;
61 int error = 0;
62
63#ifdef DEBUG_IOCTLS
64 printk("[%s:%d] irix_ioctl(%d, ", current->comm, current->pid, fd);
65#endif
66 switch(cmd) {
67 case 0x00005401:
68#ifdef DEBUG_IOCTLS
69 printk("TCGETA, %08lx) ", arg);
70#endif
71 error = sys_ioctl(fd, TCGETA, arg);
72 break;
73
74 case 0x0000540d: {
75 struct termios kt;
76 struct irix_termios *it = (struct irix_termios *) arg;
77
78#ifdef DEBUG_IOCTLS
79 printk("TCGETS, %08lx) ", arg);
80#endif
81 if(!access_ok(VERIFY_WRITE, it, sizeof(*it))) {
82 error = -EFAULT;
83 break;
84 }
85 old_fs = get_fs(); set_fs(get_ds());
86 error = sys_ioctl(fd, TCGETS, (unsigned long) &kt);
87 set_fs(old_fs);
88 if (error)
89 break;
90 __put_user(kt.c_iflag, &it->c_iflag);
91 __put_user(kt.c_oflag, &it->c_oflag);
92 __put_user(kt.c_cflag, &it->c_cflag);
93 __put_user(kt.c_lflag, &it->c_lflag);
94 for(error = 0; error < NCCS; error++)
95 __put_user(kt.c_cc[error], &it->c_cc[error]);
96 error = 0;
97 break;
98 }
99
100 case 0x0000540e: {
101 struct termios kt;
102 struct irix_termios *it = (struct irix_termios *) arg;
103
104#ifdef DEBUG_IOCTLS
105 printk("TCSETS, %08lx) ", arg);
106#endif
107 if (!access_ok(VERIFY_READ, it, sizeof(*it))) {
108 error = -EFAULT;
109 break;
110 }
111 old_fs = get_fs(); set_fs(get_ds());
112 error = sys_ioctl(fd, TCGETS, (unsigned long) &kt);
113 set_fs(old_fs);
114 if(error)
115 break;
116 __get_user(kt.c_iflag, &it->c_iflag);
117 __get_user(kt.c_oflag, &it->c_oflag);
118 __get_user(kt.c_cflag, &it->c_cflag);
119 __get_user(kt.c_lflag, &it->c_lflag);
120 for(error = 0; error < NCCS; error++)
121 __get_user(kt.c_cc[error], &it->c_cc[error]);
122 old_fs = get_fs(); set_fs(get_ds());
123 error = sys_ioctl(fd, TCSETS, (unsigned long) &kt);
124 set_fs(old_fs);
125 break;
126 }
127
128 case 0x0000540f:
129#ifdef DEBUG_IOCTLS
130 printk("TCSETSW, %08lx) ", arg);
131#endif
132 error = sys_ioctl(fd, TCSETSW, arg);
133 break;
134
135 case 0x00005471:
136#ifdef DEBUG_IOCTLS
137 printk("TIOCNOTTY, %08lx) ", arg);
138#endif
139 error = sys_ioctl(fd, TIOCNOTTY, arg);
140 break;
141
142 case 0x00007416:
143#ifdef DEBUG_IOCTLS
144 printk("TIOCGSID, %08lx) ", arg);
145#endif
146 tp = get_tty(fd);
147 if(!tp) {
148 error = -EINVAL;
149 break;
150 }
151 rtp = get_real_tty(tp);
152#ifdef DEBUG_IOCTLS
153 printk("rtp->session=%d ", rtp->session);
154#endif
155 error = put_user(rtp->session, (unsigned long *) arg);
156 break;
157
158 case 0x746e:
159 /* TIOCSTART, same effect as hitting ^Q */
160#ifdef DEBUG_IOCTLS
161 printk("TIOCSTART, %08lx) ", arg);
162#endif
163 tp = get_tty(fd);
164 if(!tp) {
165 error = -EINVAL;
166 break;
167 }
168 rtp = get_real_tty(tp);
169 start_tty(rtp);
170 break;
171
172 case 0x20006968:
173#ifdef DEBUG_IOCTLS
174 printk("SIOCGETLABEL, %08lx) ", arg);
175#endif
176 error = -ENOPKG;
177 break;
178
179 case 0x40047477:
180#ifdef DEBUG_IOCTLS
181 printk("TIOCGPGRP, %08lx) ", arg);
182#endif
183 error = sys_ioctl(fd, TIOCGPGRP, arg);
184#ifdef DEBUG_IOCTLS
185 printk("arg=%d ", *(int *)arg);
186#endif
187 break;
188
189 case 0x40087468:
190#ifdef DEBUG_IOCTLS
191 printk("TIOCGWINSZ, %08lx) ", arg);
192#endif
193 error = sys_ioctl(fd, TIOCGWINSZ, arg);
194 break;
195
196 case 0x8004667e:
197#ifdef DEBUG_IOCTLS
198 printk("FIONBIO, %08lx) arg=%d ", arg, *(int *)arg);
199#endif
200 error = sys_ioctl(fd, FIONBIO, arg);
201 break;
202
203 case 0x80047476:
204#ifdef DEBUG_IOCTLS
205 printk("TIOCSPGRP, %08lx) arg=%d ", arg, *(int *)arg);
206#endif
207 error = sys_ioctl(fd, TIOCSPGRP, arg);
208 break;
209
210 case 0x8020690c:
211#ifdef DEBUG_IOCTLS
212 printk("SIOCSIFADDR, %08lx) arg=%d ", arg, *(int *)arg);
213#endif
214 error = sys_ioctl(fd, SIOCSIFADDR, arg);
215 break;
216
217 case 0x80206910:
218#ifdef DEBUG_IOCTLS
219 printk("SIOCSIFFLAGS, %08lx) arg=%d ", arg, *(int *)arg);
220#endif
221 error = sys_ioctl(fd, SIOCSIFFLAGS, arg);
222 break;
223
224 case 0xc0206911:
225#ifdef DEBUG_IOCTLS
226 printk("SIOCGIFFLAGS, %08lx) arg=%d ", arg, *(int *)arg);
227#endif
228 error = sys_ioctl(fd, SIOCGIFFLAGS, arg);
229 break;
230
231 case 0xc020691b:
232#ifdef DEBUG_IOCTLS
233 printk("SIOCGIFMETRIC, %08lx) arg=%d ", arg, *(int *)arg);
234#endif
235 error = sys_ioctl(fd, SIOCGIFMETRIC, arg);
236 break;
237
238 default: {
239#ifdef DEBUG_MISSING_IOCTL
240 char *msg = "Unimplemented IOCTL cmd tell linux@engr.sgi.com\n";
241
242#ifdef DEBUG_IOCTLS
243 printk("UNIMP_IOCTL, %08lx)\n", arg);
244#endif
245 old_fs = get_fs(); set_fs(get_ds());
246 sys_write(2, msg, strlen(msg));
247 set_fs(old_fs);
248 printk("[%s:%d] Does unimplemented IRIX ioctl cmd %08lx\n",
249 current->comm, current->pid, cmd);
250 do_exit(255);
251#else
252 error = sys_ioctl (fd, cmd, arg);
253#endif
254 }
255
256 };
257#ifdef DEBUG_IOCTLS
258 printk("error=%d\n", error);
259#endif
260 return error;
261}
diff --git a/arch/mips/kernel/irixsig.c b/arch/mips/kernel/irixsig.c
new file mode 100644
index 000000000000..3f956f809fa4
--- /dev/null
+++ b/arch/mips/kernel/irixsig.c
@@ -0,0 +1,853 @@
1/*
2 * irixsig.c: WHEEE, IRIX signals! YOW, am I compatible or what?!?!
3 *
4 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
5 * Copyright (C) 1997 - 2000 Ralf Baechle (ralf@gnu.org)
6 * Copyright (C) 2000 Silicon Graphics, Inc.
7 */
8#include <linux/kernel.h>
9#include <linux/sched.h>
10#include <linux/mm.h>
11#include <linux/errno.h>
12#include <linux/smp.h>
13#include <linux/smp_lock.h>
14#include <linux/time.h>
15#include <linux/ptrace.h>
16
17#include <asm/ptrace.h>
18#include <asm/uaccess.h>
19
20#undef DEBUG_SIG
21
22#define _S(nr) (1<<((nr)-1))
23
24#define _BLOCKABLE (~(_S(SIGKILL) | _S(SIGSTOP)))
25
26typedef struct {
27 unsigned long sig[4];
28} irix_sigset_t;
29
30struct sigctx_irix5 {
31 u32 rmask, cp0_status;
32 u64 pc;
33 u64 regs[32];
34 u64 fpregs[32];
35 u32 usedfp, fpcsr, fpeir, sstk_flags;
36 u64 hi, lo;
37 u64 cp0_cause, cp0_badvaddr, _unused0;
38 irix_sigset_t sigset;
39 u64 weird_fpu_thing;
40 u64 _unused1[31];
41};
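/*
 * rmask is a register save mask, as used by setup_irix_frame() and
 * irix_sigreturn() below: bit n set means regs[n] was saved in the
 * context and should be restored, while bit 0 acts as the FPU-valid
 * bit (checked together with usedfp on sigreturn).  The frame setup
 * code stores ~1, i.e. "all general registers, no FPU".
 */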
42
43#ifdef DEBUG_SIG
44/* Debugging */
45static inline void dump_irix5_sigctx(struct sigctx_irix5 *c)
46{
47 int i;
48
49 printk("misc: rmask[%08lx] status[%08lx] pc[%08lx]\n",
50 (unsigned long) c->rmask,
51 (unsigned long) c->cp0_status,
52 (unsigned long) c->pc);
53 printk("regs: ");
54 for(i = 0; i < 16; i++)
55 printk("[%d]<%08lx> ", i, (unsigned long) c->regs[i]);
56 printk("\nregs: ");
57 for(i = 16; i < 32; i++)
58 printk("[%d]<%08lx> ", i, (unsigned long) c->regs[i]);
59 printk("\nfpregs: ");
60 for(i = 0; i < 16; i++)
61 printk("[%d]<%08lx> ", i, (unsigned long) c->fpregs[i]);
62 printk("\nfpregs: ");
63 for(i = 16; i < 32; i++)
64 printk("[%d]<%08lx> ", i, (unsigned long) c->fpregs[i]);
65 printk("misc: usedfp[%d] fpcsr[%08lx] fpeir[%08lx] stk_flgs[%08lx]\n",
66 (int) c->usedfp, (unsigned long) c->fpcsr,
67 (unsigned long) c->fpeir, (unsigned long) c->sstk_flags);
68 printk("misc: hi[%08lx] lo[%08lx] cause[%08lx] badvaddr[%08lx]\n",
69 (unsigned long) c->hi, (unsigned long) c->lo,
70 (unsigned long) c->cp0_cause, (unsigned long) c->cp0_badvaddr);
71 printk("misc: sigset<0>[%08lx] sigset<1>[%08lx] sigset<2>[%08lx] "
72 "sigset<3>[%08lx]\n", (unsigned long) c->sigset.sig[0],
73 (unsigned long) c->sigset.sig[1],
74 (unsigned long) c->sigset.sig[2],
75 (unsigned long) c->sigset.sig[3]);
76}
77#endif
78
79static void setup_irix_frame(struct k_sigaction *ka, struct pt_regs *regs,
80 int signr, sigset_t *oldmask)
81{
82 unsigned long sp;
83 struct sigctx_irix5 *ctx;
84 int i;
85
86 sp = regs->regs[29];
87 sp -= sizeof(struct sigctx_irix5);
88 sp &= ~(0xf);
89 ctx = (struct sigctx_irix5 *) sp;
90 if (!access_ok(VERIFY_WRITE, ctx, sizeof(*ctx)))
91 goto segv_and_exit;
92
93 __put_user(0, &ctx->weird_fpu_thing);
94 __put_user(~(0x00000001), &ctx->rmask);
95 __put_user(0, &ctx->regs[0]);
96 for(i = 1; i < 32; i++)
97 __put_user((u64) regs->regs[i], &ctx->regs[i]);
98
99 __put_user((u64) regs->hi, &ctx->hi);
100 __put_user((u64) regs->lo, &ctx->lo);
101 __put_user((u64) regs->cp0_epc, &ctx->pc);
102 __put_user(!!used_math(), &ctx->usedfp);
103 __put_user((u64) regs->cp0_cause, &ctx->cp0_cause);
104 __put_user((u64) regs->cp0_badvaddr, &ctx->cp0_badvaddr);
105
106 __put_user(0, &ctx->sstk_flags); /* XXX sigstack unimp... todo... */
107
108 __copy_to_user(&ctx->sigset, oldmask, sizeof(irix_sigset_t));
109
110#ifdef DEBUG_SIG
111 dump_irix5_sigctx(ctx);
112#endif
113
114 regs->regs[4] = (unsigned long) signr;
115 regs->regs[5] = 0; /* XXX sigcode XXX */
116 regs->regs[6] = regs->regs[29] = sp;
117 regs->regs[7] = (unsigned long) ka->sa.sa_handler;
118 regs->regs[25] = regs->cp0_epc = (unsigned long) ka->sa_restorer;
119
120 return;
121
122segv_and_exit:
123 force_sigsegv(signr, current);
124}
125
 126static inline void
127setup_irix_rt_frame(struct k_sigaction * ka, struct pt_regs *regs,
128 int signr, sigset_t *oldmask, siginfo_t *info)
129{
 130	printk("Aiee: setup_irix_rt_frame wants to be written\n");
131 do_exit(SIGSEGV);
132}
133
134static inline void handle_signal(unsigned long sig, siginfo_t *info,
135 struct k_sigaction *ka, sigset_t *oldset, struct pt_regs * regs)
136{
137 switch(regs->regs[0]) {
138 case ERESTARTNOHAND:
139 regs->regs[2] = EINTR;
140 break;
141 case ERESTARTSYS:
142 if(!(ka->sa.sa_flags & SA_RESTART)) {
143 regs->regs[2] = EINTR;
144 break;
145 }
146 /* fallthrough */
147 case ERESTARTNOINTR: /* Userland will reload $v0. */
148 regs->cp0_epc -= 8;
149 }
150
151 regs->regs[0] = 0; /* Don't deal with this again. */
152
153 if (ka->sa.sa_flags & SA_SIGINFO)
154 setup_irix_rt_frame(ka, regs, sig, oldset, info);
155 else
156 setup_irix_frame(ka, regs, sig, oldset);
157
158 if (!(ka->sa.sa_flags & SA_NODEFER)) {
159 spin_lock_irq(&current->sighand->siglock);
160 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
161 sigaddset(&current->blocked,sig);
162 recalc_sigpending();
163 spin_unlock_irq(&current->sighand->siglock);
164 }
165}
166
167asmlinkage int do_irix_signal(sigset_t *oldset, struct pt_regs *regs)
168{
169 struct k_sigaction ka;
170 siginfo_t info;
171 int signr;
172
173 /*
174 * We want the common case to go fast, which is why we may in certain
175 * cases get here from kernel mode. Just return without doing anything
176 * if so.
177 */
178 if (!user_mode(regs))
179 return 1;
180
181 if (try_to_freeze(0))
182 goto no_signal;
183
184 if (!oldset)
185 oldset = &current->blocked;
186
187 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
188 if (signr > 0) {
189 handle_signal(signr, &info, &ka, oldset, regs);
190 return 1;
191 }
192
193no_signal:
194 /*
 195	 * Whose code doesn't conform to the restartable syscall convention
196 * dies here!!! The li instruction, a single machine instruction,
197 * must directly be followed by the syscall instruction.
198 */
199 if (regs->regs[0]) {
200 if (regs->regs[2] == ERESTARTNOHAND ||
201 regs->regs[2] == ERESTARTSYS ||
202 regs->regs[2] == ERESTARTNOINTR) {
203 regs->cp0_epc -= 8;
204 }
205 }
206 return 0;
207}
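/*
 * Illustration of the restart convention referred to above: a
 * restartable syscall is assumed to be emitted as the 8-byte,
 * two-instruction sequence
 *
 *	li	$v0, <syscall number>
 *	syscall
 *
 * MIPS instructions are 4 bytes each, so backing cp0_epc up by 8
 * re-executes the li as well, reloading $v0 before the syscall is
 * retried.
 */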
208
209asmlinkage void
210irix_sigreturn(struct pt_regs *regs)
211{
212 struct sigctx_irix5 *context, *magic;
213 unsigned long umask, mask;
214 u64 *fregs;
215 int sig, i, base = 0;
216 sigset_t blocked;
217
218 /* Always make any pending restarted system calls return -EINTR */
219 current_thread_info()->restart_block.fn = do_no_restart_syscall;
220
221 if (regs->regs[2] == 1000)
222 base = 1;
223
224 context = (struct sigctx_irix5 *) regs->regs[base + 4];
225 magic = (struct sigctx_irix5 *) regs->regs[base + 5];
226 sig = (int) regs->regs[base + 6];
227#ifdef DEBUG_SIG
228 printk("[%s:%d] IRIX sigreturn(scp[%p],ucp[%p],sig[%d])\n",
229 current->comm, current->pid, context, magic, sig);
230#endif
231 if (!context)
232 context = magic;
233 if (!access_ok(VERIFY_READ, context, sizeof(struct sigctx_irix5)))
234 goto badframe;
235
236#ifdef DEBUG_SIG
237 dump_irix5_sigctx(context);
238#endif
239
240 __get_user(regs->cp0_epc, &context->pc);
241 umask = context->rmask; mask = 2;
242 for (i = 1; i < 32; i++, mask <<= 1) {
243 if(umask & mask)
244 __get_user(regs->regs[i], &context->regs[i]);
245 }
246 __get_user(regs->hi, &context->hi);
247 __get_user(regs->lo, &context->lo);
248
249 if ((umask & 1) && context->usedfp) {
250 fregs = (u64 *) &current->thread.fpu;
251 for(i = 0; i < 32; i++)
252 fregs[i] = (u64) context->fpregs[i];
253 __get_user(current->thread.fpu.hard.fcr31, &context->fpcsr);
254 }
255
256 /* XXX do sigstack crapola here... XXX */
257
258 if (__copy_from_user(&blocked, &context->sigset, sizeof(blocked)))
259 goto badframe;
260
261 sigdelsetmask(&blocked, ~_BLOCKABLE);
262 spin_lock_irq(&current->sighand->siglock);
263 current->blocked = blocked;
264 recalc_sigpending();
265 spin_unlock_irq(&current->sighand->siglock);
266
267 /*
268 * Don't let your children do this ...
269 */
270 if (current_thread_info()->flags & TIF_SYSCALL_TRACE)
271 do_syscall_trace(regs, 1);
272 __asm__ __volatile__(
273 "move\t$29,%0\n\t"
274 "j\tsyscall_exit"
275 :/* no outputs */
276 :"r" (&regs));
277 /* Unreached */
278
279badframe:
280 force_sig(SIGSEGV, current);
281}
282
283struct sigact_irix5 {
284 int flags;
285 void (*handler)(int);
286 u32 sigset[4];
287 int _unused0[2];
288};
289
290#ifdef DEBUG_SIG
291static inline void dump_sigact_irix5(struct sigact_irix5 *p)
292{
293 printk("<f[%d] hndlr[%08lx] msk[%08lx]>", p->flags,
294 (unsigned long) p->handler,
295 (unsigned long) p->sigset[0]);
296}
297#endif
298
299asmlinkage int
300irix_sigaction(int sig, const struct sigaction *act,
301 struct sigaction *oact, void *trampoline)
302{
303 struct k_sigaction new_ka, old_ka;
304 int ret;
305
306#ifdef DEBUG_SIG
 307	printk(" (%d,%s,%s,%08lx) ", sig, (!act ? "0" : "NEW"),
 308	       (!oact ? "0" : "OLD"), (unsigned long) trampoline);
 309	if (act) {
 310		dump_sigact_irix5((struct sigact_irix5 *) act); printk(" ");
 311	}
312#endif
313 if (act) {
314 sigset_t mask;
315 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
316 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
317 __get_user(new_ka.sa.sa_flags, &act->sa_flags))
318 return -EFAULT;
319
320 __copy_from_user(&mask, &act->sa_mask, sizeof(sigset_t));
321
322 /*
323 * Hmmm... methinks IRIX libc always passes a valid trampoline
324 * value for all invocations of sigaction. Will have to
325 * investigate. POSIX POSIX, die die die...
326 */
327 new_ka.sa_restorer = trampoline;
328 }
329
330/* XXX Implement SIG_SETMASK32 for IRIX compatibility */
331 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
332
333 if (!ret && oact) {
334 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
335 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
336 __put_user(old_ka.sa.sa_flags, &oact->sa_flags))
337 return -EFAULT;
 338		__copy_to_user(&oact->sa_mask, &old_ka.sa.sa_mask,
 339			       sizeof(sigset_t));
340 }
341
342 return ret;
343}
344
345asmlinkage int irix_sigpending(irix_sigset_t *set)
346{
347 return do_sigpending(set, sizeof(*set));
348}
349
350asmlinkage int irix_sigprocmask(int how, irix_sigset_t *new, irix_sigset_t *old)
351{
352 sigset_t oldbits, newbits;
353
354 if (new) {
355 if (!access_ok(VERIFY_READ, new, sizeof(*new)))
356 return -EFAULT;
357 __copy_from_user(&newbits, new, sizeof(unsigned long)*4);
358 sigdelsetmask(&newbits, ~_BLOCKABLE);
359
360 spin_lock_irq(&current->sighand->siglock);
361 oldbits = current->blocked;
362
363 switch(how) {
364 case 1:
365 sigorsets(&newbits, &oldbits, &newbits);
366 break;
367
368 case 2:
369 sigandsets(&newbits, &oldbits, &newbits);
370 break;
371
372 case 3:
373 break;
374
375 case 256:
376 siginitset(&newbits, newbits.sig[0]);
377 break;
378
 379		default:
 380			spin_unlock_irq(&current->sighand->siglock); return -EINVAL;
 381		}
 382		current->blocked = newbits; recalc_sigpending();
383 spin_unlock_irq(&current->sighand->siglock);
384 }
385 if(old) {
386 if (!access_ok(VERIFY_WRITE, old, sizeof(*old)))
387 return -EFAULT;
388 __copy_to_user(old, &current->blocked, sizeof(unsigned long)*4);
389 }
390
391 return 0;
392}
393
394asmlinkage int irix_sigsuspend(struct pt_regs *regs)
395{
396 sigset_t *uset, saveset, newset;
397
398 uset = (sigset_t *) regs->regs[4];
399 if (copy_from_user(&newset, uset, sizeof(sigset_t)))
400 return -EFAULT;
401 sigdelsetmask(&newset, ~_BLOCKABLE);
402
403 spin_lock_irq(&current->sighand->siglock);
404 saveset = current->blocked;
405 current->blocked = newset;
406 recalc_sigpending();
407 spin_unlock_irq(&current->sighand->siglock);
408
409 regs->regs[2] = -EINTR;
410 while (1) {
411 current->state = TASK_INTERRUPTIBLE;
412 schedule();
413 if (do_irix_signal(&saveset, regs))
414 return -EINTR;
415 }
416}
417
418/* hate hate hate... */
419struct irix5_siginfo {
420 int sig, code, error;
421 union {
422 char unused[128 - (3 * 4)]; /* Safety net. */
423 struct {
424 int pid;
425 union {
426 int uid;
427 struct {
428 int utime, status, stime;
429 } child;
430 } procdata;
431 } procinfo;
432
433 unsigned long fault_addr;
434
435 struct {
436 int fd;
437 long band;
438 } fileinfo;
439
440 unsigned long sigval;
441 } stuff;
442};
443
444static inline unsigned long timespectojiffies(struct timespec *value)
445{
446 unsigned long sec = (unsigned) value->tv_sec;
447 long nsec = value->tv_nsec;
448
449 if (sec > (LONG_MAX / HZ))
450 return LONG_MAX;
451 nsec += 1000000000L / HZ - 1;
452 nsec /= 1000000000L / HZ;
453 return HZ * sec + nsec;
454}
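/*
 * Worked example, assuming HZ = 100 (10 ms per jiffy): a timeout of
 * { tv_sec = 1, tv_nsec = 500000000 } gives
 *
 *	nsec = 500000000 + (10000000 - 1) = 509999999
 *	nsec / 10000000                   = 50
 *	result = 100 * 1 + 50             = 150 jiffies
 *
 * The "+ 1000000000/HZ - 1" term rounds the nanosecond part up, so a
 * partial tick still counts as a full jiffy.
 */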
455
456asmlinkage int irix_sigpoll_sys(unsigned long *set, struct irix5_siginfo *info,
457 struct timespec *tp)
458{
459 long expire = MAX_SCHEDULE_TIMEOUT;
460 sigset_t kset;
461 int i, sig, error, timeo = 0;
462
463#ifdef DEBUG_SIG
464 printk("[%s:%d] irix_sigpoll_sys(%p,%p,%p)\n",
465 current->comm, current->pid, set, info, tp);
466#endif
467
468 /* Must always specify the signal set. */
469 if (!set)
470 return -EINVAL;
471
472 if (!access_ok(VERIFY_READ, set, sizeof(kset))) {
473 error = -EFAULT;
474 goto out;
475 }
476
 477	error = __copy_from_user(&kset, set, sizeof(kset)) ? -EFAULT : 0;
 478	if (error)
 479		goto out;
480
481 if (info && clear_user(info, sizeof(*info))) {
482 error = -EFAULT;
483 goto out;
484 }
485
486 if (tp) {
487 if (!access_ok(VERIFY_READ, tp, sizeof(*tp)))
488 return -EFAULT;
489 if (!tp->tv_sec && !tp->tv_nsec) {
490 error = -EINVAL;
491 goto out;
492 }
493 expire = timespectojiffies(tp)+(tp->tv_sec||tp->tv_nsec);
494 }
495
496 while(1) {
497 long tmp = 0;
498
499 current->state = TASK_INTERRUPTIBLE;
500 expire = schedule_timeout(expire);
501
 502		for (i = 0; i < _NSIG_WORDS; i++)
503 tmp |= (current->pending.signal.sig[i] & kset.sig[i]);
504
505 if (tmp)
506 break;
507 if (!expire) {
508 timeo = 1;
509 break;
510 }
511 if (signal_pending(current))
512 return -EINTR;
513 }
514 if (timeo)
515 return -EAGAIN;
516
 517	for (sig = 1; sig <= 65 /* IRIX_NSIG */; sig++) {
518 if (sigismember (&kset, sig))
519 continue;
520 if (sigismember (&current->pending.signal, sig)) {
521 /* XXX need more than this... */
522 if (info)
523 info->sig = sig;
524 error = 0;
525 goto out;
526 }
527 }
528
529 /* Should not get here, but do something sane if we do. */
530 error = -EINTR;
531
532out:
533 return error;
534}
535
536/* This is here because of irix5_siginfo definition. */
537#define IRIX_P_PID 0
538#define IRIX_P_PGID 2
539#define IRIX_P_ALL 7
540
541extern int getrusage(struct task_struct *, int, struct rusage __user *);
542
543#define W_EXITED 1
544#define W_TRAPPED 2
545#define W_STOPPED 4
546#define W_CONT 8
547#define W_NOHANG 64
548
549#define W_MASK (W_EXITED | W_TRAPPED | W_STOPPED | W_CONT | W_NOHANG)
550
551asmlinkage int irix_waitsys(int type, int pid, struct irix5_siginfo *info,
552 int options, struct rusage *ru)
553{
554 int flag, retval;
555 DECLARE_WAITQUEUE(wait, current);
556 struct task_struct *tsk;
557 struct task_struct *p;
558 struct list_head *_p;
559
560 if (!info) {
561 retval = -EINVAL;
562 goto out;
563 }
564 if (!access_ok(VERIFY_WRITE, info, sizeof(*info))) {
565 retval = -EFAULT;
566 goto out;
567 }
568 if (ru) {
569 if (!access_ok(VERIFY_WRITE, ru, sizeof(*ru))) {
570 retval = -EFAULT;
571 goto out;
572 }
573 }
574 if (options & ~(W_MASK)) {
575 retval = -EINVAL;
576 goto out;
577 }
578 if (type != IRIX_P_PID && type != IRIX_P_PGID && type != IRIX_P_ALL) {
579 retval = -EINVAL;
580 goto out;
581 }
582 add_wait_queue(&current->signal->wait_chldexit, &wait);
583repeat:
584 flag = 0;
585 current->state = TASK_INTERRUPTIBLE;
586 read_lock(&tasklist_lock);
587 tsk = current;
588 list_for_each(_p,&tsk->children) {
589 p = list_entry(_p,struct task_struct,sibling);
590 if ((type == IRIX_P_PID) && p->pid != pid)
591 continue;
592 if ((type == IRIX_P_PGID) && process_group(p) != pid)
593 continue;
594 if ((p->exit_signal != SIGCHLD))
595 continue;
596 flag = 1;
597 switch (p->state) {
598 case TASK_STOPPED:
599 if (!p->exit_code)
600 continue;
601 if (!(options & (W_TRAPPED|W_STOPPED)) &&
602 !(p->ptrace & PT_PTRACED))
603 continue;
604 read_unlock(&tasklist_lock);
605
606 /* move to end of parent's list to avoid starvation */
607 write_lock_irq(&tasklist_lock);
608 remove_parent(p);
609 add_parent(p, p->parent);
610 write_unlock_irq(&tasklist_lock);
611 retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
612 if (!retval && ru) {
613 retval |= __put_user(SIGCHLD, &info->sig);
614 retval |= __put_user(0, &info->code);
615 retval |= __put_user(p->pid, &info->stuff.procinfo.pid);
616 retval |= __put_user((p->exit_code >> 8) & 0xff,
617 &info->stuff.procinfo.procdata.child.status);
618 retval |= __put_user(p->utime, &info->stuff.procinfo.procdata.child.utime);
619 retval |= __put_user(p->stime, &info->stuff.procinfo.procdata.child.stime);
620 }
621 if (!retval) {
622 p->exit_code = 0;
623 }
624 goto end_waitsys;
625
626 case EXIT_ZOMBIE:
627 current->signal->cutime += p->utime + p->signal->cutime;
628 current->signal->cstime += p->stime + p->signal->cstime;
629 if (ru != NULL)
630 getrusage(p, RUSAGE_BOTH, ru);
631 __put_user(SIGCHLD, &info->sig);
632 __put_user(1, &info->code); /* CLD_EXITED */
633 __put_user(p->pid, &info->stuff.procinfo.pid);
634 __put_user((p->exit_code >> 8) & 0xff,
635 &info->stuff.procinfo.procdata.child.status);
636 __put_user(p->utime,
637 &info->stuff.procinfo.procdata.child.utime);
638 __put_user(p->stime,
639 &info->stuff.procinfo.procdata.child.stime);
640 retval = 0;
641 if (p->real_parent != p->parent) {
642 write_lock_irq(&tasklist_lock);
643 remove_parent(p);
644 p->parent = p->real_parent;
645 add_parent(p, p->parent);
646 do_notify_parent(p, SIGCHLD);
647 write_unlock_irq(&tasklist_lock);
648 } else
649 release_task(p);
650 goto end_waitsys;
651 default:
652 continue;
653 }
654 tsk = next_thread(tsk);
655 }
656 read_unlock(&tasklist_lock);
657 if (flag) {
658 retval = 0;
659 if (options & W_NOHANG)
660 goto end_waitsys;
661 retval = -ERESTARTSYS;
662 if (signal_pending(current))
663 goto end_waitsys;
664 current->state = TASK_INTERRUPTIBLE;
665 schedule();
666 goto repeat;
667 }
668 retval = -ECHILD;
669end_waitsys:
670 current->state = TASK_RUNNING;
671 remove_wait_queue(&current->signal->wait_chldexit, &wait);
672
673out:
674 return retval;
675}
676
677struct irix5_context {
678 u32 flags;
679 u32 link;
680 u32 sigmask[4];
681 struct { u32 sp, size, flags; } stack;
682 int regs[36];
683 u32 fpregs[32];
684 u32 fpcsr;
685 u32 _unused0;
686 u32 _unused1[47];
687 u32 weird_graphics_thing;
688};
689
690asmlinkage int irix_getcontext(struct pt_regs *regs)
691{
692 int i, base = 0;
693 struct irix5_context *ctx;
694 unsigned long flags;
695
696 if (regs->regs[2] == 1000)
697 base = 1;
698 ctx = (struct irix5_context *) regs->regs[base + 4];
699
700#ifdef DEBUG_SIG
701 printk("[%s:%d] irix_getcontext(%p)\n",
702 current->comm, current->pid, ctx);
703#endif
704
705 if (!access_ok(VERIFY_WRITE, ctx, sizeof(*ctx)))
706 return -EFAULT;
707
708 __put_user(current->thread.irix_oldctx, &ctx->link);
709
710 __copy_to_user(&ctx->sigmask, &current->blocked, sizeof(irix_sigset_t));
711
712 /* XXX Do sigstack stuff someday... */
713 __put_user(0, &ctx->stack.sp);
714 __put_user(0, &ctx->stack.size);
715 __put_user(0, &ctx->stack.flags);
716
717 __put_user(0, &ctx->weird_graphics_thing);
718 __put_user(0, &ctx->regs[0]);
719 for (i = 1; i < 32; i++)
720 __put_user(regs->regs[i], &ctx->regs[i]);
721 __put_user(regs->lo, &ctx->regs[32]);
722 __put_user(regs->hi, &ctx->regs[33]);
723 __put_user(regs->cp0_cause, &ctx->regs[34]);
724 __put_user(regs->cp0_epc, &ctx->regs[35]);
725
726 flags = 0x0f;
727 if (!used_math()) {
728 flags &= ~(0x08);
729 } else {
730 /* XXX wheee... */
731 printk("Wheee, no code for saving IRIX FPU context yet.\n");
732 }
733 __put_user(flags, &ctx->flags);
734
735 return 0;
736}
737
738asmlinkage unsigned long irix_setcontext(struct pt_regs *regs)
739{
740 int error, base = 0;
741 struct irix5_context *ctx;
742
743 if(regs->regs[2] == 1000)
744 base = 1;
745 ctx = (struct irix5_context *) regs->regs[base + 4];
746
747#ifdef DEBUG_SIG
748 printk("[%s:%d] irix_setcontext(%p)\n",
749 current->comm, current->pid, ctx);
750#endif
751
752 if (!access_ok(VERIFY_READ, ctx, sizeof(*ctx))) {
753 error = -EFAULT;
754 goto out;
755 }
756
757 if (ctx->flags & 0x02) {
758 /* XXX sigstack garbage, todo... */
759 printk("Wheee, cannot do sigstack stuff in setcontext\n");
760 }
761
762 if (ctx->flags & 0x04) {
763 int i;
764
765 /* XXX extra control block stuff... todo... */
766 for(i = 1; i < 32; i++)
767 regs->regs[i] = ctx->regs[i];
768 regs->lo = ctx->regs[32];
769 regs->hi = ctx->regs[33];
770 regs->cp0_epc = ctx->regs[35];
771 }
772
773 if (ctx->flags & 0x08) {
774 /* XXX fpu context, blah... */
775 printk("Wheee, cannot restore FPU context yet...\n");
776 }
777 current->thread.irix_oldctx = ctx->link;
778 error = regs->regs[2];
779
780out:
781 return error;
782}
783
784struct irix_sigstack { unsigned long sp; int status; };
785
786asmlinkage int irix_sigstack(struct irix_sigstack *new, struct irix_sigstack *old)
787{
788 int error = -EFAULT;
789
790#ifdef DEBUG_SIG
791 printk("[%s:%d] irix_sigstack(%p,%p)\n",
792 current->comm, current->pid, new, old);
793#endif
794 if(new) {
795 if (!access_ok(VERIFY_READ, new, sizeof(*new)))
796 goto out;
797 }
798
799 if(old) {
800 if (!access_ok(VERIFY_WRITE, old, sizeof(*old)))
801 goto out;
802 }
803 error = 0;
804
805out:
806 return error;
807}
808
809struct irix_sigaltstack { unsigned long sp; int size; int status; };
810
811asmlinkage int irix_sigaltstack(struct irix_sigaltstack *new,
812 struct irix_sigaltstack *old)
813{
814 int error = -EFAULT;
815
816#ifdef DEBUG_SIG
817 printk("[%s:%d] irix_sigaltstack(%p,%p)\n",
818 current->comm, current->pid, new, old);
819#endif
820 if (new) {
821 if (!access_ok(VERIFY_READ, new, sizeof(*new)))
822 goto out;
823 }
824
825 if (old) {
826 if (!access_ok(VERIFY_WRITE, old, sizeof(*old)))
827 goto out;
828 }
829 error = 0;
830
 831out:
 832	/* keep -EFAULT from the failed access_ok() checks above, else 0 */
833
834 return error;
835}
836
837struct irix_procset {
838 int cmd, ltype, lid, rtype, rid;
839};
840
841asmlinkage int irix_sigsendset(struct irix_procset *pset, int sig)
842{
843 if (!access_ok(VERIFY_READ, pset, sizeof(*pset)))
844 return -EFAULT;
845
846#ifdef DEBUG_SIG
847 printk("[%s:%d] irix_sigsendset([%d,%d,%d,%d,%d],%d)\n",
848 current->comm, current->pid,
849 pset->cmd, pset->ltype, pset->lid, pset->rtype, pset->rid,
850 sig);
851#endif
852 return -EINVAL;
853}
diff --git a/arch/mips/kernel/irq-msc01.c b/arch/mips/kernel/irq-msc01.c
new file mode 100644
index 000000000000..43c00ac0b88d
--- /dev/null
+++ b/arch/mips/kernel/irq-msc01.c
@@ -0,0 +1,189 @@
1/*
2 * Copyright (c) 2004 MIPS Inc
3 * Author: chris@mips.com
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the
7 * Free Software Foundation; either version 2 of the License, or (at your
8 * option) any later version.
9 */
10#include <linux/module.h>
11#include <linux/interrupt.h>
12#include <linux/kernel.h>
13#include <asm/ptrace.h>
14#include <linux/sched.h>
15#include <linux/kernel_stat.h>
16#include <asm/io.h>
17#include <asm/irq.h>
18#include <asm/msc01_ic.h>
19
20static unsigned long _icctrl_msc;
21#define MSC01_IC_REG_BASE _icctrl_msc
22
23#define MSCIC_WRITE(reg, data) do { *(volatile u32 *)(reg) = data; } while (0)
24#define MSCIC_READ(reg, data) do { data = *(volatile u32 *)(reg); } while (0)
25
26static unsigned int irq_base;
27
28/* mask off an interrupt */
29static inline void mask_msc_irq(unsigned int irq)
30{
31 if (irq < (irq_base + 32))
32 MSCIC_WRITE(MSC01_IC_DISL, 1<<(irq - irq_base));
33 else
34 MSCIC_WRITE(MSC01_IC_DISH, 1<<(irq - irq_base - 32));
35}
36
37/* unmask an interrupt */
38static inline void unmask_msc_irq(unsigned int irq)
39{
40 if (irq < (irq_base + 32))
41 MSCIC_WRITE(MSC01_IC_ENAL, 1<<(irq - irq_base));
42 else
43 MSCIC_WRITE(MSC01_IC_ENAH, 1<<(irq - irq_base - 32));
44}
45
46/*
47 * Enables the IRQ on SOC-it
48 */
49static void enable_msc_irq(unsigned int irq)
50{
51 unmask_msc_irq(irq);
52}
53
54/*
55 * Initialize the IRQ on SOC-it
56 */
57static unsigned int startup_msc_irq(unsigned int irq)
58{
59 unmask_msc_irq(irq);
60 return 0;
61}
62
63/*
64 * Disables the IRQ on SOC-it
65 */
66static void disable_msc_irq(unsigned int irq)
67{
68 mask_msc_irq(irq);
69}
70
71/*
72 * Masks and ACKs an IRQ
73 */
74static void level_mask_and_ack_msc_irq(unsigned int irq)
75{
76 mask_msc_irq(irq);
77 if (!cpu_has_ei)
78 MSCIC_WRITE(MSC01_IC_EOI, 0);
79}
80
81/*
82 * Masks and ACKs an IRQ
83 */
84static void edge_mask_and_ack_msc_irq(unsigned int irq)
85{
86 mask_msc_irq(irq);
87 if (!cpu_has_ei)
88 MSCIC_WRITE(MSC01_IC_EOI, 0);
89 else {
90 u32 r;
91 MSCIC_READ(MSC01_IC_SUP+irq*8, r);
92 MSCIC_WRITE(MSC01_IC_SUP+irq*8, r | ~MSC01_IC_SUP_EDGE_BIT);
93 MSCIC_WRITE(MSC01_IC_SUP+irq*8, r);
94 }
95}
96
97/*
98 * End IRQ processing
99 */
100static void end_msc_irq(unsigned int irq)
101{
102 if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
103 unmask_msc_irq(irq);
104}
105
106/*
107 * Interrupt handler for interrupts coming from SOC-it.
108 */
109void ll_msc_irq(struct pt_regs *regs)
110{
111 unsigned int irq;
112
113 /* read the interrupt vector register */
114 MSCIC_READ(MSC01_IC_VEC, irq);
115 if (irq < 64)
116 do_IRQ(irq + irq_base, regs);
117 else {
118 /* Ignore spurious interrupt */
119 }
120}
121
122void
123msc_bind_eic_interrupt (unsigned int irq, unsigned int set)
124{
125 MSCIC_WRITE(MSC01_IC_RAMW,
126 (irq<<MSC01_IC_RAMW_ADDR_SHF) | (set<<MSC01_IC_RAMW_DATA_SHF));
127}
128
129#define shutdown_msc_irq disable_msc_irq
130
131struct hw_interrupt_type msc_levelirq_type = {
132 "SOC-it-Level",
133 startup_msc_irq,
134 shutdown_msc_irq,
135 enable_msc_irq,
136 disable_msc_irq,
137 level_mask_and_ack_msc_irq,
138 end_msc_irq,
139 NULL
140};
141
142struct hw_interrupt_type msc_edgeirq_type = {
143 "SOC-it-Edge",
144 startup_msc_irq,
145 shutdown_msc_irq,
146 enable_msc_irq,
147 disable_msc_irq,
148 edge_mask_and_ack_msc_irq,
149 end_msc_irq,
150 NULL
151};
152
153
154void __init init_msc_irqs(unsigned int base, msc_irqmap_t *imp, int nirq)
155{
156 extern void (*board_bind_eic_interrupt)(unsigned int irq, unsigned int regset);
157
158 _icctrl_msc = (unsigned long) ioremap (MIPS_MSC01_IC_REG_BASE, 0x40000);
159
160 /* Reset interrupt controller - initialises all registers to 0 */
161 MSCIC_WRITE(MSC01_IC_RST, MSC01_IC_RST_RST_BIT);
162
163 board_bind_eic_interrupt = &msc_bind_eic_interrupt;
164
165 for (; nirq >= 0; nirq--, imp++) {
166 int n = imp->im_irq;
167
168 switch (imp->im_type) {
169 case MSC01_IRQ_EDGE:
170 irq_desc[base+n].handler = &msc_edgeirq_type;
171 if (cpu_has_ei)
172 MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT);
173 else
174 MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT | imp->im_lvl);
175 break;
176 case MSC01_IRQ_LEVEL:
177 irq_desc[base+n].handler = &msc_levelirq_type;
178 if (cpu_has_ei)
179 MSCIC_WRITE(MSC01_IC_SUP+n*8, 0);
180 else
181 MSCIC_WRITE(MSC01_IC_SUP+n*8, imp->im_lvl);
182 }
183 }
184
185 irq_base = base;
186
187 MSCIC_WRITE(MSC01_IC_GENA, MSC01_IC_GENA_GENA_BIT); /* Enable interrupt generation */
188
189}
diff --git a/arch/mips/kernel/irq-mv6434x.c b/arch/mips/kernel/irq-mv6434x.c
new file mode 100644
index 000000000000..088bbbc869e6
--- /dev/null
+++ b/arch/mips/kernel/irq-mv6434x.c
@@ -0,0 +1,161 @@
1/*
2 * Copyright 2002 Momentum Computer
3 * Author: mdharm@momenco.com
4 * Copyright (C) 2004 Ralf Baechle <ralf@linux-mips.org>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 */
11#include <linux/module.h>
12#include <linux/interrupt.h>
13#include <linux/kernel.h>
14#include <asm/ptrace.h>
15#include <linux/sched.h>
16#include <linux/kernel_stat.h>
17#include <asm/io.h>
18#include <asm/irq.h>
19#include <linux/mv643xx.h>
20
21static unsigned int irq_base;
22
23static inline int ls1bit32(unsigned int x)
24{
25 int b = 31, s;
26
27 s = 16; if (x << 16 == 0) s = 0; b -= s; x <<= s;
28 s = 8; if (x << 8 == 0) s = 0; b -= s; x <<= s;
29 s = 4; if (x << 4 == 0) s = 0; b -= s; x <<= s;
30 s = 2; if (x << 2 == 0) s = 0; b -= s; x <<= s;
31 s = 1; if (x << 1 == 0) s = 0; b -= s;
32
33 return b;
34}
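/*
 * ls1bit32() is a shift-based binary search for the index of the
 * least significant set bit, equivalent to ffs(x) - 1 for non-zero x.
 * Example: ls1bit32(0x00000008) == 3 and ls1bit32(0x00010000) == 16,
 * so ll_mv64340_irq() below maps the lowest pending cause bit straight
 * to an irq number.
 */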
35
36/* mask off an interrupt -- 1 is enable, 0 is disable */
37static inline void mask_mv64340_irq(unsigned int irq)
38{
39 uint32_t value;
40
41 if (irq < (irq_base + 32)) {
42 value = MV_READ(MV64340_INTERRUPT0_MASK_0_LOW);
43 value &= ~(1 << (irq - irq_base));
44 MV_WRITE(MV64340_INTERRUPT0_MASK_0_LOW, value);
45 } else {
46 value = MV_READ(MV64340_INTERRUPT0_MASK_0_HIGH);
47 value &= ~(1 << (irq - irq_base - 32));
48 MV_WRITE(MV64340_INTERRUPT0_MASK_0_HIGH, value);
49 }
50}
51
52/* unmask an interrupt -- 1 is enable, 0 is disable */
53static inline void unmask_mv64340_irq(unsigned int irq)
54{
55 uint32_t value;
56
57 if (irq < (irq_base + 32)) {
58 value = MV_READ(MV64340_INTERRUPT0_MASK_0_LOW);
59 value |= 1 << (irq - irq_base);
60 MV_WRITE(MV64340_INTERRUPT0_MASK_0_LOW, value);
61 } else {
62 value = MV_READ(MV64340_INTERRUPT0_MASK_0_HIGH);
63 value |= 1 << (irq - irq_base - 32);
64 MV_WRITE(MV64340_INTERRUPT0_MASK_0_HIGH, value);
65 }
66}
67
68/*
69 * Enables the IRQ on Marvell Chip
70 */
71static void enable_mv64340_irq(unsigned int irq)
72{
73 unmask_mv64340_irq(irq);
74}
75
76/*
77 * Initialize the IRQ on Marvell Chip
78 */
79static unsigned int startup_mv64340_irq(unsigned int irq)
80{
81 unmask_mv64340_irq(irq);
82 return 0;
83}
84
85/*
86 * Disables the IRQ on Marvell Chip
87 */
88static void disable_mv64340_irq(unsigned int irq)
89{
90 mask_mv64340_irq(irq);
91}
92
93/*
94 * Masks and ACKs an IRQ
95 */
96static void mask_and_ack_mv64340_irq(unsigned int irq)
97{
98 mask_mv64340_irq(irq);
99}
100
101/*
102 * End IRQ processing
103 */
104static void end_mv64340_irq(unsigned int irq)
105{
106 if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
107 unmask_mv64340_irq(irq);
108}
109
110/*
111 * Interrupt handler for interrupts coming from the Marvell chip.
112 * It could be built in ethernet ports etc...
113 */
114void ll_mv64340_irq(struct pt_regs *regs)
115{
116 unsigned int irq_src_low, irq_src_high;
117 unsigned int irq_mask_low, irq_mask_high;
118
119 /* read the interrupt status registers */
120 irq_mask_low = MV_READ(MV64340_INTERRUPT0_MASK_0_LOW);
121 irq_mask_high = MV_READ(MV64340_INTERRUPT0_MASK_0_HIGH);
122 irq_src_low = MV_READ(MV64340_MAIN_INTERRUPT_CAUSE_LOW);
123 irq_src_high = MV_READ(MV64340_MAIN_INTERRUPT_CAUSE_HIGH);
124
125 /* mask for just the interrupts we want */
126 irq_src_low &= irq_mask_low;
127 irq_src_high &= irq_mask_high;
128
129 if (irq_src_low)
130 do_IRQ(ls1bit32(irq_src_low) + irq_base, regs);
131 else
132 do_IRQ(ls1bit32(irq_src_high) + irq_base + 32, regs);
133}
134
135#define shutdown_mv64340_irq disable_mv64340_irq
136
137struct hw_interrupt_type mv64340_irq_type = {
138 "MV-64340",
139 startup_mv64340_irq,
140 shutdown_mv64340_irq,
141 enable_mv64340_irq,
142 disable_mv64340_irq,
143 mask_and_ack_mv64340_irq,
144 end_mv64340_irq,
145 NULL
146};
147
148void __init mv64340_irq_init(unsigned int base)
149{
150 int i;
151
152 /* Reset irq handlers pointers to NULL */
153 for (i = base; i < base + 64; i++) {
154 irq_desc[i].status = IRQ_DISABLED;
155 irq_desc[i].action = 0;
156 irq_desc[i].depth = 2;
157 irq_desc[i].handler = &mv64340_irq_type;
158 }
159
160 irq_base = base;
161}
diff --git a/arch/mips/kernel/irq-rm7000.c b/arch/mips/kernel/irq-rm7000.c
new file mode 100644
index 000000000000..f5d779fd0355
--- /dev/null
+++ b/arch/mips/kernel/irq-rm7000.c
@@ -0,0 +1,98 @@
1/*
2 * Copyright (C) 2003 Ralf Baechle
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License as published by the
6 * Free Software Foundation; either version 2 of the License, or (at your
7 * option) any later version.
8 *
9 * Handler for RM7000 extended interrupts. These are a non-standard
10 * feature so we handle them separately from standard interrupts.
11 */
12#include <linux/init.h>
13#include <linux/interrupt.h>
14#include <linux/kernel.h>
15
16#include <asm/irq_cpu.h>
17#include <asm/mipsregs.h>
18#include <asm/system.h>
19
20static int irq_base;
21
22static inline void unmask_rm7k_irq(unsigned int irq)
23{
24 set_c0_intcontrol(0x100 << (irq - irq_base));
25}
26
27static inline void mask_rm7k_irq(unsigned int irq)
28{
29 clear_c0_intcontrol(0x100 << (irq - irq_base));
30}
31
32static inline void rm7k_cpu_irq_enable(unsigned int irq)
33{
34 unsigned long flags;
35
36 local_irq_save(flags);
37 unmask_rm7k_irq(irq);
38 local_irq_restore(flags);
39}
40
41static void rm7k_cpu_irq_disable(unsigned int irq)
42{
43 unsigned long flags;
44
45 local_irq_save(flags);
46 mask_rm7k_irq(irq);
47 local_irq_restore(flags);
48}
49
50static unsigned int rm7k_cpu_irq_startup(unsigned int irq)
51{
52 rm7k_cpu_irq_enable(irq);
53
54 return 0;
55}
56
57#define rm7k_cpu_irq_shutdown rm7k_cpu_irq_disable
58
59/*
60 * While we ack the interrupt interrupts are disabled and thus we don't need
61 * to deal with concurrency issues. Same for rm7k_cpu_irq_end.
62 */
63static void rm7k_cpu_irq_ack(unsigned int irq)
64{
65 mask_rm7k_irq(irq);
66}
67
68static void rm7k_cpu_irq_end(unsigned int irq)
69{
70 if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
71 unmask_rm7k_irq(irq);
72}
73
74static hw_irq_controller rm7k_irq_controller = {
75 "RM7000",
76 rm7k_cpu_irq_startup,
77 rm7k_cpu_irq_shutdown,
78 rm7k_cpu_irq_enable,
79 rm7k_cpu_irq_disable,
80 rm7k_cpu_irq_ack,
81 rm7k_cpu_irq_end,
82};
83
84void __init rm7k_cpu_irq_init(int base)
85{
86 int i;
87
88 clear_c0_intcontrol(0x00000f00); /* Mask all */
89
90 for (i = base; i < base + 4; i++) {
91 irq_desc[i].status = IRQ_DISABLED;
92 irq_desc[i].action = NULL;
93 irq_desc[i].depth = 1;
94 irq_desc[i].handler = &rm7k_irq_controller;
95 }
96
97 irq_base = base;
98}
diff --git a/arch/mips/kernel/irq-rm9000.c b/arch/mips/kernel/irq-rm9000.c
new file mode 100644
index 000000000000..bdd130296256
--- /dev/null
+++ b/arch/mips/kernel/irq-rm9000.c
@@ -0,0 +1,149 @@
1/*
2 * Copyright (C) 2003 Ralf Baechle
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License as published by the
6 * Free Software Foundation; either version 2 of the License, or (at your
7 * option) any later version.
8 *
9 * Handler for RM9000 extended interrupts. These are a non-standard
10 * feature so we handle them separately from standard interrupts.
11 */
12#include <linux/init.h>
13#include <linux/interrupt.h>
14#include <linux/kernel.h>
15#include <linux/module.h>
16
17#include <asm/irq_cpu.h>
18#include <asm/mipsregs.h>
19#include <asm/system.h>
20
21static int irq_base;
22
23static inline void unmask_rm9k_irq(unsigned int irq)
24{
25 set_c0_intcontrol(0x1000 << (irq - irq_base));
26}
27
28static inline void mask_rm9k_irq(unsigned int irq)
29{
30 clear_c0_intcontrol(0x1000 << (irq - irq_base));
31}
32
33static inline void rm9k_cpu_irq_enable(unsigned int irq)
34{
35 unsigned long flags;
36
37 local_irq_save(flags);
38 unmask_rm9k_irq(irq);
39 local_irq_restore(flags);
40}
41
42static void rm9k_cpu_irq_disable(unsigned int irq)
43{
44 unsigned long flags;
45
46 local_irq_save(flags);
47 mask_rm9k_irq(irq);
48 local_irq_restore(flags);
49}
50
51static unsigned int rm9k_cpu_irq_startup(unsigned int irq)
52{
53 rm9k_cpu_irq_enable(irq);
54
55 return 0;
56}
57
58#define rm9k_cpu_irq_shutdown rm9k_cpu_irq_disable
59
60/*
61 * Performance counter interrupts are global on all processors.
62 */
63static void local_rm9k_perfcounter_irq_startup(void *args)
64{
65 unsigned int irq = (unsigned int) args;
66
67 rm9k_cpu_irq_enable(irq);
68}
69
70static unsigned int rm9k_perfcounter_irq_startup(unsigned int irq)
71{
72 on_each_cpu(local_rm9k_perfcounter_irq_startup, (void *) irq, 0, 1);
73
74 return 0;
75}
76
77static void local_rm9k_perfcounter_irq_shutdown(void *args)
78{
79 unsigned int irq = (unsigned int) args;
80 unsigned long flags;
81
82 local_irq_save(flags);
83 mask_rm9k_irq(irq);
84 local_irq_restore(flags);
85}
86
87static void rm9k_perfcounter_irq_shutdown(unsigned int irq)
88{
89 on_each_cpu(local_rm9k_perfcounter_irq_shutdown, (void *) irq, 0, 1);
90}
91
92
93/*
94 * While we ack the interrupt interrupts are disabled and thus we don't need
95 * to deal with concurrency issues. Same for rm9k_cpu_irq_end.
96 */
97static void rm9k_cpu_irq_ack(unsigned int irq)
98{
99 mask_rm9k_irq(irq);
100}
101
102static void rm9k_cpu_irq_end(unsigned int irq)
103{
104 if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
105 unmask_rm9k_irq(irq);
106}
107
108static hw_irq_controller rm9k_irq_controller = {
109 "RM9000",
110 rm9k_cpu_irq_startup,
111 rm9k_cpu_irq_shutdown,
112 rm9k_cpu_irq_enable,
113 rm9k_cpu_irq_disable,
114 rm9k_cpu_irq_ack,
115 rm9k_cpu_irq_end,
116};
117
118static hw_irq_controller rm9k_perfcounter_irq = {
119 "RM9000",
120 rm9k_perfcounter_irq_startup,
121 rm9k_perfcounter_irq_shutdown,
122 rm9k_cpu_irq_enable,
123 rm9k_cpu_irq_disable,
124 rm9k_cpu_irq_ack,
125 rm9k_cpu_irq_end,
126};
127
128unsigned int rm9000_perfcount_irq;
129
130EXPORT_SYMBOL(rm9000_perfcount_irq);
131
132void __init rm9k_cpu_irq_init(int base)
133{
134 int i;
135
136 clear_c0_intcontrol(0x0000f000); /* Mask all */
137
138 for (i = base; i < base + 4; i++) {
139 irq_desc[i].status = IRQ_DISABLED;
140 irq_desc[i].action = NULL;
141 irq_desc[i].depth = 1;
142 irq_desc[i].handler = &rm9k_irq_controller;
143 }
144
145 rm9000_perfcount_irq = base + 1;
146 irq_desc[rm9000_perfcount_irq].handler = &rm9k_perfcounter_irq;
147
148 irq_base = base;
149}
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
new file mode 100644
index 000000000000..441157a1f994
--- /dev/null
+++ b/arch/mips/kernel/irq.c
@@ -0,0 +1,140 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Code to handle x86 style IRQs plus some generic interrupt stuff.
7 *
8 * Copyright (C) 1992 Linus Torvalds
9 * Copyright (C) 1994 - 2000 Ralf Baechle
10 */
11#include <linux/config.h>
12#include <linux/kernel.h>
13#include <linux/delay.h>
14#include <linux/init.h>
15#include <linux/interrupt.h>
16#include <linux/kernel_stat.h>
17#include <linux/module.h>
18#include <linux/proc_fs.h>
19#include <linux/slab.h>
20#include <linux/mm.h>
21#include <linux/random.h>
22#include <linux/sched.h>
23#include <linux/seq_file.h>
24#include <linux/kallsyms.h>
25
26#include <asm/atomic.h>
27#include <asm/system.h>
28#include <asm/uaccess.h>
29
30/*
 31 * 'What should we do if we get a hw irq event on an illegal vector?'
 32 * Each architecture has to answer this itself.
33 */
34void ack_bad_irq(unsigned int irq)
35{
36 printk("unexpected IRQ # %d\n", irq);
37}
38
39atomic_t irq_err_count;
40
41#undef do_IRQ
42
43/*
44 * do_IRQ handles all normal device IRQ's (the special
45 * SMP cross-CPU interrupts have their own specific
46 * handlers).
47 */
48asmlinkage unsigned int do_IRQ(unsigned int irq, struct pt_regs *regs)
49{
50 irq_enter();
51
52 __do_IRQ(irq, regs);
53
54 irq_exit();
55
56 return 1;
57}
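/*
 * Callers are the platform low-level dispatchers: they read their
 * controller's pending/cause register, turn the bit position into a
 * Linux irq number and hand it to do_IRQ() -- see ll_msc_irq() and
 * ll_mv64340_irq() elsewhere in this series for two examples.
 */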
58
59/*
60 * Generic, controller-independent functions:
61 */
62
63int show_interrupts(struct seq_file *p, void *v)
64{
65 int i = *(loff_t *) v, j;
66 struct irqaction * action;
67 unsigned long flags;
68
69 if (i == 0) {
70 seq_printf(p, " ");
71 for (j=0; j<NR_CPUS; j++)
72 if (cpu_online(j))
73 seq_printf(p, "CPU%d ",j);
74 seq_putc(p, '\n');
75 }
76
77 if (i < NR_IRQS) {
78 spin_lock_irqsave(&irq_desc[i].lock, flags);
79 action = irq_desc[i].action;
80 if (!action)
81 goto skip;
82 seq_printf(p, "%3d: ",i);
83#ifndef CONFIG_SMP
84 seq_printf(p, "%10u ", kstat_irqs(i));
85#else
86 for (j = 0; j < NR_CPUS; j++)
87 if (cpu_online(j))
88 seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
89#endif
90 seq_printf(p, " %14s", irq_desc[i].handler->typename);
91 seq_printf(p, " %s", action->name);
92
93 for (action=action->next; action; action = action->next)
94 seq_printf(p, ", %s", action->name);
95
96 seq_putc(p, '\n');
97skip:
98 spin_unlock_irqrestore(&irq_desc[i].lock, flags);
99 } else if (i == NR_IRQS) {
100 seq_putc(p, '\n');
101 seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
102 }
103 return 0;
104}
105
106#ifdef CONFIG_KGDB
107extern void breakpoint(void);
108extern void set_debug_traps(void);
109
110static int kgdb_flag = 1;
111static int __init nokgdb(char *str)
112{
113 kgdb_flag = 0;
114 return 1;
115}
116__setup("nokgdb", nokgdb);
117#endif
118
119void __init init_IRQ(void)
120{
121 int i;
122
123 for (i = 0; i < NR_IRQS; i++) {
124 irq_desc[i].status = IRQ_DISABLED;
125 irq_desc[i].action = NULL;
126 irq_desc[i].depth = 1;
127 irq_desc[i].handler = &no_irq_type;
128 spin_lock_init(&irq_desc[i].lock);
129 }
130
131 arch_init_irq();
132
133#ifdef CONFIG_KGDB
134 if (kgdb_flag) {
135 printk("Wait for gdb client connection ...\n");
136 set_debug_traps();
137 breakpoint();
138 }
139#endif
140}
diff --git a/arch/mips/kernel/irq_cpu.c b/arch/mips/kernel/irq_cpu.c
new file mode 100644
index 000000000000..2b936cf1ef70
--- /dev/null
+++ b/arch/mips/kernel/irq_cpu.c
@@ -0,0 +1,118 @@
1/*
2 * Copyright 2001 MontaVista Software Inc.
3 * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
4 *
5 * Copyright (C) 2001 Ralf Baechle
6 *
7 * This file defines the irq handler for MIPS CPU interrupts.
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version.
13 */
14
15/*
16 * Almost all MIPS CPUs define 8 interrupt sources. They are typically
17 * level triggered (i.e., they cannot be cleared from the CPU; they must be
18 * cleared at the device). The first two are software interrupts which we
19 * don't really use or support. The last one is usually the CPU timer
20 * interrupt if a counter register is present or, for CPUs with an external
21 * FPU, by convention the FPU exception interrupt.
22 *
23 * Don't even think about using this on SMP. You have been warned.
24 *
25 * This file exports one global function:
26 * void mips_cpu_irq_init(int irq_base);
27 */
28#include <linux/init.h>
29#include <linux/interrupt.h>
30#include <linux/kernel.h>
31
32#include <asm/irq_cpu.h>
33#include <asm/mipsregs.h>
34#include <asm/system.h>
35
36static int mips_cpu_irq_base;
37
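/*
 * Each of the eight CPU interrupt lines corresponds to one bit in the CP0
 * Status (mask) and Cause (pending) registers, starting at bit 8.  The
 * shift below turns a Linux irq number back into that bit: with an
 * irq_base of 0, irq 0 is IP0 (bit 8) and irq 7 is IP7 (bit 15).
 */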
38static inline void unmask_mips_irq(unsigned int irq)
39{
40 clear_c0_cause(0x100 << (irq - mips_cpu_irq_base));
41 set_c0_status(0x100 << (irq - mips_cpu_irq_base));
42}
43
44static inline void mask_mips_irq(unsigned int irq)
45{
46 clear_c0_status(0x100 << (irq - mips_cpu_irq_base));
47}
48
49static inline void mips_cpu_irq_enable(unsigned int irq)
50{
51 unsigned long flags;
52
53 local_irq_save(flags);
54 unmask_mips_irq(irq);
55 local_irq_restore(flags);
56}
57
58static void mips_cpu_irq_disable(unsigned int irq)
59{
60 unsigned long flags;
61
62 local_irq_save(flags);
63 mask_mips_irq(irq);
64 local_irq_restore(flags);
65}
66
67static unsigned int mips_cpu_irq_startup(unsigned int irq)
68{
69 mips_cpu_irq_enable(irq);
70
71 return 0;
72}
73
74#define mips_cpu_irq_shutdown mips_cpu_irq_disable
75
76/*
77 * While we ack the interrupt, interrupts are disabled and thus we don't
78 * need to deal with concurrency issues. The same holds for mips_cpu_irq_end.
79 */
80static void mips_cpu_irq_ack(unsigned int irq)
81{
82 /* Only necessary for soft interrupts */
83 clear_c0_cause(0x100 << (irq - mips_cpu_irq_base));
84
85 mask_mips_irq(irq);
86}
87
88static void mips_cpu_irq_end(unsigned int irq)
89{
90 if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
91 unmask_mips_irq(irq);
92}
93
94static hw_irq_controller mips_cpu_irq_controller = {
95 "MIPS",
96 mips_cpu_irq_startup,
97 mips_cpu_irq_shutdown,
98 mips_cpu_irq_enable,
99 mips_cpu_irq_disable,
100 mips_cpu_irq_ack,
101 mips_cpu_irq_end,
102 NULL /* no affinity stuff for UP */
103};
104
105
106void __init mips_cpu_irq_init(int irq_base)
107{
108 int i;
109
110 for (i = irq_base; i < irq_base + 8; i++) {
111 irq_desc[i].status = IRQ_DISABLED;
112 irq_desc[i].action = NULL;
113 irq_desc[i].depth = 1;
114 irq_desc[i].handler = &mips_cpu_irq_controller;
115 }
116
117 mips_cpu_irq_base = irq_base;
118}
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
new file mode 100644
index 000000000000..993abc868e54
--- /dev/null
+++ b/arch/mips/kernel/linux32.c
@@ -0,0 +1,1469 @@
1/*
2 * Conversion between 32-bit and 64-bit native system calls.
3 *
4 * Copyright (C) 2000 Silicon Graphics, Inc.
5 * Written by Ulf Carlsson (ulfc@engr.sgi.com)
6 * sys32_execve from ia64/ia32 code, Feb 2000, Kanoj Sarcar (kanoj@sgi.com)
7 */
8#include <linux/config.h>
9#include <linux/compiler.h>
10#include <linux/mm.h>
11#include <linux/errno.h>
12#include <linux/file.h>
13#include <linux/smp_lock.h>
14#include <linux/highuid.h>
15#include <linux/dirent.h>
16#include <linux/resource.h>
17#include <linux/highmem.h>
18#include <linux/time.h>
19#include <linux/times.h>
20#include <linux/poll.h>
21#include <linux/slab.h>
22#include <linux/skbuff.h>
23#include <linux/filter.h>
24#include <linux/shm.h>
25#include <linux/sem.h>
26#include <linux/msg.h>
27#include <linux/icmpv6.h>
28#include <linux/syscalls.h>
29#include <linux/sysctl.h>
30#include <linux/utime.h>
31#include <linux/utsname.h>
32#include <linux/personality.h>
33#include <linux/timex.h>
34#include <linux/dnotify.h>
35#include <linux/module.h>
36#include <linux/binfmts.h>
37#include <linux/security.h>
38#include <linux/compat.h>
39#include <linux/vfs.h>
40
41#include <net/sock.h>
42#include <net/scm.h>
43
44#include <asm/ipc.h>
45#include <asm/sim.h>
46#include <asm/uaccess.h>
47#include <asm/mmu_context.h>
48#include <asm/mman.h>
49
50/* Use this to get at 32-bit user passed pointers. */
51/* The A() macro should be used in places where you e.g.
52 have some internal u32 variable and just want to get
53 rid of a compiler warning. AA() has to be used in
54 places where you want to convert a function argument
55 to a 32-bit pointer or when you e.g. access the pt_regs
56 structure and want to consider 32-bit registers only.
57 */
58#define A(__x) ((unsigned long)(__x))
59#define AA(__x) ((unsigned long)((int)__x))
60
61#ifdef __MIPSEB__
62#define merge_64(r1,r2) ((((r1) & 0xffffffffUL) << 32) + ((r2) & 0xffffffffUL))
63#endif
64#ifdef __MIPSEL__
65#define merge_64(r1,r2) ((((r2) & 0xffffffffUL) << 32) + ((r1) & 0xffffffffUL))
66#endif
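/*
 * merge_64() reassembles a 64-bit value from the two 32-bit registers an
 * o32 caller used to pass it; which register carries the high half depends
 * on endianness.  On a big-endian kernel, for example, merge_64(0x1, 0x2)
 * yields 0x0000000100000002.
 */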
67
68/*
69 * Revalidate the inode. This is required for proper NFS attribute caching.
70 */
71
72int cp_compat_stat(struct kstat *stat, struct compat_stat *statbuf)
73{
74 struct compat_stat tmp;
75
76 if (!new_valid_dev(stat->dev) || !new_valid_dev(stat->rdev))
77 return -EOVERFLOW;
78
79 memset(&tmp, 0, sizeof(tmp));
80 tmp.st_dev = new_encode_dev(stat->dev);
81 tmp.st_ino = stat->ino;
82 tmp.st_mode = stat->mode;
83 tmp.st_nlink = stat->nlink;
84 SET_UID(tmp.st_uid, stat->uid);
85 SET_GID(tmp.st_gid, stat->gid);
86 tmp.st_rdev = new_encode_dev(stat->rdev);
87 tmp.st_size = stat->size;
88 tmp.st_atime = stat->atime.tv_sec;
89 tmp.st_mtime = stat->mtime.tv_sec;
90 tmp.st_ctime = stat->ctime.tv_sec;
91#ifdef STAT_HAVE_NSEC
92 tmp.st_atime_nsec = stat->atime.tv_nsec;
93 tmp.st_mtime_nsec = stat->mtime.tv_nsec;
94 tmp.st_ctime_nsec = stat->ctime.tv_nsec;
95#endif
96 tmp.st_blocks = stat->blocks;
97 tmp.st_blksize = stat->blksize;
98 return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
99}
100
101asmlinkage unsigned long
102sys32_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
103 unsigned long flags, unsigned long fd, unsigned long pgoff)
104{
105 struct file * file = NULL;
106 unsigned long error;
107
108 error = -EINVAL;
109 if (!(flags & MAP_ANONYMOUS)) {
110 error = -EBADF;
111 file = fget(fd);
112 if (!file)
113 goto out;
114 }
115 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
116
117 down_write(&current->mm->mmap_sem);
118 error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
119 up_write(&current->mm->mmap_sem);
120 if (file)
121 fput(file);
122
123out:
124 return error;
125}
126
127
128asmlinkage int sys_truncate64(const char *path, unsigned int high,
129 unsigned int low)
130{
131 if ((int)high < 0)
132 return -EINVAL;
133 return sys_truncate(path, ((long) high << 32) | low);
134}
135
136asmlinkage int sys_ftruncate64(unsigned int fd, unsigned int high,
137 unsigned int low)
138{
139 if ((int)high < 0)
140 return -EINVAL;
141 return sys_ftruncate(fd, ((long) high << 32) | low);
142}
143
144/*
145 * sys_execve() executes a new program.
146 */
147asmlinkage int sys32_execve(nabi_no_regargs struct pt_regs regs)
148{
149 int error;
150 char * filename;
151
152 filename = getname(compat_ptr(regs.regs[4]));
153 error = PTR_ERR(filename);
154 if (IS_ERR(filename))
155 goto out;
156 error = compat_do_execve(filename, compat_ptr(regs.regs[5]),
157 compat_ptr(regs.regs[6]), &regs);
158 putname(filename);
159
160out:
161 return error;
162}
163
164struct dirent32 {
165 unsigned int d_ino;
166 unsigned int d_off;
167 unsigned short d_reclen;
168 char d_name[NAME_MAX + 1];
169};
170
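/*
 * Walk the buffer record by record, narrowing each 64-bit dirent into the
 * dirent32 layout at the same offset; d_reclen is used as the step for
 * both views and the name copy is truncated to what fits in the record.
 */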
171static void
172xlate_dirent(void *dirent64, void *dirent32, long n)
173{
174 long off;
175 struct dirent *dirp;
176 struct dirent32 *dirp32;
177
178 off = 0;
179 while (off < n) {
180 dirp = (struct dirent *)(dirent64 + off);
181 dirp32 = (struct dirent32 *)(dirent32 + off);
182 off += dirp->d_reclen;
183 dirp32->d_ino = dirp->d_ino;
184 dirp32->d_off = (unsigned int)dirp->d_off;
185 dirp32->d_reclen = dirp->d_reclen;
186 strncpy(dirp32->d_name, dirp->d_name, dirp->d_reclen - ((3 * 4) + 2));
187 }
188 return;
189}
190
191asmlinkage long
192sys32_getdents(unsigned int fd, void * dirent32, unsigned int count)
193{
194 long n;
195 void *dirent64;
196
197 dirent64 = (void *)((unsigned long)(dirent32 + (sizeof(long) - 1)) & ~(sizeof(long) - 1));
198 if ((n = sys_getdents(fd, dirent64, count - (dirent64 - dirent32))) < 0)
199 return(n);
200 xlate_dirent(dirent64, dirent32, n);
201 return(n);
202}
203
204asmlinkage int old_readdir(unsigned int fd, void * dirent, unsigned int count);
205
206asmlinkage int
207sys32_readdir(unsigned int fd, void * dirent32, unsigned int count)
208{
209 int n;
210 struct dirent dirent64;
211
212 if ((n = old_readdir(fd, &dirent64, count)) < 0)
213 return(n);
214 xlate_dirent(&dirent64, dirent32, dirent64.d_reclen);
215 return(n);
216}
217
218struct rusage32 {
219 struct compat_timeval ru_utime;
220 struct compat_timeval ru_stime;
221 int ru_maxrss;
222 int ru_ixrss;
223 int ru_idrss;
224 int ru_isrss;
225 int ru_minflt;
226 int ru_majflt;
227 int ru_nswap;
228 int ru_inblock;
229 int ru_oublock;
230 int ru_msgsnd;
231 int ru_msgrcv;
232 int ru_nsignals;
233 int ru_nvcsw;
234 int ru_nivcsw;
235};
236
237static int
238put_rusage (struct rusage32 *ru, struct rusage *r)
239{
240 int err;
241
242 if (!access_ok(VERIFY_WRITE, ru, sizeof *ru))
243 return -EFAULT;
244
245 err = __put_user (r->ru_utime.tv_sec, &ru->ru_utime.tv_sec);
246 err |= __put_user (r->ru_utime.tv_usec, &ru->ru_utime.tv_usec);
247 err |= __put_user (r->ru_stime.tv_sec, &ru->ru_stime.tv_sec);
248 err |= __put_user (r->ru_stime.tv_usec, &ru->ru_stime.tv_usec);
249 err |= __put_user (r->ru_maxrss, &ru->ru_maxrss);
250 err |= __put_user (r->ru_ixrss, &ru->ru_ixrss);
251 err |= __put_user (r->ru_idrss, &ru->ru_idrss);
252 err |= __put_user (r->ru_isrss, &ru->ru_isrss);
253 err |= __put_user (r->ru_minflt, &ru->ru_minflt);
254 err |= __put_user (r->ru_majflt, &ru->ru_majflt);
255 err |= __put_user (r->ru_nswap, &ru->ru_nswap);
256 err |= __put_user (r->ru_inblock, &ru->ru_inblock);
257 err |= __put_user (r->ru_oublock, &ru->ru_oublock);
258 err |= __put_user (r->ru_msgsnd, &ru->ru_msgsnd);
259 err |= __put_user (r->ru_msgrcv, &ru->ru_msgrcv);
260 err |= __put_user (r->ru_nsignals, &ru->ru_nsignals);
261 err |= __put_user (r->ru_nvcsw, &ru->ru_nvcsw);
262 err |= __put_user (r->ru_nivcsw, &ru->ru_nivcsw);
263
264 return err;
265}
266
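/*
 * The get_fs()/set_fs(KERNEL_DS) dance used below (and in several other
 * wrappers in this file) lets the native 64-bit syscall operate on a
 * kernel-stack structure; the result is then converted by hand into the
 * 32-bit user layout.
 */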
267asmlinkage int
268sys32_wait4(compat_pid_t pid, unsigned int * stat_addr, int options,
269 struct rusage32 * ru)
270{
271 if (!ru)
272 return sys_wait4(pid, stat_addr, options, NULL);
273 else {
274 struct rusage r;
275 int ret;
276 unsigned int status;
277 mm_segment_t old_fs = get_fs();
278
279 set_fs(KERNEL_DS);
280 ret = sys_wait4(pid, stat_addr ? &status : NULL, options, &r);
281 set_fs(old_fs);
282 if (put_rusage (ru, &r)) return -EFAULT;
283 if (stat_addr && put_user (status, stat_addr))
284 return -EFAULT;
285 return ret;
286 }
287}
288
289asmlinkage int
290sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr, int options)
291{
292 return sys32_wait4(pid, stat_addr, options, NULL);
293}
294
295struct sysinfo32 {
296 s32 uptime;
297 u32 loads[3];
298 u32 totalram;
299 u32 freeram;
300 u32 sharedram;
301 u32 bufferram;
302 u32 totalswap;
303 u32 freeswap;
304 u16 procs;
305 u32 totalhigh;
306 u32 freehigh;
307 u32 mem_unit;
308 char _f[8];
309};
310
311asmlinkage int sys32_sysinfo(struct sysinfo32 *info)
312{
313 struct sysinfo s;
314 int ret, err;
315 mm_segment_t old_fs = get_fs ();
316
317 set_fs (KERNEL_DS);
318 ret = sys_sysinfo(&s);
319 set_fs (old_fs);
320 err = put_user (s.uptime, &info->uptime);
321 err |= __put_user (s.loads[0], &info->loads[0]);
322 err |= __put_user (s.loads[1], &info->loads[1]);
323 err |= __put_user (s.loads[2], &info->loads[2]);
324 err |= __put_user (s.totalram, &info->totalram);
325 err |= __put_user (s.freeram, &info->freeram);
326 err |= __put_user (s.sharedram, &info->sharedram);
327 err |= __put_user (s.bufferram, &info->bufferram);
328 err |= __put_user (s.totalswap, &info->totalswap);
329 err |= __put_user (s.freeswap, &info->freeswap);
330 err |= __put_user (s.procs, &info->procs);
331 err |= __put_user (s.totalhigh, &info->totalhigh);
332 err |= __put_user (s.freehigh, &info->freehigh);
333 err |= __put_user (s.mem_unit, &info->mem_unit);
334 if (err)
335 return -EFAULT;
336 return ret;
337}
338
339#define RLIM_INFINITY32 0x7fffffff
340#define RESOURCE32(x) ((x > RLIM_INFINITY32) ? RLIM_INFINITY32 : x)
341
342struct rlimit32 {
343 int rlim_cur;
344 int rlim_max;
345};
346
347#ifdef __MIPSEB__
348asmlinkage long sys32_truncate64(const char * path, unsigned long __dummy,
349 int length_hi, int length_lo)
350#endif
351#ifdef __MIPSEL__
352asmlinkage long sys32_truncate64(const char * path, unsigned long __dummy,
353 int length_lo, int length_hi)
354#endif
355{
356 loff_t length;
357
358 length = ((unsigned long) length_hi << 32) | (unsigned int) length_lo;
359
360 return sys_truncate(path, length);
361}
362
363#ifdef __MIPSEB__
364asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long __dummy,
365 int length_hi, int length_lo)
366#endif
367#ifdef __MIPSEL__
368asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long __dummy,
369 int length_lo, int length_hi)
370#endif
371{
372 loff_t length;
373
374 length = ((unsigned long) length_hi << 32) | (unsigned int) length_lo;
375
376 return sys_ftruncate(fd, length);
377}
378
379static inline long
380get_tv32(struct timeval *o, struct compat_timeval *i)
381{
382 return (!access_ok(VERIFY_READ, i, sizeof(*i)) ||
383 (__get_user(o->tv_sec, &i->tv_sec) |
384 __get_user(o->tv_usec, &i->tv_usec)));
385}
386
387static inline long
388put_tv32(struct compat_timeval *o, struct timeval *i)
389{
390 return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) ||
391 (__put_user(i->tv_sec, &o->tv_sec) |
392 __put_user(i->tv_usec, &o->tv_usec)));
393}
394
395extern struct timezone sys_tz;
396
397asmlinkage int
398sys32_gettimeofday(struct compat_timeval *tv, struct timezone *tz)
399{
400 if (tv) {
401 struct timeval ktv;
402 do_gettimeofday(&ktv);
403 if (put_tv32(tv, &ktv))
404 return -EFAULT;
405 }
406 if (tz) {
407 if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
408 return -EFAULT;
409 }
410 return 0;
411}
412
413static inline long get_ts32(struct timespec *o, struct compat_timeval *i)
414{
415 long usec;
416
417 if (!access_ok(VERIFY_READ, i, sizeof(*i)))
418 return -EFAULT;
419 if (__get_user(o->tv_sec, &i->tv_sec))
420 return -EFAULT;
421 if (__get_user(usec, &i->tv_usec))
422 return -EFAULT;
423 o->tv_nsec = usec * 1000;
424 return 0;
425}
426
427asmlinkage int
428sys32_settimeofday(struct compat_timeval *tv, struct timezone *tz)
429{
430 struct timespec kts;
431 struct timezone ktz;
432
433 if (tv) {
434 if (get_ts32(&kts, tv))
435 return -EFAULT;
436 }
437 if (tz) {
438 if (copy_from_user(&ktz, tz, sizeof(ktz)))
439 return -EFAULT;
440 }
441
442 return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL);
443}
444
445asmlinkage int sys32_llseek(unsigned int fd, unsigned int offset_high,
446 unsigned int offset_low, loff_t * result,
447 unsigned int origin)
448{
449 return sys_llseek(fd, offset_high, offset_low, result, origin);
450}
451
452/* From the Single Unix Spec: pread & pwrite act like lseek to pos + op +
453 lseek back to original location. They fail just like lseek does on
454 non-seekable files. */
455
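/*
 * In the o32 ABI a 64-bit argument occupies an aligned register pair, so
 * the file offset arrives split across a4/a5 with a padding word in front
 * of it (the "unused" argument); merge_64() glues the halves back together.
 */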
456asmlinkage ssize_t sys32_pread(unsigned int fd, char * buf,
457 size_t count, u32 unused, u64 a4, u64 a5)
458{
459 ssize_t ret;
460 struct file * file;
461 ssize_t (*read)(struct file *, char *, size_t, loff_t *);
462 loff_t pos;
463
464 ret = -EBADF;
465 file = fget(fd);
466 if (!file)
467 goto bad_file;
468 if (!(file->f_mode & FMODE_READ))
469 goto out;
470 pos = merge_64(a4, a5);
471 ret = rw_verify_area(READ, file, &pos, count);
472 if (ret)
473 goto out;
474 ret = -EINVAL;
475 if (!file->f_op || !(read = file->f_op->read))
476 goto out;
477 if (pos < 0)
478 goto out;
479 ret = -ESPIPE;
480 if (!(file->f_mode & FMODE_PREAD))
481 goto out;
482 ret = read(file, buf, count, &pos);
483 if (ret > 0)
484 dnotify_parent(file->f_dentry, DN_ACCESS);
485out:
486 fput(file);
487bad_file:
488 return ret;
489}
490
491asmlinkage ssize_t sys32_pwrite(unsigned int fd, const char * buf,
492 size_t count, u32 unused, u64 a4, u64 a5)
493{
494 ssize_t ret;
495 struct file * file;
496 ssize_t (*write)(struct file *, const char *, size_t, loff_t *);
497 loff_t pos;
498
499 ret = -EBADF;
500 file = fget(fd);
501 if (!file)
502 goto bad_file;
503 if (!(file->f_mode & FMODE_WRITE))
504 goto out;
505 pos = merge_64(a4, a5);
506 ret = rw_verify_area(WRITE, file, &pos, count);
507 if (ret)
508 goto out;
509 ret = -EINVAL;
510 if (!file->f_op || !(write = file->f_op->write))
511 goto out;
512 if (pos < 0)
513 goto out;
514
515 ret = -ESPIPE;
516 if (!(file->f_mode & FMODE_PWRITE))
517 goto out;
518
519 ret = write(file, buf, count, &pos);
520 if (ret > 0)
521 dnotify_parent(file->f_dentry, DN_MODIFY);
522out:
523 fput(file);
524bad_file:
525 return ret;
526}
527
528asmlinkage int sys32_sched_rr_get_interval(compat_pid_t pid,
529 struct compat_timespec *interval)
530{
531 struct timespec t;
532 int ret;
533 mm_segment_t old_fs = get_fs ();
534
535 set_fs (KERNEL_DS);
536 ret = sys_sched_rr_get_interval(pid, &t);
537 set_fs (old_fs);
538 if (put_user (t.tv_sec, &interval->tv_sec) ||
539 __put_user (t.tv_nsec, &interval->tv_nsec))
540 return -EFAULT;
541 return ret;
542}
543
544struct msgbuf32 { s32 mtype; char mtext[1]; };
545
546struct ipc_perm32
547{
548 key_t key;
549 compat_uid_t uid;
550 compat_gid_t gid;
551 compat_uid_t cuid;
552 compat_gid_t cgid;
553 compat_mode_t mode;
554 unsigned short seq;
555};
556
557struct ipc64_perm32 {
558 key_t key;
559 compat_uid_t uid;
560 compat_gid_t gid;
561 compat_uid_t cuid;
562 compat_gid_t cgid;
563 compat_mode_t mode;
564 unsigned short seq;
565 unsigned short __pad1;
566 unsigned int __unused1;
567 unsigned int __unused2;
568};
569
570struct semid_ds32 {
571 struct ipc_perm32 sem_perm; /* permissions .. see ipc.h */
572 compat_time_t sem_otime; /* last semop time */
573 compat_time_t sem_ctime; /* last change time */
574 u32 sem_base; /* ptr to first semaphore in array */
575 u32 sem_pending; /* pending operations to be processed */
576 u32 sem_pending_last; /* last pending operation */
577 u32 undo; /* undo requests on this array */
578 unsigned short sem_nsems; /* no. of semaphores in array */
579};
580
581struct semid64_ds32 {
582 struct ipc64_perm32 sem_perm;
583 compat_time_t sem_otime;
584 compat_time_t sem_ctime;
585 unsigned int sem_nsems;
586 unsigned int __unused1;
587 unsigned int __unused2;
588};
589
590struct msqid_ds32
591{
592 struct ipc_perm32 msg_perm;
593 u32 msg_first;
594 u32 msg_last;
595 compat_time_t msg_stime;
596 compat_time_t msg_rtime;
597 compat_time_t msg_ctime;
598 u32 wwait;
599 u32 rwait;
600 unsigned short msg_cbytes;
601 unsigned short msg_qnum;
602 unsigned short msg_qbytes;
603 compat_ipc_pid_t msg_lspid;
604 compat_ipc_pid_t msg_lrpid;
605};
606
607struct msqid64_ds32 {
608 struct ipc64_perm32 msg_perm;
609 compat_time_t msg_stime;
610 unsigned int __unused1;
611 compat_time_t msg_rtime;
612 unsigned int __unused2;
613 compat_time_t msg_ctime;
614 unsigned int __unused3;
615 unsigned int msg_cbytes;
616 unsigned int msg_qnum;
617 unsigned int msg_qbytes;
618 compat_pid_t msg_lspid;
619 compat_pid_t msg_lrpid;
620 unsigned int __unused4;
621 unsigned int __unused5;
622};
623
624struct shmid_ds32 {
625 struct ipc_perm32 shm_perm;
626 int shm_segsz;
627 compat_time_t shm_atime;
628 compat_time_t shm_dtime;
629 compat_time_t shm_ctime;
630 compat_ipc_pid_t shm_cpid;
631 compat_ipc_pid_t shm_lpid;
632 unsigned short shm_nattch;
633};
634
635struct shmid64_ds32 {
636 struct ipc64_perm32 shm_perm;
637 compat_size_t shm_segsz;
638 compat_time_t shm_atime;
639 compat_time_t shm_dtime;
640 compat_time_t shm_ctime;
641 compat_pid_t shm_cpid;
642 compat_pid_t shm_lpid;
643 unsigned int shm_nattch;
644 unsigned int __unused1;
645 unsigned int __unused2;
646};
647
648struct ipc_kludge32 {
649 u32 msgp;
650 s32 msgtyp;
651};
652
653static int
654do_sys32_semctl(int first, int second, int third, void *uptr)
655{
656 union semun fourth;
657 u32 pad;
658 int err, err2;
659 struct semid64_ds s;
660 mm_segment_t old_fs;
661
662 if (!uptr)
663 return -EINVAL;
664 err = -EFAULT;
665 if (get_user (pad, (u32 *)uptr))
666 return err;
667 if ((third & ~IPC_64) == SETVAL)
668 fourth.val = (int)pad;
669 else
670 fourth.__pad = (void *)A(pad);
671 switch (third & ~IPC_64) {
672 case IPC_INFO:
673 case IPC_RMID:
674 case IPC_SET:
675 case SEM_INFO:
676 case GETVAL:
677 case GETPID:
678 case GETNCNT:
679 case GETZCNT:
680 case GETALL:
681 case SETVAL:
682 case SETALL:
683 err = sys_semctl (first, second, third, fourth);
684 break;
685
686 case IPC_STAT:
687 case SEM_STAT:
688 fourth.__pad = &s;
689 old_fs = get_fs();
690 set_fs(KERNEL_DS);
691 err = sys_semctl(first, second, third | IPC_64, fourth);
692 set_fs(old_fs);
693
694 if (third & IPC_64) {
695 struct semid64_ds32 *usp64 = (struct semid64_ds32 *) A(pad);
696
697 if (!access_ok(VERIFY_WRITE, usp64, sizeof(*usp64))) {
698 err = -EFAULT;
699 break;
700 }
701 err2 = __put_user(s.sem_perm.key, &usp64->sem_perm.key);
702 err2 |= __put_user(s.sem_perm.uid, &usp64->sem_perm.uid);
703 err2 |= __put_user(s.sem_perm.gid, &usp64->sem_perm.gid);
704 err2 |= __put_user(s.sem_perm.cuid, &usp64->sem_perm.cuid);
705 err2 |= __put_user(s.sem_perm.cgid, &usp64->sem_perm.cgid);
706 err2 |= __put_user(s.sem_perm.mode, &usp64->sem_perm.mode);
707 err2 |= __put_user(s.sem_perm.seq, &usp64->sem_perm.seq);
708 err2 |= __put_user(s.sem_otime, &usp64->sem_otime);
709 err2 |= __put_user(s.sem_ctime, &usp64->sem_ctime);
710 err2 |= __put_user(s.sem_nsems, &usp64->sem_nsems);
711 } else {
712 struct semid_ds32 *usp32 = (struct semid_ds32 *) A(pad);
713
714 if (!access_ok(VERIFY_WRITE, usp32, sizeof(*usp32))) {
715 err = -EFAULT;
716 break;
717 }
718 err2 = __put_user(s.sem_perm.key, &usp32->sem_perm.key);
719 err2 |= __put_user(s.sem_perm.uid, &usp32->sem_perm.uid);
720 err2 |= __put_user(s.sem_perm.gid, &usp32->sem_perm.gid);
721 err2 |= __put_user(s.sem_perm.cuid, &usp32->sem_perm.cuid);
722 err2 |= __put_user(s.sem_perm.cgid, &usp32->sem_perm.cgid);
723 err2 |= __put_user(s.sem_perm.mode, &usp32->sem_perm.mode);
724 err2 |= __put_user(s.sem_perm.seq, &usp32->sem_perm.seq);
725 err2 |= __put_user(s.sem_otime, &usp32->sem_otime);
726 err2 |= __put_user(s.sem_ctime, &usp32->sem_ctime);
727 err2 |= __put_user(s.sem_nsems, &usp32->sem_nsems);
728 }
729 if (err2)
730 err = -EFAULT;
731 break;
732
733 default:
734 err = -EINVAL;
735 break;
736 }
737
738 return err;
739}
740
741static int
742do_sys32_msgsnd (int first, int second, int third, void *uptr)
743{
744 struct msgbuf32 *up = (struct msgbuf32 *)uptr;
745 struct msgbuf *p;
746 mm_segment_t old_fs;
747 int err;
748
749 if (second < 0)
750 return -EINVAL;
751 p = kmalloc (second + sizeof (struct msgbuf)
752 + 4, GFP_USER);
753 if (!p)
754 return -ENOMEM;
755 err = get_user (p->mtype, &up->mtype);
756 if (err)
757 goto out;
758 err |= __copy_from_user (p->mtext, &up->mtext, second);
759 if (err)
760 goto out;
761 old_fs = get_fs ();
762 set_fs (KERNEL_DS);
763 err = sys_msgsnd (first, p, second, third);
764 set_fs (old_fs);
765out:
766 kfree (p);
767
768 return err;
769}
770
771static int
772do_sys32_msgrcv (int first, int second, int msgtyp, int third,
773 int version, void *uptr)
774{
775 struct msgbuf32 *up;
776 struct msgbuf *p;
777 mm_segment_t old_fs;
778 int err;
779
780 if (!version) {
781 struct ipc_kludge32 *uipck = (struct ipc_kludge32 *)uptr;
782 struct ipc_kludge32 ipck;
783
784 err = -EINVAL;
785 if (!uptr)
786 goto out;
787 err = -EFAULT;
788 if (copy_from_user (&ipck, uipck, sizeof (struct ipc_kludge32)))
789 goto out;
790 uptr = (void *)AA(ipck.msgp);
791 msgtyp = ipck.msgtyp;
792 }
793
794 if (second < 0)
795 return -EINVAL;
796 err = -ENOMEM;
797 p = kmalloc (second + sizeof (struct msgbuf) + 4, GFP_USER);
798 if (!p)
799 goto out;
800 old_fs = get_fs ();
801 set_fs (KERNEL_DS);
802 err = sys_msgrcv (first, p, second + 4, msgtyp, third);
803 set_fs (old_fs);
804 if (err < 0)
805 goto free_then_out;
806 up = (struct msgbuf32 *)uptr;
807 if (put_user (p->mtype, &up->mtype) ||
808 __copy_to_user (&up->mtext, p->mtext, err))
809 err = -EFAULT;
810free_then_out:
811 kfree (p);
812out:
813 return err;
814}
815
816static int
817do_sys32_msgctl (int first, int second, void *uptr)
818{
819 int err = -EINVAL, err2;
820 struct msqid64_ds m;
821 struct msqid_ds32 *up32 = (struct msqid_ds32 *)uptr;
822 struct msqid64_ds32 *up64 = (struct msqid64_ds32 *)uptr;
823 mm_segment_t old_fs;
824
825 switch (second & ~IPC_64) {
826 case IPC_INFO:
827 case IPC_RMID:
828 case MSG_INFO:
829 err = sys_msgctl (first, second, (struct msqid_ds *)uptr);
830 break;
831
832 case IPC_SET:
833 if (second & IPC_64) {
834 if (!access_ok(VERIFY_READ, up64, sizeof(*up64))) {
835 err = -EFAULT;
836 break;
837 }
838 err = __get_user(m.msg_perm.uid, &up64->msg_perm.uid);
839 err |= __get_user(m.msg_perm.gid, &up64->msg_perm.gid);
840 err |= __get_user(m.msg_perm.mode, &up64->msg_perm.mode);
841 err |= __get_user(m.msg_qbytes, &up64->msg_qbytes);
842 } else {
843 if (!access_ok(VERIFY_READ, up32, sizeof(*up32))) {
844 err = -EFAULT;
845 break;
846 }
847 err = __get_user(m.msg_perm.uid, &up32->msg_perm.uid);
848 err |= __get_user(m.msg_perm.gid, &up32->msg_perm.gid);
849 err |= __get_user(m.msg_perm.mode, &up32->msg_perm.mode);
850 err |= __get_user(m.msg_qbytes, &up32->msg_qbytes);
851 }
852 if (err)
853 break;
854 old_fs = get_fs();
855 set_fs(KERNEL_DS);
856 err = sys_msgctl(first, second | IPC_64, (struct msqid_ds *)&m);
857 set_fs(old_fs);
858 break;
859
860 case IPC_STAT:
861 case MSG_STAT:
862 old_fs = get_fs();
863 set_fs(KERNEL_DS);
864 err = sys_msgctl(first, second | IPC_64, (struct msqid_ds *)&m);
865 set_fs(old_fs);
866 if (second & IPC_64) {
867 if (!access_ok(VERIFY_WRITE, up64, sizeof(*up64))) {
868 err = -EFAULT;
869 break;
870 }
871 err2 = __put_user(m.msg_perm.key, &up64->msg_perm.key);
872 err2 |= __put_user(m.msg_perm.uid, &up64->msg_perm.uid);
873 err2 |= __put_user(m.msg_perm.gid, &up64->msg_perm.gid);
874 err2 |= __put_user(m.msg_perm.cuid, &up64->msg_perm.cuid);
875 err2 |= __put_user(m.msg_perm.cgid, &up64->msg_perm.cgid);
876 err2 |= __put_user(m.msg_perm.mode, &up64->msg_perm.mode);
877 err2 |= __put_user(m.msg_perm.seq, &up64->msg_perm.seq);
878 err2 |= __put_user(m.msg_stime, &up64->msg_stime);
879 err2 |= __put_user(m.msg_rtime, &up64->msg_rtime);
880 err2 |= __put_user(m.msg_ctime, &up64->msg_ctime);
881 err2 |= __put_user(m.msg_cbytes, &up64->msg_cbytes);
882 err2 |= __put_user(m.msg_qnum, &up64->msg_qnum);
883 err2 |= __put_user(m.msg_qbytes, &up64->msg_qbytes);
884 err2 |= __put_user(m.msg_lspid, &up64->msg_lspid);
885 err2 |= __put_user(m.msg_lrpid, &up64->msg_lrpid);
886 if (err2)
887 err = -EFAULT;
888 } else {
889 if (!access_ok(VERIFY_WRITE, up32, sizeof(*up32))) {
890 err = -EFAULT;
891 break;
892 }
893 err2 = __put_user(m.msg_perm.key, &up32->msg_perm.key);
894 err2 |= __put_user(m.msg_perm.uid, &up32->msg_perm.uid);
895 err2 |= __put_user(m.msg_perm.gid, &up32->msg_perm.gid);
896 err2 |= __put_user(m.msg_perm.cuid, &up32->msg_perm.cuid);
897 err2 |= __put_user(m.msg_perm.cgid, &up32->msg_perm.cgid);
898 err2 |= __put_user(m.msg_perm.mode, &up32->msg_perm.mode);
899 err2 |= __put_user(m.msg_perm.seq, &up32->msg_perm.seq);
900 err2 |= __put_user(m.msg_stime, &up32->msg_stime);
901 err2 |= __put_user(m.msg_rtime, &up32->msg_rtime);
902 err2 |= __put_user(m.msg_ctime, &up32->msg_ctime);
903 err2 |= __put_user(m.msg_cbytes, &up32->msg_cbytes);
904 err2 |= __put_user(m.msg_qnum, &up32->msg_qnum);
905 err2 |= __put_user(m.msg_qbytes, &up32->msg_qbytes);
906 err2 |= __put_user(m.msg_lspid, &up32->msg_lspid);
907 err2 |= __put_user(m.msg_lrpid, &up32->msg_lrpid);
908 if (err2)
909 err = -EFAULT;
910 }
911 break;
912 }
913
914 return err;
915}
916
917static int
918do_sys32_shmat (int first, int second, int third, int version, void *uptr)
919{
920 unsigned long raddr;
921 u32 *uaddr = (u32 *)A((u32)third);
922 int err = -EINVAL;
923
924 if (version == 1)
925 return err;
926 err = do_shmat (first, uptr, second, &raddr);
927 if (err)
928 return err;
929 err = put_user (raddr, uaddr);
930 return err;
931}
932
933struct shm_info32 {
934 int used_ids;
935 u32 shm_tot, shm_rss, shm_swp;
936 u32 swap_attempts, swap_successes;
937};
938
939static int
940do_sys32_shmctl (int first, int second, void *uptr)
941{
942 struct shmid64_ds32 *up64 = (struct shmid64_ds32 *)uptr;
943 struct shmid_ds32 *up32 = (struct shmid_ds32 *)uptr;
944 struct shm_info32 *uip = (struct shm_info32 *)uptr;
945 int err = -EFAULT, err2;
946 struct shmid64_ds s64;
947 mm_segment_t old_fs;
948 struct shm_info si;
949 struct shmid_ds s;
950
951 switch (second & ~IPC_64) {
952 case IPC_INFO:
953 second = IPC_INFO; /* So that we don't have to translate it */
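  /* fall through */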
954 case IPC_RMID:
955 case SHM_LOCK:
956 case SHM_UNLOCK:
957 err = sys_shmctl(first, second, (struct shmid_ds *)uptr);
958 break;
959 case IPC_SET:
960 if (second & IPC_64) {
961 err = get_user(s.shm_perm.uid, &up64->shm_perm.uid);
962 err |= get_user(s.shm_perm.gid, &up64->shm_perm.gid);
963 err |= get_user(s.shm_perm.mode, &up64->shm_perm.mode);
964 } else {
965 err = get_user(s.shm_perm.uid, &up32->shm_perm.uid);
966 err |= get_user(s.shm_perm.gid, &up32->shm_perm.gid);
967 err |= get_user(s.shm_perm.mode, &up32->shm_perm.mode);
968 }
969 if (err)
970 break;
971 old_fs = get_fs();
972 set_fs(KERNEL_DS);
973 err = sys_shmctl(first, second & ~IPC_64, &s);
974 set_fs(old_fs);
975 break;
976
977 case IPC_STAT:
978 case SHM_STAT:
979 old_fs = get_fs();
980 set_fs(KERNEL_DS);
981 err = sys_shmctl(first, second | IPC_64, (void *) &s64);
982 set_fs(old_fs);
983 if (err < 0)
984 break;
985 if (second & IPC_64) {
986 if (!access_ok(VERIFY_WRITE, up64, sizeof(*up64))) {
987 err = -EFAULT;
988 break;
989 }
990 err2 = __put_user(s64.shm_perm.key, &up64->shm_perm.key);
991 err2 |= __put_user(s64.shm_perm.uid, &up64->shm_perm.uid);
992 err2 |= __put_user(s64.shm_perm.gid, &up64->shm_perm.gid);
993 err2 |= __put_user(s64.shm_perm.cuid, &up64->shm_perm.cuid);
994 err2 |= __put_user(s64.shm_perm.cgid, &up64->shm_perm.cgid);
995 err2 |= __put_user(s64.shm_perm.mode, &up64->shm_perm.mode);
996 err2 |= __put_user(s64.shm_perm.seq, &up64->shm_perm.seq);
997 err2 |= __put_user(s64.shm_atime, &up64->shm_atime);
998 err2 |= __put_user(s64.shm_dtime, &up64->shm_dtime);
999 err2 |= __put_user(s64.shm_ctime, &up64->shm_ctime);
1000 err2 |= __put_user(s64.shm_segsz, &up64->shm_segsz);
1001 err2 |= __put_user(s64.shm_nattch, &up64->shm_nattch);
1002 err2 |= __put_user(s64.shm_cpid, &up64->shm_cpid);
1003 err2 |= __put_user(s64.shm_lpid, &up64->shm_lpid);
1004 } else {
1005 if (!access_ok(VERIFY_WRITE, up32, sizeof(*up32))) {
1006 err = -EFAULT;
1007 break;
1008 }
1009 err2 = __put_user(s64.shm_perm.key, &up32->shm_perm.key);
1010 err2 |= __put_user(s64.shm_perm.uid, &up32->shm_perm.uid);
1011 err2 |= __put_user(s64.shm_perm.gid, &up32->shm_perm.gid);
1012 err2 |= __put_user(s64.shm_perm.cuid, &up32->shm_perm.cuid);
1013 err2 |= __put_user(s64.shm_perm.cgid, &up32->shm_perm.cgid);
1014 err2 |= __put_user(s64.shm_perm.mode, &up32->shm_perm.mode);
1015 err2 |= __put_user(s64.shm_perm.seq, &up32->shm_perm.seq);
1016 err2 |= __put_user(s64.shm_atime, &up32->shm_atime);
1017 err2 |= __put_user(s64.shm_dtime, &up32->shm_dtime);
1018 err2 |= __put_user(s64.shm_ctime, &up32->shm_ctime);
1019 err2 |= __put_user(s64.shm_segsz, &up32->shm_segsz);
1020 err2 |= __put_user(s64.shm_nattch, &up32->shm_nattch);
1021 err2 |= __put_user(s64.shm_cpid, &up32->shm_cpid);
1022 err2 |= __put_user(s64.shm_lpid, &up32->shm_lpid);
1023 }
1024 if (err2)
1025 err = -EFAULT;
1026 break;
1027
1028 case SHM_INFO:
1029 old_fs = get_fs();
1030 set_fs(KERNEL_DS);
1031 err = sys_shmctl(first, second, (void *)&si);
1032 set_fs(old_fs);
1033 if (err < 0)
1034 break;
1035 err2 = put_user(si.used_ids, &uip->used_ids);
1036 err2 |= __put_user(si.shm_tot, &uip->shm_tot);
1037 err2 |= __put_user(si.shm_rss, &uip->shm_rss);
1038 err2 |= __put_user(si.shm_swp, &uip->shm_swp);
1039 err2 |= __put_user(si.swap_attempts, &uip->swap_attempts);
1040 err2 |= __put_user (si.swap_successes, &uip->swap_successes);
1041 if (err2)
1042 err = -EFAULT;
1043 break;
1044
1045 default:
1046 err = -EINVAL;
1047 break;
1048 }
1049
1050 return err;
1051}
1052
1053static int sys32_semtimedop(int semid, struct sembuf *tsems, int nsems,
1054 const struct compat_timespec *timeout32)
1055{
1056 struct compat_timespec t32;
1057 struct timespec *t64 = compat_alloc_user_space(sizeof(*t64));
1058
1059 if (copy_from_user(&t32, timeout32, sizeof(t32)))
1060 return -EFAULT;
1061
1062 if (put_user(t32.tv_sec, &t64->tv_sec) ||
1063 put_user(t32.tv_nsec, &t64->tv_nsec))
1064 return -EFAULT;
1065
1066 return sys_semtimedop(semid, tsems, nsems, t64);
1067}
1068
1069asmlinkage long
1070sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth)
1071{
1072 int version, err;
1073
1074 version = call >> 16; /* hack for backward compatibility */
1075 call &= 0xffff;
1076
1077 switch (call) {
1078 case SEMOP:
1079 /* struct sembuf is the same on 32 and 64bit :)) */
1080 err = sys_semtimedop (first, (struct sembuf *)AA(ptr), second,
1081 NULL);
1082 break;
1083 case SEMTIMEDOP:
1084 err = sys32_semtimedop (first, (struct sembuf *)AA(ptr), second,
1085 (const struct compat_timespec __user *)AA(fifth));
1086 break;
1087 case SEMGET:
1088 err = sys_semget (first, second, third);
1089 break;
1090 case SEMCTL:
1091 err = do_sys32_semctl (first, second, third,
1092 (void *)AA(ptr));
1093 break;
1094
1095 case MSGSND:
1096 err = do_sys32_msgsnd (first, second, third,
1097 (void *)AA(ptr));
1098 break;
1099 case MSGRCV:
1100 err = do_sys32_msgrcv (first, second, fifth, third,
1101 version, (void *)AA(ptr));
1102 break;
1103 case MSGGET:
1104 err = sys_msgget ((key_t) first, second);
1105 break;
1106 case MSGCTL:
1107 err = do_sys32_msgctl (first, second, (void *)AA(ptr));
1108 break;
1109
1110 case SHMAT:
1111 err = do_sys32_shmat (first, second, third,
1112 version, (void *)AA(ptr));
1113 break;
1114 case SHMDT:
1115 err = sys_shmdt ((char *)A(ptr));
1116 break;
1117 case SHMGET:
1118 err = sys_shmget (first, (unsigned)second, third);
1119 break;
1120 case SHMCTL:
1121 err = do_sys32_shmctl (first, second, (void *)AA(ptr));
1122 break;
1123 default:
1124 err = -EINVAL;
1125 break;
1126 }
1127
1128 return err;
1129}
1130
1131asmlinkage long sys32_shmat(int shmid, char __user *shmaddr,
1132 int shmflg, int32_t *addr)
1133{
1134 unsigned long raddr;
1135 int err;
1136
1137 err = do_shmat(shmid, shmaddr, shmflg, &raddr);
1138 if (err)
1139 return err;
1140
1141 return put_user(raddr, addr);
1142}
1143
1144struct sysctl_args32
1145{
1146 compat_caddr_t name;
1147 int nlen;
1148 compat_caddr_t oldval;
1149 compat_caddr_t oldlenp;
1150 compat_caddr_t newval;
1151 compat_size_t newlen;
1152 unsigned int __unused[4];
1153};
1154
1155#ifdef CONFIG_SYSCTL
1156
1157asmlinkage long sys32_sysctl(struct sysctl_args32 *args)
1158{
1159 struct sysctl_args32 tmp;
1160 int error;
1161 size_t oldlen, *oldlenp = NULL;
1162 unsigned long addr = (((long)&args->__unused[0]) + 7) & ~7;
1163
1164 if (copy_from_user(&tmp, args, sizeof(tmp)))
1165 return -EFAULT;
1166
1167 if (tmp.oldval && tmp.oldlenp) {
1168 /* Duh, this is ugly and might not work if sysctl_args
1169 is in read-only memory, but do_sysctl indirectly does
1170 a lot of uaccess in both directions and we'd have to
1171 basically copy the whole sysctl.c here, and
1172 glibc's __sysctl uses rw memory for the structure
1173 anyway. */
1174 if (get_user(oldlen, (u32 *)A(tmp.oldlenp)) ||
1175 put_user(oldlen, (size_t *)addr))
1176 return -EFAULT;
1177 oldlenp = (size_t *)addr;
1178 }
1179
1180 lock_kernel();
1181 error = do_sysctl((int *)A(tmp.name), tmp.nlen, (void *)A(tmp.oldval),
1182 oldlenp, (void *)A(tmp.newval), tmp.newlen);
1183 unlock_kernel();
1184 if (oldlenp) {
1185 if (!error) {
1186 if (get_user(oldlen, (size_t *)addr) ||
1187 put_user(oldlen, (u32 *)A(tmp.oldlenp)))
1188 error = -EFAULT;
1189 }
1190 copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused));
1191 }
1192 return error;
1193}
1194
1195#endif /* CONFIG_SYSCTL */
1196
1197asmlinkage long sys32_newuname(struct new_utsname * name)
1198{
1199 int ret = 0;
1200
1201 down_read(&uts_sem);
1202 if (copy_to_user(name,&system_utsname,sizeof *name))
1203 ret = -EFAULT;
1204 up_read(&uts_sem);
1205
1206 if (current->personality == PER_LINUX32 && !ret)
1207 if (copy_to_user(name->machine, "mips\0\0\0", 8))
1208 ret = -EFAULT;
1209
1210 return ret;
1211}
1212
1213asmlinkage int sys32_personality(unsigned long personality)
1214{
1215 int ret;
1216 if (current->personality == PER_LINUX32 && personality == PER_LINUX)
1217 personality = PER_LINUX32;
1218 ret = sys_personality(personality);
1219 if (ret == PER_LINUX32)
1220 ret = PER_LINUX;
1221 return ret;
1222}
1223
1224/* ustat compatibility */
1225struct ustat32 {
1226 compat_daddr_t f_tfree;
1227 compat_ino_t f_tinode;
1228 char f_fname[6];
1229 char f_fpack[6];
1230};
1231
1232extern asmlinkage long sys_ustat(dev_t dev, struct ustat * ubuf);
1233
1234asmlinkage int sys32_ustat(dev_t dev, struct ustat32 * ubuf32)
1235{
1236 int err;
1237 struct ustat tmp;
1238 struct ustat32 tmp32;
1239 mm_segment_t old_fs = get_fs();
1240
1241 set_fs(KERNEL_DS);
1242 err = sys_ustat(dev, &tmp);
1243 set_fs (old_fs);
1244
1245 if (err)
1246 goto out;
1247
1248 memset(&tmp32,0,sizeof(struct ustat32));
1249 tmp32.f_tfree = tmp.f_tfree;
1250 tmp32.f_tinode = tmp.f_tinode;
1251
1252 err = copy_to_user(ubuf32,&tmp32,sizeof(struct ustat32)) ? -EFAULT : 0;
1253
1254out:
1255 return err;
1256}
1257
1258/* Handle adjtimex compatibility. */
1259
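/*
 * 32-bit layout of struct timex; the anonymous :32 bitfields at the end
 * mirror the reserved expansion padding of the native structure.
 */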
1260struct timex32 {
1261 u32 modes;
1262 s32 offset, freq, maxerror, esterror;
1263 s32 status, constant, precision, tolerance;
1264 struct compat_timeval time;
1265 s32 tick;
1266 s32 ppsfreq, jitter, shift, stabil;
1267 s32 jitcnt, calcnt, errcnt, stbcnt;
1268 s32 :32; s32 :32; s32 :32; s32 :32;
1269 s32 :32; s32 :32; s32 :32; s32 :32;
1270 s32 :32; s32 :32; s32 :32; s32 :32;
1271};
1272
1273extern int do_adjtimex(struct timex *);
1274
1275asmlinkage int sys32_adjtimex(struct timex32 *utp)
1276{
1277 struct timex txc;
1278 int ret;
1279
1280 memset(&txc, 0, sizeof(struct timex));
1281
1282 if (get_user(txc.modes, &utp->modes) ||
1283 __get_user(txc.offset, &utp->offset) ||
1284 __get_user(txc.freq, &utp->freq) ||
1285 __get_user(txc.maxerror, &utp->maxerror) ||
1286 __get_user(txc.esterror, &utp->esterror) ||
1287 __get_user(txc.status, &utp->status) ||
1288 __get_user(txc.constant, &utp->constant) ||
1289 __get_user(txc.precision, &utp->precision) ||
1290 __get_user(txc.tolerance, &utp->tolerance) ||
1291 __get_user(txc.time.tv_sec, &utp->time.tv_sec) ||
1292 __get_user(txc.time.tv_usec, &utp->time.tv_usec) ||
1293 __get_user(txc.tick, &utp->tick) ||
1294 __get_user(txc.ppsfreq, &utp->ppsfreq) ||
1295 __get_user(txc.jitter, &utp->jitter) ||
1296 __get_user(txc.shift, &utp->shift) ||
1297 __get_user(txc.stabil, &utp->stabil) ||
1298 __get_user(txc.jitcnt, &utp->jitcnt) ||
1299 __get_user(txc.calcnt, &utp->calcnt) ||
1300 __get_user(txc.errcnt, &utp->errcnt) ||
1301 __get_user(txc.stbcnt, &utp->stbcnt))
1302 return -EFAULT;
1303
1304 ret = do_adjtimex(&txc);
1305
1306 if (put_user(txc.modes, &utp->modes) ||
1307 __put_user(txc.offset, &utp->offset) ||
1308 __put_user(txc.freq, &utp->freq) ||
1309 __put_user(txc.maxerror, &utp->maxerror) ||
1310 __put_user(txc.esterror, &utp->esterror) ||
1311 __put_user(txc.status, &utp->status) ||
1312 __put_user(txc.constant, &utp->constant) ||
1313 __put_user(txc.precision, &utp->precision) ||
1314 __put_user(txc.tolerance, &utp->tolerance) ||
1315 __put_user(txc.time.tv_sec, &utp->time.tv_sec) ||
1316 __put_user(txc.time.tv_usec, &utp->time.tv_usec) ||
1317 __put_user(txc.tick, &utp->tick) ||
1318 __put_user(txc.ppsfreq, &utp->ppsfreq) ||
1319 __put_user(txc.jitter, &utp->jitter) ||
1320 __put_user(txc.shift, &utp->shift) ||
1321 __put_user(txc.stabil, &utp->stabil) ||
1322 __put_user(txc.jitcnt, &utp->jitcnt) ||
1323 __put_user(txc.calcnt, &utp->calcnt) ||
1324 __put_user(txc.errcnt, &utp->errcnt) ||
1325 __put_user(txc.stbcnt, &utp->stbcnt))
1326 ret = -EFAULT;
1327
1328 return ret;
1329}
1330
1331asmlinkage int sys32_sendfile(int out_fd, int in_fd, compat_off_t *offset,
1332 s32 count)
1333{
1334 mm_segment_t old_fs = get_fs();
1335 int ret;
1336 off_t of;
1337
1338 if (offset && get_user(of, offset))
1339 return -EFAULT;
1340
1341 set_fs(KERNEL_DS);
1342 ret = sys_sendfile(out_fd, in_fd, offset ? &of : NULL, count);
1343 set_fs(old_fs);
1344
1345 if (offset && put_user(of, offset))
1346 return -EFAULT;
1347
1348 return ret;
1349}
1350
1351asmlinkage ssize_t sys32_readahead(int fd, u32 pad0, u64 a2, u64 a3,
1352 size_t count)
1353{
1354 return sys_readahead(fd, merge_64(a2, a3), count);
1355}
1356
1357/* Argument list sizes for sys_socketcall */
1358#define AL(x) ((x) * sizeof(unsigned int))
1359static unsigned char socketcall_nargs[18]={AL(0),AL(3),AL(3),AL(3),AL(2),AL(3),
1360 AL(3),AL(3),AL(4),AL(4),AL(4),AL(6),
1361 AL(6),AL(2),AL(5),AL(5),AL(3),AL(3)};
1362#undef AL
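/*
 * socketcall_nargs[] gives, per multiplexed call, how many bytes of 32-bit
 * arguments to pull in: e.g. SYS_SENDTO takes six words, so AL(6) == 24
 * bytes are copied from the user array before dispatching.
 */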
1363
1364/*
1365 * System call vectors.
1366 *
1367 * Argument checking cleaned up. Saved 20% in size.
1368 * This function doesn't need to set the kernel lock because
1369 * it is set by the callees.
1370 */
1371
1372asmlinkage long sys32_socketcall(int call, unsigned int *args32)
1373{
1374 unsigned int a[6];
1375 unsigned int a0,a1;
1376 int err;
1377
1378 extern asmlinkage long sys_socket(int family, int type, int protocol);
1379 extern asmlinkage long sys_bind(int fd, struct sockaddr __user *umyaddr, int addrlen);
1380 extern asmlinkage long sys_connect(int fd, struct sockaddr __user *uservaddr, int addrlen);
1381 extern asmlinkage long sys_listen(int fd, int backlog);
1382 extern asmlinkage long sys_accept(int fd, struct sockaddr __user *upeer_sockaddr, int __user *upeer_addrlen);
1383 extern asmlinkage long sys_getsockname(int fd, struct sockaddr __user *usockaddr, int __user *usockaddr_len);
1384 extern asmlinkage long sys_getpeername(int fd, struct sockaddr __user *usockaddr, int __user *usockaddr_len);
1385 extern asmlinkage long sys_socketpair(int family, int type, int protocol, int __user *usockvec);
1386 extern asmlinkage long sys_send(int fd, void __user * buff, size_t len, unsigned flags);
1387 extern asmlinkage long sys_sendto(int fd, void __user * buff, size_t len, unsigned flags,
1388 struct sockaddr __user *addr, int addr_len);
1389 extern asmlinkage long sys_recv(int fd, void __user * ubuf, size_t size, unsigned flags);
1390 extern asmlinkage long sys_recvfrom(int fd, void __user * ubuf, size_t size, unsigned flags,
1391 struct sockaddr __user *addr, int __user *addr_len);
1392 extern asmlinkage long sys_shutdown(int fd, int how);
1393 extern asmlinkage long sys_setsockopt(int fd, int level, int optname, char __user *optval, int optlen);
1394 extern asmlinkage long sys_getsockopt(int fd, int level, int optname, char __user *optval, int *optlen);
1395 extern asmlinkage long sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags);
1396 extern asmlinkage long sys_recvmsg(int fd, struct msghdr __user *msg, unsigned int flags);
1397
1398
1399 if (call < 1 || call > SYS_RECVMSG)
1400 return -EINVAL;
1401
1402 /* copy_from_user should be SMP safe. */
1403 if (copy_from_user(a, args32, socketcall_nargs[call]))
1404 return -EFAULT;
1405
1406 a0=a[0];
1407 a1=a[1];
1408
1409 switch(call)
1410 {
1411 case SYS_SOCKET:
1412 err = sys_socket(a0,a1,a[2]);
1413 break;
1414 case SYS_BIND:
1415 err = sys_bind(a0,(struct sockaddr __user *)A(a1), a[2]);
1416 break;
1417 case SYS_CONNECT:
1418 err = sys_connect(a0, (struct sockaddr __user *)A(a1), a[2]);
1419 break;
1420 case SYS_LISTEN:
1421 err = sys_listen(a0,a1);
1422 break;
1423 case SYS_ACCEPT:
1424 err = sys_accept(a0,(struct sockaddr __user *)A(a1), (int __user *)A(a[2]));
1425 break;
1426 case SYS_GETSOCKNAME:
1427 err = sys_getsockname(a0,(struct sockaddr __user *)A(a1), (int __user *)A(a[2]));
1428 break;
1429 case SYS_GETPEERNAME:
1430 err = sys_getpeername(a0, (struct sockaddr __user *)A(a1), (int __user *)A(a[2]));
1431 break;
1432 case SYS_SOCKETPAIR:
1433 err = sys_socketpair(a0,a1, a[2], (int __user *)A(a[3]));
1434 break;
1435 case SYS_SEND:
1436 err = sys_send(a0, (void __user *)A(a1), a[2], a[3]);
1437 break;
1438 case SYS_SENDTO:
1439 err = sys_sendto(a0,(void __user *)A(a1), a[2], a[3],
1440 (struct sockaddr __user *)A(a[4]), a[5]);
1441 break;
1442 case SYS_RECV:
1443 err = sys_recv(a0, (void __user *)A(a1), a[2], a[3]);
1444 break;
1445 case SYS_RECVFROM:
1446 err = sys_recvfrom(a0, (void __user *)A(a1), a[2], a[3],
1447 (struct sockaddr __user *)A(a[4]), (int __user *)A(a[5]));
1448 break;
1449 case SYS_SHUTDOWN:
1450 err = sys_shutdown(a0,a1);
1451 break;
1452 case SYS_SETSOCKOPT:
1453 err = sys_setsockopt(a0, a1, a[2], (char __user *)A(a[3]), a[4]);
1454 break;
1455 case SYS_GETSOCKOPT:
1456 err = sys_getsockopt(a0, a1, a[2], (char __user *)A(a[3]), (int __user *)A(a[4]));
1457 break;
1458 case SYS_SENDMSG:
1459 err = sys_sendmsg(a0, (struct msghdr __user *) A(a1), a[2]);
1460 break;
1461 case SYS_RECVMSG:
1462 err = sys_recvmsg(a0, (struct msghdr __user *) A(a1), a[2]);
1463 break;
1464 default:
1465 err = -EINVAL;
1466 break;
1467 }
1468 return err;
1469}
diff --git a/arch/mips/kernel/mips_ksyms.c b/arch/mips/kernel/mips_ksyms.c
new file mode 100644
index 000000000000..eed29fc9dc82
--- /dev/null
+++ b/arch/mips/kernel/mips_ksyms.c
@@ -0,0 +1,67 @@
1/*
2 * Export MIPS-specific functions needed for loadable modules.
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 1996, 97, 98, 99, 2000, 01, 03, 04, 05 by Ralf Baechle
9 * Copyright (C) 1999, 2000, 01 Silicon Graphics, Inc.
10 */
11#include <linux/config.h>
12#include <linux/interrupt.h>
13#include <linux/module.h>
14#include <asm/checksum.h>
15#include <asm/pgtable.h>
16#include <asm/uaccess.h>
17
18extern void *__bzero(void *__s, size_t __count);
19extern long __strncpy_from_user_nocheck_asm(char *__to,
20 const char *__from, long __len);
21extern long __strncpy_from_user_asm(char *__to, const char *__from,
22 long __len);
23extern long __strlen_user_nocheck_asm(const char *s);
24extern long __strlen_user_asm(const char *s);
25extern long __strnlen_user_nocheck_asm(const char *s);
26extern long __strnlen_user_asm(const char *s);
27
28/*
29 * String functions
30 */
31EXPORT_SYMBOL(memchr);
32EXPORT_SYMBOL(memcmp);
33EXPORT_SYMBOL(memset);
34EXPORT_SYMBOL(memcpy);
35EXPORT_SYMBOL(memmove);
36EXPORT_SYMBOL(strcat);
37EXPORT_SYMBOL(strchr);
38#ifdef CONFIG_MIPS64
39EXPORT_SYMBOL(strncmp);
40#endif
41EXPORT_SYMBOL(strlen);
42EXPORT_SYMBOL(strpbrk);
43EXPORT_SYMBOL(strncat);
44EXPORT_SYMBOL(strnlen);
45EXPORT_SYMBOL(strrchr);
46EXPORT_SYMBOL(strstr);
47
48EXPORT_SYMBOL(kernel_thread);
49
50/*
51 * Userspace access stuff.
52 */
53EXPORT_SYMBOL(__copy_user);
54EXPORT_SYMBOL(__bzero);
55EXPORT_SYMBOL(__strncpy_from_user_nocheck_asm);
56EXPORT_SYMBOL(__strncpy_from_user_asm);
57EXPORT_SYMBOL(__strlen_user_nocheck_asm);
58EXPORT_SYMBOL(__strlen_user_asm);
59EXPORT_SYMBOL(__strnlen_user_nocheck_asm);
60EXPORT_SYMBOL(__strnlen_user_asm);
61
62EXPORT_SYMBOL(csum_partial);
63
64EXPORT_SYMBOL(invalid_pte_table);
65#ifdef CONFIG_GENERIC_IRQ_PROBE
66EXPORT_SYMBOL(probe_irq_mask);
67#endif
diff --git a/arch/mips/kernel/module-elf32.c b/arch/mips/kernel/module-elf32.c
new file mode 100644
index 000000000000..ffd216d6d6dc
--- /dev/null
+++ b/arch/mips/kernel/module-elf32.c
@@ -0,0 +1,250 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License as published by
4 * the Free Software Foundation; either version 2 of the License, or
5 * (at your option) any later version.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software
14 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
15 *
16 * Copyright (C) 2001 Rusty Russell.
17 * Copyright (C) 2003, 2004 Ralf Baechle (ralf@linux-mips.org)
18 */
19
20#undef DEBUG
21
22#include <linux/moduleloader.h>
23#include <linux/elf.h>
24#include <linux/vmalloc.h>
25#include <linux/slab.h>
26#include <linux/fs.h>
27#include <linux/string.h>
28#include <linux/kernel.h>
29
30struct mips_hi16 {
31 struct mips_hi16 *next;
32 Elf32_Addr *addr;
33 Elf32_Addr value;
34};
35
36static struct mips_hi16 *mips_hi16_list;
37
38void *module_alloc(unsigned long size)
39{
40 if (size == 0)
41 return NULL;
42 return vmalloc(size);
43}
44
45
46/* Free memory returned from module_alloc */
47void module_free(struct module *mod, void *module_region)
48{
49 vfree(module_region);
50 /* FIXME: If module_region == mod->init_region, trim exception
51 table entries. */
52}
53
54int module_frob_arch_sections(Elf_Ehdr *hdr,
55 Elf_Shdr *sechdrs,
56 char *secstrings,
57 struct module *mod)
58{
59 return 0;
60}
61
62static int apply_r_mips_none(struct module *me, uint32_t *location,
63 Elf32_Addr v)
64{
65 return 0;
66}
67
68static int apply_r_mips_32(struct module *me, uint32_t *location,
69 Elf32_Addr v)
70{
71 *location += v;
72
73 return 0;
74}
75
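/*
 * R_MIPS_26 patches the 26-bit target field of a j/jal instruction.  The
 * field holds a word address and the jump can only reach targets within
 * the same 256MB region as the instruction in the delay slot, hence the
 * 0xf0000000 check.
 */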
76static int apply_r_mips_26(struct module *me, uint32_t *location,
77 Elf32_Addr v)
78{
79 if (v % 4) {
80 printk(KERN_ERR "module %s: dangerous relocation\n", me->name);
81 return -ENOEXEC;
82 }
83
84 if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) {
85 printk(KERN_ERR
86 "module %s: relocation overflow\n",
87 me->name);
88 return -ENOEXEC;
89 }
90
91 *location = (*location & ~0x03ffffff) |
92 ((*location + (v >> 2)) & 0x03ffffff);
93
94 return 0;
95}
96
97static int apply_r_mips_hi16(struct module *me, uint32_t *location,
98 Elf32_Addr v)
99{
100 struct mips_hi16 *n;
101
102 /*
103 * We cannot relocate this one now because we don't know the value of
104 * the carry we need to add. Save the information, and let LO16 do the
105 * actual relocation.
106 */
107 n = kmalloc(sizeof *n, GFP_KERNEL);
108 if (!n)
109 return -ENOMEM;
110
111 n->addr = location;
112 n->value = v;
113 n->next = mips_hi16_list;
114 mips_hi16_list = n;
115
116 return 0;
117}
118
119static int apply_r_mips_lo16(struct module *me, uint32_t *location,
120 Elf32_Addr v)
121{
122 unsigned long insnlo = *location;
123 Elf32_Addr val, vallo;
124
125 /* Sign extend the addend we extract from the lo insn. */
126 vallo = ((insnlo & 0xffff) ^ 0x8000) - 0x8000;
127
128 if (mips_hi16_list != NULL) {
129 struct mips_hi16 *l;
130
131 l = mips_hi16_list;
132 while (l != NULL) {
133 struct mips_hi16 *next;
134 unsigned long insn;
135
136 /*
137 * The value for the HI16 had best be the same.
138 */
139 if (v != l->value)
140 goto out_danger;
141
142 /*
143 * Do the HI16 relocation. Note that we actually don't
144 * need to know anything about the LO16 itself, except
145 * where to find the low 16 bits of the addend needed
146 * by the LO16.
147 */
148 insn = *l->addr;
149 val = ((insn & 0xffff) << 16) + vallo;
150 val += v;
151
152 /*
153 * Account for the sign extension that will happen in
154 * the low bits.
155 */
156 val = ((val >> 16) + ((val & 0x8000) != 0)) & 0xffff;
157
158 insn = (insn & ~0xffff) | val;
159 *l->addr = insn;
160
161 next = l->next;
162 kfree(l);
163 l = next;
164 }
165
166 mips_hi16_list = NULL;
167 }
168
169 /*
170 * Ok, we're done with the HI16 relocs. Now deal with the LO16.
171 */
172 val = v + vallo;
173 insnlo = (insnlo & ~0xffff) | (val & 0xffff);
174 *location = insnlo;
175
176 return 0;
177
178out_danger:
179 printk(KERN_ERR "module %s: dangerous relocation\n", me->name);
180
181 return -ENOEXEC;
182}
183
184static int (*reloc_handlers[]) (struct module *me, uint32_t *location,
185 Elf32_Addr v) = {
186 [R_MIPS_NONE] = apply_r_mips_none,
187 [R_MIPS_32] = apply_r_mips_32,
188 [R_MIPS_26] = apply_r_mips_26,
189 [R_MIPS_HI16] = apply_r_mips_hi16,
190 [R_MIPS_LO16] = apply_r_mips_lo16
191};
192
193int apply_relocate(Elf32_Shdr *sechdrs,
194 const char *strtab,
195 unsigned int symindex,
196 unsigned int relsec,
197 struct module *me)
198{
199 Elf32_Rel *rel = (void *) sechdrs[relsec].sh_addr;
200 Elf32_Sym *sym;
201 uint32_t *location;
202 unsigned int i;
203 Elf32_Addr v;
204 int res;
205
206 pr_debug("Applying relocate section %u to %u\n", relsec,
207 sechdrs[relsec].sh_info);
208
209 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
210 Elf32_Word r_info = rel[i].r_info;
211
212 /* This is where to make the change */
213 location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
214 + rel[i].r_offset;
215 /* This is the symbol it is referring to */
216 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
217 + ELF32_R_SYM(r_info);
218 if (!sym->st_value) {
219 printk(KERN_WARNING "%s: Unknown symbol %s\n",
220 me->name, strtab + sym->st_name);
221 return -ENOENT;
222 }
223
224 v = sym->st_value;
225
226 res = reloc_handlers[ELF32_R_TYPE(r_info)](me, location, v);
227 if (res)
228 return res;
229 }
230
231 return 0;
232}
233
234int apply_relocate_add(Elf32_Shdr *sechdrs,
235 const char *strtab,
236 unsigned int symindex,
237 unsigned int relsec,
238 struct module *me)
239{
240 /*
241 * Current binutils always generate .rela relocations. Keep smiling
242 * if it's empty, abort otherwise.
243 */
244 if (!sechdrs[relsec].sh_size)
245 return 0;
246
247 printk(KERN_ERR "module %s: ADD RELOCATION unsupported\n",
248 me->name);
249 return -ENOEXEC;
250}
diff --git a/arch/mips/kernel/module-elf64.c b/arch/mips/kernel/module-elf64.c
new file mode 100644
index 000000000000..e804792ee1ee
--- /dev/null
+++ b/arch/mips/kernel/module-elf64.c
@@ -0,0 +1,274 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License as published by
4 * the Free Software Foundation; either version 2 of the License, or
5 * (at your option) any later version.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software
14 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
15 *
16 * Copyright (C) 2001 Rusty Russell.
17 * Copyright (C) 2003, 2004 Ralf Baechle (ralf@linux-mips.org)
18 */
19
20#undef DEBUG
21
22#include <linux/moduleloader.h>
23#include <linux/elf.h>
24#include <linux/vmalloc.h>
25#include <linux/slab.h>
26#include <linux/fs.h>
27#include <linux/string.h>
28#include <linux/kernel.h>
29
30struct mips_hi16 {
31 struct mips_hi16 *next;
32 Elf32_Addr *addr;
33 Elf64_Addr value;
34};
35
36static struct mips_hi16 *mips_hi16_list;
37
38void *module_alloc(unsigned long size)
39{
40 if (size == 0)
41 return NULL;
42 return vmalloc(size);
43}
44
45
46/* Free memory returned from module_alloc */
47void module_free(struct module *mod, void *module_region)
48{
49 vfree(module_region);
50 /* FIXME: If module_region == mod->init_region, trim exception
51 table entries. */
52}
53
54int module_frob_arch_sections(Elf_Ehdr *hdr,
55 Elf_Shdr *sechdrs,
56 char *secstrings,
57 struct module *mod)
58{
59 return 0;
60}
61
62int apply_relocate(Elf64_Shdr *sechdrs,
63 const char *strtab,
64 unsigned int symindex,
65 unsigned int relsec,
66 struct module *me)
67{
68 /*
69 * We don't want to deal with REL relocations - RELA is so much saner.
70 */
71 if (!sechdrs[relsec].sh_size)
72 return 0;
73
74 printk(KERN_ERR "module %s: REL relocation unsupported\n",
75 me->name);
76 return -ENOEXEC;
77}
78
79static int apply_r_mips_none(struct module *me, uint32_t *location,
80 Elf64_Addr v)
81{
82 return 0;
83}
84
85static int apply_r_mips_32(struct module *me, uint32_t *location,
86 Elf64_Addr v)
87{
88 *location = v;
89
90 return 0;
91}
92
93static int apply_r_mips_26(struct module *me, uint32_t *location,
94 Elf64_Addr v)
95{
96 if (v % 4) {
97 printk(KERN_ERR "module %s: dangerous relocation\n", me->name);
98 return -ENOEXEC;
99 }
100
101 if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) {
102 printk(KERN_ERR
103 "module %s: relocation overflow\n",
104 me->name);
105 return -ENOEXEC;
106 }
107
108 *location = (*location & ~0x03ffffff) | ((v >> 2) & 0x03ffffff);
109
110 return 0;
111}
112
113static int apply_r_mips_hi16(struct module *me, uint32_t *location,
114 Elf64_Addr v)
115{
116 struct mips_hi16 *n;
117
118 /*
119 * We cannot relocate this one now because we don't know the value of
120 * the carry we need to add. Save the information, and let LO16 do the
121 * actual relocation.
122 */
123 n = kmalloc(sizeof *n, GFP_KERNEL);
124 if (!n)
125 return -ENOMEM;
126
127 n->addr = location;
128 n->value = v;
129 n->next = mips_hi16_list;
130 mips_hi16_list = n;
131
132 return 0;
133}
134
135static int apply_r_mips_lo16(struct module *me, uint32_t *location,
136 Elf64_Addr v)
137{
138 unsigned long insnlo = *location;
139 Elf32_Addr val, vallo;
140
141 /* Sign extend the addend we extract from the lo insn. */
142 vallo = ((insnlo & 0xffff) ^ 0x8000) - 0x8000;
143
144 if (mips_hi16_list != NULL) {
145 struct mips_hi16 *l;
146
147 l = mips_hi16_list;
148 while (l != NULL) {
149 struct mips_hi16 *next;
150 unsigned long insn;
151
152 /*
153 * The value for the HI16 had best be the same.
154 */
155 if (v != l->value)
156 goto out_danger;
157
158 /*
159 * Do the HI16 relocation. Note that we actually don't
160 * need to know anything about the LO16 itself, except
161 * where to find the low 16 bits of the addend needed
162 * by the LO16.
163 */
164 insn = *l->addr;
165 val = ((insn & 0xffff) << 16) + vallo;
166 val += v;
167
168 /*
169 * Account for the sign extension that will happen in
170 * the low bits.
171 */
172 val = ((val >> 16) + ((val & 0x8000) != 0)) & 0xffff;
173
174 insn = (insn & ~0xffff) | val;
175 *l->addr = insn;
176
177 next = l->next;
178 kfree(l);
179 l = next;
180 }
181
182 mips_hi16_list = NULL;
183 }
184
185 /*
186 * Ok, we're done with the HI16 relocs. Now deal with the LO16.
187 */
188 insnlo = (insnlo & ~0xffff) | (v & 0xffff);
189 *location = insnlo;
190
191 return 0;
192
193out_danger:
194	printk(KERN_ERR "module %s: dangerous relocation\n", me->name);
195
196 return -ENOEXEC;
197}
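Editor's note: the HI16/LO16 pairing handled above is subtle because the CPU sign-extends the LO16 field at run time, so the HI16 field has to absorb a possible carry. The following is a minimal user-space sketch of the same arithmetic, not kernel code; the instruction encodings and values are made up for illustration.

#include <stdint.h>
#include <stdio.h>

/* Recompute the 16-bit fields of a paired lui/addiu the way the
 * apply_r_mips_hi16()/apply_r_mips_lo16() handlers above do. */
static void fixup_hi_lo(uint32_t *hi_insn, uint32_t *lo_insn, uint32_t v)
{
	/* Sign-extend the addend taken from the LO16 instruction. */
	uint32_t vallo = ((*lo_insn & 0xffff) ^ 0x8000) - 0x8000;
	/* Full addend plus symbol value. */
	uint32_t val = ((*hi_insn & 0xffff) << 16) + vallo + v;
	/* The HI16 field absorbs the carry that run-time sign extension
	 * of the LO16 field will generate. */
	uint32_t hi = ((val >> 16) + ((val & 0x8000) != 0)) & 0xffff;

	*hi_insn = (*hi_insn & ~0xffffu) | hi;
	*lo_insn = (*lo_insn & ~0xffffu) | (val & 0xffff);
}

int main(void)
{
	uint32_t hi = 0x3c020000;	/* lui   v0, 0x0000        */
	uint32_t lo = 0x24428000;	/* addiu v0, v0, -0x8000   */

	fixup_hi_lo(&hi, &lo, 0x12345678);
	/* Prints 3c021234 2442d678: lui loads 0x1234, addiu subtracts
	 * 0x2988, giving 0x1233d678 == 0x12345678 - 0x8000 as required. */
	printf("%08x %08x\n", hi, lo);
	return 0;
}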
198
199static int apply_r_mips_64(struct module *me, uint32_t *location,
200 Elf64_Addr v)
201{
202 *(uint64_t *) location = v;
203
204 return 0;
205}
206
207
208static int apply_r_mips_higher(struct module *me, uint32_t *location,
209 Elf64_Addr v)
210{
211 *location = (*location & 0xffff0000) |
212 ((((long long) v + 0x80008000LL) >> 32) & 0xffff);
213
214 return 0;
215}
216
217static int apply_r_mips_highest(struct module *me, uint32_t *location,
218 Elf64_Addr v)
219{
220 *location = (*location & 0xffff0000) |
221 ((((long long) v + 0x800080008000LL) >> 48) & 0xffff);
222
223 return 0;
224}
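Editor's note: the 0x8000-style constants in the HIGHER and HIGHEST handlers above pre-add the carries that sign extension of the lower halves produces, so the four 16-bit pieces reassemble to the original 64-bit value. A hedged user-space sketch of that identity (the test addresses are arbitrary):

#include <stdint.h>
#include <stdio.h>

/* Split a 64-bit value into lo/hi/higher/highest fields using the same
 * rounding constants as the relocation handlers above, then reassemble
 * them the way a lui/daddiu sequence would (lower fields sign-extended). */
static void split64(uint64_t v)
{
	uint16_t lo      = v & 0xffff;
	uint16_t hi      = ((v + 0x8000ULL) >> 16) & 0xffff;
	uint16_t higher  = ((v + 0x80008000ULL) >> 32) & 0xffff;
	uint16_t highest = ((v + 0x800080008000ULL) >> 48) & 0xffff;

	uint64_t check = (uint64_t)highest << 48;
	check += (uint64_t)(int64_t)(int16_t)higher << 32;
	check += (uint64_t)(int64_t)(int16_t)hi << 16;
	check += (uint64_t)(int64_t)(int16_t)lo;

	printf("%04x %04x %04x %04x -> %s\n",
	       (unsigned)highest, (unsigned)higher, (unsigned)hi, (unsigned)lo,
	       check == v ? "ok" : "mismatch");
}

int main(void)
{
	split64(0xffffffff80123456ULL);	/* a typical CKSEG0 kernel address */
	split64(0x0000000012348000ULL);	/* forces a carry into the hi field */
	return 0;
}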
225
226static int (*reloc_handlers[]) (struct module *me, uint32_t *location,
227 Elf64_Addr v) = {
228 [R_MIPS_NONE] = apply_r_mips_none,
229 [R_MIPS_32] = apply_r_mips_32,
230 [R_MIPS_26] = apply_r_mips_26,
231 [R_MIPS_HI16] = apply_r_mips_hi16,
232 [R_MIPS_LO16] = apply_r_mips_lo16,
233 [R_MIPS_64] = apply_r_mips_64,
234 [R_MIPS_HIGHER] = apply_r_mips_higher,
235 [R_MIPS_HIGHEST] = apply_r_mips_highest
236};
237
238int apply_relocate_add(Elf64_Shdr *sechdrs,
239 const char *strtab,
240 unsigned int symindex,
241 unsigned int relsec,
242 struct module *me)
243{
244 Elf64_Mips_Rela *rel = (void *) sechdrs[relsec].sh_addr;
245 Elf64_Sym *sym;
246 uint32_t *location;
247 unsigned int i;
248 Elf64_Addr v;
249 int res;
250
251 pr_debug("Applying relocate section %u to %u\n", relsec,
252 sechdrs[relsec].sh_info);
253
254 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
255 /* This is where to make the change */
256 location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
257 + rel[i].r_offset;
258 /* This is the symbol it is referring to */
259 sym = (Elf64_Sym *)sechdrs[symindex].sh_addr + rel[i].r_sym;
260 if (!sym->st_value) {
261 printk(KERN_WARNING "%s: Unknown symbol %s\n",
262 me->name, strtab + sym->st_name);
263 return -ENOENT;
264 }
265
266 v = sym->st_value;
267
268 res = reloc_handlers[rel[i].r_type](me, location, v);
269 if (res)
270 return res;
271 }
272
273 return 0;
274}
diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c
new file mode 100644
index 000000000000..458af3c7a639
--- /dev/null
+++ b/arch/mips/kernel/module.c
@@ -0,0 +1,53 @@
1#include <linux/module.h>
2#include <linux/spinlock.h>
3
4static LIST_HEAD(dbe_list);
5static DEFINE_SPINLOCK(dbe_lock);
6
7/* Given an address, look for it in the module exception tables. */
8const struct exception_table_entry *search_module_dbetables(unsigned long addr)
9{
10 unsigned long flags;
11 const struct exception_table_entry *e = NULL;
12 struct mod_arch_specific *dbe;
13
14 spin_lock_irqsave(&dbe_lock, flags);
15 list_for_each_entry(dbe, &dbe_list, dbe_list) {
16 e = search_extable(dbe->dbe_start, dbe->dbe_end - 1, addr);
17 if (e)
18 break;
19 }
20 spin_unlock_irqrestore(&dbe_lock, flags);
21
 22	/* If we found one, we are currently running inside it, hence
 23	   the module cannot be unloaded and no refcount is needed. */
24 return e;
25}
26
 27/* Put in dbe list if necessary. */
28int module_finalize(const Elf_Ehdr *hdr,
29 const Elf_Shdr *sechdrs,
30 struct module *me)
31{
32 const Elf_Shdr *s;
33 char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
34
35 INIT_LIST_HEAD(&me->arch.dbe_list);
36 for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
37 if (strcmp("__dbe_table", secstrings + s->sh_name) != 0)
38 continue;
39 me->arch.dbe_start = (void *)s->sh_addr;
40 me->arch.dbe_end = (void *)s->sh_addr + s->sh_size;
41 spin_lock_irq(&dbe_lock);
42 list_add(&me->arch.dbe_list, &dbe_list);
43 spin_unlock_irq(&dbe_lock);
44 }
45 return 0;
46}
47
48void module_arch_cleanup(struct module *mod)
49{
50 spin_lock_irq(&dbe_lock);
51 list_del(&mod->arch.dbe_list);
52 spin_unlock_irq(&dbe_lock);
53}
diff --git a/arch/mips/kernel/offset.c b/arch/mips/kernel/offset.c
new file mode 100644
index 000000000000..2c11abb5a406
--- /dev/null
+++ b/arch/mips/kernel/offset.c
@@ -0,0 +1,314 @@
1/*
2 * offset.c: Calculate pt_regs and task_struct offsets.
3 *
4 * Copyright (C) 1996 David S. Miller
5 * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003 Ralf Baechle
6 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
7 *
8 * Kevin Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
9 * Copyright (C) 2000 MIPS Technologies, Inc.
10 */
11#include <linux/config.h>
12#include <linux/compat.h>
13#include <linux/types.h>
14#include <linux/sched.h>
15#include <linux/mm.h>
16#include <linux/interrupt.h>
17
18#include <asm/ptrace.h>
19#include <asm/processor.h>
20
21#define text(t) __asm__("\n@@@" t)
22#define _offset(type, member) (&(((type *)NULL)->member))
23#define offset(string, ptr, member) \
24 __asm__("\n@@@" string "%0" : : "i" (_offset(ptr, member)))
25#define constant(string, member) \
26 __asm__("\n@@@" string "%x0" : : "ri" (member))
27#define size(string, size) \
28 __asm__("\n@@@" string "%0" : : "i" (sizeof(size)))
29#define linefeed text("")
30
31void output_ptreg_defines(void)
32{
33 text("/* MIPS pt_regs offsets. */");
34 offset("#define PT_R0 ", struct pt_regs, regs[0]);
35 offset("#define PT_R1 ", struct pt_regs, regs[1]);
36 offset("#define PT_R2 ", struct pt_regs, regs[2]);
37 offset("#define PT_R3 ", struct pt_regs, regs[3]);
38 offset("#define PT_R4 ", struct pt_regs, regs[4]);
39 offset("#define PT_R5 ", struct pt_regs, regs[5]);
40 offset("#define PT_R6 ", struct pt_regs, regs[6]);
41 offset("#define PT_R7 ", struct pt_regs, regs[7]);
42 offset("#define PT_R8 ", struct pt_regs, regs[8]);
43 offset("#define PT_R9 ", struct pt_regs, regs[9]);
44 offset("#define PT_R10 ", struct pt_regs, regs[10]);
45 offset("#define PT_R11 ", struct pt_regs, regs[11]);
46 offset("#define PT_R12 ", struct pt_regs, regs[12]);
47 offset("#define PT_R13 ", struct pt_regs, regs[13]);
48 offset("#define PT_R14 ", struct pt_regs, regs[14]);
49 offset("#define PT_R15 ", struct pt_regs, regs[15]);
50 offset("#define PT_R16 ", struct pt_regs, regs[16]);
51 offset("#define PT_R17 ", struct pt_regs, regs[17]);
52 offset("#define PT_R18 ", struct pt_regs, regs[18]);
53 offset("#define PT_R19 ", struct pt_regs, regs[19]);
54 offset("#define PT_R20 ", struct pt_regs, regs[20]);
55 offset("#define PT_R21 ", struct pt_regs, regs[21]);
56 offset("#define PT_R22 ", struct pt_regs, regs[22]);
57 offset("#define PT_R23 ", struct pt_regs, regs[23]);
58 offset("#define PT_R24 ", struct pt_regs, regs[24]);
59 offset("#define PT_R25 ", struct pt_regs, regs[25]);
60 offset("#define PT_R26 ", struct pt_regs, regs[26]);
61 offset("#define PT_R27 ", struct pt_regs, regs[27]);
62 offset("#define PT_R28 ", struct pt_regs, regs[28]);
63 offset("#define PT_R29 ", struct pt_regs, regs[29]);
64 offset("#define PT_R30 ", struct pt_regs, regs[30]);
65 offset("#define PT_R31 ", struct pt_regs, regs[31]);
66 offset("#define PT_LO ", struct pt_regs, lo);
67 offset("#define PT_HI ", struct pt_regs, hi);
68 offset("#define PT_EPC ", struct pt_regs, cp0_epc);
69 offset("#define PT_BVADDR ", struct pt_regs, cp0_badvaddr);
70 offset("#define PT_STATUS ", struct pt_regs, cp0_status);
71 offset("#define PT_CAUSE ", struct pt_regs, cp0_cause);
72 size("#define PT_SIZE ", struct pt_regs);
73 linefeed;
74}
75
76void output_task_defines(void)
77{
78 text("/* MIPS task_struct offsets. */");
79 offset("#define TASK_STATE ", struct task_struct, state);
80 offset("#define TASK_THREAD_INFO ", struct task_struct, thread_info);
81 offset("#define TASK_FLAGS ", struct task_struct, flags);
82 offset("#define TASK_MM ", struct task_struct, mm);
83 offset("#define TASK_PID ", struct task_struct, pid);
84 size( "#define TASK_STRUCT_SIZE ", struct task_struct);
85 linefeed;
86}
87
88void output_thread_info_defines(void)
89{
90 text("/* MIPS thread_info offsets. */");
91 offset("#define TI_TASK ", struct thread_info, task);
92 offset("#define TI_EXEC_DOMAIN ", struct thread_info, exec_domain);
93 offset("#define TI_FLAGS ", struct thread_info, flags);
94 offset("#define TI_CPU ", struct thread_info, cpu);
95 offset("#define TI_PRE_COUNT ", struct thread_info, preempt_count);
96 offset("#define TI_ADDR_LIMIT ", struct thread_info, addr_limit);
97 offset("#define TI_RESTART_BLOCK ", struct thread_info, restart_block);
98 constant("#define _THREAD_SIZE_ORDER ", THREAD_SIZE_ORDER);
99 constant("#define _THREAD_SIZE ", THREAD_SIZE);
100 constant("#define _THREAD_MASK ", THREAD_MASK);
101 linefeed;
102}
103
104void output_thread_defines(void)
105{
106 text("/* MIPS specific thread_struct offsets. */");
107 offset("#define THREAD_REG16 ", struct task_struct, thread.reg16);
108 offset("#define THREAD_REG17 ", struct task_struct, thread.reg17);
109 offset("#define THREAD_REG18 ", struct task_struct, thread.reg18);
110 offset("#define THREAD_REG19 ", struct task_struct, thread.reg19);
111 offset("#define THREAD_REG20 ", struct task_struct, thread.reg20);
112 offset("#define THREAD_REG21 ", struct task_struct, thread.reg21);
113 offset("#define THREAD_REG22 ", struct task_struct, thread.reg22);
114 offset("#define THREAD_REG23 ", struct task_struct, thread.reg23);
115 offset("#define THREAD_REG29 ", struct task_struct, thread.reg29);
116 offset("#define THREAD_REG30 ", struct task_struct, thread.reg30);
117 offset("#define THREAD_REG31 ", struct task_struct, thread.reg31);
118 offset("#define THREAD_STATUS ", struct task_struct,
119 thread.cp0_status);
120 offset("#define THREAD_FPU ", struct task_struct, thread.fpu);
121
122 offset("#define THREAD_BVADDR ", struct task_struct, \
123 thread.cp0_badvaddr);
124 offset("#define THREAD_BUADDR ", struct task_struct, \
125 thread.cp0_baduaddr);
126 offset("#define THREAD_ECODE ", struct task_struct, \
127 thread.error_code);
128 offset("#define THREAD_TRAPNO ", struct task_struct, thread.trap_no);
129 offset("#define THREAD_MFLAGS ", struct task_struct, thread.mflags);
130 offset("#define THREAD_TRAMP ", struct task_struct, \
131 thread.irix_trampoline);
132 offset("#define THREAD_OLDCTX ", struct task_struct, \
133 thread.irix_oldctx);
134 linefeed;
135}
136
137void output_thread_fpu_defines(void)
138{
139 offset("#define THREAD_FPR0 ",
140 struct task_struct, thread.fpu.hard.fpr[0]);
141 offset("#define THREAD_FPR1 ",
142 struct task_struct, thread.fpu.hard.fpr[1]);
143 offset("#define THREAD_FPR2 ",
144 struct task_struct, thread.fpu.hard.fpr[2]);
145 offset("#define THREAD_FPR3 ",
146 struct task_struct, thread.fpu.hard.fpr[3]);
147 offset("#define THREAD_FPR4 ",
148 struct task_struct, thread.fpu.hard.fpr[4]);
149 offset("#define THREAD_FPR5 ",
150 struct task_struct, thread.fpu.hard.fpr[5]);
151 offset("#define THREAD_FPR6 ",
152 struct task_struct, thread.fpu.hard.fpr[6]);
153 offset("#define THREAD_FPR7 ",
154 struct task_struct, thread.fpu.hard.fpr[7]);
155 offset("#define THREAD_FPR8 ",
156 struct task_struct, thread.fpu.hard.fpr[8]);
157 offset("#define THREAD_FPR9 ",
158 struct task_struct, thread.fpu.hard.fpr[9]);
159 offset("#define THREAD_FPR10 ",
160 struct task_struct, thread.fpu.hard.fpr[10]);
161 offset("#define THREAD_FPR11 ",
162 struct task_struct, thread.fpu.hard.fpr[11]);
163 offset("#define THREAD_FPR12 ",
164 struct task_struct, thread.fpu.hard.fpr[12]);
165 offset("#define THREAD_FPR13 ",
166 struct task_struct, thread.fpu.hard.fpr[13]);
167 offset("#define THREAD_FPR14 ",
168 struct task_struct, thread.fpu.hard.fpr[14]);
169 offset("#define THREAD_FPR15 ",
170 struct task_struct, thread.fpu.hard.fpr[15]);
171 offset("#define THREAD_FPR16 ",
172 struct task_struct, thread.fpu.hard.fpr[16]);
173 offset("#define THREAD_FPR17 ",
174 struct task_struct, thread.fpu.hard.fpr[17]);
175 offset("#define THREAD_FPR18 ",
176 struct task_struct, thread.fpu.hard.fpr[18]);
177 offset("#define THREAD_FPR19 ",
178 struct task_struct, thread.fpu.hard.fpr[19]);
179 offset("#define THREAD_FPR20 ",
180 struct task_struct, thread.fpu.hard.fpr[20]);
181 offset("#define THREAD_FPR21 ",
182 struct task_struct, thread.fpu.hard.fpr[21]);
183 offset("#define THREAD_FPR22 ",
184 struct task_struct, thread.fpu.hard.fpr[22]);
185 offset("#define THREAD_FPR23 ",
186 struct task_struct, thread.fpu.hard.fpr[23]);
187 offset("#define THREAD_FPR24 ",
188 struct task_struct, thread.fpu.hard.fpr[24]);
189 offset("#define THREAD_FPR25 ",
190 struct task_struct, thread.fpu.hard.fpr[25]);
191 offset("#define THREAD_FPR26 ",
192 struct task_struct, thread.fpu.hard.fpr[26]);
193 offset("#define THREAD_FPR27 ",
194 struct task_struct, thread.fpu.hard.fpr[27]);
195 offset("#define THREAD_FPR28 ",
196 struct task_struct, thread.fpu.hard.fpr[28]);
197 offset("#define THREAD_FPR29 ",
198 struct task_struct, thread.fpu.hard.fpr[29]);
199 offset("#define THREAD_FPR30 ",
200 struct task_struct, thread.fpu.hard.fpr[30]);
201 offset("#define THREAD_FPR31 ",
202 struct task_struct, thread.fpu.hard.fpr[31]);
203
204 offset("#define THREAD_FCR31 ",
205 struct task_struct, thread.fpu.hard.fcr31);
206 linefeed;
207}
208
209void output_mm_defines(void)
210{
211 text("/* Size of struct page */");
212 size("#define STRUCT_PAGE_SIZE ", struct page);
213 linefeed;
214 text("/* Linux mm_struct offsets. */");
215 offset("#define MM_USERS ", struct mm_struct, mm_users);
216 offset("#define MM_PGD ", struct mm_struct, pgd);
217 offset("#define MM_CONTEXT ", struct mm_struct, context);
218 linefeed;
219 constant("#define _PAGE_SIZE ", PAGE_SIZE);
220 constant("#define _PAGE_SHIFT ", PAGE_SHIFT);
221 linefeed;
222 constant("#define _PGD_T_SIZE ", sizeof(pgd_t));
223 constant("#define _PMD_T_SIZE ", sizeof(pmd_t));
224 constant("#define _PTE_T_SIZE ", sizeof(pte_t));
225 linefeed;
226 constant("#define _PGD_T_LOG2 ", PGD_T_LOG2);
227 constant("#define _PMD_T_LOG2 ", PMD_T_LOG2);
228 constant("#define _PTE_T_LOG2 ", PTE_T_LOG2);
229 linefeed;
230 constant("#define _PMD_SHIFT ", PMD_SHIFT);
231 constant("#define _PGDIR_SHIFT ", PGDIR_SHIFT);
232 linefeed;
233 constant("#define _PGD_ORDER ", PGD_ORDER);
234 constant("#define _PMD_ORDER ", PMD_ORDER);
235 constant("#define _PTE_ORDER ", PTE_ORDER);
236 linefeed;
237 constant("#define _PTRS_PER_PGD ", PTRS_PER_PGD);
238 constant("#define _PTRS_PER_PMD ", PTRS_PER_PMD);
239 constant("#define _PTRS_PER_PTE ", PTRS_PER_PTE);
240 linefeed;
241}
242
243void output_sc_defines(void)
244{
245 text("/* Linux sigcontext offsets. */");
246 offset("#define SC_REGS ", struct sigcontext, sc_regs);
247 offset("#define SC_FPREGS ", struct sigcontext, sc_fpregs);
248 offset("#define SC_MDHI ", struct sigcontext, sc_mdhi);
249 offset("#define SC_MDLO ", struct sigcontext, sc_mdlo);
250 offset("#define SC_PC ", struct sigcontext, sc_pc);
251 offset("#define SC_STATUS ", struct sigcontext, sc_status);
252 offset("#define SC_FPC_CSR ", struct sigcontext, sc_fpc_csr);
253 offset("#define SC_FPC_EIR ", struct sigcontext, sc_fpc_eir);
254 offset("#define SC_CAUSE ", struct sigcontext, sc_cause);
255 offset("#define SC_BADVADDR ", struct sigcontext, sc_badvaddr);
256 linefeed;
257}
258
259#ifdef CONFIG_MIPS32_COMPAT
260void output_sc32_defines(void)
261{
262 text("/* Linux 32-bit sigcontext offsets. */");
263 offset("#define SC32_FPREGS ", struct sigcontext32, sc_fpregs);
264 offset("#define SC32_FPC_CSR ", struct sigcontext32, sc_fpc_csr);
265 offset("#define SC32_FPC_EIR ", struct sigcontext32, sc_fpc_eir);
266 linefeed;
267}
268#endif
269
270void output_signal_defined(void)
271{
272 text("/* Linux signal numbers. */");
273 constant("#define _SIGHUP ", SIGHUP);
274 constant("#define _SIGINT ", SIGINT);
275 constant("#define _SIGQUIT ", SIGQUIT);
276 constant("#define _SIGILL ", SIGILL);
277 constant("#define _SIGTRAP ", SIGTRAP);
278 constant("#define _SIGIOT ", SIGIOT);
279 constant("#define _SIGABRT ", SIGABRT);
280 constant("#define _SIGEMT ", SIGEMT);
281 constant("#define _SIGFPE ", SIGFPE);
282 constant("#define _SIGKILL ", SIGKILL);
283 constant("#define _SIGBUS ", SIGBUS);
284 constant("#define _SIGSEGV ", SIGSEGV);
285 constant("#define _SIGSYS ", SIGSYS);
286 constant("#define _SIGPIPE ", SIGPIPE);
287 constant("#define _SIGALRM ", SIGALRM);
288 constant("#define _SIGTERM ", SIGTERM);
289 constant("#define _SIGUSR1 ", SIGUSR1);
290 constant("#define _SIGUSR2 ", SIGUSR2);
291 constant("#define _SIGCHLD ", SIGCHLD);
292 constant("#define _SIGPWR ", SIGPWR);
293 constant("#define _SIGWINCH ", SIGWINCH);
294 constant("#define _SIGURG ", SIGURG);
295 constant("#define _SIGIO ", SIGIO);
296 constant("#define _SIGSTOP ", SIGSTOP);
297 constant("#define _SIGTSTP ", SIGTSTP);
298 constant("#define _SIGCONT ", SIGCONT);
299 constant("#define _SIGTTIN ", SIGTTIN);
300 constant("#define _SIGTTOU ", SIGTTOU);
301 constant("#define _SIGVTALRM ", SIGVTALRM);
302 constant("#define _SIGPROF ", SIGPROF);
303 constant("#define _SIGXCPU ", SIGXCPU);
304 constant("#define _SIGXFSZ ", SIGXFSZ);
305 linefeed;
306}
307
308void output_irq_cpustat_t_defines(void)
309{
310 text("/* Linux irq_cpustat_t offsets. */");
311 offset("#define IC_SOFTIRQ_PENDING ", irq_cpustat_t, __softirq_pending);
312 size("#define IC_IRQ_CPUSTAT_T ", irq_cpustat_t);
313 linefeed;
314}
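Editor's note: offset.c is never linked into the kernel; it is only compiled to assembly, and the "@@@"-marked lines in the generated .s file are extracted by the build to produce the asm/offset.h header that the .S files below include. A minimal standalone sketch of the trick; the struct and the DEFINE macro here are made up, only the marker idiom mirrors the code above.

#include <stddef.h>

/* Hypothetical structure standing in for pt_regs/task_struct. */
struct demo {
	int	a;
	long	b[4];
};

/* Emit "@@@#define SYM <value>" into the generated .s file.  The "i"
 * constraint forces the compiler to materialise the value as an
 * assemble-time constant; no code is ever executed. */
#define DEFINE(sym, val) \
	__asm__("\n@@@#define " #sym " %0" : : "i" (val))

void output_demo_defines(void)
{
	DEFINE(DEMO_A,    offsetof(struct demo, a));
	DEFINE(DEMO_B,    offsetof(struct demo, b));
	DEFINE(DEMO_SIZE, sizeof(struct demo));
}

Compiling this with "gcc -S" and stripping the "@@@" prefix from the marked lines yields a plain header of #define lines; the kernel build does the equivalent to turn offset.s into the asm/offset.h used by r2300_fpu.S and friends.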
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
new file mode 100644
index 000000000000..0f159f30e894
--- /dev/null
+++ b/arch/mips/kernel/proc.c
@@ -0,0 +1,149 @@
1/*
2 * linux/arch/mips/kernel/proc.c
3 *
4 * Copyright (C) 1995, 1996, 2001 Ralf Baechle
5 * Copyright (C) 2001 MIPS Technologies, Inc.
6 */
7#include <linux/config.h>
8#include <linux/delay.h>
9#include <linux/kernel.h>
10#include <linux/sched.h>
11#include <linux/seq_file.h>
12#include <asm/bootinfo.h>
13#include <asm/cpu.h>
14#include <asm/cpu-features.h>
15#include <asm/mipsregs.h>
16#include <asm/processor.h>
17#include <asm/watch.h>
18
19unsigned int vced_count, vcei_count;
20
21static const char *cpu_name[] = {
22 [CPU_UNKNOWN] "unknown",
23 [CPU_R2000] "R2000",
24 [CPU_R3000] "R3000",
25 [CPU_R3000A] "R3000A",
26 [CPU_R3041] "R3041",
27 [CPU_R3051] "R3051",
28 [CPU_R3052] "R3052",
29 [CPU_R3081] "R3081",
30 [CPU_R3081E] "R3081E",
31 [CPU_R4000PC] "R4000PC",
32 [CPU_R4000SC] "R4000SC",
33 [CPU_R4000MC] "R4000MC",
34 [CPU_R4200] "R4200",
35 [CPU_R4400PC] "R4400PC",
36 [CPU_R4400SC] "R4400SC",
37 [CPU_R4400MC] "R4400MC",
38 [CPU_R4600] "R4600",
39 [CPU_R6000] "R6000",
40 [CPU_R6000A] "R6000A",
41 [CPU_R8000] "R8000",
42 [CPU_R10000] "R10000",
43 [CPU_R12000] "R12000",
44 [CPU_R4300] "R4300",
45 [CPU_R4650] "R4650",
46 [CPU_R4700] "R4700",
47 [CPU_R5000] "R5000",
48 [CPU_R5000A] "R5000A",
49 [CPU_R4640] "R4640",
50 [CPU_NEVADA] "Nevada",
51 [CPU_RM7000] "RM7000",
52 [CPU_RM9000] "RM9000",
53 [CPU_R5432] "R5432",
54 [CPU_4KC] "MIPS 4Kc",
55 [CPU_5KC] "MIPS 5Kc",
56 [CPU_R4310] "R4310",
57 [CPU_SB1] "SiByte SB1",
58 [CPU_TX3912] "TX3912",
59 [CPU_TX3922] "TX3922",
60 [CPU_TX3927] "TX3927",
61 [CPU_AU1000] "Au1000",
62 [CPU_AU1500] "Au1500",
63 [CPU_4KEC] "MIPS 4KEc",
64 [CPU_4KSC] "MIPS 4KSc",
65 [CPU_VR41XX] "NEC Vr41xx",
66 [CPU_R5500] "R5500",
67 [CPU_TX49XX] "TX49xx",
68 [CPU_20KC] "MIPS 20Kc",
69 [CPU_24K] "MIPS 24K",
70 [CPU_25KF] "MIPS 25Kf",
71 [CPU_VR4111] "NEC VR4111",
72 [CPU_VR4121] "NEC VR4121",
73 [CPU_VR4122] "NEC VR4122",
74 [CPU_VR4131] "NEC VR4131",
75 [CPU_VR4133] "NEC VR4133",
76 [CPU_VR4181] "NEC VR4181",
77 [CPU_VR4181A] "NEC VR4181A",
78 [CPU_SR71000] "Sandcraft SR71000"
79};
80
81
82static int show_cpuinfo(struct seq_file *m, void *v)
83{
84 unsigned int version = current_cpu_data.processor_id;
85 unsigned int fp_vers = current_cpu_data.fpu_id;
86 unsigned long n = (unsigned long) v - 1;
87 char fmt [64];
88
89#ifdef CONFIG_SMP
90 if (!cpu_isset(n, cpu_online_map))
91 return 0;
92#endif
93
94 /*
95 * For the first processor also print the system type
96 */
97 if (n == 0)
98 seq_printf(m, "system type\t\t: %s\n", get_system_type());
99
100 seq_printf(m, "processor\t\t: %ld\n", n);
101 sprintf(fmt, "cpu model\t\t: %%s V%%d.%%d%s\n",
102 cpu_has_fpu ? " FPU V%d.%d" : "");
103 seq_printf(m, fmt, cpu_name[current_cpu_data.cputype <= CPU_LAST ?
104 current_cpu_data.cputype : CPU_UNKNOWN],
105 (version >> 4) & 0x0f, version & 0x0f,
106 (fp_vers >> 4) & 0x0f, fp_vers & 0x0f);
107 seq_printf(m, "BogoMIPS\t\t: %lu.%02lu\n",
108 loops_per_jiffy / (500000/HZ),
109 (loops_per_jiffy / (5000/HZ)) % 100);
110 seq_printf(m, "wait instruction\t: %s\n", cpu_wait ? "yes" : "no");
111 seq_printf(m, "microsecond timers\t: %s\n",
112 cpu_has_counter ? "yes" : "no");
113 seq_printf(m, "tlb_entries\t\t: %d\n", current_cpu_data.tlbsize);
114 seq_printf(m, "extra interrupt vector\t: %s\n",
115 cpu_has_divec ? "yes" : "no");
116 seq_printf(m, "hardware watchpoint\t: %s\n",
117 cpu_has_watch ? "yes" : "no");
118
119 sprintf(fmt, "VCE%%c exceptions\t\t: %s\n",
120 cpu_has_vce ? "%u" : "not available");
121 seq_printf(m, fmt, 'D', vced_count);
122 seq_printf(m, fmt, 'I', vcei_count);
123
124 return 0;
125}
126
127static void *c_start(struct seq_file *m, loff_t *pos)
128{
129 unsigned long i = *pos;
130
131 return i < NR_CPUS ? (void *) (i + 1) : NULL;
132}
133
134static void *c_next(struct seq_file *m, void *v, loff_t *pos)
135{
136 ++*pos;
137 return c_start(m, pos);
138}
139
140static void c_stop(struct seq_file *m, void *v)
141{
142}
143
144struct seq_operations cpuinfo_op = {
145 .start = c_start,
146 .next = c_next,
147 .stop = c_stop,
148 .show = show_cpuinfo,
149};
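Editor's note: show_cpuinfo() above builds its format strings in two passes so that optional fields (the FPU revision, the VCE counters) expand either to a real conversion or to fixed text. A small hedged user-space illustration of that idiom; the values are invented.

#include <stdio.h>

int main(void)
{
	char fmt[64];
	int cpu_has_vce = 1;
	unsigned int vced_count = 7, vcei_count = 0;

	/* Pass 1: decide whether the line gets a %u conversion at all. */
	snprintf(fmt, sizeof(fmt), "VCE%%c exceptions\t\t: %s\n",
		 cpu_has_vce ? "%u" : "not available");

	/* Pass 2: the counter argument is simply ignored when pass 1
	 * substituted fixed text instead of %u. */
	printf(fmt, 'D', vced_count);
	printf(fmt, 'I', vcei_count);

	return 0;
}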
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
new file mode 100644
index 000000000000..6e70c42c2058
--- /dev/null
+++ b/arch/mips/kernel/process.c
@@ -0,0 +1,364 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1994 - 1999, 2000 by Ralf Baechle and others.
7 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
8 * Copyright (C) 2004 Thiemo Seufer
9 */
10#include <linux/config.h>
11#include <linux/errno.h>
12#include <linux/module.h>
13#include <linux/sched.h>
14#include <linux/kernel.h>
15#include <linux/mm.h>
16#include <linux/stddef.h>
17#include <linux/unistd.h>
18#include <linux/ptrace.h>
19#include <linux/slab.h>
20#include <linux/mman.h>
21#include <linux/personality.h>
22#include <linux/sys.h>
23#include <linux/user.h>
24#include <linux/a.out.h>
25#include <linux/init.h>
26#include <linux/completion.h>
27
28#include <asm/bootinfo.h>
29#include <asm/cpu.h>
30#include <asm/fpu.h>
31#include <asm/pgtable.h>
32#include <asm/system.h>
33#include <asm/mipsregs.h>
34#include <asm/processor.h>
35#include <asm/uaccess.h>
36#include <asm/io.h>
37#include <asm/elf.h>
38#include <asm/isadep.h>
39#include <asm/inst.h>
40
41/*
 42 * We use this if we don't have any better idle routine.
 43 * (This is something to kill: kernel/platform.c.)
44 */
45void default_idle (void)
46{
47}
48
49/*
50 * The idle thread. There's no useful work to be done, so just try to conserve
51 * power and have a low exit latency (ie sit in a loop waiting for somebody to
52 * say that they'd like to reschedule)
53 */
54ATTRIB_NORET void cpu_idle(void)
55{
56 /* endless idle loop with no priority at all */
57 while (1) {
58 while (!need_resched())
59 if (cpu_wait)
60 (*cpu_wait)();
61 schedule();
62 }
63}
64
65asmlinkage void ret_from_fork(void);
66
67void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
68{
69 unsigned long status;
70
71 /* New thread loses kernel privileges. */
72 status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|KU_MASK);
73#ifdef CONFIG_MIPS64
74 status &= ~ST0_FR;
75 status |= (current->thread.mflags & MF_32BIT_REGS) ? 0 : ST0_FR;
76#endif
77 status |= KU_USER;
78 regs->cp0_status = status;
79 clear_used_math();
80 lose_fpu();
81 regs->cp0_epc = pc;
82 regs->regs[29] = sp;
83 current_thread_info()->addr_limit = USER_DS;
84}
85
86void exit_thread(void)
87{
88}
89
90void flush_thread(void)
91{
92}
93
94int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
95 unsigned long unused, struct task_struct *p, struct pt_regs *regs)
96{
97 struct thread_info *ti = p->thread_info;
98 struct pt_regs *childregs;
99 long childksp;
100
101 childksp = (unsigned long)ti + THREAD_SIZE - 32;
102
103 preempt_disable();
104
105 if (is_fpu_owner()) {
106 save_fp(p);
107 }
108
109 preempt_enable();
110
111 /* set up new TSS. */
112 childregs = (struct pt_regs *) childksp - 1;
113 *childregs = *regs;
114 childregs->regs[7] = 0; /* Clear error flag */
115
116#if defined(CONFIG_BINFMT_IRIX)
117 if (current->personality != PER_LINUX) {
118 /* Under IRIX things are a little different. */
119 childregs->regs[3] = 1;
120 regs->regs[3] = 0;
121 }
122#endif
123 childregs->regs[2] = 0; /* Child gets zero as return value */
124 regs->regs[2] = p->pid;
125
126 if (childregs->cp0_status & ST0_CU0) {
127 childregs->regs[28] = (unsigned long) ti;
128 childregs->regs[29] = childksp;
129 ti->addr_limit = KERNEL_DS;
130 } else {
131 childregs->regs[29] = usp;
132 ti->addr_limit = USER_DS;
133 }
134 p->thread.reg29 = (unsigned long) childregs;
135 p->thread.reg31 = (unsigned long) ret_from_fork;
136
137 /*
138 * New tasks lose permission to use the fpu. This accelerates context
139 * switching for most programs since they don't use the fpu.
140 */
141 p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1);
142 childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);
143 clear_tsk_thread_flag(p, TIF_USEDFPU);
144
145 return 0;
146}
147
148/* Fill in the fpu structure for a core dump.. */
149int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r)
150{
151 memcpy(r, &current->thread.fpu, sizeof(current->thread.fpu));
152
153 return 1;
154}
155
156void dump_regs(elf_greg_t *gp, struct pt_regs *regs)
157{
158 int i;
159
160 for (i = 0; i < EF_R0; i++)
161 gp[i] = 0;
162 gp[EF_R0] = 0;
163 for (i = 1; i <= 31; i++)
164 gp[EF_R0 + i] = regs->regs[i];
165 gp[EF_R26] = 0;
166 gp[EF_R27] = 0;
167 gp[EF_LO] = regs->lo;
168 gp[EF_HI] = regs->hi;
169 gp[EF_CP0_EPC] = regs->cp0_epc;
170 gp[EF_CP0_BADVADDR] = regs->cp0_badvaddr;
171 gp[EF_CP0_STATUS] = regs->cp0_status;
172 gp[EF_CP0_CAUSE] = regs->cp0_cause;
173#ifdef EF_UNUSED0
174 gp[EF_UNUSED0] = 0;
175#endif
176}
177
178int dump_task_fpu (struct task_struct *t, elf_fpregset_t *fpr)
179{
180 memcpy(fpr, &t->thread.fpu, sizeof(current->thread.fpu));
181
182 return 1;
183}
184
185/*
186 * Create a kernel thread
187 */
188ATTRIB_NORET void kernel_thread_helper(void *arg, int (*fn)(void *))
189{
190 do_exit(fn(arg));
191}
192
193long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
194{
195 struct pt_regs regs;
196
197 memset(&regs, 0, sizeof(regs));
198
199 regs.regs[4] = (unsigned long) arg;
200 regs.regs[5] = (unsigned long) fn;
201 regs.cp0_epc = (unsigned long) kernel_thread_helper;
202 regs.cp0_status = read_c0_status();
203#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
204 regs.cp0_status &= ~(ST0_KUP | ST0_IEC);
205 regs.cp0_status |= ST0_IEP;
206#else
207 regs.cp0_status |= ST0_EXL;
208#endif
209
210 /* Ok, create the new process.. */
211 return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
212}
213
214struct mips_frame_info {
215 int frame_offset;
216 int pc_offset;
217};
218static struct mips_frame_info schedule_frame;
219static struct mips_frame_info schedule_timeout_frame;
220static struct mips_frame_info sleep_on_frame;
221static struct mips_frame_info sleep_on_timeout_frame;
222static struct mips_frame_info wait_for_completion_frame;
223static int mips_frame_info_initialized;
224static int __init get_frame_info(struct mips_frame_info *info, void *func)
225{
226 int i;
227 union mips_instruction *ip = (union mips_instruction *)func;
228 info->pc_offset = -1;
229 info->frame_offset = -1;
230 for (i = 0; i < 128; i++, ip++) {
231 /* if jal, jalr, jr, stop. */
232 if (ip->j_format.opcode == jal_op ||
233 (ip->r_format.opcode == spec_op &&
234 (ip->r_format.func == jalr_op ||
235 ip->r_format.func == jr_op)))
236 break;
237
238 if (
239#ifdef CONFIG_MIPS32
240 ip->i_format.opcode == sw_op &&
241#endif
242#ifdef CONFIG_MIPS64
243 ip->i_format.opcode == sd_op &&
244#endif
245 ip->i_format.rs == 29)
246 {
247 /* sw / sd $ra, offset($sp) */
248 if (ip->i_format.rt == 31) {
249 if (info->pc_offset != -1)
250 break;
251 info->pc_offset =
252 ip->i_format.simmediate / sizeof(long);
253 }
254 /* sw / sd $s8, offset($sp) */
255 if (ip->i_format.rt == 30) {
256 if (info->frame_offset != -1)
257 break;
258 info->frame_offset =
259 ip->i_format.simmediate / sizeof(long);
260 }
261 }
262 }
263 if (info->pc_offset == -1 || info->frame_offset == -1) {
264 printk("Can't analyze prologue code at %p\n", func);
265 info->pc_offset = -1;
266 info->frame_offset = -1;
267 return -1;
268 }
269
270 return 0;
271}
272
273static int __init frame_info_init(void)
274{
275 mips_frame_info_initialized =
276 !get_frame_info(&schedule_frame, schedule) &&
277 !get_frame_info(&schedule_timeout_frame, schedule_timeout) &&
278 !get_frame_info(&sleep_on_frame, sleep_on) &&
279 !get_frame_info(&sleep_on_timeout_frame, sleep_on_timeout) &&
280 !get_frame_info(&wait_for_completion_frame, wait_for_completion);
281
282 return 0;
283}
284
285arch_initcall(frame_info_init);
286
287/*
288 * Return saved PC of a blocked thread.
289 */
290unsigned long thread_saved_pc(struct task_struct *tsk)
291{
292 struct thread_struct *t = &tsk->thread;
293
294	/* Newborn processes are a special case */
295 if (t->reg31 == (unsigned long) ret_from_fork)
296 return t->reg31;
297
298 if (schedule_frame.pc_offset < 0)
299 return 0;
300 return ((unsigned long *)t->reg29)[schedule_frame.pc_offset];
301}
302
303/* get_wchan - a maintenance nightmare^W^Wpain in the ass ... */
304unsigned long get_wchan(struct task_struct *p)
305{
306 unsigned long frame, pc;
307
308 if (!p || p == current || p->state == TASK_RUNNING)
309 return 0;
310
311 if (!mips_frame_info_initialized)
312 return 0;
313 pc = thread_saved_pc(p);
314 if (!in_sched_functions(pc))
315 goto out;
316
317 if (pc >= (unsigned long) sleep_on_timeout)
318 goto schedule_timeout_caller;
319 if (pc >= (unsigned long) sleep_on)
320 goto schedule_caller;
321 if (pc >= (unsigned long) interruptible_sleep_on_timeout)
322 goto schedule_timeout_caller;
323 if (pc >= (unsigned long)interruptible_sleep_on)
324 goto schedule_caller;
325 if (pc >= (unsigned long)wait_for_completion)
326 goto schedule_caller;
327 goto schedule_timeout_caller;
328
329schedule_caller:
330 frame = ((unsigned long *)p->thread.reg30)[schedule_frame.frame_offset];
331 if (pc >= (unsigned long) sleep_on)
332 pc = ((unsigned long *)frame)[sleep_on_frame.pc_offset];
333 else
334 pc = ((unsigned long *)frame)[wait_for_completion_frame.pc_offset];
335 goto out;
336
337schedule_timeout_caller:
338 /*
339 * The schedule_timeout frame
340 */
341 frame = ((unsigned long *)p->thread.reg30)[schedule_frame.frame_offset];
342
343 /*
344 * frame now points to sleep_on_timeout's frame
345 */
346 pc = ((unsigned long *)frame)[schedule_timeout_frame.pc_offset];
347
348 if (in_sched_functions(pc)) {
349 /* schedule_timeout called by [interruptible_]sleep_on_timeout */
350 frame = ((unsigned long *)frame)[schedule_timeout_frame.frame_offset];
351 pc = ((unsigned long *)frame)[sleep_on_timeout_frame.pc_offset];
352 }
353
354out:
355
356#ifdef CONFIG_MIPS64
357 if (current->thread.mflags & MF_32BIT_REGS) /* Kludge for 32-bit ps */
358 pc &= 0xffffffffUL;
359#endif
360
361 return pc;
362}
363
364EXPORT_SYMBOL(get_wchan);
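Editor's note: get_frame_info() above scans a function prologue for the "sw/sd $ra, offset($sp)" and "sw/sd $s8, offset($sp)" stores to learn where the return address and frame pointer are spilled. A hedged user-space decoder for the I-format fields it inspects (MIPS32 case only; the sample encoding is illustrative):

#include <stdint.h>
#include <stdio.h>

/* Pull apart a MIPS I-format instruction, roughly what the prologue
 * scanner above does through union mips_instruction. */
struct i_fields {
	unsigned opcode, rs, rt;
	int simmediate;
};

static struct i_fields decode(uint32_t insn)
{
	struct i_fields f = {
		.opcode     = insn >> 26,
		.rs         = (insn >> 21) & 0x1f,
		.rt         = (insn >> 16) & 0x1f,
		.simmediate = (int16_t)(insn & 0xffff),
	};
	return f;
}

int main(void)
{
	/* "sw ra, 28(sp)": opcode 0x2b (sw), rs = 29 ($sp), rt = 31 ($ra). */
	struct i_fields f = decode(0xafbf001c);

	printf("opcode %#x rs %u rt %u offset %d\n",
	       f.opcode, f.rs, f.rt, f.simmediate);
	/* The kernel records offset / sizeof(long) as pc_offset, i.e. a
	 * word index into the saved stack frame. */
	return 0;
}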
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
new file mode 100644
index 000000000000..a166954a70b3
--- /dev/null
+++ b/arch/mips/kernel/ptrace.c
@@ -0,0 +1,338 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1992 Ross Biro
7 * Copyright (C) Linus Torvalds
8 * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
9 * Copyright (C) 1996 David S. Miller
10 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
11 * Copyright (C) 1999 MIPS Technologies, Inc.
12 * Copyright (C) 2000 Ulf Carlsson
13 *
14 * At this time Linux/MIPS64 only supports syscall tracing, even for 32-bit
15 * binaries.
16 */
17#include <linux/config.h>
18#include <linux/compiler.h>
19#include <linux/kernel.h>
20#include <linux/sched.h>
21#include <linux/mm.h>
22#include <linux/errno.h>
23#include <linux/ptrace.h>
24#include <linux/audit.h>
25#include <linux/smp.h>
26#include <linux/smp_lock.h>
27#include <linux/user.h>
28#include <linux/security.h>
29#include <linux/audit.h>
30
31#include <asm/cpu.h>
32#include <asm/fpu.h>
33#include <asm/mipsregs.h>
34#include <asm/pgtable.h>
35#include <asm/page.h>
36#include <asm/system.h>
37#include <asm/uaccess.h>
38#include <asm/bootinfo.h>
39
40/*
41 * Called by kernel/ptrace.c when detaching..
42 *
43 * Make sure single step bits etc are not set.
44 */
45void ptrace_disable(struct task_struct *child)
46{
47 /* Nothing to do.. */
48}
49
50asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
51{
52 struct task_struct *child;
53 int ret;
54
55#if 0
56 printk("ptrace(r=%d,pid=%d,addr=%08lx,data=%08lx)\n",
57 (int) request, (int) pid, (unsigned long) addr,
58 (unsigned long) data);
59#endif
60 lock_kernel();
61 ret = -EPERM;
62 if (request == PTRACE_TRACEME) {
63 /* are we already being traced? */
64 if (current->ptrace & PT_PTRACED)
65 goto out;
66 if ((ret = security_ptrace(current->parent, current)))
67 goto out;
68 /* set the ptrace bit in the process flags. */
69 current->ptrace |= PT_PTRACED;
70 ret = 0;
71 goto out;
72 }
73 ret = -ESRCH;
74 read_lock(&tasklist_lock);
75 child = find_task_by_pid(pid);
76 if (child)
77 get_task_struct(child);
78 read_unlock(&tasklist_lock);
79 if (!child)
80 goto out;
81
82 ret = -EPERM;
83 if (pid == 1) /* you may not mess with init */
84 goto out_tsk;
85
86 if (request == PTRACE_ATTACH) {
87 ret = ptrace_attach(child);
88 goto out_tsk;
89 }
90
91 ret = ptrace_check_attach(child, request == PTRACE_KILL);
92 if (ret < 0)
93 goto out_tsk;
94
95 switch (request) {
96 /* when I and D space are separate, these will need to be fixed. */
97 case PTRACE_PEEKTEXT: /* read word at location addr. */
98 case PTRACE_PEEKDATA: {
99 unsigned long tmp;
100 int copied;
101
102 copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
103 ret = -EIO;
104 if (copied != sizeof(tmp))
105 break;
106 ret = put_user(tmp,(unsigned long *) data);
107 break;
108 }
109
110 /* Read the word at location addr in the USER area. */
111 case PTRACE_PEEKUSR: {
112 struct pt_regs *regs;
113 unsigned long tmp = 0;
114
115 regs = (struct pt_regs *) ((unsigned long) child->thread_info +
116 THREAD_SIZE - 32 - sizeof(struct pt_regs));
117 ret = 0; /* Default return value. */
118
119 switch (addr) {
120 case 0 ... 31:
121 tmp = regs->regs[addr];
122 break;
123 case FPR_BASE ... FPR_BASE + 31:
124 if (tsk_used_math(child)) {
125 fpureg_t *fregs = get_fpu_regs(child);
126
127#ifdef CONFIG_MIPS32
128 /*
129 * The odd registers are actually the high
130 * order bits of the values stored in the even
131 * registers - unless we're using r2k_switch.S.
132 */
133 if (addr & 1)
134 tmp = (unsigned long) (fregs[((addr & ~1) - 32)] >> 32);
135 else
136 tmp = (unsigned long) (fregs[(addr - 32)] & 0xffffffff);
137#endif
138#ifdef CONFIG_MIPS64
139 tmp = fregs[addr - FPR_BASE];
140#endif
141 } else {
142 tmp = -1; /* FP not yet used */
143 }
144 break;
145 case PC:
146 tmp = regs->cp0_epc;
147 break;
148 case CAUSE:
149 tmp = regs->cp0_cause;
150 break;
151 case BADVADDR:
152 tmp = regs->cp0_badvaddr;
153 break;
154 case MMHI:
155 tmp = regs->hi;
156 break;
157 case MMLO:
158 tmp = regs->lo;
159 break;
160 case FPC_CSR:
161 if (cpu_has_fpu)
162 tmp = child->thread.fpu.hard.fcr31;
163 else
164 tmp = child->thread.fpu.soft.fcr31;
165 break;
166 case FPC_EIR: { /* implementation / version register */
167 unsigned int flags;
168
169 if (!cpu_has_fpu)
170 break;
171
172 flags = read_c0_status();
173 __enable_fpu();
174 __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
175 write_c0_status(flags);
176 break;
177 }
178 default:
179 tmp = 0;
180 ret = -EIO;
181 goto out_tsk;
182 }
183 ret = put_user(tmp, (unsigned long *) data);
184 break;
185 }
186
187 /* when I and D space are separate, this will have to be fixed. */
188 case PTRACE_POKETEXT: /* write the word at location addr. */
189 case PTRACE_POKEDATA:
190 ret = 0;
191 if (access_process_vm(child, addr, &data, sizeof(data), 1)
192 == sizeof(data))
193 break;
194 ret = -EIO;
195 break;
196
197 case PTRACE_POKEUSR: {
198 struct pt_regs *regs;
199 ret = 0;
200 regs = (struct pt_regs *) ((unsigned long) child->thread_info +
201 THREAD_SIZE - 32 - sizeof(struct pt_regs));
202
203 switch (addr) {
204 case 0 ... 31:
205 regs->regs[addr] = data;
206 break;
207 case FPR_BASE ... FPR_BASE + 31: {
208 fpureg_t *fregs = get_fpu_regs(child);
209
210 if (!tsk_used_math(child)) {
211 /* FP not yet used */
212 memset(&child->thread.fpu.hard, ~0,
213 sizeof(child->thread.fpu.hard));
214 child->thread.fpu.hard.fcr31 = 0;
215 }
216#ifdef CONFIG_MIPS32
217 /*
218 * The odd registers are actually the high order bits
219 * of the values stored in the even registers - unless
220 * we're using r2k_switch.S.
221 */
222 if (addr & 1) {
223 fregs[(addr & ~1) - FPR_BASE] &= 0xffffffff;
224 fregs[(addr & ~1) - FPR_BASE] |= ((unsigned long long) data) << 32;
225 } else {
226 fregs[addr - FPR_BASE] &= ~0xffffffffLL;
227 fregs[addr - FPR_BASE] |= data;
228 }
229#endif
230#ifdef CONFIG_MIPS64
231 fregs[addr - FPR_BASE] = data;
232#endif
233 break;
234 }
235 case PC:
236 regs->cp0_epc = data;
237 break;
238 case MMHI:
239 regs->hi = data;
240 break;
241 case MMLO:
242 regs->lo = data;
243 break;
244 case FPC_CSR:
245 if (cpu_has_fpu)
246 child->thread.fpu.hard.fcr31 = data;
247 else
248 child->thread.fpu.soft.fcr31 = data;
249 break;
250 default:
251 /* The rest are not allowed. */
252 ret = -EIO;
253 break;
254 }
255 break;
256 }
257
258 case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
259 case PTRACE_CONT: { /* restart after signal. */
260 ret = -EIO;
261 if ((unsigned long) data > _NSIG)
262 break;
263 if (request == PTRACE_SYSCALL) {
264 set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
265 }
266 else {
267 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
268 }
269 child->exit_code = data;
270 wake_up_process(child);
271 ret = 0;
272 break;
273 }
274
275 /*
276	 * Make the child exit. The best we can do is send it a SIGKILL;
277	 * perhaps it should be recorded in the status that it wants to
278	 * exit.
279 */
280 case PTRACE_KILL:
281 ret = 0;
282 if (child->exit_state == EXIT_ZOMBIE) /* already dead */
283 break;
284 child->exit_code = SIGKILL;
285 wake_up_process(child);
286 break;
287
288 case PTRACE_DETACH: /* detach a process that was attached. */
289 ret = ptrace_detach(child, data);
290 break;
291
292 default:
293 ret = ptrace_request(child, request, addr, data);
294 break;
295 }
296
297out_tsk:
298 put_task_struct(child);
299out:
300 unlock_kernel();
301 return ret;
302}
303
304/*
305 * Notification of system call entry/exit
306 * - triggered by current->work.syscall_trace
307 */
308asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit)
309{
310 if (unlikely(current->audit_context)) {
311 if (!entryexit)
312 audit_syscall_entry(current, regs->regs[2],
313 regs->regs[4], regs->regs[5],
314 regs->regs[6], regs->regs[7]);
315 else
316 audit_syscall_exit(current, regs->regs[2]);
317 }
318
319 if (!test_thread_flag(TIF_SYSCALL_TRACE))
320 return;
321 if (!(current->ptrace & PT_PTRACED))
322 return;
323
324 /* The 0x80 provides a way for the tracing parent to distinguish
325 between a syscall stop and SIGTRAP delivery */
326 ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ?
327 0x80 : 0));
328
329 /*
330 * this isn't the same as continuing with a signal, but it will do
331 * for normal use. strace only continues with a signal if the
332 * stopping signal is not SIGTRAP. -brl
333 */
334 if (current->exit_code) {
335 send_sig(current->exit_code, current, 1);
336 current->exit_code = 0;
337 }
338}
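Editor's note: the PTRACE_PEEKUSR/POKEUSR FPR cases above rely on the 32-bit FPU convention that an odd register lives in the high half of its even partner's 64-bit save slot. A small hedged user-space sketch of that addressing; the register contents and slot layout are made up for illustration.

#include <stdint.h>
#include <stdio.h>

#define FPR_BASE 32	/* same base the ptrace register map above uses */

int main(void)
{
	uint64_t fregs[32] = { 0 };
	uint32_t tmp;
	unsigned addr = FPR_BASE + 3;		/* peek $f3 */

	/* Pretend $f2 holds 0x11111111 and $f3 holds 0x22222222: on a
	 * 32-bit FPU the pair shares the 64-bit slot of the even register. */
	fregs[2] = ((uint64_t)0x22222222 << 32) | 0x11111111;

	if (addr & 1)				/* odd register: high half */
		tmp = fregs[(addr & ~1u) - FPR_BASE] >> 32;
	else					/* even register: low half */
		tmp = fregs[addr - FPR_BASE] & 0xffffffff;

	printf("%08x\n", tmp);			/* prints 22222222 */
	return 0;
}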
diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
new file mode 100644
index 000000000000..611dee919d50
--- /dev/null
+++ b/arch/mips/kernel/ptrace32.c
@@ -0,0 +1,285 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1992 Ross Biro
7 * Copyright (C) Linus Torvalds
8 * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
9 * Copyright (C) 1996 David S. Miller
10 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
11 * Copyright (C) 1999 MIPS Technologies, Inc.
12 * Copyright (C) 2000 Ulf Carlsson
13 *
14 * At this time Linux/MIPS64 only supports syscall tracing, even for 32-bit
15 * binaries.
16 */
17#include <linux/compiler.h>
18#include <linux/kernel.h>
19#include <linux/sched.h>
20#include <linux/mm.h>
21#include <linux/errno.h>
22#include <linux/ptrace.h>
23#include <linux/smp.h>
24#include <linux/smp_lock.h>
25#include <linux/user.h>
26#include <linux/security.h>
27
28#include <asm/cpu.h>
29#include <asm/fpu.h>
30#include <asm/mipsregs.h>
31#include <asm/pgtable.h>
32#include <asm/page.h>
33#include <asm/system.h>
34#include <asm/uaccess.h>
35#include <asm/bootinfo.h>
36
37/*
38 * Tracing a 32-bit process with a 64-bit strace and vice versa will not
39 * work. I don't know how to fix this.
40 */
41asmlinkage int sys32_ptrace(int request, int pid, int addr, int data)
42{
43 struct task_struct *child;
44 int ret;
45
46#if 0
47 printk("ptrace(r=%d,pid=%d,addr=%08lx,data=%08lx)\n",
48 (int) request, (int) pid, (unsigned long) addr,
49 (unsigned long) data);
50#endif
51 lock_kernel();
52 ret = -EPERM;
53 if (request == PTRACE_TRACEME) {
54 /* are we already being traced? */
55 if (current->ptrace & PT_PTRACED)
56 goto out;
57 if ((ret = security_ptrace(current->parent, current)))
58 goto out;
59 /* set the ptrace bit in the process flags. */
60 current->ptrace |= PT_PTRACED;
61 ret = 0;
62 goto out;
63 }
64 ret = -ESRCH;
65 read_lock(&tasklist_lock);
66 child = find_task_by_pid(pid);
67 if (child)
68 get_task_struct(child);
69 read_unlock(&tasklist_lock);
70 if (!child)
71 goto out;
72
73 ret = -EPERM;
74 if (pid == 1) /* you may not mess with init */
75 goto out_tsk;
76
77 if (request == PTRACE_ATTACH) {
78 ret = ptrace_attach(child);
79 goto out_tsk;
80 }
81
82 ret = ptrace_check_attach(child, request == PTRACE_KILL);
83 if (ret < 0)
84 goto out_tsk;
85
86 switch (request) {
87 /* when I and D space are separate, these will need to be fixed. */
88 case PTRACE_PEEKTEXT: /* read word at location addr. */
89 case PTRACE_PEEKDATA: {
90 unsigned int tmp;
91 int copied;
92
93 copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
94 ret = -EIO;
95 if (copied != sizeof(tmp))
96 break;
97 ret = put_user(tmp, (unsigned int *) (unsigned long) data);
98 break;
99 }
100
101 /* Read the word at location addr in the USER area. */
102 case PTRACE_PEEKUSR: {
103 struct pt_regs *regs;
104 unsigned int tmp;
105
106 regs = (struct pt_regs *) ((unsigned long) child->thread_info +
107 THREAD_SIZE - 32 - sizeof(struct pt_regs));
108 ret = 0; /* Default return value. */
109
110 switch (addr) {
111 case 0 ... 31:
112 tmp = regs->regs[addr];
113 break;
114 case FPR_BASE ... FPR_BASE + 31:
115 if (tsk_used_math(child)) {
116 fpureg_t *fregs = get_fpu_regs(child);
117
118 /*
119 * The odd registers are actually the high
120 * order bits of the values stored in the even
121 * registers - unless we're using r2k_switch.S.
122 */
123 if (addr & 1)
124 tmp = (unsigned long) (fregs[((addr & ~1) - 32)] >> 32);
125 else
126 tmp = (unsigned long) (fregs[(addr - 32)] & 0xffffffff);
127 } else {
128 tmp = -1; /* FP not yet used */
129 }
130 break;
131 case PC:
132 tmp = regs->cp0_epc;
133 break;
134 case CAUSE:
135 tmp = regs->cp0_cause;
136 break;
137 case BADVADDR:
138 tmp = regs->cp0_badvaddr;
139 break;
140 case MMHI:
141 tmp = regs->hi;
142 break;
143 case MMLO:
144 tmp = regs->lo;
145 break;
146 case FPC_CSR:
147 if (cpu_has_fpu)
148 tmp = child->thread.fpu.hard.fcr31;
149 else
150 tmp = child->thread.fpu.soft.fcr31;
151 break;
152 case FPC_EIR: { /* implementation / version register */
153 unsigned int flags;
154
155 if (!cpu_has_fpu)
156 break;
157
158 flags = read_c0_status();
159 __enable_fpu();
160 __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
161 write_c0_status(flags);
162 break;
163 }
164 default:
165 tmp = 0;
166 ret = -EIO;
167 goto out_tsk;
168 }
169 ret = put_user(tmp, (unsigned *) (unsigned long) data);
170 break;
171 }
172
173 /* when I and D space are separate, this will have to be fixed. */
174 case PTRACE_POKETEXT: /* write the word at location addr. */
175 case PTRACE_POKEDATA:
176 ret = 0;
177 if (access_process_vm(child, addr, &data, sizeof(data), 1)
178 == sizeof(data))
179 break;
180 ret = -EIO;
181 break;
182
183 case PTRACE_POKEUSR: {
184 struct pt_regs *regs;
185 ret = 0;
186 regs = (struct pt_regs *) ((unsigned long) child->thread_info +
187 THREAD_SIZE - 32 - sizeof(struct pt_regs));
188
189 switch (addr) {
190 case 0 ... 31:
191 regs->regs[addr] = data;
192 break;
193 case FPR_BASE ... FPR_BASE + 31: {
194 fpureg_t *fregs = get_fpu_regs(child);
195
196 if (!tsk_used_math(child)) {
197 /* FP not yet used */
198 memset(&child->thread.fpu.hard, ~0,
199 sizeof(child->thread.fpu.hard));
200 child->thread.fpu.hard.fcr31 = 0;
201 }
202 /*
203 * The odd registers are actually the high order bits
204 * of the values stored in the even registers - unless
205 * we're using r2k_switch.S.
206 */
207 if (addr & 1) {
208 fregs[(addr & ~1) - FPR_BASE] &= 0xffffffff;
209 fregs[(addr & ~1) - FPR_BASE] |= ((unsigned long long) data) << 32;
210 } else {
211 fregs[addr - FPR_BASE] &= ~0xffffffffLL;
212 /* Must cast, lest sign extension fill upper
213 bits! */
214 fregs[addr - FPR_BASE] |= (unsigned int)data;
215 }
216 break;
217 }
218 case PC:
219 regs->cp0_epc = data;
220 break;
221 case MMHI:
222 regs->hi = data;
223 break;
224 case MMLO:
225 regs->lo = data;
226 break;
227 case FPC_CSR:
228 if (cpu_has_fpu)
229 child->thread.fpu.hard.fcr31 = data;
230 else
231 child->thread.fpu.soft.fcr31 = data;
232 break;
233 default:
234 /* The rest are not allowed. */
235 ret = -EIO;
236 break;
237 }
238 break;
239 }
240
241 case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
242 case PTRACE_CONT: { /* restart after signal. */
243 ret = -EIO;
244 if ((unsigned int) data > _NSIG)
245 break;
246 if (request == PTRACE_SYSCALL) {
247 set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
248 }
249 else {
250 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
251 }
252 child->exit_code = data;
253 wake_up_process(child);
254 ret = 0;
255 break;
256 }
257
258 /*
259	 * Make the child exit. The best we can do is send it a SIGKILL;
260	 * perhaps it should be recorded in the status that it wants to
261	 * exit.
262 */
263 case PTRACE_KILL:
264 ret = 0;
265 if (child->exit_state == EXIT_ZOMBIE) /* already dead */
266 break;
267 child->exit_code = SIGKILL;
268 wake_up_process(child);
269 break;
270
271 case PTRACE_DETACH: /* detach a process that was attached. */
272 ret = ptrace_detach(child, data);
273 break;
274
275 default:
276 ret = ptrace_request(child, request, addr, data);
277 break;
278 }
279
280out_tsk:
281 put_task_struct(child);
282out:
283 unlock_kernel();
284 return ret;
285}
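Editor's note: the "Must cast, lest sign extension fill upper bits!" comment in the POKEUSR path above deserves a concrete demonstration. A hedged user-space sketch with invented values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t slot = 0x1111111100000000ULL;	/* odd FPR already in the high half */
	int data = -2;				/* the 32-bit word 0xfffffffe from the tracer */

	/* Without the cast, 'data' is sign-extended to 64 bits and the OR
	 * clobbers the high half of the slot. */
	uint64_t wrong = slot | data;
	/* With the cast, only the low 32 bits are set, as intended. */
	uint64_t right = slot | (unsigned int)data;

	printf("wrong: %016llx\nright: %016llx\n",
	       (unsigned long long)wrong, (unsigned long long)right);
	return 0;
}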
diff --git a/arch/mips/kernel/r2300_fpu.S b/arch/mips/kernel/r2300_fpu.S
new file mode 100644
index 000000000000..f83c31f720c4
--- /dev/null
+++ b/arch/mips/kernel/r2300_fpu.S
@@ -0,0 +1,126 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1996, 1998 by Ralf Baechle
7 *
8 * Multi-arch abstraction and asm macros for easier reading:
9 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
10 *
11 * Further modifications to make this work:
12 * Copyright (c) 1998 Harald Koerfgen
13 */
14#include <asm/asm.h>
15#include <asm/errno.h>
16#include <asm/fpregdef.h>
17#include <asm/mipsregs.h>
18#include <asm/offset.h>
19#include <asm/regdef.h>
20
21#define EX(a,b) \
229: a,##b; \
23 .section __ex_table,"a"; \
24 PTR 9b,bad_stack; \
25 .previous
26
27 .set noreorder
28 .set mips1
29 /* Save floating point context */
30LEAF(_save_fp_context)
31 li v0, 0 # assume success
32 cfc1 t1,fcr31
33 EX(swc1 $f0,(SC_FPREGS+0)(a0))
34 EX(swc1 $f1,(SC_FPREGS+8)(a0))
35 EX(swc1 $f2,(SC_FPREGS+16)(a0))
36 EX(swc1 $f3,(SC_FPREGS+24)(a0))
37 EX(swc1 $f4,(SC_FPREGS+32)(a0))
38 EX(swc1 $f5,(SC_FPREGS+40)(a0))
39 EX(swc1 $f6,(SC_FPREGS+48)(a0))
40 EX(swc1 $f7,(SC_FPREGS+56)(a0))
41 EX(swc1 $f8,(SC_FPREGS+64)(a0))
42 EX(swc1 $f9,(SC_FPREGS+72)(a0))
43 EX(swc1 $f10,(SC_FPREGS+80)(a0))
44 EX(swc1 $f11,(SC_FPREGS+88)(a0))
45 EX(swc1 $f12,(SC_FPREGS+96)(a0))
46 EX(swc1 $f13,(SC_FPREGS+104)(a0))
47 EX(swc1 $f14,(SC_FPREGS+112)(a0))
48 EX(swc1 $f15,(SC_FPREGS+120)(a0))
49 EX(swc1 $f16,(SC_FPREGS+128)(a0))
50 EX(swc1 $f17,(SC_FPREGS+136)(a0))
51 EX(swc1 $f18,(SC_FPREGS+144)(a0))
52 EX(swc1 $f19,(SC_FPREGS+152)(a0))
53 EX(swc1 $f20,(SC_FPREGS+160)(a0))
54 EX(swc1 $f21,(SC_FPREGS+168)(a0))
55 EX(swc1 $f22,(SC_FPREGS+176)(a0))
56 EX(swc1 $f23,(SC_FPREGS+184)(a0))
57 EX(swc1 $f24,(SC_FPREGS+192)(a0))
58 EX(swc1 $f25,(SC_FPREGS+200)(a0))
59 EX(swc1 $f26,(SC_FPREGS+208)(a0))
60 EX(swc1 $f27,(SC_FPREGS+216)(a0))
61 EX(swc1 $f28,(SC_FPREGS+224)(a0))
62 EX(swc1 $f29,(SC_FPREGS+232)(a0))
63 EX(swc1 $f30,(SC_FPREGS+240)(a0))
64 EX(swc1 $f31,(SC_FPREGS+248)(a0))
65 EX(sw t1,(SC_FPC_CSR)(a0))
66 cfc1 t0,$0 # implementation/version
67 jr ra
68 .set nomacro
69 EX(sw t0,(SC_FPC_EIR)(a0))
70 .set macro
71 END(_save_fp_context)
72
73/*
74 * Restore FPU state:
75 * - fp gp registers
76 * - cp1 status/control register
77 *
78 * We base the decision which registers to restore from the signal stack
79 * frame on the current content of c0_status, not on the content of the
80 * stack frame which might have been changed by the user.
81 */
82LEAF(_restore_fp_context)
83 li v0, 0 # assume success
84 EX(lw t0,(SC_FPC_CSR)(a0))
85 EX(lwc1 $f0,(SC_FPREGS+0)(a0))
86 EX(lwc1 $f1,(SC_FPREGS+8)(a0))
87 EX(lwc1 $f2,(SC_FPREGS+16)(a0))
88 EX(lwc1 $f3,(SC_FPREGS+24)(a0))
89 EX(lwc1 $f4,(SC_FPREGS+32)(a0))
90 EX(lwc1 $f5,(SC_FPREGS+40)(a0))
91 EX(lwc1 $f6,(SC_FPREGS+48)(a0))
92 EX(lwc1 $f7,(SC_FPREGS+56)(a0))
93 EX(lwc1 $f8,(SC_FPREGS+64)(a0))
94 EX(lwc1 $f9,(SC_FPREGS+72)(a0))
95 EX(lwc1 $f10,(SC_FPREGS+80)(a0))
96 EX(lwc1 $f11,(SC_FPREGS+88)(a0))
97 EX(lwc1 $f12,(SC_FPREGS+96)(a0))
98 EX(lwc1 $f13,(SC_FPREGS+104)(a0))
99 EX(lwc1 $f14,(SC_FPREGS+112)(a0))
100 EX(lwc1 $f15,(SC_FPREGS+120)(a0))
101 EX(lwc1 $f16,(SC_FPREGS+128)(a0))
102 EX(lwc1 $f17,(SC_FPREGS+136)(a0))
103 EX(lwc1 $f18,(SC_FPREGS+144)(a0))
104 EX(lwc1 $f19,(SC_FPREGS+152)(a0))
105 EX(lwc1 $f20,(SC_FPREGS+160)(a0))
106 EX(lwc1 $f21,(SC_FPREGS+168)(a0))
107 EX(lwc1 $f22,(SC_FPREGS+176)(a0))
108 EX(lwc1 $f23,(SC_FPREGS+184)(a0))
109 EX(lwc1 $f24,(SC_FPREGS+192)(a0))
110 EX(lwc1 $f25,(SC_FPREGS+200)(a0))
111 EX(lwc1 $f26,(SC_FPREGS+208)(a0))
112 EX(lwc1 $f27,(SC_FPREGS+216)(a0))
113 EX(lwc1 $f28,(SC_FPREGS+224)(a0))
114 EX(lwc1 $f29,(SC_FPREGS+232)(a0))
115 EX(lwc1 $f30,(SC_FPREGS+240)(a0))
116 EX(lwc1 $f31,(SC_FPREGS+248)(a0))
117 jr ra
118 ctc1 t0,fcr31
119 END(_restore_fp_context)
120 .set reorder
121
122 .type fault@function
123 .ent fault
124fault: li v0, -EFAULT
125 jr ra
126 .end fault
diff --git a/arch/mips/kernel/r2300_switch.S b/arch/mips/kernel/r2300_switch.S
new file mode 100644
index 000000000000..243e7b629af6
--- /dev/null
+++ b/arch/mips/kernel/r2300_switch.S
@@ -0,0 +1,174 @@
1/*
2 * r2300_switch.S: R2300 specific task switching code.
3 *
4 * Copyright (C) 1994, 1995, 1996, 1999 by Ralf Baechle
5 * Copyright (C) 1994, 1995, 1996 by Andreas Busse
6 *
7 * Multi-cpu abstraction and macros for easier reading:
8 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
9 *
10 * Further modifications to make this work:
11 * Copyright (c) 1998-2000 Harald Koerfgen
12 */
13#include <linux/config.h>
14#include <asm/asm.h>
15#include <asm/cachectl.h>
16#include <asm/fpregdef.h>
17#include <asm/mipsregs.h>
18#include <asm/offset.h>
19#include <asm/page.h>
20#include <asm/regdef.h>
21#include <asm/stackframe.h>
22#include <asm/thread_info.h>
23
24#include <asm/asmmacro.h>
25
26 .set mips1
27 .align 5
28
29/*
30 * Offset to the current process status flags, the first 32 bytes of the
31 * stack are not used.
32 */
33#define ST_OFF (_THREAD_SIZE - 32 - PT_SIZE + PT_STATUS)
34
35/*
 36 * FPU context is saved iff the process has used its FPU in the current
37 * time slice as indicated by TIF_USEDFPU. In any case, the CU1 bit for user
38 * space STATUS register should be 0, so that a process *always* starts its
39 * userland with FPU disabled after each context switch.
40 *
 41 * The FPU will be enabled as soon as the process accesses the FPU again,
 42 * through the do_cpu() trap.
43 */
44
45/*
46 * task_struct *resume(task_struct *prev, task_struct *next,
 47 * struct thread_info *next_ti)
48 */
49LEAF(resume)
50#ifndef CONFIG_CPU_HAS_LLSC
51 sw zero, ll_bit
52#endif
53 mfc0 t1, CP0_STATUS
54 sw t1, THREAD_STATUS(a0)
55 cpu_save_nonscratch a0
56 sw ra, THREAD_REG31(a0)
57
58 /*
59 * check if we need to save FPU registers
60 */
61 lw t3, TASK_THREAD_INFO(a0)
62 lw t0, TI_FLAGS(t3)
63 li t1, _TIF_USEDFPU
64 and t2, t0, t1
65 beqz t2, 1f
66 nor t1, zero, t1
67
68 and t0, t0, t1
69 sw t0, TI_FLAGS(t3)
70
71 /*
72 * clear saved user stack CU1 bit
73 */
74 lw t0, ST_OFF(t3)
75 li t1, ~ST0_CU1
76 and t0, t0, t1
77 sw t0, ST_OFF(t3)
78
79 fpu_save_single a0, t0 # clobbers t0
80
811:
82 /*
83 * The order of restoring the registers takes care of the race
84 * updating $28, $29 and kernelsp without disabling ints.
85 */
86 move $28, a2
87 cpu_restore_nonscratch a1
88
89 addiu t1, $28, _THREAD_SIZE - 32
90 sw t1, kernelsp
91
92 mfc0 t1, CP0_STATUS /* Do we really need this? */
93 li a3, 0xff01
94 and t1, a3
95 lw a2, THREAD_STATUS(a1)
96 nor a3, $0, a3
97 and a2, a3
98 or a2, t1
99 mtc0 a2, CP0_STATUS
100 move v0, a0
101 jr ra
102 END(resume)
103
104/*
105 * Save a thread's fp context.
106 */
107LEAF(_save_fp)
108 fpu_save_single a0, t1 # clobbers t1
109 jr ra
110 END(_save_fp)
111
112/*
113 * Restore a thread's fp context.
114 */
115LEAF(_restore_fp)
116 fpu_restore_single a0, t1 # clobbers t1
117 jr ra
118 END(_restore_fp)
119
120/*
121 * Load the FPU with signalling NaNs.  The bit pattern we use has the
122 * property that it represents a signalling NaN no matter whether it is
123 * interpreted as single or as double precision.
124 *
125 * We initialize fcr31 to rounding to nearest, no exceptions.
126 */
127
128#define FPU_DEFAULT 0x00000000
129
130LEAF(_init_fpu)
131 mfc0 t0, CP0_STATUS
132 li t1, ST0_CU1
133 or t0, t1
134 mtc0 t0, CP0_STATUS
135
136 li t1, FPU_DEFAULT
137 ctc1 t1, fcr31
138
139 li t0, -1
140
141 mtc1 t0, $f0
142 mtc1 t0, $f1
143 mtc1 t0, $f2
144 mtc1 t0, $f3
145 mtc1 t0, $f4
146 mtc1 t0, $f5
147 mtc1 t0, $f6
148 mtc1 t0, $f7
149 mtc1 t0, $f8
150 mtc1 t0, $f9
151 mtc1 t0, $f10
152 mtc1 t0, $f11
153 mtc1 t0, $f12
154 mtc1 t0, $f13
155 mtc1 t0, $f14
156 mtc1 t0, $f15
157 mtc1 t0, $f16
158 mtc1 t0, $f17
159 mtc1 t0, $f18
160 mtc1 t0, $f19
161 mtc1 t0, $f20
162 mtc1 t0, $f21
163 mtc1 t0, $f22
164 mtc1 t0, $f23
165 mtc1 t0, $f24
166 mtc1 t0, $f25
167 mtc1 t0, $f26
168 mtc1 t0, $f27
169 mtc1 t0, $f28
170 mtc1 t0, $f29
171 mtc1 t0, $f30
172 mtc1 t0, $f31
173 jr ra
174 END(_init_fpu)
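
A small host-side model of the switch-time FPU policy described in the comments above: the FPU is saved only when TIF_USEDFPU is set, the flag is then cleared, and CU1 is dropped from the saved status word so the next FP instruction from userland traps and re-enables the unit lazily. The bit positions below are illustrative, not the kernel's actual values.

#include <stdio.h>

#define TIF_USEDFPU  (1u << 5)    /* illustrative bit position */
#define ST0_CU1      (1u << 29)   /* coprocessor 1 usable bit */

struct thread_model {
    unsigned int ti_flags;        /* models thread_info->flags */
    unsigned int saved_status;    /* models the status word at ST_OFF */
    int fpu_saved;
};

static void switch_out(struct thread_model *prev)
{
    if (prev->ti_flags & TIF_USEDFPU) {
        prev->ti_flags &= ~TIF_USEDFPU;   /* the nor/and sequence above */
        prev->saved_status &= ~ST0_CU1;   /* userland restarts with FPU off */
        prev->fpu_saved = 1;              /* fpu_save_single / fpu_save_double */
    }
}

int main(void)
{
    struct thread_model t = { TIF_USEDFPU, ST0_CU1, 0 };
    switch_out(&t);
    printf("saved=%d cu1=%u flags=%#x\n", t.fpu_saved,
           !!(t.saved_status & ST0_CU1), t.ti_flags);
    return 0;
}
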
diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S
new file mode 100644
index 000000000000..ebb643d8d14c
--- /dev/null
+++ b/arch/mips/kernel/r4k_fpu.S
@@ -0,0 +1,191 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1996, 98, 99, 2000, 01 Ralf Baechle
7 *
8 * Multi-arch abstraction and asm macros for easier reading:
9 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
10 *
11 * Carsten Langgaard, carstenl@mips.com
12 * Copyright (C) 2000 MIPS Technologies, Inc.
13 * Copyright (C) 1999, 2001 Silicon Graphics, Inc.
14 */
15#include <linux/config.h>
16#include <asm/asm.h>
17#include <asm/errno.h>
18#include <asm/fpregdef.h>
19#include <asm/mipsregs.h>
20#include <asm/offset.h>
21#include <asm/regdef.h>
22
23 .macro EX insn, reg, src
24 .set push
25 .set nomacro
26.ex\@: \insn \reg, \src
27 .set pop
28 .section __ex_table,"a"
29 PTR .ex\@, fault
30 .previous
31 .endm
32
33 .set noreorder
34 .set mips3
35 /* Save floating point context */
36LEAF(_save_fp_context)
37 cfc1 t1, fcr31
38
39#ifdef CONFIG_MIPS64
40 /* Store the 16 odd double precision registers */
41 EX sdc1 $f1, SC_FPREGS+8(a0)
42 EX sdc1 $f3, SC_FPREGS+24(a0)
43 EX sdc1 $f5, SC_FPREGS+40(a0)
44 EX sdc1 $f7, SC_FPREGS+56(a0)
45 EX sdc1 $f9, SC_FPREGS+72(a0)
46 EX sdc1 $f11, SC_FPREGS+88(a0)
47 EX sdc1 $f13, SC_FPREGS+104(a0)
48 EX sdc1 $f15, SC_FPREGS+120(a0)
49 EX sdc1 $f17, SC_FPREGS+136(a0)
50 EX sdc1 $f19, SC_FPREGS+152(a0)
51 EX sdc1 $f21, SC_FPREGS+168(a0)
52 EX sdc1 $f23, SC_FPREGS+184(a0)
53 EX sdc1 $f25, SC_FPREGS+200(a0)
54 EX sdc1 $f27, SC_FPREGS+216(a0)
55 EX sdc1 $f29, SC_FPREGS+232(a0)
56 EX sdc1 $f31, SC_FPREGS+248(a0)
57#endif
58
59 /* Store the 16 even double precision registers */
60 EX sdc1 $f0, SC_FPREGS+0(a0)
61 EX sdc1 $f2, SC_FPREGS+16(a0)
62 EX sdc1 $f4, SC_FPREGS+32(a0)
63 EX sdc1 $f6, SC_FPREGS+48(a0)
64 EX sdc1 $f8, SC_FPREGS+64(a0)
65 EX sdc1 $f10, SC_FPREGS+80(a0)
66 EX sdc1 $f12, SC_FPREGS+96(a0)
67 EX sdc1 $f14, SC_FPREGS+112(a0)
68 EX sdc1 $f16, SC_FPREGS+128(a0)
69 EX sdc1 $f18, SC_FPREGS+144(a0)
70 EX sdc1 $f20, SC_FPREGS+160(a0)
71 EX sdc1 $f22, SC_FPREGS+176(a0)
72 EX sdc1 $f24, SC_FPREGS+192(a0)
73 EX sdc1 $f26, SC_FPREGS+208(a0)
74 EX sdc1 $f28, SC_FPREGS+224(a0)
75 EX sdc1 $f30, SC_FPREGS+240(a0)
76 EX sw t1, SC_FPC_CSR(a0)
77 cfc1 t0, $0 # implementation/version
78 EX sw t0, SC_FPC_EIR(a0)
79
80 jr ra
81 li v0, 0 # success
82 END(_save_fp_context)
83
84#ifdef CONFIG_MIPS32_COMPAT
85 /* Save 32-bit process floating point context */
86LEAF(_save_fp_context32)
87 cfc1 t1, fcr31
88
89 EX sdc1 $f0, SC32_FPREGS+0(a0)
90 EX sdc1 $f2, SC32_FPREGS+16(a0)
91 EX sdc1 $f4, SC32_FPREGS+32(a0)
92 EX sdc1 $f6, SC32_FPREGS+48(a0)
93 EX sdc1 $f8, SC32_FPREGS+64(a0)
94 EX sdc1 $f10, SC32_FPREGS+80(a0)
95 EX sdc1 $f12, SC32_FPREGS+96(a0)
96 EX sdc1 $f14, SC32_FPREGS+112(a0)
97 EX sdc1 $f16, SC32_FPREGS+128(a0)
98 EX sdc1 $f18, SC32_FPREGS+144(a0)
99 EX sdc1 $f20, SC32_FPREGS+160(a0)
100 EX sdc1 $f22, SC32_FPREGS+176(a0)
101 EX sdc1 $f24, SC32_FPREGS+192(a0)
102 EX sdc1 $f26, SC32_FPREGS+208(a0)
103 EX sdc1 $f28, SC32_FPREGS+224(a0)
104 EX sdc1 $f30, SC32_FPREGS+240(a0)
105 EX sw t1, SC32_FPC_CSR(a0)
106 cfc1 t0, $0 # implementation/version
107 EX sw t0, SC32_FPC_EIR(a0)
108
109 jr ra
110 li v0, 0 # success
111 END(_save_fp_context32)
112#endif
113
114/*
115 * Restore FPU state:
116 * - fp gp registers
117 * - cp1 status/control register
118 */
119LEAF(_restore_fp_context)
120 EX lw t0, SC_FPC_CSR(a0)
121#ifdef CONFIG_MIPS64
122 EX ldc1 $f1, SC_FPREGS+8(a0)
123 EX ldc1 $f3, SC_FPREGS+24(a0)
124 EX ldc1 $f5, SC_FPREGS+40(a0)
125 EX ldc1 $f7, SC_FPREGS+56(a0)
126 EX ldc1 $f9, SC_FPREGS+72(a0)
127 EX ldc1 $f11, SC_FPREGS+88(a0)
128 EX ldc1 $f13, SC_FPREGS+104(a0)
129 EX ldc1 $f15, SC_FPREGS+120(a0)
130 EX ldc1 $f17, SC_FPREGS+136(a0)
131 EX ldc1 $f19, SC_FPREGS+152(a0)
132 EX ldc1 $f21, SC_FPREGS+168(a0)
133 EX ldc1 $f23, SC_FPREGS+184(a0)
134 EX ldc1 $f25, SC_FPREGS+200(a0)
135 EX ldc1 $f27, SC_FPREGS+216(a0)
136 EX ldc1 $f29, SC_FPREGS+232(a0)
137 EX ldc1 $f31, SC_FPREGS+248(a0)
138#endif
139 EX ldc1 $f0, SC_FPREGS+0(a0)
140 EX ldc1 $f2, SC_FPREGS+16(a0)
141 EX ldc1 $f4, SC_FPREGS+32(a0)
142 EX ldc1 $f6, SC_FPREGS+48(a0)
143 EX ldc1 $f8, SC_FPREGS+64(a0)
144 EX ldc1 $f10, SC_FPREGS+80(a0)
145 EX ldc1 $f12, SC_FPREGS+96(a0)
146 EX ldc1 $f14, SC_FPREGS+112(a0)
147 EX ldc1 $f16, SC_FPREGS+128(a0)
148 EX ldc1 $f18, SC_FPREGS+144(a0)
149 EX ldc1 $f20, SC_FPREGS+160(a0)
150 EX ldc1 $f22, SC_FPREGS+176(a0)
151 EX ldc1 $f24, SC_FPREGS+192(a0)
152 EX ldc1 $f26, SC_FPREGS+208(a0)
153 EX ldc1 $f28, SC_FPREGS+224(a0)
154 EX ldc1 $f30, SC_FPREGS+240(a0)
155 ctc1 t0, fcr31
156 jr ra
157 li v0, 0 # success
158 END(_restore_fp_context)
159
160#ifdef CONFIG_MIPS32_COMPAT
161LEAF(_restore_fp_context32)
162 /* Restore an o32 sigcontext. */
163 EX lw t0, SC32_FPC_CSR(a0)
164 EX ldc1 $f0, SC32_FPREGS+0(a0)
165 EX ldc1 $f2, SC32_FPREGS+16(a0)
166 EX ldc1 $f4, SC32_FPREGS+32(a0)
167 EX ldc1 $f6, SC32_FPREGS+48(a0)
168 EX ldc1 $f8, SC32_FPREGS+64(a0)
169 EX ldc1 $f10, SC32_FPREGS+80(a0)
170 EX ldc1 $f12, SC32_FPREGS+96(a0)
171 EX ldc1 $f14, SC32_FPREGS+112(a0)
172 EX ldc1 $f16, SC32_FPREGS+128(a0)
173 EX ldc1 $f18, SC32_FPREGS+144(a0)
174 EX ldc1 $f20, SC32_FPREGS+160(a0)
175 EX ldc1 $f22, SC32_FPREGS+176(a0)
176 EX ldc1 $f24, SC32_FPREGS+192(a0)
177 EX ldc1 $f26, SC32_FPREGS+208(a0)
178 EX ldc1 $f28, SC32_FPREGS+224(a0)
179 EX ldc1 $f30, SC32_FPREGS+240(a0)
180 ctc1 t0, fcr31
181 jr ra
182 li v0, 0 # success
183 END(_restore_fp_context32)
184 .set reorder
185#endif
186
187 .type fault@function
188 .ent fault
189fault: li v0, -EFAULT # failure
190 jr ra
191 .end fault
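
A quick sanity check of the sigcontext layout the tables above assume: register $fN lives at SC_FPREGS + 8*N, so the CONFIG_MIPS64 block stores the odd-numbered doubles and the common block the even-numbered ones, covering all 32 slots exactly once. A sketch that tabulates the offsets (the SC_FPREGS base value itself is irrelevant here):

#include <stdio.h>

int main(void)
{
    const int SC_FPREGS = 0;              /* base offset; value irrelevant */
    for (int reg = 0; reg < 32; reg += 2)
        printf("$f%-2d -> SC_FPREGS+%-3d   $f%-2d -> SC_FPREGS+%d\n",
               reg, SC_FPREGS + 8 * reg,
               reg + 1, SC_FPREGS + 8 * (reg + 1));
    return 0;
}
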
diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S
new file mode 100644
index 000000000000..1fc3b2eb12bd
--- /dev/null
+++ b/arch/mips/kernel/r4k_switch.S
@@ -0,0 +1,221 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1994, 1995, 1996, 1998, 1999, 2002, 2003 Ralf Baechle
7 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
8 * Copyright (C) 1994, 1995, 1996, by Andreas Busse
9 * Copyright (C) 1999 Silicon Graphics, Inc.
10 * Copyright (C) 2000 MIPS Technologies, Inc.
11 * written by Carsten Langgaard, carstenl@mips.com
12 */
13#include <linux/config.h>
14#include <asm/asm.h>
15#include <asm/cachectl.h>
16#include <asm/fpregdef.h>
17#include <asm/mipsregs.h>
18#include <asm/offset.h>
19#include <asm/page.h>
20#include <asm/pgtable-bits.h>
21#include <asm/regdef.h>
22#include <asm/stackframe.h>
23#include <asm/thread_info.h>
24
25#include <asm/asmmacro.h>
26
27/*
28 * Offset to the current process status flags; the first 32 bytes of the
29 * stack are not used.
30 */
31#define ST_OFF (_THREAD_SIZE - 32 - PT_SIZE + PT_STATUS)
32
33/*
34 * FPU context is saved iff the process has used its FPU in the current
35 * time slice as indicated by _TIF_USEDFPU. In any case, the CU1 bit of the
36 * user-space STATUS register should be 0, so that a process *always* starts
37 * its userland with the FPU disabled after each context switch.
38 *
39 * The FPU will be re-enabled as soon as the process accesses the FPU again,
40 * through the do_cpu() trap.
41 */
42
43/*
44 * task_struct *resume(task_struct *prev, task_struct *next,
45 * struct thread_info *next_ti)
46 */
47 .align 5
48 LEAF(resume)
49#ifndef CONFIG_CPU_HAS_LLSC
50 sw zero, ll_bit
51#endif
52 mfc0 t1, CP0_STATUS
53 LONG_S t1, THREAD_STATUS(a0)
54 cpu_save_nonscratch a0
55 LONG_S ra, THREAD_REG31(a0)
56
57 /*
58 * check if we need to save FPU registers
59 */
60 PTR_L t3, TASK_THREAD_INFO(a0)
61 LONG_L t0, TI_FLAGS(t3)
62 li t1, _TIF_USEDFPU
63 and t2, t0, t1
64 beqz t2, 1f
65 nor t1, zero, t1
66
67 and t0, t0, t1
68 LONG_S t0, TI_FLAGS(t3)
69
70 /*
71 * clear saved user stack CU1 bit
72 */
73 LONG_L t0, ST_OFF(t3)
74 li t1, ~ST0_CU1
75 and t0, t0, t1
76 LONG_S t0, ST_OFF(t3)
77
78 fpu_save_double a0 t1 t0 t2 # c0_status passed in t1
79 # clobbers t0 and t2
801:
81
82 /*
83 * The order of restoring the registers takes care of the race
84 * updating $28, $29 and kernelsp without disabling ints.
85 */
86 move $28, a2
87 cpu_restore_nonscratch a1
88
89 PTR_ADDIU t0, $28, _THREAD_SIZE - 32
90 set_saved_sp t0, t1, t2
91
92 mfc0 t1, CP0_STATUS /* Do we really need this? */
93 li a3, 0xff01
94 and t1, a3
95 LONG_L a2, THREAD_STATUS(a1)
96 nor a3, $0, a3
97 and a2, a3
98 or a2, t1
99 mtc0 a2, CP0_STATUS
100 move v0, a0
101 jr ra
102 END(resume)
103
104/*
105 * Save a thread's fp context.
106 */
107LEAF(_save_fp)
108#ifdef CONFIG_MIPS64
109 mfc0 t1, CP0_STATUS
110#endif
111 fpu_save_double a0 t1 t0 t2 # clobbers t1
112 jr ra
113 END(_save_fp)
114
115/*
116 * Restore a thread's fp context.
117 */
118LEAF(_restore_fp)
119 fpu_restore_double a0, t1 # clobbers t1
120 jr ra
121 END(_restore_fp)
122
123/*
124 * Load the FPU with signalling NaNs.  The bit pattern we use has the
125 * property that it represents a signalling NaN no matter whether it is
126 * interpreted as single or as double precision.
127 *
128 * We initialize fcr31 to rounding to nearest, no exceptions.
129 */
130
131#define FPU_DEFAULT 0x00000000
132
133LEAF(_init_fpu)
134 mfc0 t0, CP0_STATUS
135 li t1, ST0_CU1
136 or t0, t1
137 mtc0 t0, CP0_STATUS
138 fpu_enable_hazard
139
140 li t1, FPU_DEFAULT
141 ctc1 t1, fcr31
142
143 li t1, -1 # SNaN
144
145#ifdef CONFIG_MIPS64
146 sll t0, t0, 5
147 bgez t0, 1f # 16 / 32 register mode?
148
149 dmtc1 t1, $f1
150 dmtc1 t1, $f3
151 dmtc1 t1, $f5
152 dmtc1 t1, $f7
153 dmtc1 t1, $f9
154 dmtc1 t1, $f11
155 dmtc1 t1, $f13
156 dmtc1 t1, $f15
157 dmtc1 t1, $f17
158 dmtc1 t1, $f19
159 dmtc1 t1, $f21
160 dmtc1 t1, $f23
161 dmtc1 t1, $f25
162 dmtc1 t1, $f27
163 dmtc1 t1, $f29
164 dmtc1 t1, $f31
1651:
166#endif
167
168#ifdef CONFIG_CPU_MIPS32
169 mtc1 t1, $f0
170 mtc1 t1, $f1
171 mtc1 t1, $f2
172 mtc1 t1, $f3
173 mtc1 t1, $f4
174 mtc1 t1, $f5
175 mtc1 t1, $f6
176 mtc1 t1, $f7
177 mtc1 t1, $f8
178 mtc1 t1, $f9
179 mtc1 t1, $f10
180 mtc1 t1, $f11
181 mtc1 t1, $f12
182 mtc1 t1, $f13
183 mtc1 t1, $f14
184 mtc1 t1, $f15
185 mtc1 t1, $f16
186 mtc1 t1, $f17
187 mtc1 t1, $f18
188 mtc1 t1, $f19
189 mtc1 t1, $f20
190 mtc1 t1, $f21
191 mtc1 t1, $f22
192 mtc1 t1, $f23
193 mtc1 t1, $f24
194 mtc1 t1, $f25
195 mtc1 t1, $f26
196 mtc1 t1, $f27
197 mtc1 t1, $f28
198 mtc1 t1, $f29
199 mtc1 t1, $f30
200 mtc1 t1, $f31
201#else
202 .set mips3
203 dmtc1 t1, $f0
204 dmtc1 t1, $f2
205 dmtc1 t1, $f4
206 dmtc1 t1, $f6
207 dmtc1 t1, $f8
208 dmtc1 t1, $f10
209 dmtc1 t1, $f12
210 dmtc1 t1, $f14
211 dmtc1 t1, $f16
212 dmtc1 t1, $f18
213 dmtc1 t1, $f20
214 dmtc1 t1, $f22
215 dmtc1 t1, $f24
216 dmtc1 t1, $f26
217 dmtc1 t1, $f28
218 dmtc1 t1, $f30
219#endif
220 jr ra
221 END(_init_fpu)
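
A host-side check of the claim in the _init_fpu comment above: an all-ones word decodes as a NaN whether it is read as a single or as a double, because the exponent field is all ones and the fraction is non-zero. Note that the legacy MIPS NaN encoding treats a set fraction MSB as signalling, the opposite of the IEEE 754-2008 convention most other hosts follow, so this check only classifies the pattern as "NaN" and prints the relevant bits.

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <math.h>

int main(void)
{
    uint32_t s_bits = 0xFFFFFFFFu;
    uint64_t d_bits = 0xFFFFFFFFFFFFFFFFull;
    float  s;
    double d;

    memcpy(&s, &s_bits, sizeof(s));
    memcpy(&d, &d_bits, sizeof(d));

    printf("single: isnan=%d exponent=0x%02x fraction-msb=%d\n",
           isnan(s) != 0, (s_bits >> 23) & 0xFF, (int)((s_bits >> 22) & 1));
    printf("double: isnan=%d exponent=0x%03x fraction-msb=%d\n",
           isnan(d) != 0, (int)((d_bits >> 52) & 0x7FF), (int)((d_bits >> 51) & 1));
    return 0;
}
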
diff --git a/arch/mips/kernel/r6000_fpu.S b/arch/mips/kernel/r6000_fpu.S
new file mode 100644
index 000000000000..d8d3b13fe57f
--- /dev/null
+++ b/arch/mips/kernel/r6000_fpu.S
@@ -0,0 +1,87 @@
1/*
2 * r6000_fpu.S: Save/restore floating point context for signal handlers.
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 1996 by Ralf Baechle
9 *
10 * Multi-arch abstraction and asm macros for easier reading:
11 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
12 */
13#include <asm/asm.h>
14#include <asm/fpregdef.h>
15#include <asm/mipsregs.h>
16#include <asm/offset.h>
17#include <asm/regdef.h>
18
19 .set noreorder
20 .set mips2
21 /* Save floating point context */
22 LEAF(_save_fp_context)
23 mfc0 t0,CP0_STATUS
24 sll t0,t0,2
25 bgez t0,1f
26 nop
27
28 cfc1 t1,fcr31
29 /* Store the 16 double precision registers */
30 sdc1 $f0,(SC_FPREGS+0)(a0)
31 sdc1 $f2,(SC_FPREGS+16)(a0)
32 sdc1 $f4,(SC_FPREGS+32)(a0)
33 sdc1 $f6,(SC_FPREGS+48)(a0)
34 sdc1 $f8,(SC_FPREGS+64)(a0)
35 sdc1 $f10,(SC_FPREGS+80)(a0)
36 sdc1 $f12,(SC_FPREGS+96)(a0)
37 sdc1 $f14,(SC_FPREGS+112)(a0)
38 sdc1 $f16,(SC_FPREGS+128)(a0)
39 sdc1 $f18,(SC_FPREGS+144)(a0)
40 sdc1 $f20,(SC_FPREGS+160)(a0)
41 sdc1 $f22,(SC_FPREGS+176)(a0)
42 sdc1 $f24,(SC_FPREGS+192)(a0)
43 sdc1 $f26,(SC_FPREGS+208)(a0)
44 sdc1 $f28,(SC_FPREGS+224)(a0)
45 sdc1 $f30,(SC_FPREGS+240)(a0)
46 jr ra
47 sw t0,SC_FPC_CSR(a0)
481: jr ra
49 nop
50 END(_save_fp_context)
51
52/* Restore FPU state:
53 * - fp gp registers
54 * - cp1 status/control register
55 *
56 * We base the decision which registers to restore from the signal stack
57 * frame on the current content of c0_status, not on the content of the
58 * stack frame which might have been changed by the user.
59 */
60 LEAF(_restore_fp_context)
61 mfc0 t0,CP0_STATUS
62 sll t0,t0,2
63
64 bgez t0,1f
65 lw t0,SC_FPC_CSR(a0)
66 /* Restore the 16 double precision registers */
67 ldc1 $f0,(SC_FPREGS+0)(a0)
68 ldc1 $f2,(SC_FPREGS+16)(a0)
69 ldc1 $f4,(SC_FPREGS+32)(a0)
70 ldc1 $f6,(SC_FPREGS+48)(a0)
71 ldc1 $f8,(SC_FPREGS+64)(a0)
72 ldc1 $f10,(SC_FPREGS+80)(a0)
73 ldc1 $f12,(SC_FPREGS+96)(a0)
74 ldc1 $f14,(SC_FPREGS+112)(a0)
75 ldc1 $f16,(SC_FPREGS+128)(a0)
76 ldc1 $f18,(SC_FPREGS+144)(a0)
77 ldc1 $f20,(SC_FPREGS+160)(a0)
78 ldc1 $f22,(SC_FPREGS+176)(a0)
79 ldc1 $f24,(SC_FPREGS+192)(a0)
80 ldc1 $f26,(SC_FPREGS+208)(a0)
81 ldc1 $f28,(SC_FPREGS+224)(a0)
82 ldc1 $f30,(SC_FPREGS+240)(a0)
83 jr ra
84 ctc1 t0,fcr31
851: jr ra
86 nop
87 END(_restore_fp_context)
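
The R6000 variant decides whether to touch the FPU from the live c0_status rather than from the (user-controlled) stack frame: shifting the status word left by two moves CU1 (bit 29) into the sign position, so the bgez falls through only when the FPU is actually enabled. A small model of that test:

#include <stdio.h>
#include <stdint.h>

#define ST0_CU1 (1u << 29)

static int fpu_enabled(uint32_t c0_status)
{
    /* sign bit after "sll t0,t0,2" is exactly CU1 */
    return (int32_t)(c0_status << 2) < 0;
}

int main(void)
{
    printf("CU1 set:   %d\n", fpu_enabled(ST0_CU1));
    printf("CU1 clear: %d\n", fpu_enabled(0));
    return 0;
}
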
diff --git a/arch/mips/kernel/reset.c b/arch/mips/kernel/reset.c
new file mode 100644
index 000000000000..7e0a9821931a
--- /dev/null
+++ b/arch/mips/kernel/reset.c
@@ -0,0 +1,43 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2001 by Ralf Baechle
7 * Copyright (C) 2001 MIPS Technologies, Inc.
8 */
9#include <linux/kernel.h>
10#include <linux/module.h>
11#include <linux/types.h>
12#include <linux/reboot.h>
13#include <asm/reboot.h>
14
15/*
16 * Urgs ... Too many MIPS machines to handle this in a generic way.
17 * So handle all using function pointers to machine specific
18 * functions.
19 */
20void (*_machine_restart)(char *command);
21void (*_machine_halt)(void);
22void (*_machine_power_off)(void);
23
24void machine_restart(char *command)
25{
26 _machine_restart(command);
27}
28
29EXPORT_SYMBOL(machine_restart);
30
31void machine_halt(void)
32{
33 _machine_halt();
34}
35
36EXPORT_SYMBOL(machine_halt);
37
38void machine_power_off(void)
39{
40 _machine_power_off();
41}
42
43EXPORT_SYMBOL(machine_power_off);
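
Board support code is expected to fill in these hooks from its platform setup. A sketch for a hypothetical board (the myboard_* names are illustrative and not from this tree; real boards assign the pointers from their arch setup code in much the same way):

#include <linux/init.h>
#include <asm/reboot.h>

static void myboard_restart(char *command)
{
	/* poke the board's reset register here */
}

static void myboard_halt(void)
{
	while (1)
		;			/* spin until the operator cuts power */
}

void __init myboard_reboot_setup(void)
{
	_machine_restart = myboard_restart;
	_machine_halt = myboard_halt;
	_machine_power_off = myboard_halt;	/* no separate soft power-off */
}
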
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
new file mode 100644
index 000000000000..344f2e29eb61
--- /dev/null
+++ b/arch/mips/kernel/scall32-o32.S
@@ -0,0 +1,641 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1995, 96, 97, 98, 99, 2000, 01, 02 by Ralf Baechle
7 * Copyright (C) 2001 MIPS Technologies, Inc.
8 * Copyright (C) 2004 Thiemo Seufer
9 */
10#include <linux/config.h>
11#include <linux/errno.h>
12#include <asm/asm.h>
13#include <asm/asmmacro.h>
14#include <asm/mipsregs.h>
15#include <asm/regdef.h>
16#include <asm/stackframe.h>
17#include <asm/isadep.h>
18#include <asm/sysmips.h>
19#include <asm/thread_info.h>
20#include <asm/unistd.h>
21#include <asm/war.h>
22#include <asm/offset.h>
23
24/* Highest syscall used of any syscall flavour */
25#define MAX_SYSCALL_NO __NR_O32_Linux + __NR_O32_Linux_syscalls
26
27 .align 5
28NESTED(handle_sys, PT_SIZE, sp)
29 .set noat
30 SAVE_SOME
31 STI
32 .set at
33
34 lw t1, PT_EPC(sp) # skip syscall on return
35
36#if defined(CONFIG_BINFMT_IRIX)
37 sltiu t0, v0, MAX_SYSCALL_NO + 1 # check syscall number
38#else
39 subu v0, v0, __NR_O32_Linux # check syscall number
40 sltiu t0, v0, __NR_O32_Linux_syscalls + 1
41#endif
42 addiu t1, 4 # skip to next instruction
43 sw t1, PT_EPC(sp)
44 beqz t0, illegal_syscall
45
46 sll t0, v0, 3
47 la t1, sys_call_table
48 addu t1, t0
49 lw t2, (t1) # syscall routine
50 lw t3, 4(t1) # >= 0 if we need stack arguments
51 beqz t2, illegal_syscall
52
53 sw a3, PT_R26(sp) # save a3 for syscall restarting
54 bgez t3, stackargs
55
56stack_done:
57 lw t0, TI_FLAGS($28) # syscall tracing enabled?
58 li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
59 and t0, t1
60 bnez t0, syscall_trace_entry # -> yes
61
62 jalr t2 # Do The Real Thing (TM)
63
64 li t0, -EMAXERRNO - 1 # error?
65 sltu t0, t0, v0
66 sw t0, PT_R7(sp) # set error flag
67 beqz t0, 1f
68
69 negu v0 # error
70 sw v0, PT_R0(sp) # set flag for syscall
71 # restarting
721: sw v0, PT_R2(sp) # result
73
74o32_syscall_exit:
75 local_irq_disable # make sure need_resched and
76                                      # signals don't change between
77 # sampling and return
78 lw a2, TI_FLAGS($28) # current->work
79 li t0, _TIF_ALLWORK_MASK
80 and t0, a2
81 bnez t0, o32_syscall_exit_work
82
83 j restore_partial
84
85o32_syscall_exit_work:
86 j syscall_exit_work_partial
87
88/* ------------------------------------------------------------------------ */
89
90syscall_trace_entry:
91 SAVE_STATIC
92 move s0, t2
93 move a0, sp
94 li a1, 0
95 jal do_syscall_trace
96
97 lw a0, PT_R4(sp) # Restore argument registers
98 lw a1, PT_R5(sp)
99 lw a2, PT_R6(sp)
100 lw a3, PT_R7(sp)
101 jalr s0
102
103 li t0, -EMAXERRNO - 1 # error?
104 sltu t0, t0, v0
105 sw t0, PT_R7(sp) # set error flag
106 beqz t0, 1f
107
108 negu v0 # error
109 sw v0, PT_R0(sp) # set flag for syscall
110 # restarting
1111: sw v0, PT_R2(sp) # result
112
113 j syscall_exit
114
115/* ------------------------------------------------------------------------ */
116
117 /*
118 * More than four arguments. Try to deal with it by copying the
119 * stack arguments from the user stack to the kernel stack.
120 * This Sucks (TM).
121 */
122stackargs:
123 lw t0, PT_R29(sp) # get old user stack pointer
124
125 /*
126 * We intentionally keep the kernel stack a little below the top of
127 * userspace so we don't have to do a slower byte accurate check here.
128 */
129 lw t5, TI_ADDR_LIMIT($28)
130 addu t4, t0, 32
131 and t5, t4
132 bltz t5, bad_stack # -> sp is bad
133
134 /* Ok, copy the args from the luser stack to the kernel stack.
135 * t3 is the precomputed number of instruction bytes needed to
136 * load or store arguments 6-8.
137 */
138
139 la t1, 5f # load up to 3 arguments
140 subu t1, t3
1411: lw t5, 16(t0) # argument #5 from usp
142 .set push
143 .set noreorder
144 .set nomacro
145 jr t1
146 addiu t1, 6f - 5f
147
1482: lw t8, 28(t0) # argument #8 from usp
1493: lw t7, 24(t0) # argument #7 from usp
1504: lw t6, 20(t0) # argument #6 from usp
1515: jr t1
152 sw t5, 16(sp) # argument #5 to ksp
153
154 sw t8, 28(sp) # argument #8 to ksp
155 sw t7, 24(sp) # argument #7 to ksp
156 sw t6, 20(sp) # argument #6 to ksp
1576: j stack_done # go back
158 nop
159 .set pop
160
161 .section __ex_table,"a"
162 PTR 1b,bad_stack
163 PTR 2b,bad_stack
164 PTR 3b,bad_stack
165 PTR 4b,bad_stack
166 .previous
167
168 /*
169	 * The stack pointer for a call with more than 4 arguments is bad.
170	 * We should probably handle this case a bit more drastically.
171 */
172bad_stack:
173 negu v0 # error
174 sw v0, PT_R0(sp)
175 sw v0, PT_R2(sp)
176 li t0, 1 # set error flag
177 sw t0, PT_R7(sp)
178 j o32_syscall_exit
179
180 /*
181 * The system call does not exist in this kernel
182 */
183illegal_syscall:
184 li v0, -ENOSYS # error
185 sw v0, PT_R2(sp)
186 li t0, 1 # set error flag
187 sw t0, PT_R7(sp)
188 j o32_syscall_exit
189 END(handle_sys)
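
What the "# error?" sequence above establishes is the o32 return convention: on failure the kernel leaves the positive errno value in v0 and sets the saved a3 (PT_R7), and libc converts that pair into the usual -1 plus errno. A user-space model of both halves (the 4096 bound is only an illustrative stand-in for EMAXERRNO):

#include <stdio.h>
#include <errno.h>

struct syscall_ret { long v0; long a3; };

/* Mirrors the "li t0, -EMAXERRNO - 1; sltu; negu" sequence, in C. */
static struct syscall_ret kernel_exit_path(long raw_result)
{
    struct syscall_ret r;
    r.a3 = (raw_result < 0 && raw_result >= -4096);  /* stand-in bound */
    r.v0 = r.a3 ? -raw_result : raw_result;
    return r;
}

static long libc_wrapper(struct syscall_ret r)
{
    if (r.a3) {
        errno = (int)r.v0;
        return -1;
    }
    return r.v0;
}

int main(void)
{
    printf("ok:   %ld\n", libc_wrapper(kernel_exit_path(42)));
    printf("fail: %ld (errno=%d)\n",
           libc_wrapper(kernel_exit_path(-ENOENT)), errno);
    return 0;
}
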
190
191 LEAF(mips_atomic_set)
192 andi v0, a1, 3 # must be word aligned
193 bnez v0, bad_alignment
194
195 lw v1, TI_ADDR_LIMIT($28) # in legal address range?
196 addiu a0, a1, 4
197 or a0, a0, a1
198 and a0, a0, v1
199 bltz a0, bad_address
200
201#ifdef CONFIG_CPU_HAS_LLSC
202 /* Ok, this is the ll/sc case. World is sane :-) */
2031: ll v0, (a1)
204 move a0, a2
2052: sc a0, (a1)
206#if R10000_LLSC_WAR
207 beqzl a0, 1b
208#else
209 beqz a0, 1b
210#endif
211
212 .section __ex_table,"a"
213 PTR 1b, bad_stack
214 PTR 2b, bad_stack
215 .previous
216#else
217 sw a1, 16(sp)
218 sw a2, 20(sp)
219
220 move a0, sp
221 move a2, a1
222 li a1, 1
223 jal do_page_fault
224
225 lw a1, 16(sp)
226 lw a2, 20(sp)
227
228 /*
229 * At this point the page should be readable and writable unless
230 * there was no more memory available.
231 */
2321: lw v0, (a1)
2332: sw a2, (a1)
234
235 .section __ex_table,"a"
236 PTR 1b, no_mem
237 PTR 2b, no_mem
238 .previous
239#endif
240
241 sw zero, PT_R7(sp) # success
242 sw v0, PT_R2(sp) # result
243
244 /* Success, so skip usual error handling garbage. */
245 lw a2, TI_FLAGS($28) # syscall tracing enabled?
246 li t0, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
247 and t0, a2, t0
248 bnez t0, 1f
249
250 j o32_syscall_exit
251
2521: SAVE_STATIC
253 move a0, sp
254 li a1, 1
255 jal do_syscall_trace
256 j syscall_exit
257
258no_mem: li v0, -ENOMEM
259 jr ra
260
261bad_address:
262 li v0, -EFAULT
263 jr ra
264
265bad_alignment:
266 li v0, -EINVAL
267 jr ra
268 END(mips_atomic_set)
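
mips_atomic_set above emulates an atomic exchange on a user word, using ll/sc when the CPU has it and a page-fault-assisted fallback otherwise. A user-space sketch of the operation it offers to callers of sysmips(MIPS_ATOMIC_SET, ...), using a compiler builtin as the stand-in for ll/sc (this is a model, not how the kernel touches user memory):

#include <errno.h>
#include <stdio.h>

/* Atomically store new_value at a word-aligned address and return the old
 * value, or -EINVAL for a misaligned address (mirrors bad_alignment above). */
static long atomic_set_model(int *addr, int new_value)
{
    if ((unsigned long)addr & 3)
        return -EINVAL;
    return __atomic_exchange_n(addr, new_value, __ATOMIC_SEQ_CST);
}

int main(void)
{
    int word = 5;
    long old = atomic_set_model(&word, 9);
    printf("old=%ld new=%d\n", old, word);
    return 0;
}
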
269
270 LEAF(sys_sysmips)
271 beq a0, MIPS_ATOMIC_SET, mips_atomic_set
272 j _sys_sysmips
273 END(sys_sysmips)
274
275 LEAF(sys_syscall)
276#if defined(CONFIG_BINFMT_IRIX)
277 sltiu v0, a0, MAX_SYSCALL_NO + 1 # check syscall number
278#else
279 subu t0, a0, __NR_O32_Linux # check syscall number
280 sltiu v0, t0, __NR_O32_Linux_syscalls + 1
281#endif
282 sll t1, t0, 3
283 beqz v0, einval
284
285 lw t2, sys_call_table(t1) # syscall routine
286
287#if defined(CONFIG_BINFMT_IRIX)
288 li v1, 4000 # nr of sys_syscall
289#else
290 li v1, 4000 - __NR_O32_Linux # index of sys_syscall
291#endif
292 beq t0, v1, einval # do not recurse
293
294 /* Some syscalls like execve get their arguments from struct pt_regs
295 and claim zero arguments in the syscall table. Thus we have to
296 assume the worst case and shuffle around all potential arguments.
297 If you want performance, don't use indirect syscalls. */
298
299 move a0, a1 # shift argument registers
300 move a1, a2
301 move a2, a3
302 lw a3, 16(sp)
303 lw t4, 20(sp)
304 lw t5, 24(sp)
305 lw t6, 28(sp)
306 sw t4, 16(sp)
307 sw t5, 20(sp)
308 sw t6, 24(sp)
309 sw a0, PT_R4(sp) # .. and push back a0 - a3, some
310 sw a1, PT_R5(sp) # syscalls expect them there
311 sw a2, PT_R6(sp)
312 sw a3, PT_R7(sp)
313 sw a3, PT_R26(sp) # update a3 for syscall restarting
314 jr t2
315 /* Unreached */
316
317einval: li v0, -EINVAL
318 jr ra
319 END(sys_syscall)
320
321 .macro fifty ptr, nargs, from=1, to=50
322 sys \ptr \nargs
323 .if \to-\from
324 fifty \ptr,\nargs,"(\from+1)",\to
325 .endif
326 .endm
327
328 .macro mille ptr, nargs, from=1, to=20
329 fifty \ptr,\nargs
330 .if \to-\from
331 mille \ptr,\nargs,"(\from+1)",\to
332 .endif
333 .endm
334
335 .macro syscalltable
336#if defined(CONFIG_BINFMT_IRIX)
337 mille sys_ni_syscall 0 /* 0 - 999 SVR4 flavour */
338 mille sys_ni_syscall 0 /* 1000 - 1999 32-bit IRIX */
339 mille sys_ni_syscall 0 /* 2000 - 2999 BSD43 flavour */
340 mille sys_ni_syscall 0 /* 3000 - 3999 POSIX flavour */
341#endif
342
343 sys sys_syscall 8 /* 4000 */
344 sys sys_exit 1
345 sys sys_fork 0
346 sys sys_read 3
347 sys sys_write 3
348 sys sys_open 3 /* 4005 */
349 sys sys_close 1
350 sys sys_waitpid 3
351 sys sys_creat 2
352 sys sys_link 2
353 sys sys_unlink 1 /* 4010 */
354 sys sys_execve 0
355 sys sys_chdir 1
356 sys sys_time 1
357 sys sys_mknod 3
358 sys sys_chmod 2 /* 4015 */
359 sys sys_lchown 3
360 sys sys_ni_syscall 0
361 sys sys_ni_syscall 0 /* was sys_stat */
362 sys sys_lseek 3
363 sys sys_getpid 0 /* 4020 */
364 sys sys_mount 5
365 sys sys_oldumount 1
366 sys sys_setuid 1
367 sys sys_getuid 0
368 sys sys_stime 1 /* 4025 */
369 sys sys_ptrace 4
370 sys sys_alarm 1
371 sys sys_ni_syscall 0 /* was sys_fstat */
372 sys sys_pause 0
373 sys sys_utime 2 /* 4030 */
374 sys sys_ni_syscall 0
375 sys sys_ni_syscall 0
376 sys sys_access 2
377 sys sys_nice 1
378 sys sys_ni_syscall 0 /* 4035 */
379 sys sys_sync 0
380 sys sys_kill 2
381 sys sys_rename 2
382 sys sys_mkdir 2
383 sys sys_rmdir 1 /* 4040 */
384 sys sys_dup 1
385 sys sys_pipe 0
386 sys sys_times 1
387 sys sys_ni_syscall 0
388 sys sys_brk 1 /* 4045 */
389 sys sys_setgid 1
390 sys sys_getgid 0
391 sys sys_ni_syscall 0 /* was signal(2) */
392 sys sys_geteuid 0
393 sys sys_getegid 0 /* 4050 */
394 sys sys_acct 1
395 sys sys_umount 2
396 sys sys_ni_syscall 0
397 sys sys_ioctl 3
398 sys sys_fcntl 3 /* 4055 */
399 sys sys_ni_syscall 2
400 sys sys_setpgid 2
401 sys sys_ni_syscall 0
402 sys sys_olduname 1
403 sys sys_umask 1 /* 4060 */
404 sys sys_chroot 1
405 sys sys_ustat 2
406 sys sys_dup2 2
407 sys sys_getppid 0
408 sys sys_getpgrp 0 /* 4065 */
409 sys sys_setsid 0
410 sys sys_sigaction 3
411 sys sys_sgetmask 0
412 sys sys_ssetmask 1
413 sys sys_setreuid 2 /* 4070 */
414 sys sys_setregid 2
415 sys sys_sigsuspend 0
416 sys sys_sigpending 1
417 sys sys_sethostname 2
418 sys sys_setrlimit 2 /* 4075 */
419 sys sys_getrlimit 2
420 sys sys_getrusage 2
421 sys sys_gettimeofday 2
422 sys sys_settimeofday 2
423 sys sys_getgroups 2 /* 4080 */
424 sys sys_setgroups 2
425 sys sys_ni_syscall 0 /* old_select */
426 sys sys_symlink 2
427 sys sys_ni_syscall 0 /* was sys_lstat */
428 sys sys_readlink 3 /* 4085 */
429 sys sys_uselib 1
430 sys sys_swapon 2
431 sys sys_reboot 3
432 sys old_readdir 3
433 sys old_mmap 6 /* 4090 */
434 sys sys_munmap 2
435 sys sys_truncate 2
436 sys sys_ftruncate 2
437 sys sys_fchmod 2
438 sys sys_fchown 3 /* 4095 */
439 sys sys_getpriority 2
440 sys sys_setpriority 3
441 sys sys_ni_syscall 0
442 sys sys_statfs 2
443 sys sys_fstatfs 2 /* 4100 */
444 sys sys_ni_syscall 0 /* was ioperm(2) */
445 sys sys_socketcall 2
446 sys sys_syslog 3
447 sys sys_setitimer 3
448 sys sys_getitimer 2 /* 4105 */
449 sys sys_newstat 2
450 sys sys_newlstat 2
451 sys sys_newfstat 2
452 sys sys_uname 1
453 sys sys_ni_syscall 0 /* 4110 was iopl(2) */
454 sys sys_vhangup 0
455 sys sys_ni_syscall 0 /* was sys_idle() */
456 sys sys_ni_syscall 0 /* was sys_vm86 */
457 sys sys_wait4 4
458 sys sys_swapoff 1 /* 4115 */
459 sys sys_sysinfo 1
460 sys sys_ipc 6
461 sys sys_fsync 1
462 sys sys_sigreturn 0
463 sys sys_clone 0 /* 4120 */
464 sys sys_setdomainname 2
465 sys sys_newuname 1
466 sys sys_ni_syscall 0 /* sys_modify_ldt */
467 sys sys_adjtimex 1
468 sys sys_mprotect 3 /* 4125 */
469 sys sys_sigprocmask 3
470 sys sys_ni_syscall 0 /* was create_module */
471 sys sys_init_module 5
472 sys sys_delete_module 1
473 sys sys_ni_syscall 0 /* 4130 was get_kernel_syms */
474 sys sys_quotactl 4
475 sys sys_getpgid 1
476 sys sys_fchdir 1
477 sys sys_bdflush 2
478 sys sys_sysfs 3 /* 4135 */
479 sys sys_personality 1
480 sys sys_ni_syscall 0 /* for afs_syscall */
481 sys sys_setfsuid 1
482 sys sys_setfsgid 1
483 sys sys_llseek 5 /* 4140 */
484 sys sys_getdents 3
485 sys sys_select 5
486 sys sys_flock 2
487 sys sys_msync 3
488 sys sys_readv 3 /* 4145 */
489 sys sys_writev 3
490 sys sys_cacheflush 3
491 sys sys_cachectl 3
492 sys sys_sysmips 4
493 sys sys_ni_syscall 0 /* 4150 */
494 sys sys_getsid 1
495 sys sys_fdatasync 1
496 sys sys_sysctl 1
497 sys sys_mlock 2
498 sys sys_munlock 2 /* 4155 */
499 sys sys_mlockall 1
500 sys sys_munlockall 0
501 sys sys_sched_setparam 2
502 sys sys_sched_getparam 2
503 sys sys_sched_setscheduler 3 /* 4160 */
504 sys sys_sched_getscheduler 1
505 sys sys_sched_yield 0
506 sys sys_sched_get_priority_max 1
507 sys sys_sched_get_priority_min 1
508 sys sys_sched_rr_get_interval 2 /* 4165 */
509	sys	sys_nanosleep		2
510	sys	sys_mremap		4
511 sys sys_accept 3
512 sys sys_bind 3
513 sys sys_connect 3 /* 4170 */
514 sys sys_getpeername 3
515 sys sys_getsockname 3
516 sys sys_getsockopt 5
517 sys sys_listen 2
518 sys sys_recv 4 /* 4175 */
519 sys sys_recvfrom 6
520 sys sys_recvmsg 3
521 sys sys_send 4
522 sys sys_sendmsg 3
523 sys sys_sendto 6 /* 4180 */
524 sys sys_setsockopt 5
525 sys sys_shutdown 2
526 sys sys_socket 3
527 sys sys_socketpair 4
528 sys sys_setresuid 3 /* 4185 */
529 sys sys_getresuid 3
530 sys sys_ni_syscall 0 /* was sys_query_module */
531 sys sys_poll 3
532 sys sys_nfsservctl 3
533 sys sys_setresgid 3 /* 4190 */
534 sys sys_getresgid 3
535 sys sys_prctl 5
536 sys sys_rt_sigreturn 0
537 sys sys_rt_sigaction 4
538 sys sys_rt_sigprocmask 4 /* 4195 */
539 sys sys_rt_sigpending 2
540 sys sys_rt_sigtimedwait 4
541 sys sys_rt_sigqueueinfo 3
542 sys sys_rt_sigsuspend 0
543 sys sys_pread64 6 /* 4200 */
544 sys sys_pwrite64 6
545 sys sys_chown 3
546 sys sys_getcwd 2
547 sys sys_capget 2
548 sys sys_capset 2 /* 4205 */
549 sys sys_sigaltstack 0
550 sys sys_sendfile 4
551 sys sys_ni_syscall 0
552 sys sys_ni_syscall 0
553 sys sys_mmap2 6 /* 4210 */
554 sys sys_truncate64 4
555 sys sys_ftruncate64 4
556 sys sys_stat64 2
557 sys sys_lstat64 2
558 sys sys_fstat64 2 /* 4215 */
559 sys sys_pivot_root 2
560 sys sys_mincore 3
561 sys sys_madvise 3
562 sys sys_getdents64 3
563 sys sys_fcntl64 3 /* 4220 */
564 sys sys_ni_syscall 0
565 sys sys_gettid 0
566 sys sys_readahead 5
567 sys sys_setxattr 5
568 sys sys_lsetxattr 5 /* 4225 */
569 sys sys_fsetxattr 5
570 sys sys_getxattr 4
571 sys sys_lgetxattr 4
572 sys sys_fgetxattr 4
573 sys sys_listxattr 3 /* 4230 */
574 sys sys_llistxattr 3
575 sys sys_flistxattr 3
576 sys sys_removexattr 2
577 sys sys_lremovexattr 2
578 sys sys_fremovexattr 2 /* 4235 */
579 sys sys_tkill 2
580 sys sys_sendfile64 5
581 sys sys_futex 2
582 sys sys_sched_setaffinity 3
583 sys sys_sched_getaffinity 3 /* 4240 */
584 sys sys_io_setup 2
585 sys sys_io_destroy 1
586 sys sys_io_getevents 5
587 sys sys_io_submit 3
588 sys sys_io_cancel 3 /* 4245 */
589 sys sys_exit_group 1
590 sys sys_lookup_dcookie 3
591 sys sys_epoll_create 1
592 sys sys_epoll_ctl 4
593 sys sys_epoll_wait 3 /* 4250 */
594 sys sys_remap_file_pages 5
595 sys sys_set_tid_address 1
596 sys sys_restart_syscall 0
597 sys sys_fadvise64_64 7
598 sys sys_statfs64 3 /* 4255 */
599 sys sys_fstatfs64 2
600 sys sys_timer_create 3
601 sys sys_timer_settime 4
602 sys sys_timer_gettime 2
603 sys sys_timer_getoverrun 1 /* 4260 */
604 sys sys_timer_delete 1
605 sys sys_clock_settime 2
606 sys sys_clock_gettime 2
607 sys sys_clock_getres 2
608 sys sys_clock_nanosleep 4 /* 4265 */
609 sys sys_tgkill 3
610 sys sys_utimes 2
611 sys sys_mbind 4
612 sys sys_ni_syscall 0 /* sys_get_mempolicy */
613 sys sys_ni_syscall 0 /* 4270 sys_set_mempolicy */
614 sys sys_mq_open 4
615 sys sys_mq_unlink 1
616 sys sys_mq_timedsend 5
617 sys sys_mq_timedreceive 5
618 sys sys_mq_notify 2 /* 4275 */
619 sys sys_mq_getsetattr 3
620 sys sys_ni_syscall 0 /* sys_vserver */
621 sys sys_waitid 4
622 sys sys_ni_syscall 0 /* available, was setaltroot */
623 sys sys_add_key 5
624 sys sys_request_key 4
625 sys sys_keyctl 5
626
627 .endm
628
629 /* We pre-compute the number of _instruction_ bytes needed to
630 load or store the arguments 6-8. Negative values are ignored. */
631
632 .macro sys function, nargs
633 PTR \function
634 LONG (\nargs << 2) - (5 << 2)
635 .endm
636
637 .align 3
638 .type sys_call_table,@object
639EXPORT(sys_call_table)
640 syscalltable
641 .size sys_call_table, . - sys_call_table
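
The sys macro defined above stores, next to each handler pointer, (nargs << 2) - (5 << 2): the number of instruction bytes the stackargs code backs up from local label 5 so that exactly arguments 6..nargs are copied from the user stack, with a negative value meaning "at most four register arguments, skip stackargs entirely" (the bgez test in handle_sys). A small program that tabulates the stored value:

#include <stdio.h>

int main(void)
{
    const int insn = 4;                       /* bytes per MIPS instruction */
    for (int nargs = 0; nargs <= 8; nargs++) {
        int t3 = (nargs << 2) - (5 << 2);
        if (t3 < 0) {
            printf("nargs=%d  t3=%4d  -> no stack arguments copied\n",
                   nargs, t3);
            continue;
        }
        int extra_loads = t3 / insn;          /* lw's executed before label 5 */
        printf("nargs=%d  t3=%4d  -> copies argument #5 plus %d more from usp\n",
               nargs, t3, extra_loads);
    }
    return 0;
}
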
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
new file mode 100644
index 000000000000..32efb888160a
--- /dev/null
+++ b/arch/mips/kernel/scall64-64.S
@@ -0,0 +1,451 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1995, 96, 97, 98, 99, 2000, 01, 02 by Ralf Baechle
7 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
8 * Copyright (C) 2001 MIPS Technologies, Inc.
9 */
10#include <linux/config.h>
11#include <linux/errno.h>
12#include <asm/asm.h>
13#include <asm/asmmacro.h>
14#include <asm/mipsregs.h>
15#include <asm/regdef.h>
16#include <asm/stackframe.h>
17#include <asm/offset.h>
18#include <asm/sysmips.h>
19#include <asm/thread_info.h>
20#include <asm/unistd.h>
21#include <asm/war.h>
22
23#ifndef CONFIG_BINFMT_ELF32
24/* Neither O32 nor N32, so define handle_sys here */
25#define handle_sys64 handle_sys
26#endif
27
28 .align 5
29NESTED(handle_sys64, PT_SIZE, sp)
30#if !defined(CONFIG_MIPS32_O32) && !defined(CONFIG_MIPS32_N32)
31 /*
32 * When 32-bit compatibility is configured scall_o32.S
33 * already did this.
34 */
35 .set noat
36 SAVE_SOME
37 STI
38 .set at
39#endif
40
41 dsubu t0, v0, __NR_64_Linux # check syscall number
42 sltiu t0, t0, __NR_64_Linux_syscalls + 1
43#if !defined(CONFIG_MIPS32_O32) && !defined(CONFIG_MIPS32_N32)
44 ld t1, PT_EPC(sp) # skip syscall on return
45 daddiu t1, 4 # skip to next instruction
46 sd t1, PT_EPC(sp)
47#endif
48 beqz t0, illegal_syscall
49
50 dsll t0, v0, 3 # offset into table
51 ld t2, (sys_call_table - (__NR_64_Linux * 8))(t0)
52 # syscall routine
53
54 sd a3, PT_R26(sp) # save a3 for syscall restarting
55
56 li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
57 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
58 and t0, t1, t0
59 bnez t0, syscall_trace_entry
60
61 jalr t2 # Do The Real Thing (TM)
62
63 li t0, -EMAXERRNO - 1 # error?
64 sltu t0, t0, v0
65 sd t0, PT_R7(sp) # set error flag
66 beqz t0, 1f
67
68 dnegu v0 # error
69 sd v0, PT_R0(sp) # set flag for syscall
70 # restarting
711: sd v0, PT_R2(sp) # result
72
73n64_syscall_exit:
74 local_irq_disable # make sure need_resched and
75                                      # signals don't change between
76 # sampling and return
77 LONG_L a2, TI_FLAGS($28) # current->work
78 li t0, _TIF_ALLWORK_MASK
79 and t0, a2, t0
80 bnez t0, n64_syscall_exit_work
81
82 j restore_partial
83
84n64_syscall_exit_work:
85 j syscall_exit_work_partial
86
87/* ------------------------------------------------------------------------ */
88
89syscall_trace_entry:
90 SAVE_STATIC
91 move s0, t2
92 move a0, sp
93 li a1, 0
94 jal do_syscall_trace
95
96 ld a0, PT_R4(sp) # Restore argument registers
97 ld a1, PT_R5(sp)
98 ld a2, PT_R6(sp)
99 ld a3, PT_R7(sp)
100 ld a4, PT_R8(sp)
101 ld a5, PT_R9(sp)
102 jalr s0
103
104 li t0, -EMAXERRNO - 1 # error?
105 sltu t0, t0, v0
106 sd t0, PT_R7(sp) # set error flag
107 beqz t0, 1f
108
109 dnegu v0 # error
110 sd v0, PT_R0(sp) # set flag for syscall restarting
1111: sd v0, PT_R2(sp) # result
112
113 j syscall_exit
114
115illegal_syscall:
116 /* This also isn't a 64-bit syscall, throw an error. */
117 li v0, -ENOSYS # error
118 sd v0, PT_R2(sp)
119 li t0, 1 # set error flag
120 sd t0, PT_R7(sp)
121 j n64_syscall_exit
122 END(handle_sys64)
123
124 LEAF(mips_atomic_set)
125 andi v0, a1, 3 # must be word aligned
126 bnez v0, bad_alignment
127
128 LONG_L v1, TI_ADDR_LIMIT($28) # in legal address range?
129 LONG_ADDIU a0, a1, 4
130 or a0, a0, a1
131 and a0, a0, v1
132 bltz a0, bad_address
133
134#ifdef CONFIG_CPU_HAS_LLSC
135 /* Ok, this is the ll/sc case. World is sane :-) */
1361: ll v0, (a1)
137 move a0, a2
1382: sc a0, (a1)
139#if R10000_LLSC_WAR
140 beqzl a0, 1b
141#else
142 beqz a0, 1b
143#endif
144
145 .section __ex_table,"a"
146 PTR 1b, bad_stack
147 PTR 2b, bad_stack
148 .previous
149#else
150 sw a1, 16(sp)
151 sw a2, 20(sp)
152
153 move a0, sp
154 move a2, a1
155 li a1, 1
156 jal do_page_fault
157
158 lw a1, 16(sp)
159 lw a2, 20(sp)
160
161 /*
162 * At this point the page should be readable and writable unless
163 * there was no more memory available.
164 */
1651: lw v0, (a1)
1662: sw a2, (a1)
167
168 .section __ex_table,"a"
169 PTR 1b, no_mem
170 PTR 2b, no_mem
171 .previous
172#endif
173
174 sd zero, PT_R7(sp) # success
175 sd v0, PT_R2(sp) # result
176
177 /* Success, so skip usual error handling garbage. */
178 li t0, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
179 LONG_L a2, TI_FLAGS($28) # syscall tracing enabled?
180 and t0, a2, t0
181 bnez t0, 1f
182
183 j n64_syscall_exit
184
1851: SAVE_STATIC
186 move a0, sp
187 li a1, 1
188 jal do_syscall_trace
189 j syscall_exit
190
191no_mem: li v0, -ENOMEM
192 jr ra
193
194bad_address:
195 li v0, -EFAULT
196 jr ra
197
198bad_alignment:
199 li v0, -EINVAL
200 jr ra
201 END(mips_atomic_set)
202
203 LEAF(sys_sysmips)
204 beq a0, MIPS_ATOMIC_SET, mips_atomic_set
205 j _sys_sysmips
206 END(sys_sysmips)
207
208 .align 3
209sys_call_table:
210 PTR sys_read /* 5000 */
211 PTR sys_write
212 PTR sys_open
213 PTR sys_close
214 PTR sys_newstat
215 PTR sys_newfstat /* 5005 */
216 PTR sys_newlstat
217 PTR sys_poll
218 PTR sys_lseek
219 PTR old_mmap
220 PTR sys_mprotect /* 5010 */
221 PTR sys_munmap
222 PTR sys_brk
223 PTR sys_rt_sigaction
224 PTR sys_rt_sigprocmask
225 PTR sys_ioctl /* 5015 */
226 PTR sys_pread64
227 PTR sys_pwrite64
228 PTR sys_readv
229 PTR sys_writev
230 PTR sys_access /* 5020 */
231 PTR sys_pipe
232 PTR sys_select
233 PTR sys_sched_yield
234 PTR sys_mremap
235 PTR sys_msync /* 5025 */
236 PTR sys_mincore
237 PTR sys_madvise
238 PTR sys_shmget
239 PTR sys_shmat
240 PTR sys_shmctl /* 5030 */
241 PTR sys_dup
242 PTR sys_dup2
243 PTR sys_pause
244 PTR sys_nanosleep
245 PTR sys_getitimer /* 5035 */
246 PTR sys_setitimer
247 PTR sys_alarm
248 PTR sys_getpid
249 PTR sys_sendfile64
250 PTR sys_socket /* 5040 */
251 PTR sys_connect
252 PTR sys_accept
253 PTR sys_sendto
254 PTR sys_recvfrom
255 PTR sys_sendmsg /* 5045 */
256 PTR sys_recvmsg
257 PTR sys_shutdown
258 PTR sys_bind
259 PTR sys_listen
260 PTR sys_getsockname /* 5050 */
261 PTR sys_getpeername
262 PTR sys_socketpair
263 PTR sys_setsockopt
264 PTR sys_getsockopt
265 PTR sys_clone /* 5055 */
266 PTR sys_fork
267 PTR sys_execve
268 PTR sys_exit
269 PTR sys_wait4
270 PTR sys_kill /* 5060 */
271 PTR sys_newuname
272 PTR sys_semget
273 PTR sys_semop
274 PTR sys_semctl
275 PTR sys_shmdt /* 5065 */
276 PTR sys_msgget
277 PTR sys_msgsnd
278 PTR sys_msgrcv
279 PTR sys_msgctl
280 PTR sys_fcntl /* 5070 */
281 PTR sys_flock
282 PTR sys_fsync
283 PTR sys_fdatasync
284 PTR sys_truncate
285 PTR sys_ftruncate /* 5075 */
286 PTR sys_getdents
287 PTR sys_getcwd
288 PTR sys_chdir
289 PTR sys_fchdir
290 PTR sys_rename /* 5080 */
291 PTR sys_mkdir
292 PTR sys_rmdir
293 PTR sys_creat
294 PTR sys_link
295 PTR sys_unlink /* 5085 */
296 PTR sys_symlink
297 PTR sys_readlink
298 PTR sys_chmod
299 PTR sys_fchmod
300 PTR sys_chown /* 5090 */
301 PTR sys_fchown
302 PTR sys_lchown
303 PTR sys_umask
304 PTR sys_gettimeofday
305 PTR sys_getrlimit /* 5095 */
306 PTR sys_getrusage
307 PTR sys_sysinfo
308 PTR sys_times
309 PTR sys_ptrace
310 PTR sys_getuid /* 5100 */
311 PTR sys_syslog
312 PTR sys_getgid
313 PTR sys_setuid
314 PTR sys_setgid
315 PTR sys_geteuid /* 5105 */
316 PTR sys_getegid
317 PTR sys_setpgid
318 PTR sys_getppid
319 PTR sys_getpgrp
320 PTR sys_setsid /* 5110 */
321 PTR sys_setreuid
322 PTR sys_setregid
323 PTR sys_getgroups
324 PTR sys_setgroups
325 PTR sys_setresuid /* 5115 */
326 PTR sys_getresuid
327 PTR sys_setresgid
328 PTR sys_getresgid
329 PTR sys_getpgid
330 PTR sys_setfsuid /* 5120 */
331 PTR sys_setfsgid
332 PTR sys_getsid
333 PTR sys_capget
334 PTR sys_capset
335 PTR sys_rt_sigpending /* 5125 */
336 PTR sys_rt_sigtimedwait
337 PTR sys_rt_sigqueueinfo
338 PTR sys_rt_sigsuspend
339 PTR sys_sigaltstack
340 PTR sys_utime /* 5130 */
341 PTR sys_mknod
342 PTR sys_personality
343 PTR sys_ustat
344 PTR sys_statfs
345 PTR sys_fstatfs /* 5135 */
346 PTR sys_sysfs
347 PTR sys_getpriority
348 PTR sys_setpriority
349 PTR sys_sched_setparam
350 PTR sys_sched_getparam /* 5140 */
351 PTR sys_sched_setscheduler
352 PTR sys_sched_getscheduler
353 PTR sys_sched_get_priority_max
354 PTR sys_sched_get_priority_min
355 PTR sys_sched_rr_get_interval /* 5145 */
356 PTR sys_mlock
357 PTR sys_munlock
358 PTR sys_mlockall
359 PTR sys_munlockall
360 PTR sys_vhangup /* 5150 */
361 PTR sys_pivot_root
362 PTR sys_sysctl
363 PTR sys_prctl
364 PTR sys_adjtimex
365 PTR sys_setrlimit /* 5155 */
366 PTR sys_chroot
367 PTR sys_sync
368 PTR sys_acct
369 PTR sys_settimeofday
370 PTR sys_mount /* 5160 */
371 PTR sys_umount
372 PTR sys_swapon
373 PTR sys_swapoff
374 PTR sys_reboot
375 PTR sys_sethostname /* 5165 */
376 PTR sys_setdomainname
377 PTR sys_ni_syscall /* was create_module */
378 PTR sys_init_module
379 PTR sys_delete_module
380 PTR sys_ni_syscall /* 5170, was get_kernel_syms */
381 PTR sys_ni_syscall /* was query_module */
382 PTR sys_quotactl
383 PTR sys_nfsservctl
384 PTR sys_ni_syscall /* res. for getpmsg */
385 PTR sys_ni_syscall /* 5175 for putpmsg */
386 PTR sys_ni_syscall /* res. for afs_syscall */
387 PTR sys_ni_syscall /* res. for security */
388 PTR sys_gettid
389 PTR sys_readahead
390 PTR sys_setxattr /* 5180 */
391 PTR sys_lsetxattr
392 PTR sys_fsetxattr
393 PTR sys_getxattr
394 PTR sys_lgetxattr
395 PTR sys_fgetxattr /* 5185 */
396 PTR sys_listxattr
397 PTR sys_llistxattr
398 PTR sys_flistxattr
399 PTR sys_removexattr
400 PTR sys_lremovexattr /* 5190 */
401 PTR sys_fremovexattr
402 PTR sys_tkill
403 PTR sys_ni_syscall
404 PTR sys_futex
405 PTR sys_sched_setaffinity /* 5195 */
406 PTR sys_sched_getaffinity
407 PTR sys_cacheflush
408 PTR sys_cachectl
409 PTR sys_sysmips
410 PTR sys_io_setup /* 5200 */
411 PTR sys_io_destroy
412 PTR sys_io_getevents
413 PTR sys_io_submit
414 PTR sys_io_cancel
415 PTR sys_exit_group /* 5205 */
416 PTR sys_lookup_dcookie
417 PTR sys_epoll_create
418 PTR sys_epoll_ctl
419 PTR sys_epoll_wait
420 PTR sys_remap_file_pages /* 5210 */
421 PTR sys_rt_sigreturn
422 PTR sys_set_tid_address
423 PTR sys_restart_syscall
424 PTR sys_semtimedop
425 PTR sys_fadvise64_64 /* 5215 */
426 PTR sys_timer_create
427 PTR sys_timer_settime
428 PTR sys_timer_gettime
429 PTR sys_timer_getoverrun
430 PTR sys_timer_delete /* 5220 */
431 PTR sys_clock_settime
432 PTR sys_clock_gettime
433 PTR sys_clock_getres
434 PTR sys_clock_nanosleep
435 PTR sys_tgkill /* 5225 */
436 PTR sys_utimes
437 PTR sys_mbind
438 PTR sys_ni_syscall /* sys_get_mempolicy */
439 PTR sys_ni_syscall /* sys_set_mempolicy */
440 PTR sys_mq_open /* 5230 */
441 PTR sys_mq_unlink
442 PTR sys_mq_timedsend
443 PTR sys_mq_timedreceive
444 PTR sys_mq_notify
445 PTR sys_mq_getsetattr /* 5235 */
446 PTR sys_ni_syscall /* sys_vserver */
447 PTR sys_waitid
448 PTR sys_ni_syscall /* available, was setaltroot */
449 PTR sys_add_key
450 PTR sys_request_key /* 5240 */
451 PTR sys_keyctl
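
Indexing in handle_sys64 works directly off the raw 5000-based syscall number: each table entry is a single pointer, and the __NR_64_Linux * 8 bias is folded into the load's displacement. A short model of the arithmetic:

#include <stdio.h>

int main(void)
{
    const long NR_64_LINUX = 5000;        /* base of the 64-bit syscall range */
    const long entry_size = 8;            /* one PTR per syscall */
    long nr = 5003;                       /* close(2) in the table above */

    long t0 = nr << 3;                                  /* dsll t0, v0, 3 */
    long byte_offset = t0 - NR_64_LINUX * entry_size;   /* bias in the ld */
    printf("syscall %ld -> table slot %ld (byte offset %ld)\n",
           nr, byte_offset / entry_size, byte_offset);
    return 0;
}
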
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
new file mode 100644
index 000000000000..e52049c87bc3
--- /dev/null
+++ b/arch/mips/kernel/scall64-n32.S
@@ -0,0 +1,365 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1995, 96, 97, 98, 99, 2000, 01 by Ralf Baechle
7 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
8 * Copyright (C) 2001 MIPS Technologies, Inc.
9 */
10#include <linux/config.h>
11#include <linux/errno.h>
12#include <asm/asm.h>
13#include <asm/asmmacro.h>
14#include <asm/mipsregs.h>
15#include <asm/regdef.h>
16#include <asm/stackframe.h>
17#include <asm/thread_info.h>
18#include <asm/unistd.h>
19
20/* This duplicates the definition from <linux/sched.h> */
21#define PT_TRACESYS 0x00000002 /* tracing system calls */
22
23/* This duplicates the definition from <asm/signal.h> */
24#define SIGILL 4 /* Illegal instruction (ANSI). */
25
26#ifndef CONFIG_MIPS32_O32
27/* No O32, so define handle_sys here */
28#define handle_sysn32 handle_sys
29#endif
30
31 .align 5
32NESTED(handle_sysn32, PT_SIZE, sp)
33#ifndef CONFIG_MIPS32_O32
34 .set noat
35 SAVE_SOME
36 STI
37 .set at
38#endif
39
40 dsubu t0, v0, __NR_N32_Linux # check syscall number
41 sltiu t0, t0, __NR_N32_Linux_syscalls + 1
42
43#ifndef CONFIG_MIPS32_O32
44 ld t1, PT_EPC(sp) # skip syscall on return
45 daddiu t1, 4 # skip to next instruction
46 sd t1, PT_EPC(sp)
47#endif
48 beqz t0, not_n32_scall
49
50 dsll t0, v0, 3 # offset into table
51 ld t2, (sysn32_call_table - (__NR_N32_Linux * 8))(t0)
52
53 sd a3, PT_R26(sp) # save a3 for syscall restarting
54
55 li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
56 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
57 and t0, t1, t0
58 bnez t0, n32_syscall_trace_entry
59
60 jalr t2 # Do The Real Thing (TM)
61
62 li t0, -EMAXERRNO - 1 # error?
63 sltu t0, t0, v0
64 sd t0, PT_R7(sp) # set error flag
65 beqz t0, 1f
66
67 dnegu v0 # error
68 sd v0, PT_R0(sp) # set flag for syscall restarting
691: sd v0, PT_R2(sp) # result
70
71 local_irq_disable # make sure need_resched and
72 # signals dont change between
73 # sampling and return
74 LONG_L a2, TI_FLAGS($28) # current->work
75 li t0, _TIF_ALLWORK_MASK
76 and t0, a2, t0
77 bnez t0, n32_syscall_exit_work
78
79 j restore_partial
80
81n32_syscall_exit_work:
82 j syscall_exit_work_partial
83
84/* ------------------------------------------------------------------------ */
85
86n32_syscall_trace_entry:
87 SAVE_STATIC
88 move s0, t2
89 move a0, sp
90 li a1, 0
91 jal do_syscall_trace
92
93 ld a0, PT_R4(sp) # Restore argument registers
94 ld a1, PT_R5(sp)
95 ld a2, PT_R6(sp)
96 ld a3, PT_R7(sp)
97 ld a4, PT_R8(sp)
98 ld a5, PT_R9(sp)
99 jalr s0
100
101 li t0, -EMAXERRNO - 1 # error?
102 sltu t0, t0, v0
103 sd t0, PT_R7(sp) # set error flag
104 beqz t0, 1f
105
106 dnegu v0 # error
107 sd v0, PT_R0(sp) # set flag for syscall restarting
1081: sd v0, PT_R2(sp) # result
109
110 j syscall_exit
111
112not_n32_scall:
113 /* This is not an n32 compatibility syscall, pass it on to
114 the n64 syscall handlers. */
115 j handle_sys64
116
117 END(handle_sysn32)
118
119EXPORT(sysn32_call_table)
120 PTR sys_read /* 6000 */
121 PTR sys_write
122 PTR sys_open
123 PTR sys_close
124 PTR sys_newstat
125 PTR sys_newfstat /* 6005 */
126 PTR sys_newlstat
127 PTR sys_poll
128 PTR sys_lseek
129 PTR old_mmap
130 PTR sys_mprotect /* 6010 */
131 PTR sys_munmap
132 PTR sys_brk
133 PTR sys32_rt_sigaction
134 PTR sys32_rt_sigprocmask
135 PTR compat_sys_ioctl /* 6015 */
136 PTR sys_pread64
137 PTR sys_pwrite64
138 PTR compat_sys_readv
139 PTR compat_sys_writev
140 PTR sys_access /* 6020 */
141 PTR sys_pipe
142 PTR compat_sys_select
143 PTR sys_sched_yield
144 PTR sys_mremap
145 PTR sys_msync /* 6025 */
146 PTR sys_mincore
147 PTR sys_madvise
148 PTR sys_shmget
149 PTR sys32_shmat
150 PTR sys_shmctl /* 6030 */
151 PTR sys_dup
152 PTR sys_dup2
153 PTR sys_pause
154 PTR compat_sys_nanosleep
155 PTR compat_sys_getitimer /* 6035 */
156 PTR compat_sys_setitimer
157 PTR sys_alarm
158 PTR sys_getpid
159 PTR sys32_sendfile
160 PTR sys_socket /* 6040 */
161 PTR sys_connect
162 PTR sys_accept
163 PTR sys_sendto
164 PTR sys_recvfrom
165 PTR compat_sys_sendmsg /* 6045 */
166 PTR compat_sys_recvmsg
167 PTR sys_shutdown
168 PTR sys_bind
169 PTR sys_listen
170 PTR sys_getsockname /* 6050 */
171 PTR sys_getpeername
172 PTR sys_socketpair
173 PTR compat_sys_setsockopt
174 PTR sys_getsockopt
175 PTR sys_clone /* 6055 */
176 PTR sys_fork
177 PTR sys32_execve
178 PTR sys_exit
179 PTR sys32_wait4
180 PTR sys_kill /* 6060 */
181 PTR sys32_newuname
182 PTR sys_semget
183 PTR sys_semop
184 PTR sys_semctl
185 PTR sys_shmdt /* 6065 */
186 PTR sys_msgget
187 PTR sys_msgsnd
188 PTR sys_msgrcv
189 PTR sys_msgctl
190 PTR compat_sys_fcntl /* 6070 */
191 PTR sys_flock
192 PTR sys_fsync
193 PTR sys_fdatasync
194 PTR sys_truncate
195 PTR sys_ftruncate /* 6075 */
196 PTR sys32_getdents
197 PTR sys_getcwd
198 PTR sys_chdir
199 PTR sys_fchdir
200 PTR sys_rename /* 6080 */
201 PTR sys_mkdir
202 PTR sys_rmdir
203 PTR sys_creat
204 PTR sys_link
205 PTR sys_unlink /* 6085 */
206 PTR sys_symlink
207 PTR sys_readlink
208 PTR sys_chmod
209 PTR sys_fchmod
210 PTR sys_chown /* 6090 */
211 PTR sys_fchown
212 PTR sys_lchown
213 PTR sys_umask
214 PTR sys32_gettimeofday
215 PTR compat_sys_getrlimit /* 6095 */
216 PTR compat_sys_getrusage
217 PTR sys32_sysinfo
218 PTR compat_sys_times
219 PTR sys_ptrace
220 PTR sys_getuid /* 6100 */
221 PTR sys_syslog
222 PTR sys_getgid
223 PTR sys_setuid
224 PTR sys_setgid
225 PTR sys_geteuid /* 6105 */
226 PTR sys_getegid
227 PTR sys_setpgid
228 PTR sys_getppid
229 PTR sys_getpgrp
230 PTR sys_setsid /* 6110 */
231 PTR sys_setreuid
232 PTR sys_setregid
233 PTR sys_getgroups
234 PTR sys_setgroups
235 PTR sys_setresuid /* 6115 */
236 PTR sys_getresuid
237 PTR sys_setresgid
238 PTR sys_getresgid
239 PTR sys_getpgid
240 PTR sys_setfsuid /* 6120 */
241 PTR sys_setfsgid
242 PTR sys_getsid
243 PTR sys_capget
244 PTR sys_capset
245 PTR sys32_rt_sigpending /* 6125 */
246 PTR compat_sys_rt_sigtimedwait
247 PTR sys32_rt_sigqueueinfo
248 PTR sys32_rt_sigsuspend
249 PTR sys32_sigaltstack
250 PTR compat_sys_utime /* 6130 */
251 PTR sys_mknod
252 PTR sys32_personality
253 PTR sys_ustat
254 PTR compat_sys_statfs
255 PTR compat_sys_fstatfs /* 6135 */
256 PTR sys_sysfs
257 PTR sys_getpriority
258 PTR sys_setpriority
259 PTR sys_sched_setparam
260 PTR sys_sched_getparam /* 6140 */
261 PTR sys_sched_setscheduler
262 PTR sys_sched_getscheduler
263 PTR sys_sched_get_priority_max
264 PTR sys_sched_get_priority_min
265 PTR sys32_sched_rr_get_interval /* 6145 */
266 PTR sys_mlock
267 PTR sys_munlock
268 PTR sys_mlockall
269 PTR sys_munlockall
270 PTR sys_vhangup /* 6150 */
271 PTR sys_pivot_root
272 PTR sys32_sysctl
273 PTR sys_prctl
274 PTR sys32_adjtimex
275 PTR compat_sys_setrlimit /* 6155 */
276 PTR sys_chroot
277 PTR sys_sync
278 PTR sys_acct
279 PTR sys32_settimeofday
280 PTR sys_mount /* 6160 */
281 PTR sys_umount
282 PTR sys_swapon
283 PTR sys_swapoff
284 PTR sys_reboot
285 PTR sys_sethostname /* 6165 */
286 PTR sys_setdomainname
287 PTR sys_ni_syscall /* was create_module */
288 PTR sys_init_module
289 PTR sys_delete_module
290 PTR sys_ni_syscall /* 6170, was get_kernel_syms */
291 PTR sys_ni_syscall /* was query_module */
292 PTR sys_quotactl
293 PTR sys_nfsservctl
294 PTR sys_ni_syscall /* res. for getpmsg */
295 PTR sys_ni_syscall /* 6175 for putpmsg */
296 PTR sys_ni_syscall /* res. for afs_syscall */
297 PTR sys_ni_syscall /* res. for security */
298 PTR sys_gettid
299 PTR sys32_readahead
300 PTR sys_setxattr /* 6180 */
301 PTR sys_lsetxattr
302 PTR sys_fsetxattr
303 PTR sys_getxattr
304 PTR sys_lgetxattr
305 PTR sys_fgetxattr /* 6185 */
306 PTR sys_listxattr
307 PTR sys_llistxattr
308 PTR sys_flistxattr
309 PTR sys_removexattr
310 PTR sys_lremovexattr /* 6190 */
311 PTR sys_fremovexattr
312 PTR sys_tkill
313 PTR sys_ni_syscall
314 PTR compat_sys_futex
315 PTR compat_sys_sched_setaffinity /* 6195 */
316 PTR compat_sys_sched_getaffinity
317 PTR sys_cacheflush
318 PTR sys_cachectl
319 PTR sys_sysmips
320 PTR sys_io_setup /* 6200 */
321 PTR sys_io_destroy
322 PTR sys_io_getevents
323 PTR sys_io_submit
324 PTR sys_io_cancel
325 PTR sys_exit_group /* 6205 */
326 PTR sys_lookup_dcookie
327 PTR sys_epoll_create
328 PTR sys_epoll_ctl
329 PTR sys_epoll_wait
330 PTR sys_remap_file_pages /* 6210 */
331 PTR sysn32_rt_sigreturn
332 PTR sys_fcntl
333 PTR sys_set_tid_address
334 PTR sys_restart_syscall
335 PTR sys_semtimedop /* 6215 */
336 PTR sys_fadvise64_64
337 PTR compat_sys_statfs64
338 PTR compat_sys_fstatfs64
339 PTR sys_sendfile64
340 PTR sys_timer_create /* 6220 */
341 PTR sys_timer_settime
342 PTR sys_timer_gettime
343 PTR sys_timer_getoverrun
344 PTR sys_timer_delete
345 PTR sys_clock_settime /* 6225 */
346 PTR sys_clock_gettime
347 PTR sys_clock_getres
348 PTR sys_clock_nanosleep
349 PTR sys_tgkill
350 PTR compat_sys_utimes /* 6230 */
351 PTR sys_ni_syscall /* sys_mbind */
352 PTR sys_ni_syscall /* sys_get_mempolicy */
353 PTR sys_ni_syscall /* sys_set_mempolicy */
354 PTR compat_sys_mq_open
355 PTR sys_mq_unlink /* 6235 */
356 PTR compat_sys_mq_timedsend
357 PTR compat_sys_mq_timedreceive
358 PTR compat_sys_mq_notify
359 PTR compat_sys_mq_getsetattr
360 PTR sys_ni_syscall /* 6240, sys_vserver */
361 PTR sys_waitid
362 PTR sys_ni_syscall /* available, was setaltroot */
363 PTR sys_add_key
364 PTR sys_request_key
365 PTR sys_keyctl /* 6245 */
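
Taken together, the three entry points chain into one dispatcher: handle_sys (o32) claims 4000-based numbers, handle_sysn32 claims 6000-based ones, and handle_sys64 takes 5000-based ones, each punting out-of-range numbers to the next handler via the not_*_scall stubs. A toy model of that cascade (the range sizes below are illustrative only):

#include <stdio.h>

static const char *dispatch(long nr)
{
    if (nr >= 4000 && nr < 4000 + 300)    /* o32 range, size illustrative */
        return "o32 table";
    if (nr >= 6000 && nr < 6000 + 250)    /* n32 range, size illustrative */
        return "n32 table";
    if (nr >= 5000 && nr < 5000 + 250)    /* n64 range, size illustrative */
        return "n64 table";
    return "ENOSYS";
}

int main(void)
{
    long nrs[] = { 4003, 5003, 6003, 7000 };
    for (int i = 0; i < 4; i++)
        printf("%ld -> %s\n", nrs[i], dispatch(nrs[i]));
    return 0;
}
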
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
new file mode 100644
index 000000000000..739f3998d76b
--- /dev/null
+++ b/arch/mips/kernel/scall64-o32.S
@@ -0,0 +1,488 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1995 - 2000, 2001 by Ralf Baechle
7 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
8 * Copyright (C) 2001 MIPS Technologies, Inc.
9 * Copyright (C) 2004 Thiemo Seufer
10 *
11 * Hairy: the userspace application uses a different argument passing
12 * convention than the kernel, so we have to translate things from the o32
13 * to the ABI64 calling convention.  64-bit syscalls are also processed
14 * here for now.
15 */
16#include <linux/config.h>
17#include <linux/errno.h>
18#include <asm/asm.h>
19#include <asm/asmmacro.h>
20#include <asm/mipsregs.h>
21#include <asm/regdef.h>
22#include <asm/stackframe.h>
23#include <asm/thread_info.h>
24#include <asm/unistd.h>
25#include <asm/sysmips.h>
26
27 .align 5
28NESTED(handle_sys, PT_SIZE, sp)
29 .set noat
30 SAVE_SOME
31 STI
32 .set at
33 ld t1, PT_EPC(sp) # skip syscall on return
34
35 dsubu t0, v0, __NR_O32_Linux # check syscall number
36 sltiu t0, t0, __NR_O32_Linux_syscalls + 1
37 daddiu t1, 4 # skip to next instruction
38 sd t1, PT_EPC(sp)
39 beqz t0, not_o32_scall
40#if 0
41 SAVE_ALL
42 move a1, v0
43 PRINT("Scall %ld\n")
44 RESTORE_ALL
45#endif
46
47 /* We don't want to stumble over broken sign extensions from
48	   userland.  O32 never uses the upper half. */
49 sll a0, a0, 0
50 sll a1, a1, 0
51 sll a2, a2, 0
52 sll a3, a3, 0
53
54 dsll t0, v0, 3 # offset into table
55 ld t2, (sys_call_table - (__NR_O32_Linux * 8))(t0)
56
57 sd a3, PT_R26(sp) # save a3 for syscall restarting
58
59 /*
60 * More than four arguments. Try to deal with it by copying the
61 * stack arguments from the user stack to the kernel stack.
62 * This Sucks (TM).
63 *
64 * We intentionally keep the kernel stack a little below the top of
65 * userspace so we don't have to do a slower byte accurate check here.
66 */
67 ld t0, PT_R29(sp) # get old user stack pointer
68 daddu t1, t0, 32
69 bltz t1, bad_stack
70
711: lw a4, 16(t0) # argument #5 from usp
722: lw a5, 20(t0) # argument #6 from usp
733: lw a6, 24(t0) # argument #7 from usp
744: lw a7, 28(t0) # argument #8 from usp (for indirect syscalls)
75
76 .section __ex_table,"a"
77 PTR 1b, bad_stack
78 PTR 2b, bad_stack
79 PTR 3b, bad_stack
80 PTR 4b, bad_stack
81 .previous
82
83 li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
84 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
85 and t0, t1, t0
86 bnez t0, trace_a_syscall
87
88 jalr t2 # Do The Real Thing (TM)
89
90 li t0, -EMAXERRNO - 1 # error?
91 sltu t0, t0, v0
92 sd t0, PT_R7(sp) # set error flag
93 beqz t0, 1f
94
95 dnegu v0 # error
96 sd v0, PT_R0(sp) # flag for syscall restarting
971: sd v0, PT_R2(sp) # result
98
99o32_syscall_exit:
100	local_irq_disable		# make sure need_resched and
101					# signals don't change between
102 # sampling and return
103 LONG_L a2, TI_FLAGS($28)
104 li t0, _TIF_ALLWORK_MASK
105 and t0, a2, t0
106 bnez t0, o32_syscall_exit_work
107
108 j restore_partial
109
110o32_syscall_exit_work:
111 j syscall_exit_work_partial
112
113/* ------------------------------------------------------------------------ */
114
115trace_a_syscall:
116 SAVE_STATIC
117 sd a4, PT_R8(sp) # Save argument registers
118 sd a5, PT_R9(sp)
119 sd a6, PT_R10(sp)
120 sd a7, PT_R11(sp) # For indirect syscalls
121
122 move s0, t2 # Save syscall pointer
123 move a0, sp
124 li a1, 0
125 jal do_syscall_trace
126
127 ld a0, PT_R4(sp) # Restore argument registers
128 ld a1, PT_R5(sp)
129 ld a2, PT_R6(sp)
130 ld a3, PT_R7(sp)
131 ld a4, PT_R8(sp)
132 ld a5, PT_R9(sp)
133 ld a6, PT_R10(sp)
134 ld a7, PT_R11(sp) # For indirect syscalls
135 jalr s0
136
137 li t0, -EMAXERRNO - 1 # error?
138 sltu t0, t0, v0
139 sd t0, PT_R7(sp) # set error flag
140 beqz t0, 1f
141
142 dnegu v0 # error
143 sd v0, PT_R0(sp) # set flag for syscall restarting
1441: sd v0, PT_R2(sp) # result
145
146 j syscall_exit
147
148/* ------------------------------------------------------------------------ */
149
150 /*
151 * The stackpointer for a call with more than 4 arguments is bad.
152 */
153bad_stack:
154 dnegu v0 # error
155 sd v0, PT_R0(sp)
156 sd v0, PT_R2(sp)
157 li t0, 1 # set error flag
158 sd t0, PT_R7(sp)
159 j o32_syscall_exit
160
161not_o32_scall:
162 /*
163 * This is not an o32 compatibility syscall, pass it on
164 * to the 64-bit syscall handlers.
165 */
166#ifdef CONFIG_MIPS32_N32
167 j handle_sysn32
168#else
169 j handle_sys64
170#endif
171 END(handle_sys)
172
173LEAF(sys32_syscall)
174 sltu v0, a0, __NR_O32_Linux + __NR_O32_Linux_syscalls + 1
175 beqz v0, einval
176
177 dsll v0, a0, 3
178 ld t2, (sys_call_table - (__NR_O32_Linux * 8))(v0)
179
180 li v1, 4000 # indirect syscall number
181 beq a0, v1, einval # do not recurse
182
183 move a0, a1 # shift argument registers
184 move a1, a2
185 move a2, a3
186 move a3, a4
187 move a4, a5
188 move a5, a6
189 move a6, a7
190 sd a0, PT_R4(sp) # ... and push back a0 - a3, some
191 sd a1, PT_R5(sp) # syscalls expect them there
192 sd a2, PT_R6(sp)
193 sd a3, PT_R7(sp)
194 sd a3, PT_R26(sp) # update a3 for syscall restarting
195 jr t2
196 /* Unreached */
197
198einval: li v0, -EINVAL
199 jr ra
200 END(sys32_syscall)
201
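
For illustration, here is a simplified C model of what sys32_syscall above does for the o32 indirect syscall: syscall(4000, nr, args...) re-dispatches with the arguments shifted down by one slot and refuses nr == 4000 so it cannot recurse. The table size (283 entries, numbers 4000 through 4282) is taken from the sys_call_table below; everything else here (names, the function-pointer type, the slightly stricter range check) is illustrative.

#include <errno.h>

#define NR_O32_BASE	4000	/* first o32 syscall number (__NR_O32_Linux) */
#define NR_O32_COUNT	283	/* entries in the table below: 4000 .. 4282 */

typedef long (*o32_sysfn_t)(long, long, long, long, long, long, long);

static long indirect_syscall_sketch(o32_sysfn_t *table, long nr,
				    long a0, long a1, long a2, long a3,
				    long a4, long a5, long a6)
{
	/* reject numbers outside the table and the indirect syscall itself */
	if (nr <= NR_O32_BASE || nr >= NR_O32_BASE + NR_O32_COUNT)
		return -EINVAL;

	/* the arguments have already been shifted down by one slot */
	return table[nr - NR_O32_BASE](a0, a1, a2, a3, a4, a5, a6);
}
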
202 .align 3
203 .type sys_call_table,@object
204sys_call_table:
205 PTR sys32_syscall /* 4000 */
206 PTR sys_exit
207 PTR sys_fork
208 PTR sys_read
209 PTR sys_write
210 PTR sys_open /* 4005 */
211 PTR sys_close
212 PTR sys_waitpid
213 PTR sys_creat
214 PTR sys_link
215 PTR sys_unlink /* 4010 */
216 PTR sys32_execve
217 PTR sys_chdir
218 PTR compat_sys_time
219 PTR sys_mknod
220 PTR sys_chmod /* 4015 */
221 PTR sys_lchown
222 PTR sys_ni_syscall
223 PTR sys_ni_syscall /* was sys_stat */
224 PTR sys_lseek
225 PTR sys_getpid /* 4020 */
226 PTR sys_mount
227 PTR sys_oldumount
228 PTR sys_setuid
229 PTR sys_getuid
230 PTR compat_sys_stime /* 4025 */
231 PTR sys32_ptrace
232 PTR sys_alarm
233 PTR sys_ni_syscall /* was sys_fstat */
234 PTR sys_pause
235 PTR compat_sys_utime /* 4030 */
236 PTR sys_ni_syscall
237 PTR sys_ni_syscall
238 PTR sys_access
239 PTR sys_nice
240 PTR sys_ni_syscall /* 4035 */
241 PTR sys_sync
242 PTR sys_kill
243 PTR sys_rename
244 PTR sys_mkdir
245 PTR sys_rmdir /* 4040 */
246 PTR sys_dup
247 PTR sys_pipe
248 PTR compat_sys_times
249 PTR sys_ni_syscall
250 PTR sys_brk /* 4045 */
251 PTR sys_setgid
252 PTR sys_getgid
253 PTR sys_ni_syscall /* was signal 2 */
254 PTR sys_geteuid
255 PTR sys_getegid /* 4050 */
256 PTR sys_acct
257 PTR sys_umount
258 PTR sys_ni_syscall
259 PTR compat_sys_ioctl
260 PTR compat_sys_fcntl /* 4055 */
261 PTR sys_ni_syscall
262 PTR sys_setpgid
263 PTR sys_ni_syscall
264 PTR sys_olduname
265 PTR sys_umask /* 4060 */
266 PTR sys_chroot
267 PTR sys32_ustat
268 PTR sys_dup2
269 PTR sys_getppid
270 PTR sys_getpgrp /* 4065 */
271 PTR sys_setsid
272 PTR sys32_sigaction
273 PTR sys_sgetmask
274 PTR sys_ssetmask
275 PTR sys_setreuid /* 4070 */
276 PTR sys_setregid
277 PTR sys32_sigsuspend
278 PTR compat_sys_sigpending
279 PTR sys_sethostname
280 PTR compat_sys_setrlimit /* 4075 */
281 PTR compat_sys_getrlimit
282 PTR compat_sys_getrusage
283 PTR sys32_gettimeofday
284 PTR sys32_settimeofday
285 PTR sys_getgroups /* 4080 */
286 PTR sys_setgroups
287 PTR sys_ni_syscall /* old_select */
288 PTR sys_symlink
289 PTR sys_ni_syscall /* was sys_lstat */
290 PTR sys_readlink /* 4085 */
291 PTR sys_uselib
292 PTR sys_swapon
293 PTR sys_reboot
294 PTR sys32_readdir
295 PTR old_mmap /* 4090 */
296 PTR sys_munmap
297 PTR sys_truncate
298 PTR sys_ftruncate
299 PTR sys_fchmod
300 PTR sys_fchown /* 4095 */
301 PTR sys_getpriority
302 PTR sys_setpriority
303 PTR sys_ni_syscall
304 PTR compat_sys_statfs
305 PTR compat_sys_fstatfs /* 4100 */
306 PTR sys_ni_syscall /* sys_ioperm */
307 PTR sys32_socketcall
308 PTR sys_syslog
309 PTR compat_sys_setitimer
310 PTR compat_sys_getitimer /* 4105 */
311 PTR compat_sys_newstat
312 PTR compat_sys_newlstat
313 PTR compat_sys_newfstat
314 PTR sys_uname
315 PTR sys_ni_syscall /* sys_ioperm *//* 4110 */
316 PTR sys_vhangup
317 PTR sys_ni_syscall /* was sys_idle */
318 PTR sys_ni_syscall /* sys_vm86 */
319 PTR sys32_wait4
320 PTR sys_swapoff /* 4115 */
321 PTR sys32_sysinfo
322 PTR sys32_ipc
323 PTR sys_fsync
324 PTR sys32_sigreturn
325 PTR sys_clone /* 4120 */
326 PTR sys_setdomainname
327 PTR sys32_newuname
328 PTR sys_ni_syscall /* sys_modify_ldt */
329 PTR sys32_adjtimex
330 PTR sys_mprotect /* 4125 */
331 PTR compat_sys_sigprocmask
332 PTR sys_ni_syscall /* was creat_module */
333 PTR sys_init_module
334 PTR sys_delete_module
335 PTR sys_ni_syscall /* 4130, get_kernel_syms */
336 PTR sys_quotactl
337 PTR sys_getpgid
338 PTR sys_fchdir
339 PTR sys_bdflush
340 PTR sys_sysfs /* 4135 */
341 PTR sys32_personality
342 PTR sys_ni_syscall /* for afs_syscall */
343 PTR sys_setfsuid
344 PTR sys_setfsgid
345 PTR sys32_llseek /* 4140 */
346 PTR sys32_getdents
347 PTR compat_sys_select
348 PTR sys_flock
349 PTR sys_msync
350 PTR compat_sys_readv /* 4145 */
351 PTR compat_sys_writev
352 PTR sys_cacheflush
353 PTR sys_cachectl
354 PTR sys_sysmips
355 PTR sys_ni_syscall /* 4150 */
356 PTR sys_getsid
357 PTR sys_fdatasync
358 PTR sys32_sysctl
359 PTR sys_mlock
360 PTR sys_munlock /* 4155 */
361 PTR sys_mlockall
362 PTR sys_munlockall
363 PTR sys_sched_setparam
364 PTR sys_sched_getparam
365 PTR sys_sched_setscheduler /* 4160 */
366 PTR sys_sched_getscheduler
367 PTR sys_sched_yield
368 PTR sys_sched_get_priority_max
369 PTR sys_sched_get_priority_min
370 PTR sys32_sched_rr_get_interval /* 4165 */
371 PTR compat_sys_nanosleep
372 PTR sys_mremap
373 PTR sys_accept
374 PTR sys_bind
375 PTR sys_connect /* 4170 */
376 PTR sys_getpeername
377 PTR sys_getsockname
378 PTR sys_getsockopt
379 PTR sys_listen
380 PTR sys_recv /* 4175 */
381 PTR sys_recvfrom
382 PTR compat_sys_recvmsg
383 PTR sys_send
384 PTR compat_sys_sendmsg
385 PTR sys_sendto /* 4180 */
386 PTR compat_sys_setsockopt
387 PTR sys_shutdown
388 PTR sys_socket
389 PTR sys_socketpair
390 PTR sys_setresuid /* 4185 */
391 PTR sys_getresuid
392 PTR sys_ni_syscall /* was query_module */
393 PTR sys_poll
394 PTR sys_nfsservctl
395 PTR sys_setresgid /* 4190 */
396 PTR sys_getresgid
397 PTR sys_prctl
398 PTR sys32_rt_sigreturn
399 PTR sys32_rt_sigaction
400 PTR sys32_rt_sigprocmask /* 4195 */
401 PTR sys32_rt_sigpending
402 PTR compat_sys_rt_sigtimedwait
403 PTR sys32_rt_sigqueueinfo
404 PTR sys32_rt_sigsuspend
405 PTR sys32_pread /* 4200 */
406 PTR sys32_pwrite
407 PTR sys_chown
408 PTR sys_getcwd
409 PTR sys_capget
410 PTR sys_capset /* 4205 */
411 PTR sys32_sigaltstack
412 PTR sys32_sendfile
413 PTR sys_ni_syscall
414 PTR sys_ni_syscall
415 PTR sys32_mmap2 /* 4210 */
416 PTR sys32_truncate64
417 PTR sys32_ftruncate64
418 PTR sys_newstat
419 PTR sys_newlstat
420 PTR sys_newfstat /* 4215 */
421 PTR sys_pivot_root
422 PTR sys_mincore
423 PTR sys_madvise
424 PTR sys_getdents64
425 PTR compat_sys_fcntl64 /* 4220 */
426 PTR sys_ni_syscall
427 PTR sys_gettid
428 PTR sys32_readahead
429 PTR sys_setxattr
430 PTR sys_lsetxattr /* 4225 */
431 PTR sys_fsetxattr
432 PTR sys_getxattr
433 PTR sys_lgetxattr
434 PTR sys_fgetxattr
435 PTR sys_listxattr /* 4230 */
436 PTR sys_llistxattr
437 PTR sys_flistxattr
438 PTR sys_removexattr
439 PTR sys_lremovexattr
440 PTR sys_fremovexattr /* 4235 */
441 PTR sys_tkill
442 PTR sys_sendfile64
443 PTR compat_sys_futex
444 PTR compat_sys_sched_setaffinity
445 PTR compat_sys_sched_getaffinity /* 4240 */
446 PTR sys_io_setup
447 PTR sys_io_destroy
448 PTR sys_io_getevents
449 PTR sys_io_submit
450 PTR sys_io_cancel /* 4245 */
451 PTR sys_exit_group
452 PTR sys_lookup_dcookie
453 PTR sys_epoll_create
454 PTR sys_epoll_ctl
455 PTR sys_epoll_wait /* 4250 */
456 PTR sys_remap_file_pages
457 PTR sys_set_tid_address
458 PTR sys_restart_syscall
459 PTR sys_fadvise64_64
460 PTR compat_sys_statfs64 /* 4255 */
461 PTR compat_sys_fstatfs64
462 PTR sys_timer_create
463 PTR compat_sys_timer_settime
464 PTR compat_sys_timer_gettime
465 PTR sys_timer_getoverrun /* 4260 */
466 PTR sys_timer_delete
467 PTR compat_sys_clock_settime
468 PTR compat_sys_clock_gettime
469 PTR compat_sys_clock_getres
470 PTR compat_sys_clock_nanosleep /* 4265 */
471 PTR sys_tgkill
472 PTR compat_sys_utimes
473 PTR sys_ni_syscall /* sys_mbind */
474 PTR sys_ni_syscall /* sys_get_mempolicy */
475 PTR sys_ni_syscall /* 4270 sys_set_mempolicy */
476 PTR compat_sys_mq_open
477 PTR sys_mq_unlink
478 PTR compat_sys_mq_timedsend
479 PTR compat_sys_mq_timedreceive
480 PTR compat_sys_mq_notify /* 4275 */
481 PTR compat_sys_mq_getsetattr
482 PTR sys_ni_syscall /* sys_vserver */
483 PTR sys_waitid
484 PTR sys_ni_syscall /* available, was setaltroot */
485 PTR sys_add_key /* 4280 */
486 PTR sys_request_key
487 PTR sys_keyctl
488 .size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/semaphore.c b/arch/mips/kernel/semaphore.c
new file mode 100644
index 000000000000..9c40fe5a8e8d
--- /dev/null
+++ b/arch/mips/kernel/semaphore.c
@@ -0,0 +1,164 @@
1/*
2 * MIPS-specific semaphore code.
3 *
4 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
5 * Copyright (C) 2004 Ralf Baechle <ralf@linux-mips.org>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 *
12 * April 2001 - Reworked by Paul Mackerras <paulus@samba.org>
13 * to eliminate the SMP races in the old version between the updates
14 * of `count' and `waking'. Now we use negative `count' values to
15 * indicate that some process(es) are waiting for the semaphore.
16 */
17
18#include <linux/module.h>
19#include <linux/sched.h>
20#include <linux/init.h>
21#include <asm/atomic.h>
22#include <asm/cpu-features.h>
23#include <asm/errno.h>
24#include <asm/semaphore.h>
25#include <asm/war.h>
26/*
27 * Atomically update sem->count.
28 * This does the equivalent of the following:
29 *
30 * old_count = sem->count;
31 * tmp = MAX(old_count, 0) + incr;
32 * sem->count = tmp;
33 * return old_count;
34 *
35 * On machines without lld/scd we need a spinlock to make the manipulation of
36 * sem->count and sem->waking atomic. Scalability isn't an issue because
37 * this lock is used on UP only so it's just an empty variable.
38 */
39static inline int __sem_update_count(struct semaphore *sem, int incr)
40{
41 int old_count, tmp;
42
43 if (cpu_has_llsc && R10000_LLSC_WAR) {
44 __asm__ __volatile__(
45 "1: ll %0, %2 \n"
46 " sra %1, %0, 31 \n"
47 " not %1 \n"
48 " and %1, %0, %1 \n"
49 " add %1, %1, %3 \n"
50 " sc %1, %2 \n"
51 " beqzl %1, 1b \n"
52 : "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
53 : "r" (incr), "m" (sem->count));
54 } else if (cpu_has_llsc) {
55 __asm__ __volatile__(
56 "1: ll %0, %2 \n"
57 " sra %1, %0, 31 \n"
58 " not %1 \n"
59 " and %1, %0, %1 \n"
60 " add %1, %1, %3 \n"
61 " sc %1, %2 \n"
62 " beqz %1, 1b \n"
63 : "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
64 : "r" (incr), "m" (sem->count));
65 } else {
66 static DEFINE_SPINLOCK(semaphore_lock);
67 unsigned long flags;
68
69 spin_lock_irqsave(&semaphore_lock, flags);
70 old_count = atomic_read(&sem->count);
71 tmp = max_t(int, old_count, 0) + incr;
72 atomic_set(&sem->count, tmp);
73 spin_unlock_irqrestore(&semaphore_lock, flags);
74 }
75
76 return old_count;
77}
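
For readers who do not speak ll/sc: the loop above (and the spinlocked fallback) implements exactly the update described in the comment before the function. A non-atomic C rendering of that update, purely as a sketch, looks like this:

/* Non-atomic sketch only; the real code gets its atomicity from ll/sc or
 * from the spinlock in the fallback path. */
static int sem_update_count_sketch(int *count, int incr)
{
	int old_count = *count;
	int tmp = (old_count > 0 ? old_count : 0) + incr;	/* MAX(old, 0) + incr */

	*count = tmp;
	return old_count;
}
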
78
79void __up(struct semaphore *sem)
80{
81 /*
82 * Note that we incremented count in up() before we came here,
83 * but that was ineffective since the result was <= 0, and
84 * any negative value of count is equivalent to 0.
85 * This ends up setting count to 1, unless count is now > 0
86 * (i.e. because some other cpu has called up() in the meantime),
87 * in which case we just increment count.
88 */
89 __sem_update_count(sem, 1);
90 wake_up(&sem->wait);
91}
92
93EXPORT_SYMBOL(__up);
94
95/*
96 * Note that when we come in to __down or __down_interruptible,
97 * we have already decremented count, but that decrement was
98 * ineffective since the result was < 0, and any negative value
99 * of count is equivalent to 0.
100 * Thus it is only when we decrement count from some value > 0
101 * that we have actually got the semaphore.
102 */
103void __sched __down(struct semaphore *sem)
104{
105 struct task_struct *tsk = current;
106 DECLARE_WAITQUEUE(wait, tsk);
107
108 __set_task_state(tsk, TASK_UNINTERRUPTIBLE);
109 add_wait_queue_exclusive(&sem->wait, &wait);
110
111 /*
112 * Try to get the semaphore. If the count is > 0, then we've
113 * got the semaphore; we decrement count and exit the loop.
114 * If the count is 0 or negative, we set it to -1, indicating
115 * that we are asleep, and then sleep.
116 */
117 while (__sem_update_count(sem, -1) <= 0) {
118 schedule();
119 set_task_state(tsk, TASK_UNINTERRUPTIBLE);
120 }
121 remove_wait_queue(&sem->wait, &wait);
122 __set_task_state(tsk, TASK_RUNNING);
123
124 /*
125 * If there are any more sleepers, wake one of them up so
126 * that it can either get the semaphore, or set count to -1
127 * indicating that there are still processes sleeping.
128 */
129 wake_up(&sem->wait);
130}
131
132EXPORT_SYMBOL(__down);
133
134int __sched __down_interruptible(struct semaphore * sem)
135{
136 int retval = 0;
137 struct task_struct *tsk = current;
138 DECLARE_WAITQUEUE(wait, tsk);
139
140 __set_task_state(tsk, TASK_INTERRUPTIBLE);
141 add_wait_queue_exclusive(&sem->wait, &wait);
142
143 while (__sem_update_count(sem, -1) <= 0) {
144 if (signal_pending(current)) {
145 /*
146 * A signal is pending - give up trying.
147 * Set sem->count to 0 if it is negative,
148 * since we are no longer sleeping.
149 */
150 __sem_update_count(sem, 0);
151 retval = -EINTR;
152 break;
153 }
154 schedule();
155 set_task_state(tsk, TASK_INTERRUPTIBLE);
156 }
157 remove_wait_queue(&sem->wait, &wait);
158 __set_task_state(tsk, TASK_RUNNING);
159
160 wake_up(&sem->wait);
161 return retval;
162}
163
164EXPORT_SYMBOL(__down_interruptible);
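
To put __down()/__up() above in context: the fast-path down()/up() routines (defined elsewhere, in the asm-mips semaphore header) only fall into these slow paths when the count goes non-positive. A minimal, non-atomic sketch of that pairing, with purely illustrative names and types:

struct sem_sketch {
	int count;	/* > 0 free, == 0 held, < 0 held with waiters */
};

static void down_sketch(struct sem_sketch *s, void (*slow_down)(struct sem_sketch *))
{
	if (--s->count < 0)		/* contended ...                          */
		slow_down(s);		/* ... sleep in the analogue of __down()  */
}

static void up_sketch(struct sem_sketch *s, void (*slow_up)(struct sem_sketch *))
{
	if (++s->count <= 0)		/* waiters are recorded as negative count */
		slow_up(s);		/* ... wake one in the analogue of __up() */
}
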
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
new file mode 100644
index 000000000000..6018ca25aceb
--- /dev/null
+++ b/arch/mips/kernel/setup.c
@@ -0,0 +1,571 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1995 Linus Torvalds
7 * Copyright (C) 1995 Waldorf Electronics
8 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03 Ralf Baechle
9 * Copyright (C) 1996 Stoned Elipot
10 * Copyright (C) 1999 Silicon Graphics, Inc.
11 * Copyright (C) 2000 2001, 2002 Maciej W. Rozycki
12 */
13#include <linux/config.h>
14#include <linux/errno.h>
15#include <linux/init.h>
16#include <linux/ioport.h>
17#include <linux/sched.h>
18#include <linux/kernel.h>
19#include <linux/mm.h>
20#include <linux/module.h>
21#include <linux/stddef.h>
22#include <linux/string.h>
23#include <linux/unistd.h>
24#include <linux/slab.h>
25#include <linux/user.h>
26#include <linux/utsname.h>
27#include <linux/a.out.h>
28#include <linux/tty.h>
29#include <linux/bootmem.h>
30#include <linux/initrd.h>
31#include <linux/major.h>
32#include <linux/kdev_t.h>
33#include <linux/root_dev.h>
34#include <linux/highmem.h>
35#include <linux/console.h>
36
37#include <asm/addrspace.h>
38#include <asm/bootinfo.h>
39#include <asm/cpu.h>
40#include <asm/sections.h>
41#include <asm/setup.h>
42#include <asm/system.h>
43
44struct cpuinfo_mips cpu_data[NR_CPUS];
45
46EXPORT_SYMBOL(cpu_data);
47
48#ifdef CONFIG_VT
49struct screen_info screen_info;
50#endif
51
52/*
 53 * Despite its name this variable is used even if we don't have PCI.
54 */
55unsigned int PCI_DMA_BUS_IS_PHYS;
56
57EXPORT_SYMBOL(PCI_DMA_BUS_IS_PHYS);
58
59/*
60 * Setup information
61 *
62 * These are initialized so they are in the .data section
63 */
64unsigned long mips_machtype = MACH_UNKNOWN;
65unsigned long mips_machgroup = MACH_GROUP_UNKNOWN;
66
67EXPORT_SYMBOL(mips_machtype);
68EXPORT_SYMBOL(mips_machgroup);
69
70struct boot_mem_map boot_mem_map;
71
72static char command_line[CL_SIZE];
73 char arcs_cmdline[CL_SIZE]=CONFIG_CMDLINE;
74
75/*
76 * mips_io_port_base is the begin of the address space to which x86 style
77 * I/O ports are mapped.
78 */
79const unsigned long mips_io_port_base = -1;
80EXPORT_SYMBOL(mips_io_port_base);
81
82/*
83 * isa_slot_offset is the address where E(ISA) busaddress 0 is mapped
84 * for the processor.
85 */
86unsigned long isa_slot_offset;
87EXPORT_SYMBOL(isa_slot_offset);
88
89static struct resource code_resource = { .name = "Kernel code", };
90static struct resource data_resource = { .name = "Kernel data", };
91
92void __init add_memory_region(phys_t start, phys_t size, long type)
93{
94 int x = boot_mem_map.nr_map;
95 struct boot_mem_map_entry *prev = boot_mem_map.map + x - 1;
96
97 /*
98 * Try to merge with previous entry if any. This is far less than
99 * perfect but is sufficient for most real world cases.
100 */
101 if (x && prev->addr + prev->size == start && prev->type == type) {
102 prev->size += size;
103 return;
104 }
105
106 if (x == BOOT_MEM_MAP_MAX) {
107 printk("Ooops! Too many entries in the memory map!\n");
108 return;
109 }
110
111 boot_mem_map.map[x].addr = start;
112 boot_mem_map.map[x].size = size;
113 boot_mem_map.map[x].type = type;
114 boot_mem_map.nr_map++;
115}
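
A standalone sketch of the merge rule used by add_memory_region() above: a new region of the same type that starts exactly where the previous entry ends simply grows that entry instead of consuming a new map slot. All names and the example addresses below are illustrative.

#include <assert.h>

struct region_sketch { unsigned long addr, size; long type; };

static int merge_or_append(struct region_sketch *map, int n,
			   unsigned long start, unsigned long size, long type)
{
	if (n && map[n - 1].addr + map[n - 1].size == start &&
	    map[n - 1].type == type) {
		map[n - 1].size += size;	/* merge with the previous entry */
		return n;
	}
	map[n].addr = start;
	map[n].size = size;
	map[n].type = type;
	return n + 1;
}

int main(void)
{
	struct region_sketch map[4];
	int n = 0;

	n = merge_or_append(map, n, 0x00000000, 0x01000000, 1);	/* 16 MB at 0      */
	n = merge_or_append(map, n, 0x01000000, 0x03000000, 1);	/* adjacent 48 MB  */
	assert(n == 1 && map[0].size == 0x04000000);		/* one 64 MB entry */
	return 0;
}
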
116
117static void __init print_memory_map(void)
118{
119 int i;
120 const int field = 2 * sizeof(unsigned long);
121
122 for (i = 0; i < boot_mem_map.nr_map; i++) {
123 printk(" memory: %0*Lx @ %0*Lx ",
124 field, (unsigned long long) boot_mem_map.map[i].size,
125 field, (unsigned long long) boot_mem_map.map[i].addr);
126
127 switch (boot_mem_map.map[i].type) {
128 case BOOT_MEM_RAM:
129 printk("(usable)\n");
130 break;
131 case BOOT_MEM_ROM_DATA:
132 printk("(ROM data)\n");
133 break;
134 case BOOT_MEM_RESERVED:
135 printk("(reserved)\n");
136 break;
137 default:
138 printk("type %lu\n", boot_mem_map.map[i].type);
139 break;
140 }
141 }
142}
143
144static inline void parse_cmdline_early(void)
145{
146 char c = ' ', *to = command_line, *from = saved_command_line;
147 unsigned long start_at, mem_size;
148 int len = 0;
149 int usermem = 0;
150
151 printk("Determined physical RAM map:\n");
152 print_memory_map();
153
154 for (;;) {
155 /*
156 * "mem=XXX[kKmM]" defines a memory region from
157 * 0 to <XXX>, overriding the determined size.
158 * "mem=XXX[KkmM]@YYY[KkmM]" defines a memory region from
159 * <YYY> to <YYY>+<XXX>, overriding the determined size.
160 */
161 if (c == ' ' && !memcmp(from, "mem=", 4)) {
162 if (to != command_line)
163 to--;
164 /*
165 * If a user specifies memory size, we
166 * blow away any automatically generated
167 * size.
168 */
169 if (usermem == 0) {
170 boot_mem_map.nr_map = 0;
171 usermem = 1;
172 }
173 mem_size = memparse(from + 4, &from);
174 if (*from == '@')
175 start_at = memparse(from + 1, &from);
176 else
177 start_at = 0;
178 add_memory_region(start_at, mem_size, BOOT_MEM_RAM);
179 }
180 c = *(from++);
181 if (!c)
182 break;
183 if (CL_SIZE <= ++len)
184 break;
185 *(to++) = c;
186 }
187 *to = '\0';
188
189 if (usermem) {
190 printk("User-defined physical RAM map:\n");
191 print_memory_map();
192 }
193}
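
The "mem=" handling above leans on memparse() for the size/offset syntax. As a worked, self-contained example of the accepted forms: memparse_sketch below is a hypothetical stand-in for the kernel helper, modelling only the k/K/m/M suffixes actually used here.

#include <stdio.h>
#include <stdlib.h>

/* minimal stand-in for the kernel's memparse(): number plus optional k/K/m/M suffix */
static unsigned long memparse_sketch(const char *s, char **retptr)
{
	unsigned long val = strtoul(s, retptr, 0);

	switch (**retptr) {
	case 'k': case 'K': val <<= 10; (*retptr)++; break;
	case 'm': case 'M': val <<= 20; (*retptr)++; break;
	}
	return val;
}

int main(void)
{
	char *arg = "64M@16M", *p;		/* example command-line fragment */
	unsigned long mem_size, start_at = 0;

	mem_size = memparse_sketch(arg, &p);
	if (*p == '@')
		start_at = memparse_sketch(p + 1, &p);

	/* would translate to add_memory_region(start_at, mem_size, BOOT_MEM_RAM) */
	printf("region at 0x%lx, size 0x%lx\n", start_at, mem_size);
	return 0;
}
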
194
195static inline int parse_rd_cmdline(unsigned long* rd_start, unsigned long* rd_end)
196{
197 /*
198 * "rd_start=0xNNNNNNNN" defines the memory address of an initrd
199	 * "rd_size=0xNN" its size
200 */
201 unsigned long start = 0;
202 unsigned long size = 0;
203 unsigned long end;
204 char cmd_line[CL_SIZE];
205 char *start_str;
206 char *size_str;
207 char *tmp;
208
209 strcpy(cmd_line, command_line);
210 *command_line = 0;
211 tmp = cmd_line;
212 /* Ignore "rd_start=" strings in other parameters. */
213 start_str = strstr(cmd_line, "rd_start=");
214 if (start_str && start_str != cmd_line && *(start_str - 1) != ' ')
215 start_str = strstr(start_str, " rd_start=");
216 while (start_str) {
217 if (start_str != cmd_line)
218 strncat(command_line, tmp, start_str - tmp);
219 start = memparse(start_str + 9, &start_str);
220 tmp = start_str + 1;
221 start_str = strstr(start_str, " rd_start=");
222 }
223 if (*tmp)
224 strcat(command_line, tmp);
225
226 strcpy(cmd_line, command_line);
227 *command_line = 0;
228 tmp = cmd_line;
229 /* Ignore "rd_size" strings in other parameters. */
230 size_str = strstr(cmd_line, "rd_size=");
231 if (size_str && size_str != cmd_line && *(size_str - 1) != ' ')
232 size_str = strstr(size_str, " rd_size=");
233 while (size_str) {
234 if (size_str != cmd_line)
235 strncat(command_line, tmp, size_str - tmp);
236 size = memparse(size_str + 8, &size_str);
237 tmp = size_str + 1;
238 size_str = strstr(size_str, " rd_size=");
239 }
240 if (*tmp)
241 strcat(command_line, tmp);
242
243#ifdef CONFIG_MIPS64
244 /* HACK: Guess if the sign extension was forgotten */
245 if (start > 0x0000000080000000 && start < 0x00000000ffffffff)
246 start |= 0xffffffff00000000;
247#endif
248
249 end = start + size;
250 if (start && end) {
251 *rd_start = start;
252 *rd_end = end;
253 return 1;
254 }
255 return 0;
256}
257
258#define PFN_UP(x) (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
259#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
260#define PFN_PHYS(x) ((x) << PAGE_SHIFT)
261
262#define MAXMEM HIGHMEM_START
263#define MAXMEM_PFN PFN_DOWN(MAXMEM)
264
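
A quick worked example of the PFN helpers defined above, assuming 4 KB pages (PAGE_SHIFT == 12); the SK_ prefix marks these as standalone stand-ins, not the kernel macros:

#include <assert.h>

#define SK_PAGE_SHIFT	12
#define SK_PAGE_SIZE	(1UL << SK_PAGE_SHIFT)
#define SK_PFN_UP(x)	(((x) + SK_PAGE_SIZE - 1) >> SK_PAGE_SHIFT)
#define SK_PFN_DOWN(x)	((x) >> SK_PAGE_SHIFT)
#define SK_PFN_PHYS(x)	((x) << SK_PAGE_SHIFT)

int main(void)
{
	assert(SK_PFN_UP(0x1234UL) == 2);	/* partially used start page rounds up   */
	assert(SK_PFN_DOWN(0x1234UL) == 1);	/* ... and the end of a range rounds down */
	assert(SK_PFN_PHYS(2UL) == 0x2000UL);	/* back from frame number to address     */
	return 0;
}
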
265static inline void bootmem_init(void)
266{
267 unsigned long start_pfn;
268 unsigned long reserved_end = (unsigned long)&_end;
269#ifndef CONFIG_SGI_IP27
270 unsigned long first_usable_pfn;
271 unsigned long bootmap_size;
272 int i;
273#endif
274#ifdef CONFIG_BLK_DEV_INITRD
275 int initrd_reserve_bootmem = 0;
276
277 /* Board specific code should have set up initrd_start and initrd_end */
278 ROOT_DEV = Root_RAM0;
279 if (parse_rd_cmdline(&initrd_start, &initrd_end)) {
280 reserved_end = max(reserved_end, initrd_end);
281 initrd_reserve_bootmem = 1;
282 } else {
283 unsigned long tmp;
284 u32 *initrd_header;
285
286 tmp = ((reserved_end + PAGE_SIZE-1) & PAGE_MASK) - sizeof(u32) * 2;
287 if (tmp < reserved_end)
288 tmp += PAGE_SIZE;
289 initrd_header = (u32 *)tmp;
290 if (initrd_header[0] == 0x494E5244) {
291 initrd_start = (unsigned long)&initrd_header[2];
292 initrd_end = initrd_start + initrd_header[1];
293 reserved_end = max(reserved_end, initrd_end);
294 initrd_reserve_bootmem = 1;
295 }
296 }
297#endif /* CONFIG_BLK_DEV_INITRD */
298
299 /*
300 * Partially used pages are not usable - thus
301 * we are rounding upwards.
302 */
303 start_pfn = PFN_UP(CPHYSADDR(reserved_end));
304
305#ifndef CONFIG_SGI_IP27
306 /* Find the highest page frame number we have available. */
307 max_pfn = 0;
308 first_usable_pfn = -1UL;
309 for (i = 0; i < boot_mem_map.nr_map; i++) {
310 unsigned long start, end;
311
312 if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
313 continue;
314
315 start = PFN_UP(boot_mem_map.map[i].addr);
316 end = PFN_DOWN(boot_mem_map.map[i].addr
317 + boot_mem_map.map[i].size);
318
319 if (start >= end)
320 continue;
321 if (end > max_pfn)
322 max_pfn = end;
323 if (start < first_usable_pfn) {
324 if (start > start_pfn) {
325 first_usable_pfn = start;
326 } else if (end > start_pfn) {
327 first_usable_pfn = start_pfn;
328 }
329 }
330 }
331
332 /*
333 * Determine low and high memory ranges
334 */
335 max_low_pfn = max_pfn;
336 if (max_low_pfn > MAXMEM_PFN) {
337 max_low_pfn = MAXMEM_PFN;
338#ifndef CONFIG_HIGHMEM
339 /* Maximum memory usable is what is directly addressable */
340 printk(KERN_WARNING "Warning only %ldMB will be used.\n",
341 MAXMEM >> 20);
342 printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
343#endif
344 }
345
346#ifdef CONFIG_HIGHMEM
347 /*
348 * Crude, we really should make a better attempt at detecting
349 * highstart_pfn
350 */
351 highstart_pfn = highend_pfn = max_pfn;
352 if (max_pfn > MAXMEM_PFN) {
353 highstart_pfn = MAXMEM_PFN;
354 printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
355 (highend_pfn - highstart_pfn) >> (20 - PAGE_SHIFT));
356 }
357#endif
358
359 /* Initialize the boot-time allocator with low memory only. */
360 bootmap_size = init_bootmem(first_usable_pfn, max_low_pfn);
361
362 /*
363 * Register fully available low RAM pages with the bootmem allocator.
364 */
365 for (i = 0; i < boot_mem_map.nr_map; i++) {
366 unsigned long curr_pfn, last_pfn, size;
367
368 /*
369 * Reserve usable memory.
370 */
371 if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
372 continue;
373
374 /*
375 * We are rounding up the start address of usable memory:
376 */
377 curr_pfn = PFN_UP(boot_mem_map.map[i].addr);
378 if (curr_pfn >= max_low_pfn)
379 continue;
380 if (curr_pfn < start_pfn)
381 curr_pfn = start_pfn;
382
383 /*
384 * ... and at the end of the usable range downwards:
385 */
386 last_pfn = PFN_DOWN(boot_mem_map.map[i].addr
387 + boot_mem_map.map[i].size);
388
389 if (last_pfn > max_low_pfn)
390 last_pfn = max_low_pfn;
391
392 /*
393 * Only register lowmem part of lowmem segment with bootmem.
394 */
395 size = last_pfn - curr_pfn;
396 if (curr_pfn > PFN_DOWN(HIGHMEM_START))
397 continue;
398 if (curr_pfn + size - 1 > PFN_DOWN(HIGHMEM_START))
399 size = PFN_DOWN(HIGHMEM_START) - curr_pfn;
400 if (!size)
401 continue;
402
403 /*
404 * ... finally, did all the rounding and playing
405 * around just make the area go away?
406 */
407 if (last_pfn <= curr_pfn)
408 continue;
409
410 /* Register lowmem ranges */
411 free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size));
412 }
413
414 /* Reserve the bootmap memory. */
415 reserve_bootmem(PFN_PHYS(first_usable_pfn), bootmap_size);
416#endif /* CONFIG_SGI_IP27 */
417
418#ifdef CONFIG_BLK_DEV_INITRD
419 initrd_below_start_ok = 1;
420 if (initrd_start) {
421 unsigned long initrd_size = ((unsigned char *)initrd_end) - ((unsigned char *)initrd_start);
422 printk("Initial ramdisk at: 0x%p (%lu bytes)\n",
423 (void *)initrd_start, initrd_size);
424
425 if (CPHYSADDR(initrd_end) > PFN_PHYS(max_low_pfn)) {
426 printk("initrd extends beyond end of memory "
427 "(0x%0*Lx > 0x%0*Lx)\ndisabling initrd\n",
428 sizeof(long) * 2,
429 (unsigned long long)CPHYSADDR(initrd_end),
430 sizeof(long) * 2,
431 (unsigned long long)PFN_PHYS(max_low_pfn));
432 initrd_start = initrd_end = 0;
433 initrd_reserve_bootmem = 0;
434 }
435
436 if (initrd_reserve_bootmem)
437 reserve_bootmem(CPHYSADDR(initrd_start), initrd_size);
438 }
439#endif /* CONFIG_BLK_DEV_INITRD */
440}
441
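
As a side note, the initrd header magic 0x494E5244 tested in bootmem_init() above is just the ASCII string "INRD" packed into a 32-bit word, which the following standalone check confirms:

#include <assert.h>

int main(void)
{
	assert((('I' << 24) | ('N' << 16) | ('R' << 8) | 'D') == 0x494E5244);
	return 0;
}
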
442static inline void resource_init(void)
443{
444 int i;
445
446#if defined(CONFIG_MIPS64) && !defined(CONFIG_BUILD_ELF64)
447 /*
448 * The 64bit code in 32bit object format trick can't represent
449 * 64bit wide relocations for linker script symbols.
450 */
451 code_resource.start = CPHYSADDR(&_text);
452 code_resource.end = CPHYSADDR(&_etext) - 1;
453 data_resource.start = CPHYSADDR(&_etext);
454 data_resource.end = CPHYSADDR(&_edata) - 1;
455#else
456 code_resource.start = virt_to_phys(&_text);
457 code_resource.end = virt_to_phys(&_etext) - 1;
458 data_resource.start = virt_to_phys(&_etext);
459 data_resource.end = virt_to_phys(&_edata) - 1;
460#endif
461
462 /*
463 * Request address space for all standard RAM.
464 */
465 for (i = 0; i < boot_mem_map.nr_map; i++) {
466 struct resource *res;
467 unsigned long start, end;
468
469 start = boot_mem_map.map[i].addr;
470 end = boot_mem_map.map[i].addr + boot_mem_map.map[i].size - 1;
471 if (start >= MAXMEM)
472 continue;
473 if (end >= MAXMEM)
474 end = MAXMEM - 1;
475
476 res = alloc_bootmem(sizeof(struct resource));
477 switch (boot_mem_map.map[i].type) {
478 case BOOT_MEM_RAM:
479 case BOOT_MEM_ROM_DATA:
480 res->name = "System RAM";
481 break;
482 case BOOT_MEM_RESERVED:
483 default:
484 res->name = "reserved";
485 }
486
487 res->start = start;
488 res->end = end;
489
490 res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
491 request_resource(&iomem_resource, res);
492
493 /*
494 * We don't know which RAM region contains kernel data,
495 * so we try it repeatedly and let the resource manager
496 * test it.
497 */
498 request_resource(res, &code_resource);
499 request_resource(res, &data_resource);
500 }
501}
502
503#undef PFN_UP
504#undef PFN_DOWN
505#undef PFN_PHYS
506
507#undef MAXMEM
508#undef MAXMEM_PFN
509
510static int __initdata earlyinit_debug;
511
512static int __init earlyinit_debug_setup(char *str)
513{
514 earlyinit_debug = 1;
515 return 1;
516}
517__setup("earlyinit_debug", earlyinit_debug_setup);
518
519extern initcall_t __earlyinitcall_start, __earlyinitcall_end;
520
521static void __init do_earlyinitcalls(void)
522{
523 initcall_t *call, *start, *end;
524
525 start = &__earlyinitcall_start;
526 end = &__earlyinitcall_end;
527
528 for (call = start; call < end; call++) {
529 if (earlyinit_debug)
530 printk("calling earlyinitcall 0x%p\n", *call);
531
532 (*call)();
533 }
534}
535
536void __init setup_arch(char **cmdline_p)
537{
538 cpu_probe();
539 prom_init();
540 cpu_report();
541
542#if defined(CONFIG_VT)
543#if defined(CONFIG_VGA_CONSOLE)
544 conswitchp = &vga_con;
545#elif defined(CONFIG_DUMMY_CONSOLE)
546 conswitchp = &dummy_con;
547#endif
548#endif
549
550 /* call board setup routine */
551 do_earlyinitcalls();
552
553 strlcpy(command_line, arcs_cmdline, sizeof(command_line));
554 strlcpy(saved_command_line, command_line, COMMAND_LINE_SIZE);
555
556 *cmdline_p = command_line;
557
558 parse_cmdline_early();
559 bootmem_init();
560 paging_init();
561 resource_init();
562}
563
564int __init fpu_disable(char *s)
565{
566 cpu_data[0].options &= ~MIPS_CPU_FPU;
567
568 return 1;
569}
570
571__setup("nofpu", fpu_disable);
diff --git a/arch/mips/kernel/signal-common.h b/arch/mips/kernel/signal-common.h
new file mode 100644
index 000000000000..f9234df53253
--- /dev/null
+++ b/arch/mips/kernel/signal-common.h
@@ -0,0 +1,137 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1991, 1992 Linus Torvalds
7 * Copyright (C) 1994 - 2000 Ralf Baechle
8 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
9 */
10
11static inline int
12setup_sigcontext(struct pt_regs *regs, struct sigcontext *sc)
13{
14 int err = 0;
15
16 err |= __put_user(regs->cp0_epc, &sc->sc_pc);
17 err |= __put_user(regs->cp0_status, &sc->sc_status);
18
19#define save_gp_reg(i) do { \
20 err |= __put_user(regs->regs[i], &sc->sc_regs[i]); \
21} while(0)
22 __put_user(0, &sc->sc_regs[0]); save_gp_reg(1); save_gp_reg(2);
23 save_gp_reg(3); save_gp_reg(4); save_gp_reg(5); save_gp_reg(6);
24 save_gp_reg(7); save_gp_reg(8); save_gp_reg(9); save_gp_reg(10);
25 save_gp_reg(11); save_gp_reg(12); save_gp_reg(13); save_gp_reg(14);
26 save_gp_reg(15); save_gp_reg(16); save_gp_reg(17); save_gp_reg(18);
27 save_gp_reg(19); save_gp_reg(20); save_gp_reg(21); save_gp_reg(22);
28 save_gp_reg(23); save_gp_reg(24); save_gp_reg(25); save_gp_reg(26);
29 save_gp_reg(27); save_gp_reg(28); save_gp_reg(29); save_gp_reg(30);
30 save_gp_reg(31);
31#undef save_gp_reg
32
33 err |= __put_user(regs->hi, &sc->sc_mdhi);
34 err |= __put_user(regs->lo, &sc->sc_mdlo);
35 err |= __put_user(regs->cp0_cause, &sc->sc_cause);
36 err |= __put_user(regs->cp0_badvaddr, &sc->sc_badvaddr);
37
38 err |= __put_user(!!used_math(), &sc->sc_used_math);
39
40 if (!used_math())
41 goto out;
42
43 /*
44 * Save FPU state to signal context. Signal handler will "inherit"
45 * current FPU state.
46 */
47 preempt_disable();
48
49 if (!is_fpu_owner()) {
50 own_fpu();
51 restore_fp(current);
52 }
53 err |= save_fp_context(sc);
54
55 preempt_enable();
56
57out:
58 return err;
59}
60
61static inline int
62restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc)
63{
64 int err = 0;
65 unsigned int used_math;
66
67 /* Always make any pending restarted system calls return -EINTR */
68 current_thread_info()->restart_block.fn = do_no_restart_syscall;
69
70 err |= __get_user(regs->cp0_epc, &sc->sc_pc);
71 err |= __get_user(regs->hi, &sc->sc_mdhi);
72 err |= __get_user(regs->lo, &sc->sc_mdlo);
73
74#define restore_gp_reg(i) do { \
75 err |= __get_user(regs->regs[i], &sc->sc_regs[i]); \
76} while(0)
77 restore_gp_reg( 1); restore_gp_reg( 2); restore_gp_reg( 3);
78 restore_gp_reg( 4); restore_gp_reg( 5); restore_gp_reg( 6);
79 restore_gp_reg( 7); restore_gp_reg( 8); restore_gp_reg( 9);
80 restore_gp_reg(10); restore_gp_reg(11); restore_gp_reg(12);
81 restore_gp_reg(13); restore_gp_reg(14); restore_gp_reg(15);
82 restore_gp_reg(16); restore_gp_reg(17); restore_gp_reg(18);
83 restore_gp_reg(19); restore_gp_reg(20); restore_gp_reg(21);
84 restore_gp_reg(22); restore_gp_reg(23); restore_gp_reg(24);
85 restore_gp_reg(25); restore_gp_reg(26); restore_gp_reg(27);
86 restore_gp_reg(28); restore_gp_reg(29); restore_gp_reg(30);
87 restore_gp_reg(31);
88#undef restore_gp_reg
89
90 err |= __get_user(used_math, &sc->sc_used_math);
91 conditional_used_math(used_math);
92
93 preempt_disable();
94
95 if (used_math()) {
96 /* restore fpu context if we have used it before */
97 own_fpu();
98 err |= restore_fp_context(sc);
99 } else {
100 /* signal handler may have used FPU. Give it up. */
101 lose_fpu();
102 }
103
104 preempt_enable();
105
106 return err;
107}
108
109/*
110 * Determine which stack to use.
111 */
112static inline void *
113get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
114{
115 unsigned long sp, almask;
116
117 /* Default to using normal stack */
118 sp = regs->regs[29];
119
120 /*
121	 * The FPU emulator may have its own trampoline active just
122	 * above the user stack, 16 bytes before the next lowest
123	 * 16-byte boundary.  Try to avoid trashing it.
124 */
125 sp -= 32;
126
127 /* This is the X/Open sanctioned signal stack switching. */
128 if ((ka->sa.sa_flags & SA_ONSTACK) && (sas_ss_flags (sp) == 0))
129 sp = current->sas_ss_sp + current->sas_ss_size;
130
131 if (PLAT_TRAMPOLINE_STUFF_LINE)
132 almask = ~(PLAT_TRAMPOLINE_STUFF_LINE - 1);
133 else
134 almask = ALMASK;
135
136 return (void *)((sp - frame_size) & almask);
137}
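
The frame placement done by get_sigframe() above boils down to: step 32 bytes below the user stack pointer (to avoid a possible FPU-emulator trampoline), optionally switch to the alternate signal stack, then align downwards. A standalone sketch with illustrative names:

#include <stddef.h>
#include <stdint.h>

static uintptr_t place_sigframe_sketch(uintptr_t usp, size_t frame_size,
				       int on_altstack, uintptr_t ss_sp,
				       size_t ss_size, uintptr_t almask)
{
	uintptr_t sp = usp - 32;		/* skip a possible emulator trampoline */

	if (on_altstack)			/* SA_ONSTACK and not already on it    */
		sp = ss_sp + ss_size;

	return (sp - frame_size) & almask;	/* aligned frame address */
}
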
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
new file mode 100644
index 000000000000..508026ae5842
--- /dev/null
+++ b/arch/mips/kernel/signal.c
@@ -0,0 +1,517 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1991, 1992 Linus Torvalds
7 * Copyright (C) 1994 - 2000 Ralf Baechle
8 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
9 */
10#include <linux/config.h>
11#include <linux/sched.h>
12#include <linux/mm.h>
13#include <linux/personality.h>
14#include <linux/smp.h>
15#include <linux/smp_lock.h>
16#include <linux/kernel.h>
17#include <linux/signal.h>
18#include <linux/errno.h>
19#include <linux/wait.h>
20#include <linux/ptrace.h>
21#include <linux/unistd.h>
22#include <linux/compiler.h>
23
24#include <asm/asm.h>
25#include <linux/bitops.h>
26#include <asm/cacheflush.h>
27#include <asm/fpu.h>
28#include <asm/sim.h>
29#include <asm/uaccess.h>
30#include <asm/ucontext.h>
31#include <asm/cpu-features.h>
32
33#include "signal-common.h"
34
35#define DEBUG_SIG 0
36
37#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
38
39static int do_signal(sigset_t *oldset, struct pt_regs *regs);
40
41/*
42 * Atomically swap in the new signal mask, and wait for a signal.
43 */
44
45#ifdef CONFIG_TRAD_SIGNALS
46save_static_function(sys_sigsuspend);
47__attribute_used__ noinline static int
48_sys_sigsuspend(nabi_no_regargs struct pt_regs regs)
49{
50 sigset_t *uset, saveset, newset;
51
52 uset = (sigset_t *) regs.regs[4];
53 if (copy_from_user(&newset, uset, sizeof(sigset_t)))
54 return -EFAULT;
55 sigdelsetmask(&newset, ~_BLOCKABLE);
56
57 spin_lock_irq(&current->sighand->siglock);
58 saveset = current->blocked;
59 current->blocked = newset;
60 recalc_sigpending();
61 spin_unlock_irq(&current->sighand->siglock);
62
63 regs.regs[2] = EINTR;
64 regs.regs[7] = 1;
65 while (1) {
66 current->state = TASK_INTERRUPTIBLE;
67 schedule();
68 if (do_signal(&saveset, &regs))
69 return -EINTR;
70 }
71}
72#endif
73
74save_static_function(sys_rt_sigsuspend);
75__attribute_used__ noinline static int
76_sys_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
77{
78 sigset_t *unewset, saveset, newset;
79 size_t sigsetsize;
80
81 /* XXX Don't preclude handling different sized sigset_t's. */
82 sigsetsize = regs.regs[5];
83 if (sigsetsize != sizeof(sigset_t))
84 return -EINVAL;
85
86 unewset = (sigset_t *) regs.regs[4];
87 if (copy_from_user(&newset, unewset, sizeof(newset)))
88 return -EFAULT;
89 sigdelsetmask(&newset, ~_BLOCKABLE);
90
91 spin_lock_irq(&current->sighand->siglock);
92 saveset = current->blocked;
93 current->blocked = newset;
94 recalc_sigpending();
95 spin_unlock_irq(&current->sighand->siglock);
96
97 regs.regs[2] = EINTR;
98 regs.regs[7] = 1;
99 while (1) {
100 current->state = TASK_INTERRUPTIBLE;
101 schedule();
102 if (do_signal(&saveset, &regs))
103 return -EINTR;
104 }
105}
106
107#ifdef CONFIG_TRAD_SIGNALS
108asmlinkage int sys_sigaction(int sig, const struct sigaction *act,
109 struct sigaction *oact)
110{
111 struct k_sigaction new_ka, old_ka;
112 int ret;
113 int err = 0;
114
115 if (act) {
116 old_sigset_t mask;
117
118 if (!access_ok(VERIFY_READ, act, sizeof(*act)))
119 return -EFAULT;
120 err |= __get_user(new_ka.sa.sa_handler, &act->sa_handler);
121 err |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
122 err |= __get_user(mask, &act->sa_mask.sig[0]);
123 if (err)
124 return -EFAULT;
125
126 siginitset(&new_ka.sa.sa_mask, mask);
127 }
128
129 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
130
131 if (!ret && oact) {
132 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)))
133 return -EFAULT;
134 err |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
135 err |= __put_user(old_ka.sa.sa_handler, &oact->sa_handler);
136 err |= __put_user(old_ka.sa.sa_mask.sig[0], oact->sa_mask.sig);
137 err |= __put_user(0, &oact->sa_mask.sig[1]);
138 err |= __put_user(0, &oact->sa_mask.sig[2]);
139 err |= __put_user(0, &oact->sa_mask.sig[3]);
140 if (err)
141 return -EFAULT;
142 }
143
144 return ret;
145}
146#endif
147
148asmlinkage int sys_sigaltstack(nabi_no_regargs struct pt_regs regs)
149{
150 const stack_t *uss = (const stack_t *) regs.regs[4];
151 stack_t *uoss = (stack_t *) regs.regs[5];
152 unsigned long usp = regs.regs[29];
153
154 return do_sigaltstack(uss, uoss, usp);
155}
156
157#if PLAT_TRAMPOLINE_STUFF_LINE
158#define __tramp __attribute__((aligned(PLAT_TRAMPOLINE_STUFF_LINE)))
159#else
160#define __tramp
161#endif
162
163#ifdef CONFIG_TRAD_SIGNALS
164struct sigframe {
165 u32 sf_ass[4]; /* argument save space for o32 */
166 u32 sf_code[2] __tramp; /* signal trampoline */
167 struct sigcontext sf_sc __tramp;
168 sigset_t sf_mask;
169};
170#endif
171
172struct rt_sigframe {
173 u32 rs_ass[4]; /* argument save space for o32 */
174 u32 rs_code[2] __tramp; /* signal trampoline */
175 struct siginfo rs_info __tramp;
176 struct ucontext rs_uc;
177};
178
179#ifdef CONFIG_TRAD_SIGNALS
180save_static_function(sys_sigreturn);
181__attribute_used__ noinline static void
182_sys_sigreturn(nabi_no_regargs struct pt_regs regs)
183{
184 struct sigframe *frame;
185 sigset_t blocked;
186
187 frame = (struct sigframe *) regs.regs[29];
188 if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
189 goto badframe;
190 if (__copy_from_user(&blocked, &frame->sf_mask, sizeof(blocked)))
191 goto badframe;
192
193 sigdelsetmask(&blocked, ~_BLOCKABLE);
194 spin_lock_irq(&current->sighand->siglock);
195 current->blocked = blocked;
196 recalc_sigpending();
197 spin_unlock_irq(&current->sighand->siglock);
198
199 if (restore_sigcontext(&regs, &frame->sf_sc))
200 goto badframe;
201
202 /*
203 * Don't let your children do this ...
204 */
205 if (current_thread_info()->flags & TIF_SYSCALL_TRACE)
206 do_syscall_trace(&regs, 1);
207 __asm__ __volatile__(
208 "move\t$29, %0\n\t"
209 "j\tsyscall_exit"
210 :/* no outputs */
211 :"r" (&regs));
212 /* Unreached */
213
214badframe:
215 force_sig(SIGSEGV, current);
216}
217#endif
218
219save_static_function(sys_rt_sigreturn);
220__attribute_used__ noinline static void
221_sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
222{
223 struct rt_sigframe *frame;
224 sigset_t set;
225 stack_t st;
226
227 frame = (struct rt_sigframe *) regs.regs[29];
228 if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
229 goto badframe;
230 if (__copy_from_user(&set, &frame->rs_uc.uc_sigmask, sizeof(set)))
231 goto badframe;
232
233 sigdelsetmask(&set, ~_BLOCKABLE);
234 spin_lock_irq(&current->sighand->siglock);
235 current->blocked = set;
236 recalc_sigpending();
237 spin_unlock_irq(&current->sighand->siglock);
238
239 if (restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext))
240 goto badframe;
241
242 if (__copy_from_user(&st, &frame->rs_uc.uc_stack, sizeof(st)))
243 goto badframe;
244 /* It is more difficult to avoid calling this function than to
245 call it and ignore errors. */
246 do_sigaltstack(&st, NULL, regs.regs[29]);
247
248 /*
249 * Don't let your children do this ...
250 */
251 __asm__ __volatile__(
252 "move\t$29, %0\n\t"
253 "j\tsyscall_exit"
254 :/* no outputs */
255 :"r" (&regs));
256 /* Unreached */
257
258badframe:
259 force_sig(SIGSEGV, current);
260}
261
262#ifdef CONFIG_TRAD_SIGNALS
263static void inline setup_frame(struct k_sigaction * ka, struct pt_regs *regs,
264 int signr, sigset_t *set)
265{
266 struct sigframe *frame;
267 int err = 0;
268
269 frame = get_sigframe(ka, regs, sizeof(*frame));
270 if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
271 goto give_sigsegv;
272
273 /*
274 * Set up the return code ...
275 *
276 * li v0, __NR_sigreturn
277 * syscall
278 */
279 if (PLAT_TRAMPOLINE_STUFF_LINE)
280 __clear_user(frame->sf_code, PLAT_TRAMPOLINE_STUFF_LINE);
281 err |= __put_user(0x24020000 + __NR_sigreturn, frame->sf_code + 0);
282 err |= __put_user(0x0000000c , frame->sf_code + 1);
283 flush_cache_sigtramp((unsigned long) frame->sf_code);
284
285 err |= setup_sigcontext(regs, &frame->sf_sc);
286 err |= __copy_to_user(&frame->sf_mask, set, sizeof(*set));
287 if (err)
288 goto give_sigsegv;
289
290 /*
291 * Arguments to signal handler:
292 *
293 * a0 = signal number
294 * a1 = 0 (should be cause)
295 * a2 = pointer to struct sigcontext
296 *
297 * $25 and c0_epc point to the signal handler, $29 points to the
298 * struct sigframe.
299 */
300 regs->regs[ 4] = signr;
301 regs->regs[ 5] = 0;
302 regs->regs[ 6] = (unsigned long) &frame->sf_sc;
303 regs->regs[29] = (unsigned long) frame;
304 regs->regs[31] = (unsigned long) frame->sf_code;
305 regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler;
306
307#if DEBUG_SIG
308 printk("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%p\n",
309 current->comm, current->pid,
310 frame, regs->cp0_epc, frame->regs[31]);
311#endif
312 return;
313
314give_sigsegv:
315 force_sigsegv(signr, current);
316}
317#endif
318
319static void inline setup_rt_frame(struct k_sigaction * ka, struct pt_regs *regs,
320 int signr, sigset_t *set, siginfo_t *info)
321{
322 struct rt_sigframe *frame;
323 int err = 0;
324
325 frame = get_sigframe(ka, regs, sizeof(*frame));
326 if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
327 goto give_sigsegv;
328
329 /*
330 * Set up the return code ...
331 *
332 * li v0, __NR_rt_sigreturn
333 * syscall
334 */
335 if (PLAT_TRAMPOLINE_STUFF_LINE)
336 __clear_user(frame->rs_code, PLAT_TRAMPOLINE_STUFF_LINE);
337 err |= __put_user(0x24020000 + __NR_rt_sigreturn, frame->rs_code + 0);
338 err |= __put_user(0x0000000c , frame->rs_code + 1);
339 flush_cache_sigtramp((unsigned long) frame->rs_code);
340
341 /* Create siginfo. */
342 err |= copy_siginfo_to_user(&frame->rs_info, info);
343
344 /* Create the ucontext. */
345 err |= __put_user(0, &frame->rs_uc.uc_flags);
346 err |= __put_user(0, &frame->rs_uc.uc_link);
347 err |= __put_user((void *)current->sas_ss_sp,
348 &frame->rs_uc.uc_stack.ss_sp);
349 err |= __put_user(sas_ss_flags(regs->regs[29]),
350 &frame->rs_uc.uc_stack.ss_flags);
351 err |= __put_user(current->sas_ss_size,
352 &frame->rs_uc.uc_stack.ss_size);
353 err |= setup_sigcontext(regs, &frame->rs_uc.uc_mcontext);
354 err |= __copy_to_user(&frame->rs_uc.uc_sigmask, set, sizeof(*set));
355
356 if (err)
357 goto give_sigsegv;
358
359 /*
360 * Arguments to signal handler:
361 *
362 * a0 = signal number
363 * a1 = 0 (should be cause)
364 * a2 = pointer to ucontext
365 *
366 * $25 and c0_epc point to the signal handler, $29 points to
367 * the struct rt_sigframe.
368 */
369 regs->regs[ 4] = signr;
370 regs->regs[ 5] = (unsigned long) &frame->rs_info;
371 regs->regs[ 6] = (unsigned long) &frame->rs_uc;
372 regs->regs[29] = (unsigned long) frame;
373 regs->regs[31] = (unsigned long) frame->rs_code;
374 regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler;
375
376#if DEBUG_SIG
377 printk("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%p\n",
378 current->comm, current->pid,
379 frame, regs->cp0_epc, regs->regs[31]);
380#endif
381 return;
382
383give_sigsegv:
384 force_sigsegv(signr, current);
385}
386
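
The two magic words written into the trampoline by setup_frame()/setup_rt_frame() above decode as MIPS instructions: 0x24020000 is the "addiu $v0, $zero, imm16" template (the "li v0, __NR_*sigreturn" from the comments) and 0x0000000c is "syscall". A standalone check, using the o32 sigreturn number 4119 quoted in signal32.c below purely as an example value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t nr = 4119;			/* example: o32 sigreturn number */
	uint32_t tramp[2] = {
		0x24020000 + nr,		/* li      v0, nr  (addiu $v0, $zero, nr) */
		0x0000000c,			/* syscall                                */
	};

	printf("%08x %08x\n", tramp[0], tramp[1]);	/* prints: 24021017 0000000c */
	return 0;
}
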
387extern void setup_rt_frame_n32(struct k_sigaction * ka,
388 struct pt_regs *regs, int signr, sigset_t *set, siginfo_t *info);
389
390static inline void handle_signal(unsigned long sig, siginfo_t *info,
391 struct k_sigaction *ka, sigset_t *oldset, struct pt_regs *regs)
392{
393 switch(regs->regs[0]) {
394 case ERESTART_RESTARTBLOCK:
395 case ERESTARTNOHAND:
396 regs->regs[2] = EINTR;
397 break;
398 case ERESTARTSYS:
399 if(!(ka->sa.sa_flags & SA_RESTART)) {
400 regs->regs[2] = EINTR;
401 break;
402 }
403 /* fallthrough */
404 case ERESTARTNOINTR: /* Userland will reload $v0. */
405 regs->regs[7] = regs->regs[26];
406 regs->cp0_epc -= 8;
407 }
408
409 regs->regs[0] = 0; /* Don't deal with this again. */
410
411#ifdef CONFIG_TRAD_SIGNALS
412 if (ka->sa.sa_flags & SA_SIGINFO) {
413#else
414 if (1) {
415#endif
416#ifdef CONFIG_MIPS32_N32
417 if ((current->thread.mflags & MF_ABI_MASK) == MF_N32)
418 setup_rt_frame_n32 (ka, regs, sig, oldset, info);
419 else
420#endif
421 setup_rt_frame(ka, regs, sig, oldset, info);
422 }
423#ifdef CONFIG_TRAD_SIGNALS
424 else
425 setup_frame(ka, regs, sig, oldset);
426#endif
427
428 if (!(ka->sa.sa_flags & SA_NODEFER)) {
429 spin_lock_irq(&current->sighand->siglock);
430 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
431 sigaddset(&current->blocked,sig);
432 recalc_sigpending();
433 spin_unlock_irq(&current->sighand->siglock);
434 }
435}
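
The switch at the top of handle_signal() above decides what an interrupted syscall should do once a handler is about to run. A compact C restatement of that decision, as a standalone sketch (the ERESTART* values are the kernel-internal codes from errno.h; everything else is illustrative):

#define ERESTARTSYS		512	/* kernel-internal restart codes */
#define ERESTARTNOINTR		513
#define ERESTARTNOHAND		514
#define ERESTART_RESTARTBLOCK	516

enum restart_action { NO_CHANGE, RETURN_EINTR, REEXECUTE_SYSCALL };

static enum restart_action restart_sketch(long restart_code, int sa_restart)
{
	switch (restart_code) {
	case ERESTART_RESTARTBLOCK:
	case ERESTARTNOHAND:
		return RETURN_EINTR;		/* a handler runs: fail with EINTR       */
	case ERESTARTSYS:
		if (!sa_restart)
			return RETURN_EINTR;	/* no SA_RESTART: fail with EINTR        */
		/* fall through */
	case ERESTARTNOINTR:
		return REEXECUTE_SYSCALL;	/* back epc up over li+syscall and redo  */
	}
	return NO_CHANGE;			/* ordinary errno: leave the result alone */
}
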
436
437extern int do_signal32(sigset_t *oldset, struct pt_regs *regs);
438extern int do_irix_signal(sigset_t *oldset, struct pt_regs *regs);
439
440static int do_signal(sigset_t *oldset, struct pt_regs *regs)
441{
442 struct k_sigaction ka;
443 siginfo_t info;
444 int signr;
445
446#ifdef CONFIG_BINFMT_ELF32
447 if ((current->thread.mflags & MF_ABI_MASK) == MF_O32) {
448 return do_signal32(oldset, regs);
449 }
450#endif
451
452 /*
453 * We want the common case to go fast, which is why we may in certain
454 * cases get here from kernel mode. Just return without doing anything
455 * if so.
456 */
457 if (!user_mode(regs))
458 return 1;
459
460 if (try_to_freeze(0))
461 goto no_signal;
462
463 if (!oldset)
464 oldset = &current->blocked;
465
466 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
467 if (signr > 0) {
468 handle_signal(signr, &info, &ka, oldset, regs);
469 return 1;
470 }
471
472no_signal:
473 /*
474	 * Whose code doesn't conform to the restartable syscall convention
475	 * dies here!!!  The li instruction, a single machine instruction,
476	 * must be directly followed by the syscall instruction.
477 */
478 if (regs->regs[0]) {
479 if (regs->regs[2] == ERESTARTNOHAND ||
480 regs->regs[2] == ERESTARTSYS ||
481 regs->regs[2] == ERESTARTNOINTR) {
482 regs->regs[7] = regs->regs[26];
483 regs->cp0_epc -= 8;
484 }
485 if (regs->regs[2] == ERESTART_RESTARTBLOCK) {
486 regs->regs[2] = __NR_restart_syscall;
487 regs->regs[7] = regs->regs[26];
488 regs->cp0_epc -= 4;
489 }
490 }
491 return 0;
492}
493
494/*
495 * notification of userspace execution resumption
496 * - triggered by current->work.notify_resume
497 */
498asmlinkage void do_notify_resume(struct pt_regs *regs, sigset_t *oldset,
499 __u32 thread_info_flags)
500{
501 /* deal with pending signal delivery */
502 if (thread_info_flags & _TIF_SIGPENDING) {
503#ifdef CONFIG_BINFMT_ELF32
504 if (likely((current->thread.mflags & MF_ABI_MASK) == MF_O32)) {
505 do_signal32(oldset, regs);
506 return;
507 }
508#endif
509#ifdef CONFIG_BINFMT_IRIX
510 if (unlikely(current->personality != PER_LINUX)) {
511 do_irix_signal(oldset, regs);
512 return;
513 }
514#endif
515 do_signal(oldset, regs);
516 }
517}
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
new file mode 100644
index 000000000000..1f3b19124c01
--- /dev/null
+++ b/arch/mips/kernel/signal32.c
@@ -0,0 +1,905 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1991, 1992 Linus Torvalds
7 * Copyright (C) 1994 - 2000 Ralf Baechle
8 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
9 */
10#include <linux/sched.h>
11#include <linux/mm.h>
12#include <linux/smp.h>
13#include <linux/smp_lock.h>
14#include <linux/kernel.h>
15#include <linux/signal.h>
16#include <linux/syscalls.h>
17#include <linux/errno.h>
18#include <linux/wait.h>
19#include <linux/ptrace.h>
20#include <linux/compat.h>
21#include <linux/suspend.h>
22#include <linux/compiler.h>
23
24#include <asm/asm.h>
25#include <linux/bitops.h>
26#include <asm/cacheflush.h>
27#include <asm/sim.h>
28#include <asm/uaccess.h>
29#include <asm/ucontext.h>
30#include <asm/system.h>
31#include <asm/fpu.h>
32
33#define SI_PAD_SIZE32 ((SI_MAX_SIZE/sizeof(int)) - 3)
34
35typedef struct compat_siginfo {
36 int si_signo;
37 int si_code;
38 int si_errno;
39
40 union {
41 int _pad[SI_PAD_SIZE32];
42
43 /* kill() */
44 struct {
45 compat_pid_t _pid; /* sender's pid */
46 compat_uid_t _uid; /* sender's uid */
47 } _kill;
48
49 /* SIGCHLD */
50 struct {
51 compat_pid_t _pid; /* which child */
52 compat_uid_t _uid; /* sender's uid */
53 int _status; /* exit code */
54 compat_clock_t _utime;
55 compat_clock_t _stime;
56 } _sigchld;
57
58 /* IRIX SIGCHLD */
59 struct {
60 compat_pid_t _pid; /* which child */
61 compat_clock_t _utime;
62 int _status; /* exit code */
63 compat_clock_t _stime;
64 } _irix_sigchld;
65
66 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
67 struct {
68 s32 _addr; /* faulting insn/memory ref. */
69 } _sigfault;
70
71 /* SIGPOLL, SIGXFSZ (To do ...) */
72 struct {
73 int _band; /* POLL_IN, POLL_OUT, POLL_MSG */
74 int _fd;
75 } _sigpoll;
76
77 /* POSIX.1b timers */
78 struct {
79 unsigned int _timer1;
80 unsigned int _timer2;
81 } _timer;
82
83 /* POSIX.1b signals */
84 struct {
85 compat_pid_t _pid; /* sender's pid */
86 compat_uid_t _uid; /* sender's uid */
87 compat_sigval_t _sigval;
88 } _rt;
89
90 } _sifields;
91} compat_siginfo_t;
92
93/*
 94 * Including <asm/unistd.h> would give us the 64-bit syscall numbers ...
95 */
96#define __NR_O32_sigreturn 4119
97#define __NR_O32_rt_sigreturn 4193
98#define __NR_O32_restart_syscall 4253
99
100#define DEBUG_SIG 0
101
102#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
103
104extern int do_signal32(sigset_t *oldset, struct pt_regs *regs);
105
106/* 32-bit compatibility types */
107
108#define _NSIG_BPW32 32
109#define _NSIG_WORDS32 (_NSIG / _NSIG_BPW32)
110
111typedef struct {
112 unsigned int sig[_NSIG_WORDS32];
113} sigset_t32;
114
115typedef unsigned int __sighandler32_t;
116typedef void (*vfptr_t)(void);
117
118struct sigaction32 {
119 unsigned int sa_flags;
120 __sighandler32_t sa_handler;
121 compat_sigset_t sa_mask;
122};
123
124/* IRIX compatible stack_t */
125typedef struct sigaltstack32 {
126 s32 ss_sp;
127 compat_size_t ss_size;
128 int ss_flags;
129} stack32_t;
130
131struct ucontext32 {
132 u32 uc_flags;
133 s32 uc_link;
134 stack32_t uc_stack;
135 struct sigcontext32 uc_mcontext;
136 sigset_t32 uc_sigmask; /* mask last for extensibility */
137};
138
139extern void __put_sigset_unknown_nsig(void);
140extern void __get_sigset_unknown_nsig(void);
141
142static inline int put_sigset(const sigset_t *kbuf, compat_sigset_t *ubuf)
143{
144 int err = 0;
145
146 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(*ubuf)))
147 return -EFAULT;
148
149 switch (_NSIG_WORDS) {
150 default:
151 __put_sigset_unknown_nsig();
152 case 2:
153 err |= __put_user (kbuf->sig[1] >> 32, &ubuf->sig[3]);
154 err |= __put_user (kbuf->sig[1] & 0xffffffff, &ubuf->sig[2]);
155 case 1:
156 err |= __put_user (kbuf->sig[0] >> 32, &ubuf->sig[1]);
157 err |= __put_user (kbuf->sig[0] & 0xffffffff, &ubuf->sig[0]);
158 }
159
160 return err;
161}
162
163static inline int get_sigset(sigset_t *kbuf, const compat_sigset_t *ubuf)
164{
165 int err = 0;
166 unsigned long sig[4];
167
168 if (!access_ok(VERIFY_READ, ubuf, sizeof(*ubuf)))
169 return -EFAULT;
170
171 switch (_NSIG_WORDS) {
172 default:
173 __get_sigset_unknown_nsig();
174 case 2:
175 err |= __get_user (sig[3], &ubuf->sig[3]);
176 err |= __get_user (sig[2], &ubuf->sig[2]);
177 kbuf->sig[1] = sig[2] | (sig[3] << 32);
178 case 1:
179 err |= __get_user (sig[1], &ubuf->sig[1]);
180 err |= __get_user (sig[0], &ubuf->sig[0]);
181 kbuf->sig[0] = sig[0] | (sig[1] << 32);
182 }
183
184 return err;
185}
186
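
put_sigset()/get_sigset() above split each 64-bit kernel sigset word into two 32-bit compat words and reassemble them on the way back in. A standalone round-trip check of that split for one word (the mask value is arbitrary):

#include <stdint.h>
#include <assert.h>

int main(void)
{
	uint64_t ksig = 0x0000000100040001ULL;		/* arbitrary kernel mask word */
	uint32_t lo32 = (uint32_t)(ksig & 0xffffffffu);	/* compat sig[0] */
	uint32_t hi32 = (uint32_t)(ksig >> 32);		/* compat sig[1] */

	/* get_sigset() reassembles exactly what put_sigset() wrote out */
	assert((((uint64_t)hi32 << 32) | lo32) == ksig);
	return 0;
}
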
187/*
188 * Atomically swap in the new signal mask, and wait for a signal.
189 */
190
191save_static_function(sys32_sigsuspend);
192__attribute_used__ noinline static int
193_sys32_sigsuspend(nabi_no_regargs struct pt_regs regs)
194{
195 compat_sigset_t *uset;
196 sigset_t newset, saveset;
197
198 uset = (compat_sigset_t *) regs.regs[4];
199 if (get_sigset(&newset, uset))
200 return -EFAULT;
201 sigdelsetmask(&newset, ~_BLOCKABLE);
202
203 spin_lock_irq(&current->sighand->siglock);
204 saveset = current->blocked;
205 current->blocked = newset;
206 recalc_sigpending();
207 spin_unlock_irq(&current->sighand->siglock);
208
209 regs.regs[2] = EINTR;
210 regs.regs[7] = 1;
211 while (1) {
212 current->state = TASK_INTERRUPTIBLE;
213 schedule();
214 if (do_signal32(&saveset, &regs))
215 return -EINTR;
216 }
217}
218
219save_static_function(sys32_rt_sigsuspend);
220__attribute_used__ noinline static int
221_sys32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
222{
223 compat_sigset_t *uset;
224 sigset_t newset, saveset;
225 size_t sigsetsize;
226
227 /* XXX Don't preclude handling different sized sigset_t's. */
228 sigsetsize = regs.regs[5];
229 if (sigsetsize != sizeof(compat_sigset_t))
230 return -EINVAL;
231
232 uset = (compat_sigset_t *) regs.regs[4];
233 if (get_sigset(&newset, uset))
234 return -EFAULT;
235 sigdelsetmask(&newset, ~_BLOCKABLE);
236
237 spin_lock_irq(&current->sighand->siglock);
238 saveset = current->blocked;
239 current->blocked = newset;
240 recalc_sigpending();
241 spin_unlock_irq(&current->sighand->siglock);
242
243 regs.regs[2] = EINTR;
244 regs.regs[7] = 1;
245 while (1) {
246 current->state = TASK_INTERRUPTIBLE;
247 schedule();
248 if (do_signal32(&saveset, &regs))
249 return -EINTR;
250 }
251}
252
253asmlinkage int sys32_sigaction(int sig, const struct sigaction32 *act,
254 struct sigaction32 *oact)
255{
256 struct k_sigaction new_ka, old_ka;
257 int ret;
258 int err = 0;
259
260 if (act) {
261 old_sigset_t mask;
262
263 if (!access_ok(VERIFY_READ, act, sizeof(*act)))
264 return -EFAULT;
265 err |= __get_user((u32)(u64)new_ka.sa.sa_handler,
266 &act->sa_handler);
267 err |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
268 err |= __get_user(mask, &act->sa_mask.sig[0]);
269 if (err)
270 return -EFAULT;
271
272 siginitset(&new_ka.sa.sa_mask, mask);
273 }
274
275 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
276
277 if (!ret && oact) {
278 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)))
279 return -EFAULT;
280 err |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
281 err |= __put_user((u32)(u64)old_ka.sa.sa_handler,
282 &oact->sa_handler);
283 err |= __put_user(old_ka.sa.sa_mask.sig[0], oact->sa_mask.sig);
284 err |= __put_user(0, &oact->sa_mask.sig[1]);
285 err |= __put_user(0, &oact->sa_mask.sig[2]);
286 err |= __put_user(0, &oact->sa_mask.sig[3]);
287 if (err)
288 return -EFAULT;
289 }
290
291 return ret;
292}
293
294asmlinkage int sys32_sigaltstack(nabi_no_regargs struct pt_regs regs)
295{
296 const stack32_t *uss = (const stack32_t *) regs.regs[4];
297 stack32_t *uoss = (stack32_t *) regs.regs[5];
298 unsigned long usp = regs.regs[29];
299 stack_t kss, koss;
300 int ret, err = 0;
301 mm_segment_t old_fs = get_fs();
302 s32 sp;
303
304 if (uss) {
305 if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
306 return -EFAULT;
307 err |= __get_user(sp, &uss->ss_sp);
308 kss.ss_sp = (void *) (long) sp;
309 err |= __get_user(kss.ss_size, &uss->ss_size);
310 err |= __get_user(kss.ss_flags, &uss->ss_flags);
311 if (err)
312 return -EFAULT;
313 }
314
315 set_fs (KERNEL_DS);
316 ret = do_sigaltstack(uss ? &kss : NULL , uoss ? &koss : NULL, usp);
317 set_fs (old_fs);
318
319 if (!ret && uoss) {
320 if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
321 return -EFAULT;
322 sp = (int) (long) koss.ss_sp;
323 err |= __put_user(sp, &uoss->ss_sp);
324 err |= __put_user(koss.ss_size, &uoss->ss_size);
325 err |= __put_user(koss.ss_flags, &uoss->ss_flags);
326 if (err)
327 return -EFAULT;
328 }
329 return ret;
330}
331
332static int restore_sigcontext32(struct pt_regs *regs, struct sigcontext32 *sc)
333{
334 int err = 0;
335 __u32 used_math;
336
337 /* Always make any pending restarted system calls return -EINTR */
338 current_thread_info()->restart_block.fn = do_no_restart_syscall;
339
340 err |= __get_user(regs->cp0_epc, &sc->sc_pc);
341 err |= __get_user(regs->hi, &sc->sc_mdhi);
342 err |= __get_user(regs->lo, &sc->sc_mdlo);
343
344#define restore_gp_reg(i) do { \
345 err |= __get_user(regs->regs[i], &sc->sc_regs[i]); \
346} while(0)
347 restore_gp_reg( 1); restore_gp_reg( 2); restore_gp_reg( 3);
348 restore_gp_reg( 4); restore_gp_reg( 5); restore_gp_reg( 6);
349 restore_gp_reg( 7); restore_gp_reg( 8); restore_gp_reg( 9);
350 restore_gp_reg(10); restore_gp_reg(11); restore_gp_reg(12);
351 restore_gp_reg(13); restore_gp_reg(14); restore_gp_reg(15);
352 restore_gp_reg(16); restore_gp_reg(17); restore_gp_reg(18);
353 restore_gp_reg(19); restore_gp_reg(20); restore_gp_reg(21);
354 restore_gp_reg(22); restore_gp_reg(23); restore_gp_reg(24);
355 restore_gp_reg(25); restore_gp_reg(26); restore_gp_reg(27);
356 restore_gp_reg(28); restore_gp_reg(29); restore_gp_reg(30);
357 restore_gp_reg(31);
358#undef restore_gp_reg
359
360 err |= __get_user(used_math, &sc->sc_used_math);
361 conditional_used_math(used_math);
362
363 preempt_disable();
364
365 if (used_math()) {
366 /* restore fpu context if we have used it before */
367 own_fpu();
368 err |= restore_fp_context32(sc);
369 } else {
370 /* signal handler may have used FPU. Give it up. */
371 lose_fpu();
372 }
373
374 preempt_enable();
375
376 return err;
377}
378
379struct sigframe {
380 u32 sf_ass[4]; /* argument save space for o32 */
381 u32 sf_code[2]; /* signal trampoline */
382 struct sigcontext32 sf_sc;
383 sigset_t sf_mask;
384};
385
386struct rt_sigframe32 {
387 u32 rs_ass[4]; /* argument save space for o32 */
388 u32 rs_code[2]; /* signal trampoline */
389 compat_siginfo_t rs_info;
390 struct ucontext32 rs_uc;
391};
392
393int copy_siginfo_to_user32(compat_siginfo_t *to, siginfo_t *from)
394{
395 int err;
396
397 if (!access_ok (VERIFY_WRITE, to, sizeof(compat_siginfo_t)))
398 return -EFAULT;
399
400 /* If you change siginfo_t structure, please be sure
401 this code is fixed accordingly.
402 It should never copy any pad contained in the structure
403 to avoid security leaks, but must copy the generic
404 3 ints plus the relevant union member.
405 This routine must convert siginfo from 64bit to 32bit as well
406 at the same time. */
407 err = __put_user(from->si_signo, &to->si_signo);
408 err |= __put_user(from->si_errno, &to->si_errno);
409 err |= __put_user((short)from->si_code, &to->si_code);
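	/*
	 * In-kernel si_code values carry the union discriminator in their
	 * upper 16 bits (__SI_CHLD, __SI_FAULT, ...); only the low 16 bits
	 * are part of the user ABI, hence the (short) cast above and the
	 * ">> 16" dispatch below.  Note that __SI_CHLD deliberately falls
	 * through into the default case so si_pid/si_uid are copied too.
	 */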
410 if (from->si_code < 0)
411 err |= __copy_to_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE);
412 else {
413 switch (from->si_code >> 16) {
414 case __SI_CHLD >> 16:
415 err |= __put_user(from->si_utime, &to->si_utime);
416 err |= __put_user(from->si_stime, &to->si_stime);
417 err |= __put_user(from->si_status, &to->si_status);
418 default:
419 err |= __put_user(from->si_pid, &to->si_pid);
420 err |= __put_user(from->si_uid, &to->si_uid);
421 break;
422 case __SI_FAULT >> 16:
423 err |= __put_user((long)from->si_addr, &to->si_addr);
424 break;
425 case __SI_POLL >> 16:
426 err |= __put_user(from->si_band, &to->si_band);
427 err |= __put_user(from->si_fd, &to->si_fd);
428 break;
429 case __SI_RT >> 16: /* This is not generated by the kernel as of now. */
430 case __SI_MESGQ >> 16:
431 err |= __put_user(from->si_pid, &to->si_pid);
432 err |= __put_user(from->si_uid, &to->si_uid);
433 err |= __put_user(from->si_int, &to->si_int);
434 break;
435 }
436 }
437 return err;
438}
439
440save_static_function(sys32_sigreturn);
441__attribute_used__ noinline static void
442_sys32_sigreturn(nabi_no_regargs struct pt_regs regs)
443{
444 struct sigframe *frame;
445 sigset_t blocked;
446
447 frame = (struct sigframe *) regs.regs[29];
448 if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
449 goto badframe;
450 if (__copy_from_user(&blocked, &frame->sf_mask, sizeof(blocked)))
451 goto badframe;
452
453 sigdelsetmask(&blocked, ~_BLOCKABLE);
454 spin_lock_irq(&current->sighand->siglock);
455 current->blocked = blocked;
456 recalc_sigpending();
457 spin_unlock_irq(&current->sighand->siglock);
458
459 if (restore_sigcontext32(&regs, &frame->sf_sc))
460 goto badframe;
461
462 /*
463 * Don't let your children do this ...
464 */
465 if (current_thread_info()->flags & TIF_SYSCALL_TRACE)
466 do_syscall_trace(&regs, 1);
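	/*
	 * restore_sigcontext32() rewrote the whole register file in *regs,
	 * so a normal C return (which only restores callee-saved registers)
	 * cannot be used here.  Instead, point $sp at the saved pt_regs and
	 * take the common syscall exit path, which reloads every register.
	 */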
467 __asm__ __volatile__(
468 "move\t$29, %0\n\t"
469 "j\tsyscall_exit"
470 :/* no outputs */
471 :"r" (&regs));
472 /* Unreached */
473
474badframe:
475 force_sig(SIGSEGV, current);
476}
477
478save_static_function(sys32_rt_sigreturn);
479__attribute_used__ noinline static void
480_sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
481{
482 struct rt_sigframe32 *frame;
483 sigset_t set;
484 stack_t st;
485 s32 sp;
486
487 frame = (struct rt_sigframe32 *) regs.regs[29];
488 if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
489 goto badframe;
490 if (__copy_from_user(&set, &frame->rs_uc.uc_sigmask, sizeof(set)))
491 goto badframe;
492
493 sigdelsetmask(&set, ~_BLOCKABLE);
494 spin_lock_irq(&current->sighand->siglock);
495 current->blocked = set;
496 recalc_sigpending();
497 spin_unlock_irq(&current->sighand->siglock);
498
499 if (restore_sigcontext32(&regs, &frame->rs_uc.uc_mcontext))
500 goto badframe;
501
502 /* The ucontext contains a stack32_t, so we must convert! */
503 if (__get_user(sp, &frame->rs_uc.uc_stack.ss_sp))
504 goto badframe;
505	st.ss_sp = (void *) (long) sp;
506 if (__get_user(st.ss_size, &frame->rs_uc.uc_stack.ss_size))
507 goto badframe;
508 if (__get_user(st.ss_flags, &frame->rs_uc.uc_stack.ss_flags))
509 goto badframe;
510
511 /* It is more difficult to avoid calling this function than to
512 call it and ignore errors. */
513 do_sigaltstack(&st, NULL, regs.regs[29]);
514
515 /*
516 * Don't let your children do this ...
517 */
518 __asm__ __volatile__(
519 "move\t$29, %0\n\t"
520 "j\tsyscall_exit"
521 :/* no outputs */
522 :"r" (&regs));
523 /* Unreached */
524
525badframe:
526 force_sig(SIGSEGV, current);
527}
528
529static inline int setup_sigcontext32(struct pt_regs *regs,
530 struct sigcontext32 *sc)
531{
532 int err = 0;
533
534 err |= __put_user(regs->cp0_epc, &sc->sc_pc);
535 err |= __put_user(regs->cp0_status, &sc->sc_status);
536
537#define save_gp_reg(i) do {						\
538	err |= __put_user(regs->regs[i], &sc->sc_regs[i]);	\
539} while(0)
540	err |= __put_user(0, &sc->sc_regs[0]); save_gp_reg(1); save_gp_reg(2);
541 save_gp_reg(3); save_gp_reg(4); save_gp_reg(5); save_gp_reg(6);
542 save_gp_reg(7); save_gp_reg(8); save_gp_reg(9); save_gp_reg(10);
543 save_gp_reg(11); save_gp_reg(12); save_gp_reg(13); save_gp_reg(14);
544 save_gp_reg(15); save_gp_reg(16); save_gp_reg(17); save_gp_reg(18);
545 save_gp_reg(19); save_gp_reg(20); save_gp_reg(21); save_gp_reg(22);
546 save_gp_reg(23); save_gp_reg(24); save_gp_reg(25); save_gp_reg(26);
547 save_gp_reg(27); save_gp_reg(28); save_gp_reg(29); save_gp_reg(30);
548 save_gp_reg(31);
549#undef save_gp_reg
550
551 err |= __put_user(regs->hi, &sc->sc_mdhi);
552 err |= __put_user(regs->lo, &sc->sc_mdlo);
553 err |= __put_user(regs->cp0_cause, &sc->sc_cause);
554 err |= __put_user(regs->cp0_badvaddr, &sc->sc_badvaddr);
555
556 err |= __put_user(!!used_math(), &sc->sc_used_math);
557
558 if (!used_math())
559 goto out;
560
561 /*
562 * Save FPU state to signal context. Signal handler will "inherit"
563 * current FPU state.
564 */
565 preempt_disable();
566
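	/*
	 * If this CPU does not currently own the task's FPU context, the
	 * live FP registers are stale: claim the FPU and reload the saved
	 * thread state first, so that save_fp_context32() below dumps the
	 * task's real FP registers into the sigcontext.
	 */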
567 if (!is_fpu_owner()) {
568 own_fpu();
569 restore_fp(current);
570 }
571 err |= save_fp_context32(sc);
572
573 preempt_enable();
574
575out:
576 return err;
577}
578
579/*
580 * Determine which stack to use..
581 */
582static inline void *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
583 size_t frame_size)
584{
585 unsigned long sp;
586
587 /* Default to using normal stack */
588 sp = regs->regs[29];
589
590 /*
591	 * FPU emulator may have its own trampoline active just
592 * above the user stack, 16-bytes before the next lowest
593 * 16 byte boundary. Try to avoid trashing it.
594 */
595 sp -= 32;
596
597 /* This is the X/Open sanctioned signal stack switching. */
598 if ((ka->sa.sa_flags & SA_ONSTACK) && (sas_ss_flags (sp) == 0))
599 sp = current->sas_ss_sp + current->sas_ss_size;
600
601 return (void *)((sp - frame_size) & ALMASK);
602}
603
604static inline void setup_frame(struct k_sigaction * ka, struct pt_regs *regs,
605 int signr, sigset_t *set)
606{
607 struct sigframe *frame;
608 int err = 0;
609
610 frame = get_sigframe(ka, regs, sizeof(*frame));
611 if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
612 goto give_sigsegv;
613
614 /*
615 * Set up the return code ...
616 *
617 * li v0, __NR_O32_sigreturn
618 * syscall
619 */
620 err |= __put_user(0x24020000 + __NR_O32_sigreturn, frame->sf_code + 0);
621 err |= __put_user(0x0000000c , frame->sf_code + 1);
622 flush_cache_sigtramp((unsigned long) frame->sf_code);
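	/*
	 * 0x24020000 is "addiu $v0, $zero, imm16", i.e. the "li v0, n"
	 * shown above, and 0x0000000c is the "syscall" opcode, so the two
	 * words written to sf_code form the sigreturn trampoline.  The
	 * cache flush writes them back from the D-cache and invalidates
	 * the I-cache line so the CPU actually fetches the new code.
	 */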
623
624 err |= setup_sigcontext32(regs, &frame->sf_sc);
625 err |= __copy_to_user(&frame->sf_mask, set, sizeof(*set));
626 if (err)
627 goto give_sigsegv;
628
629 /*
630 * Arguments to signal handler:
631 *
632 * a0 = signal number
633 * a1 = 0 (should be cause)
634 * a2 = pointer to struct sigcontext
635 *
636 * $25 and c0_epc point to the signal handler, $29 points to the
637 * struct sigframe.
638 */
639 regs->regs[ 4] = signr;
640 regs->regs[ 5] = 0;
641 regs->regs[ 6] = (unsigned long) &frame->sf_sc;
642 regs->regs[29] = (unsigned long) frame;
643 regs->regs[31] = (unsigned long) frame->sf_code;
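	/*
	 * The o32 PIC calling convention expects the callee's own address
	 * in $t9 ($25) so its prologue can compute $gp, which is why $25
	 * is loaded alongside the exception return address below.
	 */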
644 regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler;
645
646#if DEBUG_SIG
647 printk("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%p\n",
648 current->comm, current->pid,
649 frame, regs->cp0_epc, frame->sf_code);
650#endif
651 return;
652
653give_sigsegv:
654 force_sigsegv(signr, current);
655}
656
657static inline void setup_rt_frame(struct k_sigaction * ka,
658 struct pt_regs *regs, int signr,
659 sigset_t *set, siginfo_t *info)
660{
661 struct rt_sigframe32 *frame;
662 int err = 0;
663 s32 sp;
664
665 frame = get_sigframe(ka, regs, sizeof(*frame));
666 if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
667 goto give_sigsegv;
668
669 /* Set up to return from userspace. If provided, use a stub already
670 in userspace. */
671 /*
672 * Set up the return code ...
673 *
674 * li v0, __NR_O32_rt_sigreturn
675 * syscall
676 */
677 err |= __put_user(0x24020000 + __NR_O32_rt_sigreturn, frame->rs_code + 0);
678 err |= __put_user(0x0000000c , frame->rs_code + 1);
679 flush_cache_sigtramp((unsigned long) frame->rs_code);
680
681 /* Convert (siginfo_t -> compat_siginfo_t) and copy to user. */
682 err |= copy_siginfo_to_user32(&frame->rs_info, info);
683
684 /* Create the ucontext. */
685 err |= __put_user(0, &frame->rs_uc.uc_flags);
686 err |= __put_user(0, &frame->rs_uc.uc_link);
687 sp = (int) (long) current->sas_ss_sp;
688 err |= __put_user(sp,
689 &frame->rs_uc.uc_stack.ss_sp);
690 err |= __put_user(sas_ss_flags(regs->regs[29]),
691 &frame->rs_uc.uc_stack.ss_flags);
692 err |= __put_user(current->sas_ss_size,
693 &frame->rs_uc.uc_stack.ss_size);
694 err |= setup_sigcontext32(regs, &frame->rs_uc.uc_mcontext);
695 err |= __copy_to_user(&frame->rs_uc.uc_sigmask, set, sizeof(*set));
696
697 if (err)
698 goto give_sigsegv;
699
700 /*
701 * Arguments to signal handler:
702 *
703 * a0 = signal number
704 * a1 = 0 (should be cause)
705 * a2 = pointer to ucontext
706 *
707 * $25 and c0_epc point to the signal handler, $29 points to
708 * the struct rt_sigframe32.
709 */
710 regs->regs[ 4] = signr;
711 regs->regs[ 5] = (unsigned long) &frame->rs_info;
712 regs->regs[ 6] = (unsigned long) &frame->rs_uc;
713 regs->regs[29] = (unsigned long) frame;
714 regs->regs[31] = (unsigned long) frame->rs_code;
715 regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler;
716
717#if DEBUG_SIG
718 printk("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%p\n",
719 current->comm, current->pid,
720 frame, regs->cp0_epc, frame->rs_code);
721#endif
722 return;
723
724give_sigsegv:
725 force_sigsegv(signr, current);
726}
727
728static inline void handle_signal(unsigned long sig, siginfo_t *info,
729 struct k_sigaction *ka, sigset_t *oldset, struct pt_regs * regs)
730{
731 switch (regs->regs[0]) {
732 case ERESTART_RESTARTBLOCK:
733 case ERESTARTNOHAND:
734 regs->regs[2] = EINTR;
735 break;
736 case ERESTARTSYS:
737 if(!(ka->sa.sa_flags & SA_RESTART)) {
738 regs->regs[2] = EINTR;
739 break;
740 }
741 /* fallthrough */
742 case ERESTARTNOINTR: /* Userland will reload $v0. */
743 regs->regs[7] = regs->regs[26];
744 regs->cp0_epc -= 8;
745 }
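	/*
	 * Backing cp0_epc up by 8 points it at the "li v0, nr; syscall"
	 * pair again, so both instructions re-execute and $v0 is reloaded
	 * with the original syscall number; regs[26] holds the copy of $a3
	 * saved by the syscall entry path, so restoring regs[7] from it
	 * clears the error flag for the retry.
	 */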
746
747 regs->regs[0] = 0; /* Don't deal with this again. */
748
749 if (ka->sa.sa_flags & SA_SIGINFO)
750 setup_rt_frame(ka, regs, sig, oldset, info);
751 else
752 setup_frame(ka, regs, sig, oldset);
753
754 if (!(ka->sa.sa_flags & SA_NODEFER)) {
755 spin_lock_irq(&current->sighand->siglock);
756 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
757 sigaddset(&current->blocked,sig);
758 recalc_sigpending();
759 spin_unlock_irq(&current->sighand->siglock);
760 }
761}
762
763int do_signal32(sigset_t *oldset, struct pt_regs *regs)
764{
765 struct k_sigaction ka;
766 siginfo_t info;
767 int signr;
768
769 /*
770 * We want the common case to go fast, which is why we may in certain
771 * cases get here from kernel mode. Just return without doing anything
772 * if so.
773 */
774 if (!user_mode(regs))
775 return 1;
776
777 if (try_to_freeze(0))
778 goto no_signal;
779
780 if (!oldset)
781 oldset = &current->blocked;
782
783 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
784 if (signr > 0) {
785 handle_signal(signr, &info, &ka, oldset, regs);
786 return 1;
787 }
788
789no_signal:
790 /*
791	 * Whose code doesn't conform to the restartable syscall convention
792 * dies here!!! The li instruction, a single machine instruction,
793 * must directly be followed by the syscall instruction.
794 */
795 if (regs->regs[0]) {
796 if (regs->regs[2] == ERESTARTNOHAND ||
797 regs->regs[2] == ERESTARTSYS ||
798 regs->regs[2] == ERESTARTNOINTR) {
799 regs->regs[7] = regs->regs[26];
800 regs->cp0_epc -= 8;
801 }
802 if (regs->regs[2] == ERESTART_RESTARTBLOCK) {
803 regs->regs[2] = __NR_O32_restart_syscall;
804 regs->regs[7] = regs->regs[26];
805 regs->cp0_epc -= 4;
806 }
807 }
808 return 0;
809}
810
811asmlinkage int sys32_rt_sigaction(int sig, const struct sigaction32 *act,
812 struct sigaction32 *oact,
813 unsigned int sigsetsize)
814{
815 struct k_sigaction new_sa, old_sa;
816 int ret = -EINVAL;
817
818 /* XXX: Don't preclude handling different sized sigset_t's. */
819 if (sigsetsize != sizeof(sigset_t))
820 goto out;
821
822 if (act) {
823 int err = 0;
824
825 if (!access_ok(VERIFY_READ, act, sizeof(*act)))
826 return -EFAULT;
827 err |= __get_user((u32)(u64)new_sa.sa.sa_handler,
828 &act->sa_handler);
829 err |= __get_user(new_sa.sa.sa_flags, &act->sa_flags);
830 err |= get_sigset(&new_sa.sa.sa_mask, &act->sa_mask);
831 if (err)
832 return -EFAULT;
833 }
834
835 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
836
837 if (!ret && oact) {
838 int err = 0;
839
840 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)))
841 return -EFAULT;
842
843 err |= __put_user((u32)(u64)old_sa.sa.sa_handler,
844 &oact->sa_handler);
845 err |= __put_user(old_sa.sa.sa_flags, &oact->sa_flags);
846 err |= put_sigset(&old_sa.sa.sa_mask, &oact->sa_mask);
847 if (err)
848 return -EFAULT;
849 }
850out:
851 return ret;
852}
853
854asmlinkage int sys32_rt_sigprocmask(int how, compat_sigset_t *set,
855 compat_sigset_t *oset, unsigned int sigsetsize)
856{
857 sigset_t old_set, new_set;
858 int ret;
859 mm_segment_t old_fs = get_fs();
860
861 if (set && get_sigset(&new_set, set))
862 return -EFAULT;
863
864 set_fs (KERNEL_DS);
865 ret = sys_rt_sigprocmask(how, set ? &new_set : NULL,
866 oset ? &old_set : NULL, sigsetsize);
867 set_fs (old_fs);
868
869 if (!ret && oset && put_sigset(&old_set, oset))
870 return -EFAULT;
871
872 return ret;
873}
874
875asmlinkage int sys32_rt_sigpending(compat_sigset_t *uset,
876 unsigned int sigsetsize)
877{
878 int ret;
879 sigset_t set;
880 mm_segment_t old_fs = get_fs();
881
882 set_fs (KERNEL_DS);
883 ret = sys_rt_sigpending(&set, sigsetsize);
884 set_fs (old_fs);
885
886 if (!ret && put_sigset(&set, uset))
887 return -EFAULT;
888
889 return ret;
890}
891
892asmlinkage int sys32_rt_sigqueueinfo(int pid, int sig, compat_siginfo_t *uinfo)
893{
894 siginfo_t info;
895 int ret;
896 mm_segment_t old_fs = get_fs();
897
898 if (copy_from_user (&info, uinfo, 3*sizeof(int)) ||
899 copy_from_user (info._sifields._pad, uinfo->_sifields._pad, SI_PAD_SIZE))
900 return -EFAULT;
901 set_fs (KERNEL_DS);
902 ret = sys_rt_sigqueueinfo(pid, sig, &info);
903 set_fs (old_fs);
904 return ret;
905}
diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c
new file mode 100644
index 000000000000..3544208d4b4b
--- /dev/null
+++ b/arch/mips/kernel/signal_n32.c
@@ -0,0 +1,197 @@
1/*
2 * Copyright (C) 2003 Broadcom Corporation
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version 2
7 * of the License, or (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 */
18#include <linux/sched.h>
19#include <linux/mm.h>
20#include <linux/smp.h>
21#include <linux/smp_lock.h>
22#include <linux/kernel.h>
23#include <linux/signal.h>
24#include <linux/errno.h>
25#include <linux/wait.h>
26#include <linux/ptrace.h>
27#include <linux/unistd.h>
28#include <linux/compat.h>
29#include <linux/bitops.h>
30
31#include <asm/asm.h>
32#include <asm/cacheflush.h>
33#include <asm/sim.h>
34#include <asm/uaccess.h>
35#include <asm/ucontext.h>
36#include <asm/system.h>
37#include <asm/fpu.h>
38#include <asm/cpu-features.h>
39
40#include "signal-common.h"
41
42/*
43 * Including <asm/unistd.h> would give us the 64-bit syscall numbers ...
44 */
45#define __NR_N32_rt_sigreturn 6211
46#define __NR_N32_restart_syscall 6214
47
48#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
49
50/* IRIX compatible stack_t */
51typedef struct sigaltstack32 {
52 s32 ss_sp;
53 compat_size_t ss_size;
54 int ss_flags;
55} stack32_t;
56
57struct ucontextn32 {
58 u32 uc_flags;
59 s32 uc_link;
60 stack32_t uc_stack;
61 struct sigcontext uc_mcontext;
62 sigset_t uc_sigmask; /* mask last for extensibility */
63};
64
65#if PLAT_TRAMPOLINE_STUFF_LINE
66#define __tramp __attribute__((aligned(PLAT_TRAMPOLINE_STUFF_LINE)))
67#else
68#define __tramp
69#endif
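/*
 * Presumably PLAT_TRAMPOLINE_STUFF_LINE is the platform's cache line size
 * when the signal trampoline needs a line of its own: __tramp then aligns
 * the trampoline and the member following it, so flush_cache_sigtramp()
 * only ever touches a line that contains nothing but the trampoline.
 */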
70
71struct rt_sigframe_n32 {
72 u32 rs_ass[4]; /* argument save space for o32 */
73 u32 rs_code[2] __tramp; /* signal trampoline */
74 struct siginfo rs_info __tramp;
75 struct ucontextn32 rs_uc;
76};
77
78save_static_function(sysn32_rt_sigreturn);
79__attribute_used__ noinline static void
80_sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
81{
82 struct rt_sigframe_n32 *frame;
83 sigset_t set;
84 stack_t st;
85 s32 sp;
86
87 frame = (struct rt_sigframe_n32 *) regs.regs[29];
88 if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
89 goto badframe;
90 if (__copy_from_user(&set, &frame->rs_uc.uc_sigmask, sizeof(set)))
91 goto badframe;
92
93 sigdelsetmask(&set, ~_BLOCKABLE);
94 spin_lock_irq(&current->sighand->siglock);
95 current->blocked = set;
96 recalc_sigpending();
97 spin_unlock_irq(&current->sighand->siglock);
98
99 if (restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext))
100 goto badframe;
101
102 /* The ucontext contains a stack32_t, so we must convert! */
103 if (__get_user(sp, &frame->rs_uc.uc_stack.ss_sp))
104 goto badframe;
105	st.ss_sp = (void *) (long) sp;
106 if (__get_user(st.ss_size, &frame->rs_uc.uc_stack.ss_size))
107 goto badframe;
108 if (__get_user(st.ss_flags, &frame->rs_uc.uc_stack.ss_flags))
109 goto badframe;
110
111 /* It is more difficult to avoid calling this function than to
112 call it and ignore errors. */
113 do_sigaltstack(&st, NULL, regs.regs[29]);
114
115 /*
116 * Don't let your children do this ...
117 */
118 __asm__ __volatile__(
119 "move\t$29, %0\n\t"
120 "j\tsyscall_exit"
121 :/* no outputs */
122 :"r" (&regs));
123 /* Unreached */
124
125badframe:
126 force_sig(SIGSEGV, current);
127}
128
129void setup_rt_frame_n32(struct k_sigaction * ka,
130 struct pt_regs *regs, int signr, sigset_t *set, siginfo_t *info)
131{
132 struct rt_sigframe_n32 *frame;
133 int err = 0;
134 s32 sp;
135
136 frame = get_sigframe(ka, regs, sizeof(*frame));
137 if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
138 goto give_sigsegv;
139
140 /*
141 * Set up the return code ...
142 *
143 * li v0, __NR_rt_sigreturn
144 * syscall
145 */
146 if (PLAT_TRAMPOLINE_STUFF_LINE)
147 __clear_user(frame->rs_code, PLAT_TRAMPOLINE_STUFF_LINE);
148 err |= __put_user(0x24020000 + __NR_N32_rt_sigreturn, frame->rs_code + 0);
149 err |= __put_user(0x0000000c , frame->rs_code + 1);
150 flush_cache_sigtramp((unsigned long) frame->rs_code);
151
152 /* Create siginfo. */
153 err |= copy_siginfo_to_user(&frame->rs_info, info);
154
155 /* Create the ucontext. */
156 err |= __put_user(0, &frame->rs_uc.uc_flags);
157 err |= __put_user(0, &frame->rs_uc.uc_link);
158 sp = (int) (long) current->sas_ss_sp;
159 err |= __put_user(sp,
160 &frame->rs_uc.uc_stack.ss_sp);
161 err |= __put_user(sas_ss_flags(regs->regs[29]),
162 &frame->rs_uc.uc_stack.ss_flags);
163 err |= __put_user(current->sas_ss_size,
164 &frame->rs_uc.uc_stack.ss_size);
165 err |= setup_sigcontext(regs, &frame->rs_uc.uc_mcontext);
166 err |= __copy_to_user(&frame->rs_uc.uc_sigmask, set, sizeof(*set));
167
168 if (err)
169 goto give_sigsegv;
170
171 /*
172 * Arguments to signal handler:
173 *
174 * a0 = signal number
175 * a1 = 0 (should be cause)
176 * a2 = pointer to ucontext
177 *
178 * $25 and c0_epc point to the signal handler, $29 points to
179 * the struct rt_sigframe.
180 */
181 regs->regs[ 4] = signr;
182 regs->regs[ 5] = (unsigned long) &frame->rs_info;
183 regs->regs[ 6] = (unsigned long) &frame->rs_uc;
184 regs->regs[29] = (unsigned long) frame;
185 regs->regs[31] = (unsigned long) frame->rs_code;
186 regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler;
187
188#if DEBUG_SIG
189 printk("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%p\n",
190 current->comm, current->pid,
191 frame, regs->cp0_epc, regs->regs[31]);
192#endif
193 return;
194
195give_sigsegv:
196 force_sigsegv(signr, current);
197}
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
new file mode 100644
index 000000000000..af5cd3b8a396
--- /dev/null
+++ b/arch/mips/kernel/smp.c
@@ -0,0 +1,425 @@
1/*
2 * This program is free software; you can redistribute it and/or
3 * modify it under the terms of the GNU General Public License
4 * as published by the Free Software Foundation; either version 2
5 * of the License, or (at your option) any later version.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software
14 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
15 *
16 * Copyright (C) 2000, 2001 Kanoj Sarcar
17 * Copyright (C) 2000, 2001 Ralf Baechle
18 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
19 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
20 */
21#include <linux/cache.h>
22#include <linux/delay.h>
23#include <linux/init.h>
24#include <linux/interrupt.h>
25#include <linux/spinlock.h>
26#include <linux/threads.h>
27#include <linux/module.h>
28#include <linux/time.h>
29#include <linux/timex.h>
30#include <linux/sched.h>
31#include <linux/cpumask.h>
32
33#include <asm/atomic.h>
34#include <asm/cpu.h>
35#include <asm/processor.h>
36#include <asm/system.h>
37#include <asm/mmu_context.h>
38#include <asm/smp.h>
39
40cpumask_t phys_cpu_present_map; /* Bitmask of available CPUs */
41volatile cpumask_t cpu_callin_map; /* Bitmask of started secondaries */
42cpumask_t cpu_online_map; /* Bitmask of currently online CPUs */
43int __cpu_number_map[NR_CPUS]; /* Map physical to logical */
44int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */
45
46EXPORT_SYMBOL(phys_cpu_present_map);
47EXPORT_SYMBOL(cpu_online_map);
48
49static void smp_tune_scheduling (void)
50{
51 struct cache_desc *cd = &current_cpu_data.scache;
52 unsigned long cachesize; /* kB */
53 unsigned long bandwidth = 350; /* MB/s */
54 unsigned long cpu_khz;
55
56 /*
57	 * Crude estimate until we actually measure ...
58 */
59 cpu_khz = loops_per_jiffy * 2 * HZ / 1000;
60
61 /*
62 * Rough estimation for SMP scheduling, this is the number of
63 * cycles it takes for a fully memory-limited process to flush
64 * the SMP-local cache.
65 *
66 * (For a P5 this pretty much means we will choose another idle
67 * CPU almost always at wakeup time (this is due to the small
68 * L1 cache), on PIIs it's around 50-100 usecs, depending on
69 * the cache size)
70 */
71 if (!cpu_khz)
72 return;
73
74 cachesize = cd->linesz * cd->sets * cd->ways;
75}
76
77extern void __init calibrate_delay(void);
78extern ATTRIB_NORET void cpu_idle(void);
79
80/*
81 * First C code run on the secondary CPUs after being started up by
82 * the master.
83 */
84asmlinkage void start_secondary(void)
85{
86 unsigned int cpu = smp_processor_id();
87
88 cpu_probe();
89 cpu_report();
90 per_cpu_trap_init();
91 prom_init_secondary();
92
93 /*
94 * XXX parity protection should be folded in here when it's converted
95 * to an option instead of something based on .cputype
96 */
97
98 calibrate_delay();
99 cpu_data[cpu].udelay_val = loops_per_jiffy;
100
101 prom_smp_finish();
102
103 cpu_set(cpu, cpu_callin_map);
104
105 cpu_idle();
106}
107
108DEFINE_SPINLOCK(smp_call_lock);
109
110struct call_data_struct *call_data;
111
112/*
113 * Run a function on all other CPUs.
114 * <func> The function to run. This must be fast and non-blocking.
115 * <info> An arbitrary pointer to pass to the function.
116 * <retry> If true, keep retrying until ready.
117 * <wait> If true, wait until function has completed on other CPUs.
118 * [RETURNS] 0 on success, else a negative status code.
119 *
120 * Does not return until remote CPUs are nearly ready to execute <func>
121 * or have already executed it.
122 *
123 * You must not call this function with disabled interrupts or from a
124 * hardware interrupt handler or from a bottom half handler.
125 */
126int smp_call_function (void (*func) (void *info), void *info, int retry,
127 int wait)
128{
129 struct call_data_struct data;
130 int i, cpus = num_online_cpus() - 1;
131 int cpu = smp_processor_id();
132
133 if (!cpus)
134 return 0;
135
136 /* Can deadlock when called with interrupts disabled */
137 WARN_ON(irqs_disabled());
138
139 data.func = func;
140 data.info = info;
141 atomic_set(&data.started, 0);
142 data.wait = wait;
143 if (wait)
144 atomic_set(&data.finished, 0);
145
146 spin_lock(&smp_call_lock);
147 call_data = &data;
148 mb();
149
150 /* Send a message to all other CPUs and wait for them to respond */
151 for (i = 0; i < NR_CPUS; i++)
152 if (cpu_online(i) && i != cpu)
153 core_send_ipi(i, SMP_CALL_FUNCTION);
154
155 /* Wait for response */
156 /* FIXME: lock-up detection, backtrace on lock-up */
157 while (atomic_read(&data.started) != cpus)
158 barrier();
159
160 if (wait)
161 while (atomic_read(&data.finished) != cpus)
162 barrier();
163 spin_unlock(&smp_call_lock);
164
165 return 0;
166}
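/*
 * Hypothetical usage sketch (names invented for illustration): a caller
 * that wants some per-CPU state drained on every processor might do
 *
 *	static void drain_local(void *unused) { ... }
 *
 *	smp_call_function(drain_local, NULL, 1, 1);	(all other CPUs)
 *	drain_local(NULL);				(and this one)
 *
 * The callback runs from interrupt context on the remote CPUs, so it must
 * not sleep or take sleeping locks; on_each_cpu() wraps this
 * "remote plus local" pattern.
 */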
167
168void smp_call_function_interrupt(void)
169{
170 void (*func) (void *info) = call_data->func;
171 void *info = call_data->info;
172 int wait = call_data->wait;
173
174 /*
175 * Notify initiating CPU that I've grabbed the data and am
176 * about to execute the function.
177 */
178 mb();
179 atomic_inc(&call_data->started);
180
181 /*
182 * At this point the info structure may be out of scope unless wait==1.
183 */
184 irq_enter();
185 (*func)(info);
186 irq_exit();
187
188 if (wait) {
189 mb();
190 atomic_inc(&call_data->finished);
191 }
192}
193
194static void stop_this_cpu(void *dummy)
195{
196 /*
197 * Remove this CPU:
198 */
199 cpu_clear(smp_processor_id(), cpu_online_map);
200 local_irq_enable(); /* May need to service _machine_restart IPI */
201 for (;;); /* Wait if available. */
202}
203
204void smp_send_stop(void)
205{
206 smp_call_function(stop_this_cpu, NULL, 1, 0);
207}
208
209void __init smp_cpus_done(unsigned int max_cpus)
210{
211 prom_cpus_done();
212}
213
214/* called from main before smp_init() */
215void __init smp_prepare_cpus(unsigned int max_cpus)
216{
217 cpu_data[0].udelay_val = loops_per_jiffy;
218 init_new_context(current, &init_mm);
219 current_thread_info()->cpu = 0;
220 smp_tune_scheduling();
221 prom_prepare_cpus(max_cpus);
222}
223
224/* preload SMP state for boot cpu */
225void __devinit smp_prepare_boot_cpu(void)
226{
227 /*
228 * This assumes that bootup is always handled by the processor
229	 * with the logical and physical number 0.
230 */
231 __cpu_number_map[0] = 0;
232 __cpu_logical_map[0] = 0;
233 cpu_set(0, phys_cpu_present_map);
234 cpu_set(0, cpu_online_map);
235 cpu_set(0, cpu_callin_map);
236}
237
238/*
239 * Startup the CPU with this logical number
240 */
241static int __init do_boot_cpu(int cpu)
242{
243 struct task_struct *idle;
244
245 /*
246 * The following code is purely to make sure
247 * Linux can schedule processes on this slave.
248 */
249 idle = fork_idle(cpu);
250 if (IS_ERR(idle))
251 panic("failed fork for CPU %d\n", cpu);
252
253 prom_boot_secondary(cpu, idle);
254
255 /* XXXKW timeout */
256 while (!cpu_isset(cpu, cpu_callin_map))
257 udelay(100);
258
259 cpu_set(cpu, cpu_online_map);
260
261 return 0;
262}
263
264/*
265 * Called once for each "cpu_possible(cpu)". Needs to spin up the cpu
266 * and keep control until "cpu_online(cpu)" is set. Note: cpu is
267 * physical, not logical.
268 */
269int __devinit __cpu_up(unsigned int cpu)
270{
271 int ret;
272
273 /* Processor goes to start_secondary(), sets online flag */
274 ret = do_boot_cpu(cpu);
275 if (ret < 0)
276 return ret;
277
278 return 0;
279}
280
281/* Not really SMP stuff ... */
282int setup_profiling_timer(unsigned int multiplier)
283{
284 return 0;
285}
286
287static void flush_tlb_all_ipi(void *info)
288{
289 local_flush_tlb_all();
290}
291
292void flush_tlb_all(void)
293{
294 on_each_cpu(flush_tlb_all_ipi, 0, 1, 1);
295}
296
297static void flush_tlb_mm_ipi(void *mm)
298{
299 local_flush_tlb_mm((struct mm_struct *)mm);
300}
301
302/*
303 * The following tlb flush calls are invoked when old translations are
304 * being torn down, or pte attributes are changing. For single threaded
305 * address spaces, a new context is obtained on the current cpu, and tlb
306 * context on other cpus are invalidated to force a new context allocation
307 * at switch_mm time, should the mm ever be used on other cpus. For
308 * multithreaded address spaces, intercpu interrupts have to be sent.
309 * Another case where intercpu interrupts are required is when the target
310 * mm might be active on another cpu (eg debuggers doing the flushes on
311 * behalf of debuggees, kswapd stealing pages from another process etc).
312 * Kanoj 07/00.
313 */
314
315void flush_tlb_mm(struct mm_struct *mm)
316{
317 preempt_disable();
318
319 if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
320 smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1, 1);
321 } else {
322 int i;
323 for (i = 0; i < num_online_cpus(); i++)
324 if (smp_processor_id() != i)
325 cpu_context(i, mm) = 0;
326 }
327 local_flush_tlb_mm(mm);
328
329 preempt_enable();
330}
331
332struct flush_tlb_data {
333 struct vm_area_struct *vma;
334 unsigned long addr1;
335 unsigned long addr2;
336};
337
338static void flush_tlb_range_ipi(void *info)
339{
340 struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
341
342 local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
343}
344
345void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
346{
347 struct mm_struct *mm = vma->vm_mm;
348
349 preempt_disable();
350 if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
351 struct flush_tlb_data fd;
352
353 fd.vma = vma;
354 fd.addr1 = start;
355 fd.addr2 = end;
356 smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1, 1);
357 } else {
358 int i;
359 for (i = 0; i < num_online_cpus(); i++)
360 if (smp_processor_id() != i)
361 cpu_context(i, mm) = 0;
362 }
363 local_flush_tlb_range(vma, start, end);
364 preempt_enable();
365}
366
367static void flush_tlb_kernel_range_ipi(void *info)
368{
369 struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
370
371 local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
372}
373
374void flush_tlb_kernel_range(unsigned long start, unsigned long end)
375{
376 struct flush_tlb_data fd;
377
378 fd.addr1 = start;
379 fd.addr2 = end;
380 on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1, 1);
381}
382
383static void flush_tlb_page_ipi(void *info)
384{
385 struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
386
387 local_flush_tlb_page(fd->vma, fd->addr1);
388}
389
390void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
391{
392 preempt_disable();
393 if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
394 struct flush_tlb_data fd;
395
396 fd.vma = vma;
397 fd.addr1 = page;
398 smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1, 1);
399 } else {
400 int i;
401 for (i = 0; i < num_online_cpus(); i++)
402 if (smp_processor_id() != i)
403 cpu_context(i, vma->vm_mm) = 0;
404 }
405 local_flush_tlb_page(vma, page);
406 preempt_enable();
407}
408
409static void flush_tlb_one_ipi(void *info)
410{
411 unsigned long vaddr = (unsigned long) info;
412
413 local_flush_tlb_one(vaddr);
414}
415
416void flush_tlb_one(unsigned long vaddr)
417{
418 smp_call_function(flush_tlb_one_ipi, (void *) vaddr, 1, 1);
419 local_flush_tlb_one(vaddr);
420}
421
422EXPORT_SYMBOL(flush_tlb_page);
423EXPORT_SYMBOL(flush_tlb_one);
424EXPORT_SYMBOL(cpu_data);
425EXPORT_SYMBOL(synchronize_irq);
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
new file mode 100644
index 000000000000..598bfe7426a2
--- /dev/null
+++ b/arch/mips/kernel/syscall.c
@@ -0,0 +1,407 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1995, 1996, 1997, 2000, 2001, 05 by Ralf Baechle
7 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
8 * Copyright (C) 2001 MIPS Technologies, Inc.
9 */
10#include <linux/a.out.h>
11#include <linux/errno.h>
12#include <linux/linkage.h>
13#include <linux/mm.h>
14#include <linux/smp.h>
15#include <linux/smp_lock.h>
16#include <linux/mman.h>
17#include <linux/ptrace.h>
18#include <linux/sched.h>
19#include <linux/string.h>
20#include <linux/syscalls.h>
21#include <linux/file.h>
22#include <linux/slab.h>
23#include <linux/utsname.h>
24#include <linux/unistd.h>
25#include <linux/sem.h>
26#include <linux/msg.h>
27#include <linux/shm.h>
28#include <linux/compiler.h>
29
30#include <asm/branch.h>
31#include <asm/cachectl.h>
32#include <asm/cacheflush.h>
33#include <asm/ipc.h>
34#include <asm/offset.h>
35#include <asm/signal.h>
36#include <asm/sim.h>
37#include <asm/shmparam.h>
38#include <asm/sysmips.h>
39#include <asm/uaccess.h>
40
41asmlinkage int sys_pipe(nabi_no_regargs volatile struct pt_regs regs)
42{
43 int fd[2];
44 int error, res;
45
46 error = do_pipe(fd);
47 if (error) {
48 res = error;
49 goto out;
50 }
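	/*
	 * MIPS returns both descriptors in registers: fd[0] comes back in
	 * $v0 (the normal return value) and fd[1] in $v1 (regs[3]).
	 */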
51 regs.regs[3] = fd[1];
52 res = fd[0];
53out:
54 return res;
55}
56
57unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */
58
59#define COLOUR_ALIGN(addr,pgoff) \
60 ((((addr) + shm_align_mask) & ~shm_align_mask) + \
61 (((pgoff) << PAGE_SHIFT) & shm_align_mask))
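/*
 * COLOUR_ALIGN() rounds an address up so that the bits which index a
 * virtually-indexed cache agree with the file offset's "colour"; shared
 * mappings then hit the same cache lines through every mapping.  For
 * example (hypothetical values), with shm_align_mask == 0x3fff (16kB)
 * and PAGE_SHIFT == 12:
 *
 *	COLOUR_ALIGN(0x12345, 1)
 *		= ((0x12345 + 0x3fff) & ~0x3fff) + ((1 << 12) & 0x3fff)
 *		= 0x14000 + 0x1000 = 0x15000,
 *
 * whose low 14 bits match those of the mapped file offset.  With the
 * default mask of PAGE_SIZE - 1 this degenerates to PAGE_ALIGN().
 */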
62
63unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
64 unsigned long len, unsigned long pgoff, unsigned long flags)
65{
66 struct vm_area_struct * vmm;
67 int do_color_align;
68 unsigned long task_size;
69
70 task_size = STACK_TOP;
71
72 if (flags & MAP_FIXED) {
73 /*
74 * We do not accept a shared mapping if it would violate
75 * cache aliasing constraints.
76 */
77 if ((flags & MAP_SHARED) && (addr & shm_align_mask))
78 return -EINVAL;
79 return addr;
80 }
81
82 if (len > task_size)
83 return -ENOMEM;
84 do_color_align = 0;
85 if (filp || (flags & MAP_SHARED))
86 do_color_align = 1;
87 if (addr) {
88 if (do_color_align)
89 addr = COLOUR_ALIGN(addr, pgoff);
90 else
91 addr = PAGE_ALIGN(addr);
92 vmm = find_vma(current->mm, addr);
93 if (task_size - len >= addr &&
94 (!vmm || addr + len <= vmm->vm_start))
95 return addr;
96 }
97 addr = TASK_UNMAPPED_BASE;
98 if (do_color_align)
99 addr = COLOUR_ALIGN(addr, pgoff);
100 else
101 addr = PAGE_ALIGN(addr);
102
103 for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
104 /* At this point: (!vmm || addr < vmm->vm_end). */
105 if (task_size - len < addr)
106 return -ENOMEM;
107 if (!vmm || addr + len <= vmm->vm_start)
108 return addr;
109 addr = vmm->vm_end;
110 if (do_color_align)
111 addr = COLOUR_ALIGN(addr, pgoff);
112 }
113}
114
115/* common code for old and new mmaps */
116static inline unsigned long
117do_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
118 unsigned long flags, unsigned long fd, unsigned long pgoff)
119{
120 unsigned long error = -EBADF;
121 struct file * file = NULL;
122
123 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
124 if (!(flags & MAP_ANONYMOUS)) {
125 file = fget(fd);
126 if (!file)
127 goto out;
128 }
129
130 down_write(&current->mm->mmap_sem);
131 error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
132 up_write(&current->mm->mmap_sem);
133
134 if (file)
135 fput(file);
136out:
137 return error;
138}
139
140asmlinkage unsigned long
141old_mmap(unsigned long addr, unsigned long len, int prot,
142 int flags, int fd, off_t offset)
143{
144 unsigned long result;
145
146 result = -EINVAL;
147 if (offset & ~PAGE_MASK)
148 goto out;
149
150 result = do_mmap2(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
151
152out:
153 return result;
154}
155
156asmlinkage unsigned long
157sys_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
158 unsigned long flags, unsigned long fd, unsigned long pgoff)
159{
160 return do_mmap2(addr, len, prot, flags, fd, pgoff);
161}
162
163save_static_function(sys_fork);
164__attribute_used__ noinline static int
165_sys_fork(nabi_no_regargs struct pt_regs regs)
166{
167 return do_fork(SIGCHLD, regs.regs[29], &regs, 0, NULL, NULL);
168}
169
170save_static_function(sys_clone);
171__attribute_used__ noinline static int
172_sys_clone(nabi_no_regargs struct pt_regs regs)
173{
174 unsigned long clone_flags;
175 unsigned long newsp;
176 int *parent_tidptr, *child_tidptr;
177
178 clone_flags = regs.regs[4];
179 newsp = regs.regs[5];
180 if (!newsp)
181 newsp = regs.regs[29];
182 parent_tidptr = (int *) regs.regs[6];
183 child_tidptr = (int *) regs.regs[7];
184 return do_fork(clone_flags, newsp, &regs, 0,
185 parent_tidptr, child_tidptr);
186}
187
188/*
189 * sys_execve() executes a new program.
190 */
191asmlinkage int sys_execve(nabi_no_regargs struct pt_regs regs)
192{
193 int error;
194 char * filename;
195
196 filename = getname((char *) (long)regs.regs[4]);
197 error = PTR_ERR(filename);
198 if (IS_ERR(filename))
199 goto out;
200 error = do_execve(filename, (char **) (long)regs.regs[5],
201 (char **) (long)regs.regs[6], &regs);
202 putname(filename);
203
204out:
205 return error;
206}
207
208/*
209 * Compacrapability ...
210 */
211asmlinkage int sys_uname(struct old_utsname * name)
212{
213 if (name && !copy_to_user(name, &system_utsname, sizeof (*name)))
214 return 0;
215 return -EFAULT;
216}
217
218/*
219 * Compacrapability ...
220 */
221asmlinkage int sys_olduname(struct oldold_utsname * name)
222{
223 int error;
224
225 if (!name)
226 return -EFAULT;
227 if (!access_ok(VERIFY_WRITE,name,sizeof(struct oldold_utsname)))
228 return -EFAULT;
229
230 error = __copy_to_user(&name->sysname,&system_utsname.sysname,__OLD_UTS_LEN);
231 error -= __put_user(0,name->sysname+__OLD_UTS_LEN);
232 error -= __copy_to_user(&name->nodename,&system_utsname.nodename,__OLD_UTS_LEN);
233 error -= __put_user(0,name->nodename+__OLD_UTS_LEN);
234 error -= __copy_to_user(&name->release,&system_utsname.release,__OLD_UTS_LEN);
235 error -= __put_user(0,name->release+__OLD_UTS_LEN);
236 error -= __copy_to_user(&name->version,&system_utsname.version,__OLD_UTS_LEN);
237 error -= __put_user(0,name->version+__OLD_UTS_LEN);
238 error -= __copy_to_user(&name->machine,&system_utsname.machine,__OLD_UTS_LEN);
239	error -= __put_user(0,name->machine+__OLD_UTS_LEN);
240 error = error ? -EFAULT : 0;
241
242 return error;
243}
244
245asmlinkage int _sys_sysmips(int cmd, long arg1, int arg2, int arg3)
246{
247 int tmp, len;
248 char *name;
249
250 switch(cmd) {
251 case SETNAME: {
252 char nodename[__NEW_UTS_LEN + 1];
253
254 if (!capable(CAP_SYS_ADMIN))
255 return -EPERM;
256
257 name = (char *) arg1;
258
259 len = strncpy_from_user(nodename, name, __NEW_UTS_LEN);
260 if (len < 0)
261 return -EFAULT;
262
263 down_write(&uts_sem);
264 strncpy(system_utsname.nodename, nodename, len);
265 nodename[__NEW_UTS_LEN] = '\0';
266 strlcpy(system_utsname.nodename, nodename,
267 sizeof(system_utsname.nodename));
268 up_write(&uts_sem);
269 return 0;
270 }
271
272 case MIPS_ATOMIC_SET:
273 printk(KERN_CRIT "How did I get here?\n");
274 return -EINVAL;
275
276 case MIPS_FIXADE:
277 tmp = current->thread.mflags & ~3;
278 current->thread.mflags = tmp | (arg1 & 3);
279 return 0;
280
281 case FLUSH_CACHE:
282 __flush_cache_all();
283 return 0;
284
285 case MIPS_RDNVRAM:
286 return -EIO;
287 }
288
289 return -EINVAL;
290}
291
292/*
293 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
294 *
295 * This is really horribly ugly.
296 */
297asmlinkage int sys_ipc (uint call, int first, int second,
298 unsigned long third, void *ptr, long fifth)
299{
300 int version, ret;
301
302 version = call >> 16; /* hack for backward compatibility */
303 call &= 0xffff;
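	/*
	 * Example: a request issued as sys_ipc(SHMGET, key, size, flag,
	 * NULL, 0) carries version 0 in the upper bits and lands in the
	 * SHMGET arm below, i.e. it becomes sys_shmget(key, size, flag).
	 */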
304
305 switch (call) {
306 case SEMOP:
307 return sys_semtimedop (first, (struct sembuf *)ptr, second,
308 NULL);
309 case SEMTIMEDOP:
310 return sys_semtimedop (first, (struct sembuf *)ptr, second,
311 (const struct timespec __user *)fifth);
312 case SEMGET:
313 return sys_semget (first, second, third);
314 case SEMCTL: {
315 union semun fourth;
316 if (!ptr)
317 return -EINVAL;
318 if (get_user(fourth.__pad, (void **) ptr))
319 return -EFAULT;
320 return sys_semctl (first, second, third, fourth);
321 }
322
323 case MSGSND:
324 return sys_msgsnd (first, (struct msgbuf *) ptr,
325 second, third);
326 case MSGRCV:
327 switch (version) {
328 case 0: {
329 struct ipc_kludge tmp;
330 if (!ptr)
331 return -EINVAL;
332
333 if (copy_from_user(&tmp,
334 (struct ipc_kludge *) ptr,
335 sizeof (tmp)))
336 return -EFAULT;
337 return sys_msgrcv (first, tmp.msgp, second,
338 tmp.msgtyp, third);
339 }
340 default:
341 return sys_msgrcv (first,
342 (struct msgbuf *) ptr,
343 second, fifth, third);
344 }
345 case MSGGET:
346 return sys_msgget ((key_t) first, second);
347 case MSGCTL:
348 return sys_msgctl (first, second, (struct msqid_ds *) ptr);
349
350 case SHMAT:
351 switch (version) {
352 default: {
353 ulong raddr;
354 ret = do_shmat (first, (char *) ptr, second, &raddr);
355 if (ret)
356 return ret;
357 return put_user (raddr, (ulong *) third);
358 }
359 case 1: /* iBCS2 emulator entry point */
360 if (!segment_eq(get_fs(), get_ds()))
361 return -EINVAL;
362 return do_shmat (first, (char *) ptr, second, (ulong *) third);
363 }
364 case SHMDT:
365 return sys_shmdt ((char *)ptr);
366 case SHMGET:
367 return sys_shmget (first, second, third);
368 case SHMCTL:
369 return sys_shmctl (first, second,
370 (struct shmid_ds *) ptr);
371 default:
372 return -ENOSYS;
373 }
374}
375
376/*
377 * Native ABI that is O32 or N64 version
378 */
379asmlinkage long sys_shmat(int shmid, char __user *shmaddr,
380 int shmflg, unsigned long *addr)
381{
382 unsigned long raddr;
383 int err;
384
385 err = do_shmat(shmid, shmaddr, shmflg, &raddr);
386 if (err)
387 return err;
388
389 return put_user(raddr, addr);
390}
391
392/*
393 * Not implemented yet ...
394 */
395asmlinkage int sys_cachectl(char *addr, int nbytes, int op)
396{
397 return -ENOSYS;
398}
399
400/*
401 * If we ever come here the user sp is bad. Zap the process right away.
402 * Due to the bad stack signaling wouldn't work.
403 */
404asmlinkage void bad_stack(void)
405{
406 do_exit(SIGSEGV);
407}
diff --git a/arch/mips/kernel/sysirix.c b/arch/mips/kernel/sysirix.c
new file mode 100644
index 000000000000..f3bf0e43b8bb
--- /dev/null
+++ b/arch/mips/kernel/sysirix.c
@@ -0,0 +1,2179 @@
1/*
2 * sysirix.c: IRIX system call emulation.
3 *
4 * Copyright (C) 1996 David S. Miller
5 * Copyright (C) 1997 Miguel de Icaza
6 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle
7 */
8#include <linux/kernel.h>
9#include <linux/sched.h>
10#include <linux/binfmts.h>
11#include <linux/highuid.h>
12#include <linux/pagemap.h>
13#include <linux/mm.h>
14#include <linux/mman.h>
15#include <linux/slab.h>
16#include <linux/swap.h>
17#include <linux/errno.h>
18#include <linux/time.h>
19#include <linux/timex.h>
20#include <linux/times.h>
21#include <linux/elf.h>
22#include <linux/msg.h>
23#include <linux/shm.h>
24#include <linux/smp.h>
25#include <linux/smp_lock.h>
26#include <linux/utsname.h>
27#include <linux/file.h>
28#include <linux/vfs.h>
29#include <linux/namei.h>
30#include <linux/socket.h>
31#include <linux/security.h>
32#include <linux/syscalls.h>
33
34#include <asm/ptrace.h>
35#include <asm/page.h>
36#include <asm/uaccess.h>
37#include <asm/inventory.h>
38
39/* 2,191 lines of complete and utter shit coming up... */
40
41extern int max_threads;
42
43/* The sysmp commands supported thus far. */
44#define MP_NPROCS 1 /* # processor in complex */
45#define MP_NAPROCS 2 /* # active processors in complex */
46#define MP_PGSIZE 14 /* Return system page size in v1. */
47
48asmlinkage int irix_sysmp(struct pt_regs *regs)
49{
50 unsigned long cmd;
51 int base = 0;
52 int error = 0;
53
54 if(regs->regs[2] == 1000)
55 base = 1;
56 cmd = regs->regs[base + 4];
57 switch(cmd) {
58 case MP_PGSIZE:
59 error = PAGE_SIZE;
60 break;
61 case MP_NPROCS:
62 case MP_NAPROCS:
63 error = num_online_cpus();
64 break;
65 default:
66 printk("SYSMP[%s:%d]: Unsupported opcode %d\n",
67 current->comm, current->pid, (int)cmd);
68 error = -EINVAL;
69 break;
70 }
71
72 return error;
73}
74
75/* The prctl commands. */
76#define PR_MAXPROCS 1 /* Tasks/user. */
77#define PR_ISBLOCKED 2 /* If blocked, return 1. */
78#define PR_SETSTACKSIZE 3 /* Set largest task stack size. */
79#define PR_GETSTACKSIZE 4 /* Get largest task stack size. */
80#define PR_MAXPPROCS 5 /* Num parallel tasks. */
81#define PR_UNBLKONEXEC 6 /* When task exec/exit's, unblock. */
82#define PR_SETEXITSIG 8 /* When task exit's, set signal. */
83#define PR_RESIDENT 9 /* Make task unswappable. */
84#define PR_ATTACHADDR 10 /* (Re-)Connect a vma to a task. */
85#define PR_DETACHADDR 11 /* Disconnect a vma from a task. */
86#define PR_TERMCHILD 12 /* When parent sleeps with fishes, kill child. */
87#define PR_GETSHMASK 13 /* Get the sproc() share mask. */
88#define PR_GETNSHARE 14 /* Number of share group members. */
89#define PR_COREPID 15 /* Add task pid to name when it core. */
90#define PR_ATTACHADDRPERM 16 /* (Re-)Connect vma, with specified prot. */
91#define PR_PTHREADEXIT 17 /* Kill a pthread without prejudice. */
92
93asmlinkage int irix_prctl(struct pt_regs *regs)
94{
95 unsigned long cmd;
96 int error = 0, base = 0;
97
98 if (regs->regs[2] == 1000)
99 base = 1;
100 cmd = regs->regs[base + 4];
101 switch (cmd) {
102 case PR_MAXPROCS:
103 printk("irix_prctl[%s:%d]: Wants PR_MAXPROCS\n",
104 current->comm, current->pid);
105 error = max_threads;
106 break;
107
108 case PR_ISBLOCKED: {
109 struct task_struct *task;
110
111 printk("irix_prctl[%s:%d]: Wants PR_ISBLOCKED\n",
112 current->comm, current->pid);
113 read_lock(&tasklist_lock);
114 task = find_task_by_pid(regs->regs[base + 5]);
115 error = -ESRCH;
116 if (error)
117 error = (task->run_list.next != NULL);
118 read_unlock(&tasklist_lock);
119 /* Can _your_ OS find this out that fast? */
120 break;
121 }
122
123 case PR_SETSTACKSIZE: {
124 long value = regs->regs[base + 5];
125
126 printk("irix_prctl[%s:%d]: Wants PR_SETSTACKSIZE<%08lx>\n",
127 current->comm, current->pid, (unsigned long) value);
128 if (value > RLIM_INFINITY)
129 value = RLIM_INFINITY;
130 if (capable(CAP_SYS_ADMIN)) {
131 task_lock(current->group_leader);
132 current->signal->rlim[RLIMIT_STACK].rlim_max =
133 current->signal->rlim[RLIMIT_STACK].rlim_cur = value;
134 task_unlock(current->group_leader);
135 error = value;
136 break;
137 }
138 task_lock(current->group_leader);
139 if (value > current->signal->rlim[RLIMIT_STACK].rlim_max) {
140 error = -EINVAL;
141 task_unlock(current->group_leader);
142 break;
143 }
144 current->signal->rlim[RLIMIT_STACK].rlim_cur = value;
145 task_unlock(current->group_leader);
146 error = value;
147 break;
148 }
149
150 case PR_GETSTACKSIZE:
151 printk("irix_prctl[%s:%d]: Wants PR_GETSTACKSIZE\n",
152 current->comm, current->pid);
153 error = current->signal->rlim[RLIMIT_STACK].rlim_cur;
154 break;
155
156 case PR_MAXPPROCS:
157 printk("irix_prctl[%s:%d]: Wants PR_MAXPROCS\n",
158 current->comm, current->pid);
159 error = 1;
160 break;
161
162 case PR_UNBLKONEXEC:
163 printk("irix_prctl[%s:%d]: Wants PR_UNBLKONEXEC\n",
164 current->comm, current->pid);
165 error = -EINVAL;
166 break;
167
168 case PR_SETEXITSIG:
169 printk("irix_prctl[%s:%d]: Wants PR_SETEXITSIG\n",
170 current->comm, current->pid);
171
172 /* We can probably play some game where we set the task
173 * exit_code to some non-zero value when this is requested,
174 * and check whether exit_code is already set in do_exit().
175 */
176 error = -EINVAL;
177 break;
178
179 case PR_RESIDENT:
180 printk("irix_prctl[%s:%d]: Wants PR_RESIDENT\n",
181 current->comm, current->pid);
182 error = 0; /* Compatibility indeed. */
183 break;
184
185 case PR_ATTACHADDR:
186 printk("irix_prctl[%s:%d]: Wants PR_ATTACHADDR\n",
187 current->comm, current->pid);
188 error = -EINVAL;
189 break;
190
191 case PR_DETACHADDR:
192 printk("irix_prctl[%s:%d]: Wants PR_DETACHADDR\n",
193 current->comm, current->pid);
194 error = -EINVAL;
195 break;
196
197 case PR_TERMCHILD:
198 printk("irix_prctl[%s:%d]: Wants PR_TERMCHILD\n",
199 current->comm, current->pid);
200 error = -EINVAL;
201 break;
202
203 case PR_GETSHMASK:
204 printk("irix_prctl[%s:%d]: Wants PR_GETSHMASK\n",
205 current->comm, current->pid);
206 error = -EINVAL; /* Until I have the sproc() stuff in. */
207 break;
208
209 case PR_GETNSHARE:
210 error = 0; /* Until I have the sproc() stuff in. */
211 break;
212
213 case PR_COREPID:
214 printk("irix_prctl[%s:%d]: Wants PR_COREPID\n",
215 current->comm, current->pid);
216 error = -EINVAL;
217 break;
218
219 case PR_ATTACHADDRPERM:
220 printk("irix_prctl[%s:%d]: Wants PR_ATTACHADDRPERM\n",
221 current->comm, current->pid);
222 error = -EINVAL;
223 break;
224
225 case PR_PTHREADEXIT:
226 printk("irix_prctl[%s:%d]: Wants PR_PTHREADEXIT\n",
227 current->comm, current->pid);
228 do_exit(regs->regs[base + 5]);
229
230 default:
231		printk("irix_prctl[%s:%d]: Non-existent opcode %d\n",
232 current->comm, current->pid, (int)cmd);
233 error = -EINVAL;
234 break;
235 }
236
237 return error;
238}
239
240#undef DEBUG_PROCGRPS
241
242extern unsigned long irix_mapelf(int fd, struct elf_phdr *user_phdrp, int cnt);
243extern int getrusage(struct task_struct *p, int who, struct rusage __user *ru);
244extern char *prom_getenv(char *name);
245extern long prom_setenv(char *name, char *value);
246
247/* The syssgi commands supported thus far. */
248#define SGI_SYSID 1 /* Return unique per-machine identifier. */
249#define SGI_INVENT 5 /* Fetch inventory */
250# define SGI_INV_SIZEOF 1
251# define SGI_INV_READ 2
252#define SGI_RDNAME 6 /* Return string name of a process. */
253#define SGI_SETNVRAM 8 /* Set PROM variable. */
254#define SGI_GETNVRAM 9 /* Get PROM variable. */
255#define SGI_SETPGID 21 /* Set process group id. */
256#define SGI_SYSCONF 22 /* POSIX sysconf garbage. */
257#define SGI_PATHCONF 24 /* POSIX sysconf garbage. */
258#define SGI_SETGROUPS 40 /* POSIX sysconf garbage. */
259#define SGI_GETGROUPS 41 /* POSIX sysconf garbage. */
260#define SGI_RUSAGE 56 /* BSD style rusage(). */
261#define SGI_SSYNC 62 /* Synchronous fs sync. */
262#define SGI_GETSID 65 /* SysVr4 get session id. */
263#define SGI_ELFMAP 68 /* Map an elf image. */
264#define SGI_TOSSTSAVE 108 /* Toss saved vma's. */
265#define SGI_FP_BCOPY 129 /* Should FPU bcopy be used on this machine? */
266#define SGI_PHYSP 1011 /* Translate virtual into physical page. */
267
268asmlinkage int irix_syssgi(struct pt_regs *regs)
269{
270 unsigned long cmd;
271 int retval, base = 0;
272
273 if (regs->regs[2] == 1000)
274 base = 1;
275
276 cmd = regs->regs[base + 4];
277 switch(cmd) {
278 case SGI_SYSID: {
279 char *buf = (char *) regs->regs[base + 5];
280
281 /* XXX Use ethernet addr.... */
282 retval = clear_user(buf, 64);
283 break;
284 }
285#if 0
286 case SGI_RDNAME: {
287 int pid = (int) regs->regs[base + 5];
288 char *buf = (char *) regs->regs[base + 6];
289 struct task_struct *p;
290 char tcomm[sizeof(current->comm)];
291
292 if (!access_ok(VERIFY_WRITE, buf, sizeof(tcomm))) {
293 retval = -EFAULT;
294 break;
295 }
296 read_lock(&tasklist_lock);
297 p = find_task_by_pid(pid);
298 if (!p) {
299 read_unlock(&tasklist_lock);
300 retval = -ESRCH;
301 break;
302 }
303 get_task_comm(tcomm, p);
304 read_unlock(&tasklist_lock);
305
306 /* XXX Need to check sizes. */
307 copy_to_user(buf, tcomm, sizeof(tcomm));
308 retval = 0;
309 break;
310 }
311
312 case SGI_GETNVRAM: {
313 char *name = (char *) regs->regs[base+5];
314 char *buf = (char *) regs->regs[base+6];
315 char *value;
316 return -EINVAL; /* til I fix it */
317 if (!access_ok(VERIFY_WRITE, buf, 128)) {
318 retval = -EFAULT;
319 break;
320 }
321 value = prom_getenv(name); /* PROM lock? */
322 if (!value) {
323 retval = -EINVAL;
324 break;
325 }
326 /* Do I strlen() for the length? */
327 copy_to_user(buf, value, 128);
328 retval = 0;
329 break;
330 }
331
332 case SGI_SETNVRAM: {
333 char *name = (char *) regs->regs[base+5];
334 char *value = (char *) regs->regs[base+6];
335 return -EINVAL; /* til I fix it */
336 retval = prom_setenv(name, value);
337 /* XXX make sure retval conforms to syssgi(2) */
338 printk("[%s:%d] setnvram(\"%s\", \"%s\"): retval %d",
339 current->comm, current->pid, name, value, retval);
340/* if (retval == PROM_ENOENT)
341 retval = -ENOENT; */
342 break;
343 }
344#endif
345
346 case SGI_SETPGID: {
347#ifdef DEBUG_PROCGRPS
348 printk("[%s:%d] setpgid(%d, %d) ",
349 current->comm, current->pid,
350 (int) regs->regs[base + 5], (int)regs->regs[base + 6]);
351#endif
352 retval = sys_setpgid(regs->regs[base + 5], regs->regs[base + 6]);
353
354#ifdef DEBUG_PROCGRPS
355 printk("retval=%d\n", retval);
356#endif
357 }
358
359 case SGI_SYSCONF: {
360 switch(regs->regs[base + 5]) {
361 case 1:
362 retval = (MAX_ARG_PAGES >> 4); /* XXX estimate... */
363 goto out;
364 case 2:
365 retval = max_threads;
366 goto out;
367 case 3:
368 retval = HZ;
369 goto out;
370 case 4:
371 retval = NGROUPS_MAX;
372 goto out;
373 case 5:
374 retval = NR_OPEN;
375 goto out;
376 case 6:
377 retval = 1;
378 goto out;
379 case 7:
380 retval = 1;
381 goto out;
382 case 8:
383 retval = 199009;
384 goto out;
385 case 11:
386 retval = PAGE_SIZE;
387 goto out;
388 case 12:
389 retval = 4;
390 goto out;
391 case 25:
392 case 26:
393 case 27:
394 case 28:
395 case 29:
396 case 30:
397 retval = 0;
398 goto out;
399 case 31:
400 retval = 32;
401 goto out;
402 default:
403 retval = -EINVAL;
404 goto out;
405 };
406 }
407
408 case SGI_SETGROUPS:
409 retval = sys_setgroups((int) regs->regs[base + 5],
410 (gid_t *) regs->regs[base + 6]);
411 break;
412
413 case SGI_GETGROUPS:
414 retval = sys_getgroups((int) regs->regs[base + 5],
415 (gid_t *) regs->regs[base + 6]);
416 break;
417
418 case SGI_RUSAGE: {
419 struct rusage *ru = (struct rusage *) regs->regs[base + 6];
420
421 switch((int) regs->regs[base + 5]) {
422 case 0:
423 /* rusage self */
424 retval = getrusage(current, RUSAGE_SELF, ru);
425 goto out;
426
427 case -1:
428 /* rusage children */
429 retval = getrusage(current, RUSAGE_CHILDREN, ru);
430 goto out;
431
432 default:
433 retval = -EINVAL;
434 goto out;
435 };
436 }
437
438 case SGI_SSYNC:
439 sys_sync();
440 retval = 0;
441 break;
442
443 case SGI_GETSID:
444#ifdef DEBUG_PROCGRPS
445 printk("[%s:%d] getsid(%d) ", current->comm, current->pid,
446 (int) regs->regs[base + 5]);
447#endif
448 retval = sys_getsid(regs->regs[base + 5]);
449#ifdef DEBUG_PROCGRPS
450 printk("retval=%d\n", retval);
451#endif
452 break;
453
454 case SGI_ELFMAP:
455 retval = irix_mapelf((int) regs->regs[base + 5],
456 (struct elf_phdr *) regs->regs[base + 6],
457 (int) regs->regs[base + 7]);
458 break;
459
460 case SGI_TOSSTSAVE:
461 /* XXX We don't need to do anything? */
462 retval = 0;
463 break;
464
465 case SGI_FP_BCOPY:
466 retval = 0;
467 break;
468
469 case SGI_PHYSP: {
470 unsigned long addr = regs->regs[base + 5];
471 int *pageno = (int *) (regs->regs[base + 6]);
472 struct mm_struct *mm = current->mm;
473 pgd_t *pgdp;
474 pmd_t *pmdp;
475 pte_t *ptep;
476
477 if (!access_ok(VERIFY_WRITE, pageno, sizeof(int)))
478 return -EFAULT;
479
480 down_read(&mm->mmap_sem);
481 pgdp = pgd_offset(mm, addr);
482 pmdp = pmd_offset(pgdp, addr);
483 ptep = pte_offset(pmdp, addr);
484 retval = -EINVAL;
485 if (ptep) {
486 pte_t pte = *ptep;
487
488 if (pte_val(pte) & (_PAGE_VALID | _PAGE_PRESENT)) {
489 retval = put_user((pte_val(pte) & PAGE_MASK) >>
490 PAGE_SHIFT, pageno);
491 }
492 }
493 up_read(&mm->mmap_sem);
494 break;
495 }
496
497 case SGI_INVENT: {
498 int arg1 = (int) regs->regs [base + 5];
499 void *buffer = (void *) regs->regs [base + 6];
500 int count = (int) regs->regs [base + 7];
501
502 switch (arg1) {
503 case SGI_INV_SIZEOF:
504 retval = sizeof (inventory_t);
505 break;
506 case SGI_INV_READ:
507 retval = dump_inventory_to_user (buffer, count);
508 break;
509 default:
510 retval = -EINVAL;
511 }
512 break;
513 }
514
515 default:
516 printk("irix_syssgi: Unsupported command %d\n", (int)cmd);
517 retval = -EINVAL;
518 break;
519 };
520
521out:
522 return retval;
523}
524
525asmlinkage int irix_gtime(struct pt_regs *regs)
526{
527 return get_seconds();
528}
529
530/*
531 * IRIX is completely broken... it returns 0 on success, otherwise
532 * ENOMEM.
533 */
534asmlinkage int irix_brk(unsigned long brk)
535{
536 unsigned long rlim;
537 unsigned long newbrk, oldbrk;
538 struct mm_struct *mm = current->mm;
539 int ret;
540
541 down_write(&mm->mmap_sem);
542 if (brk < mm->end_code) {
543 ret = -ENOMEM;
544 goto out;
545 }
546
547 newbrk = PAGE_ALIGN(brk);
548 oldbrk = PAGE_ALIGN(mm->brk);
549 if (oldbrk == newbrk) {
550 mm->brk = brk;
551 ret = 0;
552 goto out;
553 }
554
555 /*
556 * Always allow shrinking brk
557 */
558 if (brk <= mm->brk) {
559 mm->brk = brk;
560 do_munmap(mm, newbrk, oldbrk-newbrk);
561 ret = 0;
562 goto out;
563 }
564 /*
565 * Check against rlimit and stack..
566 */
567 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
568 if (rlim >= RLIM_INFINITY)
569 rlim = ~0;
570 if (brk - mm->end_code > rlim) {
571 ret = -ENOMEM;
572 goto out;
573 }
574
575 /*
576 * Check against existing mmap mappings.
577 */
578 if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE)) {
579 ret = -ENOMEM;
580 goto out;
581 }
582
583 /*
584 * Check if we have enough memory..
585 */
586 if (security_vm_enough_memory((newbrk-oldbrk) >> PAGE_SHIFT)) {
587 ret = -ENOMEM;
588 goto out;
589 }
590
591 /*
592 * Ok, looks good - let it rip.
593 */
594 mm->brk = brk;
595 do_brk(oldbrk, newbrk-oldbrk);
596 ret = 0;
597
598out:
599 up_write(&mm->mmap_sem);
600 return ret;
601}
602
603asmlinkage int irix_getpid(struct pt_regs *regs)
604{
605 regs->regs[3] = current->real_parent->pid;
606 return current->pid;
607}
608
609asmlinkage int irix_getuid(struct pt_regs *regs)
610{
611 regs->regs[3] = current->euid;
612 return current->uid;
613}
614
615asmlinkage int irix_getgid(struct pt_regs *regs)
616{
617 regs->regs[3] = current->egid;
618 return current->gid;
619}
620
621asmlinkage int irix_stime(int value)
622{
623 int err;
624 struct timespec tv;
625
626 tv.tv_sec = value;
627 tv.tv_nsec = 0;
628 err = security_settime(&tv, NULL);
629 if (err)
630 return err;
631
632 write_seqlock_irq(&xtime_lock);
633 xtime.tv_sec = value;
634 xtime.tv_nsec = 0;
635 time_adjust = 0; /* stop active adjtime() */
636 time_status |= STA_UNSYNC;
637 time_maxerror = NTP_PHASE_LIMIT;
638 time_esterror = NTP_PHASE_LIMIT;
639 write_sequnlock_irq(&xtime_lock);
640
641 return 0;
642}
643
644static inline void jiffiestotv(unsigned long jiffies, struct timeval *value)
645{
646 value->tv_usec = (jiffies % HZ) * (1000000 / HZ);
647 value->tv_sec = jiffies / HZ;
648}
649
650static inline void getitimer_real(struct itimerval *value)
651{
652 register unsigned long val, interval;
653
654 interval = current->it_real_incr;
655 val = 0;
656 if (del_timer(&current->real_timer)) {
657 unsigned long now = jiffies;
658 val = current->real_timer.expires;
659 add_timer(&current->real_timer);
660 /* look out for negative/zero itimer.. */
661 if (val <= now)
662 val = now+1;
663 val -= now;
664 }
665 jiffiestotv(val, &value->it_value);
666 jiffiestotv(interval, &value->it_interval);
667}
668
669asmlinkage unsigned int irix_alarm(unsigned int seconds)
670{
671 struct itimerval it_new, it_old;
672 unsigned int oldalarm;
673
674 if (!seconds) {
675 getitimer_real(&it_old);
676 del_timer(&current->real_timer);
677 } else {
678 it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
679 it_new.it_value.tv_sec = seconds;
680 it_new.it_value.tv_usec = 0;
681 do_setitimer(ITIMER_REAL, &it_new, &it_old);
682 }
683 oldalarm = it_old.it_value.tv_sec;
684 /*
685 * ehhh.. We can't return 0 if we have an alarm pending ...
686 * And we'd better return too much than too little anyway
687 */
688 if (it_old.it_value.tv_usec)
689 oldalarm++;
690
691 return oldalarm;
692}
693
694asmlinkage int irix_pause(void)
695{
696 current->state = TASK_INTERRUPTIBLE;
697 schedule();
698
699 return -EINTR;
700}
701
702/* XXX need more than this... */
703asmlinkage int irix_mount(char *dev_name, char *dir_name, unsigned long flags,
704 char *type, void *data, int datalen)
705{
706 printk("[%s:%d] irix_mount(%p,%p,%08lx,%p,%p,%d)\n",
707 current->comm, current->pid,
708 dev_name, dir_name, flags, type, data, datalen);
709
710 return sys_mount(dev_name, dir_name, type, flags, data);
711}
712
713struct irix_statfs {
714 short f_type;
715 long f_bsize, f_frsize, f_blocks, f_bfree, f_files, f_ffree;
716 char f_fname[6], f_fpack[6];
717};
718
719asmlinkage int irix_statfs(const char *path, struct irix_statfs *buf,
720 int len, int fs_type)
721{
722 struct nameidata nd;
723 struct kstatfs kbuf;
724 int error, i;
725
726 /* We don't support this feature yet. */
727 if (fs_type) {
728 error = -EINVAL;
729 goto out;
730 }
731 if (!access_ok(VERIFY_WRITE, buf, sizeof(struct irix_statfs))) {
732 error = -EFAULT;
733 goto out;
734 }
735 error = user_path_walk(path, &nd);
736 if (error)
737 goto out;
738
739 error = vfs_statfs(nd.dentry->d_inode->i_sb, &kbuf);
740 if (error)
741 goto dput_and_out;
742
743 __put_user(kbuf.f_type, &buf->f_type);
744 __put_user(kbuf.f_bsize, &buf->f_bsize);
745 __put_user(kbuf.f_frsize, &buf->f_frsize);
746 __put_user(kbuf.f_blocks, &buf->f_blocks);
747 __put_user(kbuf.f_bfree, &buf->f_bfree);
748 __put_user(kbuf.f_files, &buf->f_files);
749 __put_user(kbuf.f_ffree, &buf->f_ffree);
750 for (i = 0; i < 6; i++) {
751 __put_user(0, &buf->f_fname[i]);
752 __put_user(0, &buf->f_fpack[i]);
753 }
754 error = 0;
755
756dput_and_out:
757 path_release(&nd);
758out:
759 return error;
760}
761
762asmlinkage int irix_fstatfs(unsigned int fd, struct irix_statfs *buf)
763{
764 struct kstatfs kbuf;
765 struct file *file;
766 int error, i;
767
768 if (!access_ok(VERIFY_WRITE, buf, sizeof(struct irix_statfs))) {
769 error = -EFAULT;
770 goto out;
771 }
772 if (!(file = fget(fd))) {
773 error = -EBADF;
774 goto out;
775 }
776
777 error = vfs_statfs(file->f_dentry->d_inode->i_sb, &kbuf);
778 if (error)
779 goto out_f;
780
781 __put_user(kbuf.f_type, &buf->f_type);
782 __put_user(kbuf.f_bsize, &buf->f_bsize);
783 __put_user(kbuf.f_frsize, &buf->f_frsize);
784 __put_user(kbuf.f_blocks, &buf->f_blocks);
785 __put_user(kbuf.f_bfree, &buf->f_bfree);
786 __put_user(kbuf.f_files, &buf->f_files);
787 __put_user(kbuf.f_ffree, &buf->f_ffree);
788 for(i = 0; i < 6; i++) {
789 __put_user(0, &buf->f_fname[i]);
790 __put_user(0, &buf->f_fpack[i]);
791 }
792
793out_f:
794 fput(file);
795out:
796 return error;
797}
798
799asmlinkage int irix_setpgrp(int flags)
800{
801 int error;
802
803#ifdef DEBUG_PROCGRPS
804 printk("[%s:%d] setpgrp(%d) ", current->comm, current->pid, flags);
805#endif
806 if(!flags)
807 error = process_group(current);
808 else
809 error = sys_setsid();
810#ifdef DEBUG_PROCGRPS
811 printk("returning %d\n", process_group(current));
812#endif
813
814 return error;
815}
816
817asmlinkage int irix_times(struct tms * tbuf)
818{
819 int err = 0;
820
821 if (tbuf) {
822 if (!access_ok(VERIFY_WRITE,tbuf,sizeof *tbuf))
823 return -EFAULT;
824 err |= __put_user(current->utime, &tbuf->tms_utime);
825 err |= __put_user(current->stime, &tbuf->tms_stime);
826 err |= __put_user(current->signal->cutime, &tbuf->tms_cutime);
827 err |= __put_user(current->signal->cstime, &tbuf->tms_cstime);
828 }
829
830 return err;
831}
832
833asmlinkage int irix_exec(struct pt_regs *regs)
834{
835 int error, base = 0;
836 char *filename;
837
838 if(regs->regs[2] == 1000)
839 base = 1;
840 filename = getname((char *) (long)regs->regs[base + 4]);
841 error = PTR_ERR(filename);
842 if (IS_ERR(filename))
843 return error;
844
845 error = do_execve(filename, (char **) (long)regs->regs[base + 5],
846 (char **) 0, regs);
847 putname(filename);
848
849 return error;
850}
851
852asmlinkage int irix_exece(struct pt_regs *regs)
853{
854 int error, base = 0;
855 char *filename;
856
857 if (regs->regs[2] == 1000)
858 base = 1;
859 filename = getname((char *) (long)regs->regs[base + 4]);
860 error = PTR_ERR(filename);
861 if (IS_ERR(filename))
862 return error;
863 error = do_execve(filename, (char **) (long)regs->regs[base + 5],
864 (char **) (long)regs->regs[base + 6], regs);
865 putname(filename);
866
867 return error;
868}
869
870asmlinkage unsigned long irix_gethostid(void)
871{
872 printk("[%s:%d]: irix_gethostid() called...\n",
873 current->comm, current->pid);
874
875 return -EINVAL;
876}
877
878asmlinkage unsigned long irix_sethostid(unsigned long val)
879{
880 printk("[%s:%d]: irix_sethostid(%08lx) called...\n",
881 current->comm, current->pid, val);
882
883 return -EINVAL;
884}
885
886asmlinkage int irix_socket(int family, int type, int protocol)
887{
888 switch(type) {
889 case 1:
890 type = SOCK_DGRAM;
891 break;
892
893 case 2:
894 type = SOCK_STREAM;
895 break;
896
897 case 3:
898 type = 9; /* Invalid... */
899 break;
900
901 case 4:
902 type = SOCK_RAW;
903 break;
904
905 case 5:
906 type = SOCK_RDM;
907 break;
908
909 case 6:
910 type = SOCK_SEQPACKET;
911 break;
912
913 default:
914 break;
915 }
916
917 return sys_socket(family, type, protocol);
918}
919
920asmlinkage int irix_getdomainname(char *name, int len)
921{
922 int error;
923
924 if (!access_ok(VERIFY_WRITE, name, len))
925 return -EFAULT;
926
927 down_read(&uts_sem);
928 if (len > __NEW_UTS_LEN)
929 len = __NEW_UTS_LEN;
930 error = 0;
931 if (copy_to_user(name, system_utsname.domainname, len))
932 error = -EFAULT;
933 up_read(&uts_sem);
934
935 return error;
936}
937
938asmlinkage unsigned long irix_getpagesize(void)
939{
940 return PAGE_SIZE;
941}
942
943asmlinkage int irix_msgsys(int opcode, unsigned long arg0, unsigned long arg1,
944 unsigned long arg2, unsigned long arg3,
945 unsigned long arg4)
946{
947 switch (opcode) {
948 case 0:
949 return sys_msgget((key_t) arg0, (int) arg1);
950 case 1:
951 return sys_msgctl((int) arg0, (int) arg1, (struct msqid_ds *)arg2);
952 case 2:
953 return sys_msgrcv((int) arg0, (struct msgbuf *) arg1,
954 (size_t) arg2, (long) arg3, (int) arg4);
955 case 3:
956 return sys_msgsnd((int) arg0, (struct msgbuf *) arg1,
957 (size_t) arg2, (int) arg3);
958 default:
959 return -EINVAL;
960 }
961}
962
963asmlinkage int irix_shmsys(int opcode, unsigned long arg0, unsigned long arg1,
964 unsigned long arg2, unsigned long arg3)
965{
966 switch (opcode) {
967 case 0:
968 return do_shmat((int) arg0, (char *)arg1, (int) arg2,
969 (unsigned long *) arg3);
970 case 1:
971 return sys_shmctl((int)arg0, (int)arg1, (struct shmid_ds *)arg2);
972 case 2:
973 return sys_shmdt((char *)arg0);
974 case 3:
975 return sys_shmget((key_t) arg0, (int) arg1, (int) arg2);
976 default:
977 return -EINVAL;
978 }
979}
980
981asmlinkage int irix_semsys(int opcode, unsigned long arg0, unsigned long arg1,
982 unsigned long arg2, int arg3)
983{
984 switch (opcode) {
985 case 0:
986 return sys_semctl((int) arg0, (int) arg1, (int) arg2,
987 (union semun) arg3);
988 case 1:
989 return sys_semget((key_t) arg0, (int) arg1, (int) arg2);
990 case 2:
991 return sys_semop((int) arg0, (struct sembuf *)arg1,
992 (unsigned int) arg2);
993 default:
994 return -EINVAL;
995 }
996}
997
998static inline loff_t llseek(struct file *file, loff_t offset, int origin)
999{
1000 loff_t (*fn)(struct file *, loff_t, int);
1001 loff_t retval;
1002
1003 fn = default_llseek;
1004 if (file->f_op && file->f_op->llseek)
1005 fn = file->f_op->llseek;
1006 lock_kernel();
1007 retval = fn(file, offset, origin);
1008 unlock_kernel();
1009 return retval;
1010}
1011
1012asmlinkage int irix_lseek64(int fd, int _unused, int offhi, int offlow,
1013 int origin)
1014{
1015 int retval;
1016 struct file * file;
1017 loff_t offset;
1018
1019 retval = -EBADF;
1020 file = fget(fd);
1021 if (!file)
1022 goto bad;
1023 retval = -EINVAL;
1024 if (origin > 2)
1025 goto out_putf;
1026
1027 offset = llseek(file, ((loff_t) offhi << 32) | offlow, origin);
1028 retval = (int) offset;
1029
1030out_putf:
1031 fput(file);
1032bad:
1033 return retval;
1034}
1035
1036asmlinkage int irix_sginap(int ticks)
1037{
1038 current->state = TASK_INTERRUPTIBLE;
1039 schedule_timeout(ticks);
1040 return 0;
1041}
1042
1043asmlinkage int irix_sgikopt(char *istring, char *ostring, int len)
1044{
1045 return -EINVAL;
1046}
1047
1048asmlinkage int irix_gettimeofday(struct timeval *tv)
1049{
1050 time_t sec;
1051 long nsec, seq;
1052 int err;
1053
1054 if (!access_ok(VERIFY_WRITE, tv, sizeof(struct timeval)))
1055 return -EFAULT;
1056
1057 do {
1058 seq = read_seqbegin(&xtime_lock);
1059 sec = xtime.tv_sec;
1060 nsec = xtime.tv_nsec;
1061 } while (read_seqretry(&xtime_lock, seq));
1062
1063 err = __put_user(sec, &tv->tv_sec);
1064 err |= __put_user((nsec / 1000), &tv->tv_usec);
1065
1066 return err;
1067}
1068
1069#define IRIX_MAP_AUTOGROW 0x40
1070
1071asmlinkage unsigned long irix_mmap32(unsigned long addr, size_t len, int prot,
1072 int flags, int fd, off_t offset)
1073{
1074 struct file *file = NULL;
1075 unsigned long retval;
1076
1077 if (!(flags & MAP_ANONYMOUS)) {
1078 if (!(file = fget(fd)))
1079 return -EBADF;
1080
1081 /* Ok, bad taste hack follows, try to think of something else
1082 * when reading this. */
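/*
 * Sketch of what the hack does: IRIX_MAP_AUTOGROW presumably asks for a
 * mapping whose backing file grows to cover the mapped range.  We emulate
 * that up front: if the file is currently shorter than offset + len, seek
 * to the last byte of the range, write a single '\0' byte to extend the
 * file, and then restore the old file position.
 */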
1083 if (flags & IRIX_MAP_AUTOGROW) {
1084 unsigned long old_pos;
1085 long max_size = offset + len;
1086
1087 if (max_size > file->f_dentry->d_inode->i_size) {
1088 old_pos = sys_lseek (fd, max_size - 1, 0);
1089 sys_write (fd, "", 1);
1090 sys_lseek (fd, old_pos, 0);
1091 }
1092 }
1093 }
1094
1095 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
1096
1097 down_write(&current->mm->mmap_sem);
1098 retval = do_mmap(file, addr, len, prot, flags, offset);
1099 up_write(&current->mm->mmap_sem);
1100 if (file)
1101 fput(file);
1102
1103 return retval;
1104}
1105
1106asmlinkage int irix_madvise(unsigned long addr, int len, int behavior)
1107{
1108 printk("[%s:%d] Wheee.. irix_madvise(%08lx,%d,%d)\n",
1109 current->comm, current->pid, addr, len, behavior);
1110
1111 return -EINVAL;
1112}
1113
1114asmlinkage int irix_pagelock(char *addr, int len, int op)
1115{
1116 printk("[%s:%d] Wheee.. irix_pagelock(%p,%d,%d)\n",
1117 current->comm, current->pid, addr, len, op);
1118
1119 return -EINVAL;
1120}
1121
1122asmlinkage int irix_quotactl(struct pt_regs *regs)
1123{
1124 printk("[%s:%d] Wheee.. irix_quotactl()\n",
1125 current->comm, current->pid);
1126
1127 return -EINVAL;
1128}
1129
1130asmlinkage int irix_BSDsetpgrp(int pid, int pgrp)
1131{
1132 int error;
1133
1134#ifdef DEBUG_PROCGRPS
1135 printk("[%s:%d] BSDsetpgrp(%d, %d) ", current->comm, current->pid,
1136 pid, pgrp);
1137#endif
1138 if(!pid)
1139 pid = current->pid;
1140
1141 /* Wheee, weird sysv thing... */
1142 if ((pgrp == 0) && (pid == current->pid))
1143 error = sys_setsid();
1144 else
1145 error = sys_setpgid(pid, pgrp);
1146
1147#ifdef DEBUG_PROCGRPS
1148 printk("error = %d\n", error);
1149#endif
1150
1151 return error;
1152}
1153
1154asmlinkage int irix_systeminfo(int cmd, char *buf, int cnt)
1155{
1156 printk("[%s:%d] Wheee.. irix_systeminfo(%d,%p,%d)\n",
1157 current->comm, current->pid, cmd, buf, cnt);
1158
1159 return -EINVAL;
1160}
1161
1162struct iuname {
1163 char sysname[257], nodename[257], release[257];
1164 char version[257], machine[257];
1165 char m_type[257], base_rel[257];
1166 char _unused0[257], _unused1[257], _unused2[257];
1167 char _unused3[257], _unused4[257], _unused5[257];
1168};
1169
1170asmlinkage int irix_uname(struct iuname *buf)
1171{
1172 down_read(&uts_sem);
1173 if (copy_to_user(buf->sysname, system_utsname.sysname, 65)
1174 || copy_to_user(buf->nodename, system_utsname.nodename, 65)
1175 || copy_to_user(buf->release, system_utsname.release, 65)
1176 || copy_to_user(buf->version, system_utsname.version, 65)
1177 || copy_to_user(buf->machine, system_utsname.machine, 65)) {
up_read(&uts_sem);
1178 return -EFAULT;
1179 }
1180 up_read(&uts_sem);
1181
1182 return 1;
1183}
1184
1185#undef DEBUG_XSTAT
1186
1187static int irix_xstat32_xlate(struct kstat *stat, void *ubuf)
1188{
1189 struct xstat32 {
1190 u32 st_dev, st_pad1[3], st_ino, st_mode, st_nlink, st_uid, st_gid;
1191 u32 st_rdev, st_pad2[2], st_size, st_pad3;
1192 u32 st_atime0, st_atime1;
1193 u32 st_mtime0, st_mtime1;
1194 u32 st_ctime0, st_ctime1;
1195 u32 st_blksize, st_blocks;
1196 char st_fstype[16];
1197 u32 st_pad4[8];
1198 } ub;
1199
1200 if (!sysv_valid_dev(stat->dev) || !sysv_valid_dev(stat->rdev))
1201 return -EOVERFLOW;
1202 ub.st_dev = sysv_encode_dev(stat->dev);
1203 ub.st_ino = stat->ino;
1204 ub.st_mode = stat->mode;
1205 ub.st_nlink = stat->nlink;
1206 SET_UID(ub.st_uid, stat->uid);
1207 SET_GID(ub.st_gid, stat->gid);
1208 ub.st_rdev = sysv_encode_dev(stat->rdev);
1209#if BITS_PER_LONG == 32
1210 if (stat->size > MAX_NON_LFS)
1211 return -EOVERFLOW;
1212#endif
1213 ub.st_size = stat->size;
1214 ub.st_atime0 = stat->atime.tv_sec;
1215 ub.st_atime1 = stat->atime.tv_nsec;
1216 ub.st_mtime0 = stat->mtime.tv_sec;
1217 ub.st_mtime1 = stat->mtime.tv_nsec;
1218 ub.st_ctime0 = stat->ctime.tv_sec;
1219 ub.st_ctime1 = stat->ctime.tv_nsec;
1220 ub.st_blksize = stat->blksize;
1221 ub.st_blocks = stat->blocks;
1222 strcpy (ub.st_fstype, "efs");
1223
1224 return copy_to_user(ubuf, &ub, sizeof(ub)) ? -EFAULT : 0;
1225}
1226
1227static int irix_xstat64_xlate(struct kstat *stat, void *ubuf)
1228{
1229 struct xstat64 {
1230 u32 st_dev; s32 st_pad1[3];
1231 unsigned long long st_ino;
1232 u32 st_mode;
1233 u32 st_nlink; s32 st_uid; s32 st_gid; u32 st_rdev;
1234 s32 st_pad2[2];
1235 long long st_size;
1236 s32 st_pad3;
1237 struct { s32 tv_sec, tv_nsec; } st_atime, st_mtime, st_ctime;
1238 s32 st_blksize;
1239 long long st_blocks;
1240 char st_fstype[16];
1241 s32 st_pad4[8];
1242 } ks;
1243
1244 if (!sysv_valid_dev(stat->dev) || !sysv_valid_dev(stat->rdev))
1245 return -EOVERFLOW;
1246
1247 ks.st_dev = sysv_encode_dev(stat->dev);
1248 ks.st_pad1[0] = ks.st_pad1[1] = ks.st_pad1[2] = 0;
1249 ks.st_ino = (unsigned long long) stat->ino;
1250 ks.st_mode = (u32) stat->mode;
1251 ks.st_nlink = (u32) stat->nlink;
1252 ks.st_uid = (s32) stat->uid;
1253 ks.st_gid = (s32) stat->gid;
1254 ks.st_rdev = sysv_encode_dev (stat->rdev);
1255 ks.st_pad2[0] = ks.st_pad2[1] = 0;
1256 ks.st_size = (long long) stat->size;
1257 ks.st_pad3 = 0;
1258
1259 /* XXX hackety hack... */
1260 ks.st_atime.tv_sec = (s32) stat->atime.tv_sec;
1261 ks.st_atime.tv_nsec = stat->atime.tv_nsec;
1262 ks.st_mtime.tv_sec = (s32) stat->mtime.tv_sec;
1263 ks.st_mtime.tv_nsec = stat->mtime.tv_nsec;
1264 ks.st_ctime.tv_sec = (s32) stat->ctime.tv_sec;
1265 ks.st_ctime.tv_nsec = stat->ctime.tv_nsec;
1266
1267 ks.st_blksize = (s32) stat->blksize;
1268 ks.st_blocks = (long long) stat->blocks;
1269 memset(ks.st_fstype, 0, 16);
1270 ks.st_pad4[0] = ks.st_pad4[1] = ks.st_pad4[2] = ks.st_pad4[3] = 0;
1271 ks.st_pad4[4] = ks.st_pad4[5] = ks.st_pad4[6] = ks.st_pad4[7] = 0;
1272
1273 /* Now write it all back. */
1274 return copy_to_user(ubuf, &ks, sizeof(ks)) ? -EFAULT : 0;
1275}
1276
1277asmlinkage int irix_xstat(int version, char *filename, struct stat *statbuf)
1278{
1279 int retval;
1280 struct kstat stat;
1281
1282#ifdef DEBUG_XSTAT
1283 printk("[%s:%d] Wheee.. irix_xstat(%d,%s,%p) ",
1284 current->comm, current->pid, version, filename, statbuf);
1285#endif
1286
1287 retval = vfs_stat(filename, &stat);
1288 if (!retval) {
1289 switch(version) {
1290 case 2:
1291 retval = irix_xstat32_xlate(&stat, statbuf);
1292 break;
1293 case 3:
1294 retval = irix_xstat64_xlate(&stat, statbuf);
1295 break;
1296 default:
1297 retval = -EINVAL;
1298 }
1299 }
1300 return retval;
1301}
1302
1303asmlinkage int irix_lxstat(int version, char *filename, struct stat *statbuf)
1304{
1305 int error;
1306 struct kstat stat;
1307
1308#ifdef DEBUG_XSTAT
1309 printk("[%s:%d] Wheee.. irix_lxstat(%d,%s,%p) ",
1310 current->comm, current->pid, version, filename, statbuf);
1311#endif
1312
1313 error = vfs_lstat(filename, &stat);
1314
1315 if (!error) {
1316 switch (version) {
1317 case 2:
1318 error = irix_xstat32_xlate(&stat, statbuf);
1319 break;
1320 case 3:
1321 error = irix_xstat64_xlate(&stat, statbuf);
1322 break;
1323 default:
1324 error = -EINVAL;
1325 }
1326 }
1327 return error;
1328}
1329
1330asmlinkage int irix_fxstat(int version, int fd, struct stat *statbuf)
1331{
1332 int error;
1333 struct kstat stat;
1334
1335#ifdef DEBUG_XSTAT
1336 printk("[%s:%d] Wheee.. irix_fxstat(%d,%d,%p) ",
1337 current->comm, current->pid, version, fd, statbuf);
1338#endif
1339
1340 error = vfs_fstat(fd, &stat);
1341 if (!error) {
1342 switch (version) {
1343 case 2:
1344 error = irix_xstat32_xlate(&stat, statbuf);
1345 break;
1346 case 3:
1347 error = irix_xstat64_xlate(&stat, statbuf);
1348 break;
1349 default:
1350 error = -EINVAL;
1351 }
1352 }
1353 return error;
1354}
1355
1356asmlinkage int irix_xmknod(int ver, char *filename, int mode, unsigned dev)
1357{
1358 int retval;
1359 printk("[%s:%d] Wheee.. irix_xmknod(%d,%s,%x,%x)\n",
1360 current->comm, current->pid, ver, filename, mode, dev);
1361
1362 switch(ver) {
1363 case 2:
1364 /* shouldn't we convert here as well as on stat()? */
1365 retval = sys_mknod(filename, mode, dev);
1366 break;
1367
1368 default:
1369 retval = -EINVAL;
1370 break;
1371 };
1372
1373 return retval;
1374}
1375
1376asmlinkage int irix_swapctl(int cmd, char *arg)
1377{
1378 printk("[%s:%d] Wheee.. irix_swapctl(%d,%p)\n",
1379 current->comm, current->pid, cmd, arg);
1380
1381 return -EINVAL;
1382}
1383
1384struct irix_statvfs {
1385 u32 f_bsize; u32 f_frsize; u32 f_blocks;
1386 u32 f_bfree; u32 f_bavail; u32 f_files; u32 f_ffree; u32 f_favail;
1387 u32 f_fsid; char f_basetype[16];
1388 u32 f_flag; u32 f_namemax;
1389 char f_fstr[32]; u32 f_filler[16];
1390};
1391
1392asmlinkage int irix_statvfs(char *fname, struct irix_statvfs *buf)
1393{
1394 struct nameidata nd;
1395 struct kstatfs kbuf;
1396 int error, i;
1397
1398 printk("[%s:%d] Wheee.. irix_statvfs(%s,%p)\n",
1399 current->comm, current->pid, fname, buf);
1400 if (!access_ok(VERIFY_WRITE, buf, sizeof(struct irix_statvfs))) {
1401 error = -EFAULT;
1402 goto out;
1403 }
1404 error = user_path_walk(fname, &nd);
1405 if (error)
1406 goto out;
1407 error = vfs_statfs(nd.dentry->d_inode->i_sb, &kbuf);
1408 if (error)
1409 goto dput_and_out;
1410
1411 __put_user(kbuf.f_bsize, &buf->f_bsize);
1412 __put_user(kbuf.f_frsize, &buf->f_frsize);
1413 __put_user(kbuf.f_blocks, &buf->f_blocks);
1414 __put_user(kbuf.f_bfree, &buf->f_bfree);
1415 __put_user(kbuf.f_bfree, &buf->f_bavail); /* XXX hackety hack... */
1416 __put_user(kbuf.f_files, &buf->f_files);
1417 __put_user(kbuf.f_ffree, &buf->f_ffree);
1418 __put_user(kbuf.f_ffree, &buf->f_favail); /* XXX hackety hack... */
1419#ifdef __MIPSEB__
1420 __put_user(kbuf.f_fsid.val[1], &buf->f_fsid);
1421#else
1422 __put_user(kbuf.f_fsid.val[0], &buf->f_fsid);
1423#endif
1424 for (i = 0; i < 16; i++)
1425 __put_user(0, &buf->f_basetype[i]);
1426 __put_user(0, &buf->f_flag);
1427 __put_user(kbuf.f_namelen, &buf->f_namemax);
1428 for (i = 0; i < 32; i++)
1429 __put_user(0, &buf->f_fstr[i]);
1430
1431 error = 0;
1432
1433dput_and_out:
1434 path_release(&nd);
1435out:
1436 return error;
1437}
1438
1439asmlinkage int irix_fstatvfs(int fd, struct irix_statvfs *buf)
1440{
1441 struct kstatfs kbuf;
1442 struct file *file;
1443 int error, i;
1444
1445 printk("[%s:%d] Wheee.. irix_fstatvfs(%d,%p)\n",
1446 current->comm, current->pid, fd, buf);
1447
1448 if (!access_ok(VERIFY_WRITE, buf, sizeof(struct irix_statvfs))) {
1449 error = -EFAULT;
1450 goto out;
1451 }
1452 if (!(file = fget(fd))) {
1453 error = -EBADF;
1454 goto out;
1455 }
1456 error = vfs_statfs(file->f_dentry->d_inode->i_sb, &kbuf);
1457 if (error)
1458 goto out_f;
1459
1460 __put_user(kbuf.f_bsize, &buf->f_bsize);
1461 __put_user(kbuf.f_frsize, &buf->f_frsize);
1462 __put_user(kbuf.f_blocks, &buf->f_blocks);
1463 __put_user(kbuf.f_bfree, &buf->f_bfree);
1464 __put_user(kbuf.f_bfree, &buf->f_bavail); /* XXX hackety hack... */
1465 __put_user(kbuf.f_files, &buf->f_files);
1466 __put_user(kbuf.f_ffree, &buf->f_ffree);
1467 __put_user(kbuf.f_ffree, &buf->f_favail); /* XXX hackety hack... */
1468#ifdef __MIPSEB__
1469 __put_user(kbuf.f_fsid.val[1], &buf->f_fsid);
1470#else
1471 __put_user(kbuf.f_fsid.val[0], &buf->f_fsid);
1472#endif
1473 for(i = 0; i < 16; i++)
1474 __put_user(0, &buf->f_basetype[i]);
1475 __put_user(0, &buf->f_flag);
1476 __put_user(kbuf.f_namelen, &buf->f_namemax);
1477 __clear_user(&buf->f_fstr, sizeof(buf->f_fstr));
1478
1479out_f:
1480 fput(file);
1481out:
1482 return error;
1483}
1484
1485asmlinkage int irix_priocntl(struct pt_regs *regs)
1486{
1487 printk("[%s:%d] Wheee.. irix_priocntl()\n",
1488 current->comm, current->pid);
1489
1490 return -EINVAL;
1491}
1492
1493asmlinkage int irix_sigqueue(int pid, int sig, int code, int val)
1494{
1495 printk("[%s:%d] Wheee.. irix_sigqueue(%d,%d,%d,%d)\n",
1496 current->comm, current->pid, pid, sig, code, val);
1497
1498 return -EINVAL;
1499}
1500
1501asmlinkage int irix_truncate64(char *name, int pad, int size1, int size2)
1502{
1503 int retval;
1504
1505 if (size1) {
1506 retval = -EINVAL;
1507 goto out;
1508 }
1509 retval = sys_truncate(name, size2);
1510
1511out:
1512 return retval;
1513}
1514
1515asmlinkage int irix_ftruncate64(int fd, int pad, int size1, int size2)
1516{
1517 int retval;
1518
1519 if (size1) {
1520 retval = -EINVAL;
1521 goto out;
1522 }
1523 retval = sys_ftruncate(fd, size2);
1524
1525out:
1526 return retval;
1527}
1528
1529asmlinkage int irix_mmap64(struct pt_regs *regs)
1530{
1531 int len, prot, flags, fd, off1, off2, error, base = 0;
1532 unsigned long addr, pgoff, *sp;
1533 struct file *file = NULL;
1534
1535 if (regs->regs[2] == 1000)
1536 base = 1;
1537 sp = (unsigned long *) (regs->regs[29] + 16);
1538 addr = regs->regs[base + 4];
1539 len = regs->regs[base + 5];
1540 prot = regs->regs[base + 6];
1541 if (!base) {
1542 flags = regs->regs[base + 7];
1543 if (!access_ok(VERIFY_READ, sp, (4 * sizeof(unsigned long)))) {
1544 error = -EFAULT;
1545 goto out;
1546 }
1547 fd = sp[0];
1548 __get_user(off1, &sp[1]);
1549 __get_user(off2, &sp[2]);
1550 } else {
1551 if (!access_ok(VERIFY_READ, sp, (5 * sizeof(unsigned long)))) {
1552 error = -EFAULT;
1553 goto out;
1554 }
1555 __get_user(flags, &sp[0]);
1556 __get_user(fd, &sp[1]);
1557 __get_user(off1, &sp[2]);
1558 __get_user(off2, &sp[3]);
1559 }
1560
1561 if (off1 & PAGE_MASK) {
1562 error = -EOVERFLOW;
1563 goto out;
1564 }
1565
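/*
 * off1 and off2 are the high and low 32-bit halves of the 64-bit file
 * offset; fold them into a page offset for do_mmap_pgoff(), i.e.
 * pgoff = (off1 * 2^32 + off2) / PAGE_SIZE.
 */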
1566 pgoff = (off1 << (32 - PAGE_SHIFT)) | (off2 >> PAGE_SHIFT);
1567
1568 if (!(flags & MAP_ANONYMOUS)) {
1569 if (!(file = fget(fd))) {
1570 error = -EBADF;
1571 goto out;
1572 }
1573
1574 /* Ok, bad taste hack follows, try to think of something else
1575 when reading this */
1576 if (flags & IRIX_MAP_AUTOGROW) {
1577 unsigned long old_pos;
1578 long max_size = off2 + len;
1579
1580 if (max_size > file->f_dentry->d_inode->i_size) {
1581 old_pos = sys_lseek (fd, max_size - 1, 0);
1582 sys_write (fd, "", 1);
1583 sys_lseek (fd, old_pos, 0);
1584 }
1585 }
1586 }
1587
1588 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
1589
1590 down_write(&current->mm->mmap_sem);
1591 error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
1592 up_write(&current->mm->mmap_sem);
1593
1594 if (file)
1595 fput(file);
1596
1597out:
1598 return error;
1599}
1600
1601asmlinkage int irix_dmi(struct pt_regs *regs)
1602{
1603 printk("[%s:%d] Wheee.. irix_dmi()\n",
1604 current->comm, current->pid);
1605
1606 return -EINVAL;
1607}
1608
1609asmlinkage int irix_pread(int fd, char *buf, int cnt, int off64,
1610 int off1, int off2)
1611{
1612 printk("[%s:%d] Wheee.. irix_pread(%d,%p,%d,%d,%d,%d)\n",
1613 current->comm, current->pid, fd, buf, cnt, off64, off1, off2);
1614
1615 return -EINVAL;
1616}
1617
1618asmlinkage int irix_pwrite(int fd, char *buf, int cnt, int off64,
1619 int off1, int off2)
1620{
1621 printk("[%s:%d] Wheee.. irix_pwrite(%d,%p,%d,%d,%d,%d)\n",
1622 current->comm, current->pid, fd, buf, cnt, off64, off1, off2);
1623
1624 return -EINVAL;
1625}
1626
1627asmlinkage int irix_sgifastpath(int cmd, unsigned long arg0, unsigned long arg1,
1628 unsigned long arg2, unsigned long arg3,
1629 unsigned long arg4, unsigned long arg5)
1630{
1631 printk("[%s:%d] Wheee.. irix_fastpath(%d,%08lx,%08lx,%08lx,%08lx,"
1632 "%08lx,%08lx)\n",
1633 current->comm, current->pid, cmd, arg0, arg1, arg2,
1634 arg3, arg4, arg5);
1635
1636 return -EINVAL;
1637}
1638
1639struct irix_statvfs64 {
1640 u32 f_bsize; u32 f_frsize;
1641 u64 f_blocks; u64 f_bfree; u64 f_bavail;
1642 u64 f_files; u64 f_ffree; u64 f_favail;
1643 u32 f_fsid;
1644 char f_basetype[16];
1645 u32 f_flag; u32 f_namemax;
1646 char f_fstr[32];
1647 u32 f_filler[16];
1648};
1649
1650asmlinkage int irix_statvfs64(char *fname, struct irix_statvfs64 *buf)
1651{
1652 struct nameidata nd;
1653 struct kstatfs kbuf;
1654 int error, i;
1655
1656 printk("[%s:%d] Wheee.. irix_statvfs64(%s,%p)\n",
1657 current->comm, current->pid, fname, buf);
1658 if (!access_ok(VERIFY_WRITE, buf, sizeof(struct irix_statvfs64))) {
1659 error = -EFAULT;
1660 goto out;
1661 }
1662 error = user_path_walk(fname, &nd);
1663 if (error)
1664 goto out;
1665 error = vfs_statfs(nd.dentry->d_inode->i_sb, &kbuf);
1666 if (error)
1667 goto dput_and_out;
1668
1669 __put_user(kbuf.f_bsize, &buf->f_bsize);
1670 __put_user(kbuf.f_frsize, &buf->f_frsize);
1671 __put_user(kbuf.f_blocks, &buf->f_blocks);
1672 __put_user(kbuf.f_bfree, &buf->f_bfree);
1673 __put_user(kbuf.f_bfree, &buf->f_bavail); /* XXX hackety hack... */
1674 __put_user(kbuf.f_files, &buf->f_files);
1675 __put_user(kbuf.f_ffree, &buf->f_ffree);
1676 __put_user(kbuf.f_ffree, &buf->f_favail); /* XXX hackety hack... */
1677#ifdef __MIPSEB__
1678 __put_user(kbuf.f_fsid.val[1], &buf->f_fsid);
1679#else
1680 __put_user(kbuf.f_fsid.val[0], &buf->f_fsid);
1681#endif
1682 for(i = 0; i < 16; i++)
1683 __put_user(0, &buf->f_basetype[i]);
1684 __put_user(0, &buf->f_flag);
1685 __put_user(kbuf.f_namelen, &buf->f_namemax);
1686 for(i = 0; i < 32; i++)
1687 __put_user(0, &buf->f_fstr[i]);
1688
1689 error = 0;
1690
1691dput_and_out:
1692 path_release(&nd);
1693out:
1694 return error;
1695}
1696
1697asmlinkage int irix_fstatvfs64(int fd, struct irix_statvfs *buf)
1698{
1699 struct kstatfs kbuf;
1700 struct file *file;
1701 int error, i;
1702
1703 printk("[%s:%d] Wheee.. irix_fstatvfs64(%d,%p)\n",
1704 current->comm, current->pid, fd, buf);
1705
1706 if (!access_ok(VERIFY_WRITE, buf, sizeof(struct irix_statvfs))) {
1707 error = -EFAULT;
1708 goto out;
1709 }
1710 if (!(file = fget(fd))) {
1711 error = -EBADF;
1712 goto out;
1713 }
1714 error = vfs_statfs(file->f_dentry->d_inode->i_sb, &kbuf);
1715 if (error)
1716 goto out_f;
1717
1718 __put_user(kbuf.f_bsize, &buf->f_bsize);
1719 __put_user(kbuf.f_frsize, &buf->f_frsize);
1720 __put_user(kbuf.f_blocks, &buf->f_blocks);
1721 __put_user(kbuf.f_bfree, &buf->f_bfree);
1722 __put_user(kbuf.f_bfree, &buf->f_bavail); /* XXX hackety hack... */
1723 __put_user(kbuf.f_files, &buf->f_files);
1724 __put_user(kbuf.f_ffree, &buf->f_ffree);
1725 __put_user(kbuf.f_ffree, &buf->f_favail); /* XXX hackety hack... */
1726#ifdef __MIPSEB__
1727 __put_user(kbuf.f_fsid.val[1], &buf->f_fsid);
1728#else
1729 __put_user(kbuf.f_fsid.val[0], &buf->f_fsid);
1730#endif
1731 for(i = 0; i < 16; i++)
1732 __put_user(0, &buf->f_basetype[i]);
1733 __put_user(0, &buf->f_flag);
1734 __put_user(kbuf.f_namelen, &buf->f_namemax);
1735 __clear_user(buf->f_fstr, sizeof(buf->f_fstr));
1736
1737out_f:
1738 fput(file);
1739out:
1740 return error;
1741}
1742
1743asmlinkage int irix_getmountid(char *fname, unsigned long *midbuf)
1744{
1745 int err = 0;
1746
1747 printk("[%s:%d] irix_getmountid(%s, %p)\n",
1748 current->comm, current->pid, fname, midbuf);
1749 if (!access_ok(VERIFY_WRITE, midbuf, (sizeof(unsigned long) * 4)))
1750 return -EFAULT;
1751
1752 /*
1753 * The idea with this system call is that when trying to determine
1754 * 'pwd' and it's a toss-up for some reason, userland can use the
1755 * fsid of the filesystem to try and make the right decision, but
1756 * we don't have that available here, so we just return zeroes for now. XXX
1757 */
1758 err |= __put_user(0, &midbuf[0]);
1759 err |= __put_user(0, &midbuf[1]);
1760 err |= __put_user(0, &midbuf[2]);
1761 err |= __put_user(0, &midbuf[3]);
1762
1763 return err;
1764}
1765
1766asmlinkage int irix_nsproc(unsigned long entry, unsigned long mask,
1767 unsigned long arg, unsigned long sp, int slen)
1768{
1769 printk("[%s:%d] Wheee.. irix_nsproc(%08lx,%08lx,%08lx,%08lx,%d)\n",
1770 current->comm, current->pid, entry, mask, arg, sp, slen);
1771
1772 return -EINVAL;
1773}
1774
1775#undef DEBUG_GETDENTS
1776
1777struct irix_dirent32 {
1778 u32 d_ino;
1779 u32 d_off;
1780 unsigned short d_reclen;
1781 char d_name[1];
1782};
1783
1784struct irix_dirent32_callback {
1785 struct irix_dirent32 *current_dir;
1786 struct irix_dirent32 *previous;
1787 int count;
1788 int error;
1789};
1790
1791#define NAME_OFFSET32(de) ((int) ((de)->d_name - (char *) (de)))
1792#define ROUND_UP32(x) (((x)+sizeof(u32)-1) & ~(sizeof(u32)-1))
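/*
 * The record length of one dirent is the offset of d_name within the
 * structure, plus the name and its trailing NUL, rounded up to a 4-byte
 * (u32) boundary.
 */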
1793
1794static int irix_filldir32(void *__buf, const char *name, int namlen,
1795 loff_t offset, ino_t ino, unsigned int d_type)
1796{
1797 struct irix_dirent32 *dirent;
1798 struct irix_dirent32_callback *buf =
1799 (struct irix_dirent32_callback *)__buf;
1800 unsigned short reclen = ROUND_UP32(NAME_OFFSET32(dirent) + namlen + 1);
1801
1802#ifdef DEBUG_GETDENTS
1803 printk("\nirix_filldir32[reclen<%d>namlen<%d>count<%d>]",
1804 reclen, namlen, buf->count);
1805#endif
1806 buf->error = -EINVAL; /* only used if we fail.. */
1807 if (reclen > buf->count)
1808 return -EINVAL;
1809 dirent = buf->previous;
1810 if (dirent)
1811 __put_user(offset, &dirent->d_off);
1812 dirent = buf->current_dir;
1813 buf->previous = dirent;
1814 __put_user(ino, &dirent->d_ino);
1815 __put_user(reclen, &dirent->d_reclen);
1816 copy_to_user(dirent->d_name, name, namlen);
1817 __put_user(0, &dirent->d_name[namlen]);
1818 ((char *) dirent) += reclen;
1819 buf->current_dir = dirent;
1820 buf->count -= reclen;
1821
1822 return 0;
1823}
1824
1825asmlinkage int irix_ngetdents(unsigned int fd, void * dirent,
1826 unsigned int count, int *eob)
1827{
1828 struct file *file;
1829 struct irix_dirent32 *lastdirent;
1830 struct irix_dirent32_callback buf;
1831 int error;
1832
1833#ifdef DEBUG_GETDENTS
1834 printk("[%s:%d] ngetdents(%d, %p, %d, %p) ", current->comm,
1835 current->pid, fd, dirent, count, eob);
1836#endif
1837 error = -EBADF;
1838 file = fget(fd);
1839 if (!file)
1840 goto out;
1841
1842 buf.current_dir = (struct irix_dirent32 *) dirent;
1843 buf.previous = NULL;
1844 buf.count = count;
1845 buf.error = 0;
1846
1847 error = vfs_readdir(file, irix_filldir32, &buf);
1848 if (error < 0)
1849 goto out_putf;
1850
1851 error = buf.error;
1852 lastdirent = buf.previous;
1853 if (lastdirent) {
1854 put_user(file->f_pos, &lastdirent->d_off);
1855 error = count - buf.count;
1856 }
1857
1858 if (put_user(0, eob) < 0) {
1859 error = -EFAULT;
1860 goto out_putf;
1861 }
1862
1863#ifdef DEBUG_GETDENTS
1864 printk("eob=%d returning %d\n", *eob, count - buf.count);
1865#endif
1866 error = count - buf.count;
1867
1868out_putf:
1869 fput(file);
1870out:
1871 return error;
1872}
1873
1874struct irix_dirent64 {
1875 u64 d_ino;
1876 u64 d_off;
1877 unsigned short d_reclen;
1878 char d_name[1];
1879};
1880
1881struct irix_dirent64_callback {
1882 struct irix_dirent64 *curr;
1883 struct irix_dirent64 *previous;
1884 int count;
1885 int error;
1886};
1887
1888#define NAME_OFFSET64(de) ((int) ((de)->d_name - (char *) (de)))
1889#define ROUND_UP64(x) (((x)+sizeof(u64)-1) & ~(sizeof(u64)-1))
1890
1891static int irix_filldir64(void * __buf, const char * name, int namlen,
1892 loff_t offset, ino_t ino, unsigned int d_type)
1893{
1894 struct irix_dirent64 *dirent;
1895 struct irix_dirent64_callback * buf =
1896 (struct irix_dirent64_callback *) __buf;
1897 unsigned short reclen = ROUND_UP64(NAME_OFFSET64(dirent) + namlen + 1);
1898
1899 buf->error = -EINVAL; /* only used if we fail.. */
1900 if (reclen > buf->count)
1901 return -EINVAL;
1902 dirent = buf->previous;
1903 if (dirent)
1904 __put_user(offset, &dirent->d_off);
1905 dirent = buf->curr;
1906 buf->previous = dirent;
1907 __put_user(ino, &dirent->d_ino);
1908 __put_user(reclen, &dirent->d_reclen);
1909 __copy_to_user(dirent->d_name, name, namlen);
1910 __put_user(0, &dirent->d_name[namlen]);
1911 ((char *) dirent) += reclen;
1912 buf->curr = dirent;
1913 buf->count -= reclen;
1914
1915 return 0;
1916}
1917
1918asmlinkage int irix_getdents64(int fd, void *dirent, int cnt)
1919{
1920 struct file *file;
1921 struct irix_dirent64 *lastdirent;
1922 struct irix_dirent64_callback buf;
1923 int error;
1924
1925#ifdef DEBUG_GETDENTS
1926 printk("[%s:%d] getdents64(%d, %p, %d) ", current->comm,
1927 current->pid, fd, dirent, cnt);
1928#endif
1929 error = -EBADF;
1930 if (!(file = fget(fd)))
1931 goto out;
1932
1933 error = -EFAULT;
1934 if (!access_ok(VERIFY_WRITE, dirent, cnt))
1935 goto out_f;
1936
1937 error = -EINVAL;
1938 if (cnt < (sizeof(struct irix_dirent64) + 255))
1939 goto out_f;
1940
1941 buf.curr = (struct irix_dirent64 *) dirent;
1942 buf.previous = NULL;
1943 buf.count = cnt;
1944 buf.error = 0;
1945 error = vfs_readdir(file, irix_filldir64, &buf);
1946 if (error < 0)
1947 goto out_f;
1948 lastdirent = buf.previous;
1949 if (!lastdirent) {
1950 error = buf.error;
1951 goto out_f;
1952 }
1953 lastdirent->d_off = (u64) file->f_pos;
1954#ifdef DEBUG_GETDENTS
1955 printk("returning %d\n", cnt - buf.count);
1956#endif
1957 error = cnt - buf.count;
1958
1959out_f:
1960 fput(file);
1961out:
1962 return error;
1963}
1964
1965asmlinkage int irix_ngetdents64(int fd, void *dirent, int cnt, int *eob)
1966{
1967 struct file *file;
1968 struct irix_dirent64 *lastdirent;
1969 struct irix_dirent64_callback buf;
1970 int error;
1971
1972#ifdef DEBUG_GETDENTS
1973 printk("[%s:%d] ngetdents64(%d, %p, %d) ", current->comm,
1974 current->pid, fd, dirent, cnt);
1975#endif
1976 error = -EBADF;
1977 if (!(file = fget(fd)))
1978 goto out;
1979
1980 error = -EFAULT;
1981 if (!access_ok(VERIFY_WRITE, dirent, cnt) ||
1982 !access_ok(VERIFY_WRITE, eob, sizeof(*eob)))
1983 goto out_f;
1984
1985 error = -EINVAL;
1986 if (cnt < (sizeof(struct irix_dirent64) + 255))
1987 goto out_f;
1988
1989 *eob = 0;
1990 buf.curr = (struct irix_dirent64 *) dirent;
1991 buf.previous = NULL;
1992 buf.count = cnt;
1993 buf.error = 0;
1994 error = vfs_readdir(file, irix_filldir64, &buf);
1995 if (error < 0)
1996 goto out_f;
1997 lastdirent = buf.previous;
1998 if (!lastdirent) {
1999 error = buf.error;
2000 goto out_f;
2001 }
2002 lastdirent->d_off = (u64) file->f_pos;
2003#ifdef DEBUG_GETDENTS
2004 printk("eob=%d returning %d\n", *eob, cnt - buf.count);
2005#endif
2006 error = cnt - buf.count;
2007
2008out_f:
2009 fput(file);
2010out:
2011 return error;
2012}
2013
2014asmlinkage int irix_uadmin(unsigned long op, unsigned long func, unsigned long arg)
2015{
2016 int retval;
2017
2018 switch (op) {
2019 case 1:
2020 /* Reboot */
2021 printk("[%s:%d] irix_uadmin: Wants to reboot...\n",
2022 current->comm, current->pid);
2023 retval = -EINVAL;
2024 goto out;
2025
2026 case 2:
2027 /* Shutdown */
2028 printk("[%s:%d] irix_uadmin: Wants to shutdown...\n",
2029 current->comm, current->pid);
2030 retval = -EINVAL;
2031 goto out;
2032
2033 case 4:
2034 /* Remount-root */
2035 printk("[%s:%d] irix_uadmin: Wants to remount root...\n",
2036 current->comm, current->pid);
2037 retval = -EINVAL;
2038 goto out;
2039
2040 case 8:
2041 /* Kill all tasks. */
2042 printk("[%s:%d] irix_uadmin: Wants to kill all tasks...\n",
2043 current->comm, current->pid);
2044 retval = -EINVAL;
2045 goto out;
2046
2047 case 256:
2048 /* Set magic mushrooms... */
2049 printk("[%s:%d] irix_uadmin: Wants to set magic mushroom[%d]...\n",
2050 current->comm, current->pid, (int) func);
2051 retval = -EINVAL;
2052 goto out;
2053
2054 default:
2055 printk("[%s:%d] irix_uadmin: Unknown operation [%d]...\n",
2056 current->comm, current->pid, (int) op);
2057 retval = -EINVAL;
2058 goto out;
2059 };
2060
2061out:
2062 return retval;
2063}
2064
2065asmlinkage int irix_utssys(char *inbuf, int arg, int type, char *outbuf)
2066{
2067 int retval;
2068
2069 switch(type) {
2070 case 0:
2071 /* uname() */
2072 retval = irix_uname((struct iuname *)inbuf);
2073 goto out;
2074
2075 case 2:
2076 /* ustat() */
2077 printk("[%s:%d] irix_utssys: Wants to do ustat()\n",
2078 current->comm, current->pid);
2079 retval = -EINVAL;
2080 goto out;
2081
2082 case 3:
2083 /* fusers() */
2084 printk("[%s:%d] irix_utssys: Wants to do fusers()\n",
2085 current->comm, current->pid);
2086 retval = -EINVAL;
2087 goto out;
2088
2089 default:
2090 printk("[%s:%d] irix_utssys: Wants to do unknown type[%d]\n",
2091 current->comm, current->pid, (int) type);
2092 retval = -EINVAL;
2093 goto out;
2094 }
2095
2096out:
2097 return retval;
2098}
2099
2100#undef DEBUG_FCNTL
2101
2102#define IRIX_F_ALLOCSP 10
2103
2104asmlinkage int irix_fcntl(int fd, int cmd, int arg)
2105{
2106 int retval;
2107
2108#ifdef DEBUG_FCNTL
2109 printk("[%s:%d] irix_fcntl(%d, %d, %d) ", current->comm,
2110 current->pid, fd, cmd, arg);
2111#endif
2112 if (cmd == IRIX_F_ALLOCSP){
2113 return 0;
2114 }
2115 retval = sys_fcntl(fd, cmd, arg);
2116#ifdef DEBUG_FCNTL
2117 printk("%d\n", retval);
2118#endif
2119 return retval;
2120}
2121
2122asmlinkage int irix_ulimit(int cmd, int arg)
2123{
2124 int retval;
2125
2126 switch(cmd) {
2127 case 1:
2128 printk("[%s:%d] irix_ulimit: Wants to get file size limit.\n",
2129 current->comm, current->pid);
2130 retval = -EINVAL;
2131 goto out;
2132
2133 case 2:
2134 printk("[%s:%d] irix_ulimit: Wants to set file size limit.\n",
2135 current->comm, current->pid);
2136 retval = -EINVAL;
2137 goto out;
2138
2139 case 3:
2140 printk("[%s:%d] irix_ulimit: Wants to get brk limit.\n",
2141 current->comm, current->pid);
2142 retval = -EINVAL;
2143 goto out;
2144
2145 case 4:
2146#if 0
2147 printk("[%s:%d] irix_ulimit: Wants to get fd limit.\n",
2148 current->comm, current->pid);
2149 retval = -EINVAL;
2150 goto out;
2151#endif
2152 retval = current->signal->rlim[RLIMIT_NOFILE].rlim_cur;
2153 goto out;
2154
2155 case 5:
2156 printk("[%s:%d] irix_ulimit: Wants to get txt offset.\n",
2157 current->comm, current->pid);
2158 retval = -EINVAL;
2159 goto out;
2160
2161 default:
2162 printk("[%s:%d] irix_ulimit: Unknown command [%d].\n",
2163 current->comm, current->pid, cmd);
2164 retval = -EINVAL;
2165 goto out;
2166 }
2167out:
2168 return retval;
2169}
2170
2171asmlinkage int irix_unimp(struct pt_regs *regs)
2172{
2173 printk("irix_unimp [%s:%d] v0=%d v1=%d a0=%08lx a1=%08lx a2=%08lx "
2174 "a3=%08lx\n", current->comm, current->pid,
2175 (int) regs->regs[2], (int) regs->regs[3],
2176 regs->regs[4], regs->regs[5], regs->regs[6], regs->regs[7]);
2177
2178 return -ENOSYS;
2179}
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
new file mode 100644
index 000000000000..648c82292ed6
--- /dev/null
+++ b/arch/mips/kernel/time.c
@@ -0,0 +1,755 @@
1/*
2 * Copyright 2001 MontaVista Software Inc.
3 * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
4 * Copyright (c) 2003, 2004 Maciej W. Rozycki
5 *
6 * Common time service routines for MIPS machines. See
7 * Documentation/mips/time.README.
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version.
13 */
14#include <linux/types.h>
15#include <linux/kernel.h>
16#include <linux/init.h>
17#include <linux/sched.h>
18#include <linux/param.h>
19#include <linux/time.h>
20#include <linux/timex.h>
21#include <linux/smp.h>
22#include <linux/kernel_stat.h>
23#include <linux/spinlock.h>
24#include <linux/interrupt.h>
25#include <linux/module.h>
26
27#include <asm/bootinfo.h>
28#include <asm/compiler.h>
29#include <asm/cpu.h>
30#include <asm/cpu-features.h>
31#include <asm/div64.h>
32#include <asm/sections.h>
33#include <asm/time.h>
34
35/*
36 * The integer part of the number of usecs per jiffy is taken from tick,
37 * but the fractional part is not recorded, so we calculate it using the
38 * initial value of HZ. This aids systems where tick isn't really an
39 * integer (e.g. for HZ = 128).
40 */
41#define USECS_PER_JIFFY TICK_SIZE
42#define USECS_PER_JIFFY_FRAC ((unsigned long)(u32)((1000000ULL << 32) / HZ))
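/*
 * Worked example, assuming HZ = 128: one jiffy is 1000000/128 = 7812.5 us.
 * USECS_PER_JIFFY (tick_nsec / 1000) then carries the integer part, 7812 us,
 * and USECS_PER_JIFFY_FRAC carries the fraction scaled by 2^32:
 * (u32)((1000000ULL << 32) / 128) = 0.5 * 2^32 = 0x80000000.
 */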
43
44#define TICK_SIZE (tick_nsec / 1000)
45
46u64 jiffies_64 = INITIAL_JIFFIES;
47
48EXPORT_SYMBOL(jiffies_64);
49
50/*
51 * forward reference
52 */
53extern volatile unsigned long wall_jiffies;
54
55DEFINE_SPINLOCK(rtc_lock);
56
57/*
58 * By default we provide the null RTC ops
59 */
60static unsigned long null_rtc_get_time(void)
61{
62 return mktime(2000, 1, 1, 0, 0, 0);
63}
64
65static int null_rtc_set_time(unsigned long sec)
66{
67 return 0;
68}
69
70unsigned long (*rtc_get_time)(void) = null_rtc_get_time;
71int (*rtc_set_time)(unsigned long) = null_rtc_set_time;
72int (*rtc_set_mmss)(unsigned long);
73
74
75/* usecs per counter cycle, shifted to left by 32 bits */
76static unsigned int sll32_usecs_per_cycle;
77
78/* how many counter cycles in a jiffy */
79static unsigned long cycles_per_jiffy;
80
81/* Cycle counter value at the previous timer interrupt.. */
82static unsigned int timerhi, timerlo;
83
84/* expirelo is the count value for next CPU timer interrupt */
85static unsigned int expirelo;
86
87
88/*
89 * Null timer ack for systems not needing one (e.g. i8254).
90 */
91static void null_timer_ack(void) { /* nothing */ }
92
93/*
94 * Null high precision timer functions for systems lacking one.
95 */
96static unsigned int null_hpt_read(void)
97{
98 return 0;
99}
100
101static void null_hpt_init(unsigned int count) { /* nothing */ }
102
103
104/*
105 * Timer ack for an R4k-compatible timer of a known frequency.
106 */
107static void c0_timer_ack(void)
108{
109 unsigned int count;
110
111 /* Ack this timer interrupt and set the next one. */
112 expirelo += cycles_per_jiffy;
113 write_c0_compare(expirelo);
114
115 /* Check to see if we have missed any timer interrupts. */
116 count = read_c0_count();
117 if ((count - expirelo) < 0x7fffffff) {
118 /* missed_timer_count++; */
119 expirelo = count + cycles_per_jiffy;
120 write_c0_compare(expirelo);
121 }
122}
123
124/*
125 * High precision timer functions for a R4k-compatible timer.
126 */
127static unsigned int c0_hpt_read(void)
128{
129 return read_c0_count();
130}
131
132/* For use solely as a high precision timer. */
133static void c0_hpt_init(unsigned int count)
134{
135 write_c0_count(read_c0_count() - count);
136}
137
138/* For use both as a high precision timer and an interrupt source. */
139static void c0_hpt_timer_init(unsigned int count)
140{
141 count = read_c0_count() - count;
142 expirelo = (count / cycles_per_jiffy + 1) * cycles_per_jiffy;
143 write_c0_count(expirelo - cycles_per_jiffy);
144 write_c0_compare(expirelo);
145 write_c0_count(count);
146}
147
148int (*mips_timer_state)(void);
149void (*mips_timer_ack)(void);
150unsigned int (*mips_hpt_read)(void);
151void (*mips_hpt_init)(unsigned int);
152
153
154/*
155 * This version of gettimeofday has microsecond resolution and better than
156 * microsecond precision on fast machines with a cycle counter.
157 */
158void do_gettimeofday(struct timeval *tv)
159{
160 unsigned long seq;
161 unsigned long lost;
162 unsigned long usec, sec;
163 unsigned long max_ntp_tick = tick_usec - tickadj;
164
165 do {
166 seq = read_seqbegin(&xtime_lock);
167
168 usec = do_gettimeoffset();
169
170 lost = jiffies - wall_jiffies;
171
172 /*
173 * If time_adjust is negative then NTP is slowing the clock
174 * so make sure not to go into next possible interval.
175 * Better to lose some accuracy than have time go backwards..
176 */
177 if (unlikely(time_adjust < 0)) {
178 usec = min(usec, max_ntp_tick);
179
180 if (lost)
181 usec += lost * max_ntp_tick;
182 } else if (unlikely(lost))
183 usec += lost * tick_usec;
184
185 sec = xtime.tv_sec;
186 usec += (xtime.tv_nsec / 1000);
187
188 } while (read_seqretry(&xtime_lock, seq));
189
190 while (usec >= 1000000) {
191 usec -= 1000000;
192 sec++;
193 }
194
195 tv->tv_sec = sec;
196 tv->tv_usec = usec;
197}
198
199EXPORT_SYMBOL(do_gettimeofday);
200
201int do_settimeofday(struct timespec *tv)
202{
203 time_t wtm_sec, sec = tv->tv_sec;
204 long wtm_nsec, nsec = tv->tv_nsec;
205
206 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
207 return -EINVAL;
208
209 write_seqlock_irq(&xtime_lock);
210
211 /*
212 * This is revolting. We need to set "xtime" correctly. However,
213 * the value in this location is the value at the most recent update
214 * of wall time. Discover what correction gettimeofday() would have
215 * made, and then undo it!
216 */
217 nsec -= do_gettimeoffset() * NSEC_PER_USEC;
218 nsec -= (jiffies - wall_jiffies) * tick_nsec;
219
220 wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
221 wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
222
223 set_normalized_timespec(&xtime, sec, nsec);
224 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
225
226 time_adjust = 0; /* stop active adjtime() */
227 time_status |= STA_UNSYNC;
228 time_maxerror = NTP_PHASE_LIMIT;
229 time_esterror = NTP_PHASE_LIMIT;
230
231 write_sequnlock_irq(&xtime_lock);
232 clock_was_set();
233 return 0;
234}
235
236EXPORT_SYMBOL(do_settimeofday);
237
238/*
239 * Gettimeoffset routines. These routines return the time elapsed
240 * since the last timer interrupt, in usecs.
241 *
242 * If the exact CPU counter frequency is known, use fixed_rate_gettimeoffset.
243 * Otherwise use one of the calibrate_div{32,64}_gettimeoffset variants.
244 *
245 * If the CPU does not have the counter register, you can either supply
246 * your own gettimeoffset() routine, or use null_gettimeoffset(), which
247 * gives the same resolution as HZ.
248 */
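/*
 * Illustration (a sketch, not code from this file): a platform that knows
 * its counter frequency at time_init() time would typically end up with
 *
 *	do_gettimeoffset = fixed_rate_gettimeoffset;
 *
 * while one that has to measure the rate at run time would pick one of the
 * calibrate_div32/div64_gettimeoffset() variants below, and a CPU without
 * a cycle counter stays with null_gettimeoffset().
 */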
249
250static unsigned long null_gettimeoffset(void)
251{
252 return 0;
253}
254
255
256/* The function pointer to one of the gettimeoffset funcs. */
257unsigned long (*do_gettimeoffset)(void) = null_gettimeoffset;
258
259
260static unsigned long fixed_rate_gettimeoffset(void)
261{
262 u32 count;
263 unsigned long res;
264
265 /* Get last timer tick in absolute kernel time */
266 count = mips_hpt_read();
267
268 /* .. relative to previous jiffy (32 bits is enough) */
269 count -= timerlo;
270
271 __asm__("multu %1,%2"
272 : "=h" (res)
273 : "r" (count), "r" (sll32_usecs_per_cycle)
274 : "lo", GCC_REG_ACCUM);
275
276 /*
277 * Due to possible jiffies inconsistencies, we need to check
278 * the result so that we'll get a timer that is monotonic.
279 */
280 if (res >= USECS_PER_JIFFY)
281 res = USECS_PER_JIFFY - 1;
282
283 return res;
284}
285
286
287/*
288 * Cached "1/(clocks per usec) * 2^32" value.
289 * It has to be recalculated once each jiffy.
290 */
291static unsigned long cached_quotient;
292
293/* Last jiffy when calibrate_divXX_gettimeoffset() was called. */
294static unsigned long last_jiffies;
295
296/*
297 * This is moved from dec/time.c:do_ioasic_gettimeoffset() by Maciej.
298 */
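/*
 * Rough sketch of the recalculation below: r0 = (timerhi:timerlo) / jiffies
 * is the measured average number of counter cycles per jiffy, and
 * quotient = (USECS_PER_JIFFY:USECS_PER_JIFFY_FRAC) / r0 is then
 * "usecs per counter cycle, shifted left by 32 bits", i.e. the same kind of
 * value as sll32_usecs_per_cycle, only measured rather than precomputed.
 */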
299static unsigned long calibrate_div32_gettimeoffset(void)
300{
301 u32 count;
302 unsigned long res, tmp;
303 unsigned long quotient;
304
305 tmp = jiffies;
306
307 quotient = cached_quotient;
308
309 if (last_jiffies != tmp) {
310 last_jiffies = tmp;
311 if (last_jiffies != 0) {
312 unsigned long r0;
313 do_div64_32(r0, timerhi, timerlo, tmp);
314 do_div64_32(quotient, USECS_PER_JIFFY,
315 USECS_PER_JIFFY_FRAC, r0);
316 cached_quotient = quotient;
317 }
318 }
319
320 /* Get last timer tick in absolute kernel time */
321 count = mips_hpt_read();
322
323 /* .. relative to previous jiffy (32 bits is enough) */
324 count -= timerlo;
325
326 __asm__("multu %1,%2"
327 : "=h" (res)
328 : "r" (count), "r" (quotient)
329 : "lo", GCC_REG_ACCUM);
330
331 /*
332 * Due to possible jiffies inconsistencies, we need to check
333 * the result so that we'll get a timer that is monotonic.
334 */
335 if (res >= USECS_PER_JIFFY)
336 res = USECS_PER_JIFFY - 1;
337
338 return res;
339}
340
341static unsigned long calibrate_div64_gettimeoffset(void)
342{
343 u32 count;
344 unsigned long res, tmp;
345 unsigned long quotient;
346
347 tmp = jiffies;
348
349 quotient = cached_quotient;
350
351 if (last_jiffies != tmp) {
352 last_jiffies = tmp;
353 if (last_jiffies) {
354 unsigned long r0;
355 __asm__(".set push\n\t"
356 ".set mips3\n\t"
357 "lwu %0,%3\n\t"
358 "dsll32 %1,%2,0\n\t"
359 "or %1,%1,%0\n\t"
360 "ddivu $0,%1,%4\n\t"
361 "mflo %1\n\t"
362 "dsll32 %0,%5,0\n\t"
363 "or %0,%0,%6\n\t"
364 "ddivu $0,%0,%1\n\t"
365 "mflo %0\n\t"
366 ".set pop"
367 : "=&r" (quotient), "=&r" (r0)
368 : "r" (timerhi), "m" (timerlo),
369 "r" (tmp), "r" (USECS_PER_JIFFY),
370 "r" (USECS_PER_JIFFY_FRAC)
371 : "hi", "lo", GCC_REG_ACCUM);
372 cached_quotient = quotient;
373 }
374 }
375
376 /* Get last timer tick in absolute kernel time */
377 count = mips_hpt_read();
378
379 /* .. relative to previous jiffy (32 bits is enough) */
380 count -= timerlo;
381
382 __asm__("multu %1,%2"
383 : "=h" (res)
384 : "r" (count), "r" (quotient)
385 : "lo", GCC_REG_ACCUM);
386
387 /*
388 * Due to possible jiffies inconsistencies, we need to check
389 * the result so that we'll get a timer that is monotonic.
390 */
391 if (res >= USECS_PER_JIFFY)
392 res = USECS_PER_JIFFY - 1;
393
394 return res;
395}
396
397
398/* last time when xtime and rtc are sync'ed up */
399static long last_rtc_update;
400
401/*
402 * local_timer_interrupt() does profiling and process accounting
403 * on a per-CPU basis.
404 *
405 * In UP mode, it is invoked from the (global) timer_interrupt.
406 *
407 * In SMP mode, it might be invoked by a per-CPU timer interrupt, or
408 * by a broadcast inter-processor interrupt which itself is triggered
409 * by the global timer interrupt.
410 */
411void local_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
412{
413 if (current->pid)
414 profile_tick(CPU_PROFILING, regs);
415 update_process_times(user_mode(regs));
416}
417
418/*
419 * High-level timer interrupt service routine. This function
420 * is set as irqaction->handler and is invoked through do_IRQ.
421 */
422irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
423{
424 unsigned long j;
425 unsigned int count;
426
427 count = mips_hpt_read();
428 mips_timer_ack();
429
430 /* Update timerhi/timerlo for intra-jiffy calibration. */
431 timerhi += count < timerlo; /* Wrap around */
432 timerlo = count;
433
434 /*
435 * call the generic timer interrupt handling
436 */
437 do_timer(regs);
438
439 /*
440 * If we have an externally synchronized Linux clock, then update
441 * CMOS clock accordingly every ~11 minutes. rtc_set_time() has to be
442 * called as close as possible to 500 ms before the new second starts.
443 */
444 write_seqlock(&xtime_lock);
445 if ((time_status & STA_UNSYNC) == 0 &&
446 xtime.tv_sec > last_rtc_update + 660 &&
447 (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
448 (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
449 if (rtc_set_mmss(xtime.tv_sec) == 0) {
450 last_rtc_update = xtime.tv_sec;
451 } else {
452 /* do it again in 60 s */
453 last_rtc_update = xtime.tv_sec - 600;
454 }
455 }
456 write_sequnlock(&xtime_lock);
457
458 /*
459 * If jiffies has overflowed during this timer_interrupt, we must
460 * update timerhi/timerlo so that the quotient calculation of the
461 * fast gettimeoffset routines remains valid. -arca
462 *
463 * The first timer interrupt comes late, as interrupts are
464 * enabled long after timers are initialized. Therefore the
465 * high precision timer runs fast, leading to wrong gettimeoffset()
466 * calculations. We deal with it by reinitializing the counter from
467 * the number of its ticks between the second and the third interrupt.
468 * That is still somewhat imprecise, but it's a good estimate.
469 * --macro
470 */
471 j = jiffies;
472 if (j < 4) {
473 static unsigned int prev_count;
474 static int hpt_initialized;
475
476 switch (j) {
477 case 0:
478 timerhi = timerlo = 0;
479 mips_hpt_init(count);
480 break;
481 case 2:
482 prev_count = count;
483 break;
484 case 3:
485 if (!hpt_initialized) {
486 unsigned int c3 = 3 * (count - prev_count);
487
488 timerhi = 0;
489 timerlo = c3;
490 mips_hpt_init(count - c3);
491 hpt_initialized = 1;
492 }
493 break;
494 default:
495 break;
496 }
497 }
498
499 /*
500 * In UP mode, we call local_timer_interrupt() to do profiling
501 * and process accounting.
502 *
503 * In SMP mode, local_timer_interrupt() is invoked by the appropriate
504 * low-level local timer interrupt handler.
505 */
506 local_timer_interrupt(irq, dev_id, regs);
507
508 return IRQ_HANDLED;
509}
510
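The "timerhi += count < timerlo" line in timer_interrupt() is the whole 64-bit extension scheme: the interrupt is guaranteed to fire at least once per wrap of the 32-bit counter, so a "did the low word go backwards?" test is enough to carry into the high word. A stand-alone illustration of the idea (the sample counter values are made up):

        #include <stdint.h>
        #include <stdio.h>

        static uint32_t timerhi, timerlo;

        /* Extend a free-running 32-bit counter to 64 bits, exactly like
         * "timerhi += count < timerlo; timerlo = count;" above. */
        static uint64_t record_tick(uint32_t count)
        {
                timerhi += (count < timerlo); /* low word wrapped since last tick */
                timerlo = count;
                return ((uint64_t)timerhi << 32) | timerlo;
        }

        int main(void)
        {
                /* The counter wraps past 2^32 between the 2nd and 3rd sample. */
                uint32_t samples[] = { 0xfffff000u, 0xfffffe00u, 0x00000200u };
                int i;

                for (i = 0; i < 3; i++)
                        printf("sample %d -> 0x%016llx\n", i,
                               (unsigned long long)record_tick(samples[i]));
                return 0;
        }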
511asmlinkage void ll_timer_interrupt(int irq, struct pt_regs *regs)
512{
513 irq_enter();
514 kstat_this_cpu.irqs[irq]++;
515
516 /* we keep interrupt disabled all the time */
517 timer_interrupt(irq, NULL, regs);
518
519 irq_exit();
520}
521
522asmlinkage void ll_local_timer_interrupt(int irq, struct pt_regs *regs)
523{
524 irq_enter();
525 if (smp_processor_id() != 0)
526 kstat_this_cpu.irqs[irq]++;
527
528 /* we keep interrupt disabled all the time */
529 local_timer_interrupt(irq, NULL, regs);
530
531 irq_exit();
532}
533
534/*
535 * time_init() does the following things:
536 *
537 * 1) board_time_init() -
538 * a) (optional) set up RTC routines,
539 * b) (optional) calibrate and set the mips_hpt_frequency
540 * (only needed if you intend to use fixed_rate_gettimeoffset
541 * or to use the CPU counter as the timer interrupt source)
542 * 2) set up xtime based on rtc_get_time().
543 * 3) choose an appropriate gettimeoffset routine.
544 * 4) calculate a couple of cached variables for later use
545 * 5) board_timer_setup() -
546 * a) (optional) override any choices made above by time_init().
547 * b) machine-specific code should set up the timer irqaction.
548 * c) enable the timer interrupt
549 */
550
551void (*board_time_init)(void);
552void (*board_timer_setup)(struct irqaction *irq);
553
554unsigned int mips_hpt_frequency;
555
556static struct irqaction timer_irqaction = {
557 .handler = timer_interrupt,
558 .flags = SA_INTERRUPT,
559 .name = "timer",
560};
561
562static unsigned int __init calibrate_hpt(void)
563{
564 u64 frequency;
565 u32 hpt_start, hpt_end, hpt_count, hz;
566
567 const int loops = HZ / 10;
568 int log_2_loops = 0;
569 int i;
570
571 /*
572 * We want to calibrate for 0.1s, but to avoid a 64-bit
573 * division we round the number of loops up to the nearest
574 * power of 2.
575 */
576 while (loops > 1 << log_2_loops)
577 log_2_loops++;
578 i = 1 << log_2_loops;
579
580 /*
581 * Wait for a rising edge of the timer interrupt.
582 */
583 while (mips_timer_state());
584 while (!mips_timer_state());
585
586 /*
587 * Now see how many high precision timer ticks happen
588 * during the calculated number of periods between timer
589 * interrupts.
590 */
591 hpt_start = mips_hpt_read();
592 do {
593 while (mips_timer_state());
594 while (!mips_timer_state());
595 } while (--i);
596 hpt_end = mips_hpt_read();
597
598 hpt_count = hpt_end - hpt_start;
599 hz = HZ;
600 frequency = (u64)hpt_count * (u64)hz;
601
602 return frequency >> log_2_loops;
603}
604
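calibrate_hpt() sidesteps a 64-bit division by calibrating over a power-of-two number of timer-interrupt periods, so the final division collapses into the right shift at the end. The same arithmetic pulled out into a tiny test program (the counter value is invented and corresponds to a 75 MHz counter over 16 jiffies at HZ = 100):

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                const int hz = 100;
                int loops = hz / 10;            /* calibrate over ~0.1 s */
                int log_2_loops = 0;
                uint32_t hpt_count;
                uint64_t frequency;

                /* Round the number of loops up to the next power of two. */
                while (loops > 1 << log_2_loops)
                        log_2_loops++;

                /* Pretend the counter advanced this much over 2^log_2_loops
                 * jiffies. */
                hpt_count = 12000000;
                frequency = ((uint64_t)hpt_count * hz) >> log_2_loops;

                printf("2^%d loops, frequency = %llu Hz\n",
                       log_2_loops, (unsigned long long)frequency);
                return 0;
        }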
605void __init time_init(void)
606{
607 if (board_time_init)
608 board_time_init();
609
610 if (!rtc_set_mmss)
611 rtc_set_mmss = rtc_set_time;
612
613 xtime.tv_sec = rtc_get_time();
614 xtime.tv_nsec = 0;
615
616 set_normalized_timespec(&wall_to_monotonic,
617 -xtime.tv_sec, -xtime.tv_nsec);
618
619 /* Choose appropriate high precision timer routines. */
620 if (!cpu_has_counter && !mips_hpt_read) {
621 /* No high precision timer -- sorry. */
622 mips_hpt_read = null_hpt_read;
623 mips_hpt_init = null_hpt_init;
624 } else if (!mips_hpt_frequency && !mips_timer_state) {
625 /* A high precision timer of unknown frequency. */
626 if (!mips_hpt_read) {
627 /* No external high precision timer -- use R4k. */
628 mips_hpt_read = c0_hpt_read;
629 mips_hpt_init = c0_hpt_init;
630 }
631
632 if ((current_cpu_data.isa_level == MIPS_CPU_ISA_M32) ||
633 (current_cpu_data.isa_level == MIPS_CPU_ISA_I) ||
634 (current_cpu_data.isa_level == MIPS_CPU_ISA_II))
635 /*
636 * We need to calibrate the counter but we don't have
637 * 64-bit division.
638 */
639 do_gettimeoffset = calibrate_div32_gettimeoffset;
640 else
641 /*
642 * We need to calibrate the counter but we *do* have
643 * 64-bit division.
644 */
645 do_gettimeoffset = calibrate_div64_gettimeoffset;
646 } else {
647 /* We know counter frequency. Or we can get it. */
648 if (!mips_hpt_read) {
649 /* No external high precision timer -- use R4k. */
650 mips_hpt_read = c0_hpt_read;
651
652 if (mips_timer_state)
653 mips_hpt_init = c0_hpt_init;
654 else {
655 /* No external timer interrupt -- use R4k. */
656 mips_hpt_init = c0_hpt_timer_init;
657 mips_timer_ack = c0_timer_ack;
658 }
659 }
660 if (!mips_hpt_frequency)
661 mips_hpt_frequency = calibrate_hpt();
662
663 do_gettimeoffset = fixed_rate_gettimeoffset;
664
665 /* Calculate cache parameters. */
666 cycles_per_jiffy = (mips_hpt_frequency + HZ / 2) / HZ;
667
668 /* sll32_usecs_per_cycle = 10^6 * 2^32 / mips_counter_freq */
669 do_div64_32(sll32_usecs_per_cycle,
670 1000000, mips_hpt_frequency / 2,
671 mips_hpt_frequency);
672
673 /* Report the high precision timer rate for a reference. */
674 printk("Using %u.%03u MHz high precision timer.\n",
675 ((mips_hpt_frequency + 500) / 1000) / 1000,
676 ((mips_hpt_frequency + 500) / 1000) % 1000);
677 }
678
679 if (!mips_timer_ack)
680 /* No timer interrupt ack (e.g. i8254). */
681 mips_timer_ack = null_timer_ack;
682
683 /* This sets up the high precision timer for the first interrupt. */
684 mips_hpt_init(mips_hpt_read());
685
686 /*
687 * Call the board-specific timer interrupt setup.
688 *
689 * This pointer must be set up in the machine setup routine.
690 *
691 * Even if a machine chooses to use a low-level timer interrupt,
692 * it still needs to set up the timer_irqaction.
693 * In that case, it might be better to set timer_irqaction.handler
694 * to a do-nothing function so that we are sure the high-level code
695 * is not invoked accidentally.
696 */
697 board_timer_setup(&timer_irqaction);
698}
699
700#define FEBRUARY 2
701#define STARTOFTIME 1970
702#define SECDAY 86400L
703#define SECYR (SECDAY * 365)
704#define leapyear(y) ((!((y) % 4) && ((y) % 100)) || !((y) % 400))
705#define days_in_year(y) (leapyear(y) ? 366 : 365)
706#define days_in_month(m) (month_days[(m) - 1])
707
708static int month_days[12] = {
709 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
710};
711
712void to_tm(unsigned long tim, struct rtc_time *tm)
713{
714 long hms, day, gday;
715 int i;
716
717 gday = day = tim / SECDAY;
718 hms = tim % SECDAY;
719
720 /* Hours, minutes, seconds are easy */
721 tm->tm_hour = hms / 3600;
722 tm->tm_min = (hms % 3600) / 60;
723 tm->tm_sec = (hms % 3600) % 60;
724
725 /* Number of years in days */
726 for (i = STARTOFTIME; day >= days_in_year(i); i++)
727 day -= days_in_year(i);
728 tm->tm_year = i;
729
730 /* Number of months in days left */
731 if (leapyear(tm->tm_year))
732 days_in_month(FEBRUARY) = 29;
733 for (i = 1; day >= days_in_month(i); i++)
734 day -= days_in_month(i);
735 days_in_month(FEBRUARY) = 28;
736 tm->tm_mon = i - 1; /* tm_mon starts from 0 to 11 */
737
738 /* Days are what is left over (+1) from all that. */
739 tm->tm_mday = day + 1;
740
741 /*
742 * Determine the day of week
743 */
744 tm->tm_wday = (gday + 4) % 7; /* 1970/1/1 was Thursday */
745}
746
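to_tm() is a plain civil-calendar expansion of a Unix timestamp. Its output matches what gmtime() produces, except that tm_year carries the full four-digit year rather than an offset from 1900. A quick user-space cross-check against libc (not kernel code):

        #include <stdio.h>
        #include <time.h>

        int main(void)
        {
                time_t tim = 1000000000;        /* 2001-09-09 01:46:40 UTC */
                struct tm tm;

                gmtime_r(&tim, &tm);

                /* libc keeps years since 1900 and months from 0, hence the
                 * +1900/+1; the kernel's to_tm() stores the full year. */
                printf("%04d-%02d-%02d %02d:%02d:%02d, wday %d\n",
                       tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
                       tm.tm_hour, tm.tm_min, tm.tm_sec, tm.tm_wday);
                return 0;
        }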
747EXPORT_SYMBOL(rtc_lock);
748EXPORT_SYMBOL(to_tm);
749EXPORT_SYMBOL(rtc_set_time);
750EXPORT_SYMBOL(rtc_get_time);
751
752unsigned long long sched_clock(void)
753{
754 return (unsigned long long)jiffies*(1000000000/HZ);
755}
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
new file mode 100644
index 000000000000..56c36e42e0a6
--- /dev/null
+++ b/arch/mips/kernel/traps.c
@@ -0,0 +1,1062 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1994 - 1999, 2000, 01 Ralf Baechle
7 * Copyright (C) 1995, 1996 Paul M. Antoine
8 * Copyright (C) 1998 Ulf Carlsson
9 * Copyright (C) 1999 Silicon Graphics, Inc.
10 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
11 * Copyright (C) 2000, 01 MIPS Technologies, Inc.
12 * Copyright (C) 2002, 2003, 2004 Maciej W. Rozycki
13 */
14#include <linux/config.h>
15#include <linux/init.h>
16#include <linux/mm.h>
17#include <linux/module.h>
18#include <linux/sched.h>
19#include <linux/smp.h>
20#include <linux/smp_lock.h>
21#include <linux/spinlock.h>
22#include <linux/kallsyms.h>
23
24#include <asm/bootinfo.h>
25#include <asm/branch.h>
26#include <asm/break.h>
27#include <asm/cpu.h>
28#include <asm/fpu.h>
29#include <asm/module.h>
30#include <asm/pgtable.h>
31#include <asm/ptrace.h>
32#include <asm/sections.h>
33#include <asm/system.h>
34#include <asm/tlbdebug.h>
35#include <asm/traps.h>
36#include <asm/uaccess.h>
37#include <asm/mmu_context.h>
38#include <asm/watch.h>
39#include <asm/types.h>
40
41extern asmlinkage void handle_tlbm(void);
42extern asmlinkage void handle_tlbl(void);
43extern asmlinkage void handle_tlbs(void);
44extern asmlinkage void handle_adel(void);
45extern asmlinkage void handle_ades(void);
46extern asmlinkage void handle_ibe(void);
47extern asmlinkage void handle_dbe(void);
48extern asmlinkage void handle_sys(void);
49extern asmlinkage void handle_bp(void);
50extern asmlinkage void handle_ri(void);
51extern asmlinkage void handle_cpu(void);
52extern asmlinkage void handle_ov(void);
53extern asmlinkage void handle_tr(void);
54extern asmlinkage void handle_fpe(void);
55extern asmlinkage void handle_mdmx(void);
56extern asmlinkage void handle_watch(void);
57extern asmlinkage void handle_mcheck(void);
58extern asmlinkage void handle_reserved(void);
59
60extern int fpu_emulator_cop1Handler(int xcptno, struct pt_regs *xcp,
61 struct mips_fpu_soft_struct *ctx);
62
63void (*board_be_init)(void);
64int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
65
66/*
67 * This constant is for searching for possible module text segments.
68 * MODULE_RANGE is a guess of how much space is likely to be vmalloced.
69 */
70#define MODULE_RANGE (8*1024*1024)
71
72/*
73 * This routine abuses get_user()/put_user() to reference pointers
74 * with at least a bit of error checking ...
75 */
76void show_stack(struct task_struct *task, unsigned long *sp)
77{
78 const int field = 2 * sizeof(unsigned long);
79 long stackdata;
80 int i;
81
82 if (!sp) {
83 if (task && task != current)
84 sp = (unsigned long *) task->thread.reg29;
85 else
86 sp = (unsigned long *) &sp;
87 }
88
89 printk("Stack :");
90 i = 0;
91 while ((unsigned long) sp & (PAGE_SIZE - 1)) {
92 if (i && ((i % (64 / field)) == 0))
93 printk("\n ");
94 if (i > 39) {
95 printk(" ...");
96 break;
97 }
98
99 if (__get_user(stackdata, sp++)) {
100 printk(" (Bad stack address)");
101 break;
102 }
103
104 printk(" %0*lx", field, stackdata);
105 i++;
106 }
107 printk("\n");
108}
109
110void show_trace(struct task_struct *task, unsigned long *stack)
111{
112 const int field = 2 * sizeof(unsigned long);
113 unsigned long addr;
114
115 if (!stack) {
116 if (task && task != current)
117 stack = (unsigned long *) task->thread.reg29;
118 else
119 stack = (unsigned long *) &stack;
120 }
121
122 printk("Call Trace:");
123#ifdef CONFIG_KALLSYMS
124 printk("\n");
125#endif
126 while (!kstack_end(stack)) {
127 addr = *stack++;
128 if (__kernel_text_address(addr)) {
129 printk(" [<%0*lx>] ", field, addr);
130 print_symbol("%s\n", addr);
131 }
132 }
133 printk("\n");
134}
135
136/*
137 * The architecture-independent dump_stack generator
138 */
139void dump_stack(void)
140{
141 unsigned long stack;
142
143 show_trace(current, &stack);
144}
145
146EXPORT_SYMBOL(dump_stack);
147
148void show_code(unsigned int *pc)
149{
150 long i;
151
152 printk("\nCode:");
153
154 for(i = -3 ; i < 6 ; i++) {
155 unsigned int insn;
156 if (__get_user(insn, pc + i)) {
157 printk(" (Bad address in epc)\n");
158 break;
159 }
160 printk("%c%08x%c", (i?' ':'<'), insn, (i?' ':'>'));
161 }
162}
163
164void show_regs(struct pt_regs *regs)
165{
166 const int field = 2 * sizeof(unsigned long);
167 unsigned int cause = regs->cp0_cause;
168 int i;
169
170 printk("Cpu %d\n", smp_processor_id());
171
172 /*
173 * Saved main processor registers
174 */
175 for (i = 0; i < 32; ) {
176 if ((i % 4) == 0)
177 printk("$%2d :", i);
178 if (i == 0)
179 printk(" %0*lx", field, 0UL);
180 else if (i == 26 || i == 27)
181 printk(" %*s", field, "");
182 else
183 printk(" %0*lx", field, regs->regs[i]);
184
185 i++;
186 if ((i % 4) == 0)
187 printk("\n");
188 }
189
190 printk("Hi : %0*lx\n", field, regs->hi);
191 printk("Lo : %0*lx\n", field, regs->lo);
192
193 /*
194 * Saved cp0 registers
195 */
196 printk("epc : %0*lx ", field, regs->cp0_epc);
197 print_symbol("%s ", regs->cp0_epc);
198 printk(" %s\n", print_tainted());
199 printk("ra : %0*lx ", field, regs->regs[31]);
200 print_symbol("%s\n", regs->regs[31]);
201
202 printk("Status: %08x ", (uint32_t) regs->cp0_status);
203
204 if (regs->cp0_status & ST0_KX)
205 printk("KX ");
206 if (regs->cp0_status & ST0_SX)
207 printk("SX ");
208 if (regs->cp0_status & ST0_UX)
209 printk("UX ");
210 switch (regs->cp0_status & ST0_KSU) {
211 case KSU_USER:
212 printk("USER ");
213 break;
214 case KSU_SUPERVISOR:
215 printk("SUPERVISOR ");
216 break;
217 case KSU_KERNEL:
218 printk("KERNEL ");
219 break;
220 default:
221 printk("BAD_MODE ");
222 break;
223 }
224 if (regs->cp0_status & ST0_ERL)
225 printk("ERL ");
226 if (regs->cp0_status & ST0_EXL)
227 printk("EXL ");
228 if (regs->cp0_status & ST0_IE)
229 printk("IE ");
230 printk("\n");
231
232 printk("Cause : %08x\n", cause);
233
234 cause = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
235 if (1 <= cause && cause <= 5)
236 printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);
237
238 printk("PrId : %08x\n", read_c0_prid());
239}
240
241void show_registers(struct pt_regs *regs)
242{
243 show_regs(regs);
244 print_modules();
245 printk("Process %s (pid: %d, threadinfo=%p, task=%p)\n",
246 current->comm, current->pid, current_thread_info(), current);
247 show_stack(current, (long *) regs->regs[29]);
248 show_trace(current, (long *) regs->regs[29]);
249 show_code((unsigned int *) regs->cp0_epc);
250 printk("\n");
251}
252
253static DEFINE_SPINLOCK(die_lock);
254
255NORET_TYPE void __die(const char * str, struct pt_regs * regs,
256 const char * file, const char * func, unsigned long line)
257{
258 static int die_counter;
259
260 console_verbose();
261 spin_lock_irq(&die_lock);
262 printk("%s", str);
263 if (file && func)
264 printk(" in %s:%s, line %ld", file, func, line);
265 printk("[#%d]:\n", ++die_counter);
266 show_registers(regs);
267 spin_unlock_irq(&die_lock);
268 do_exit(SIGSEGV);
269}
270
271void __die_if_kernel(const char * str, struct pt_regs * regs,
272 const char * file, const char * func, unsigned long line)
273{
274 if (!user_mode(regs))
275 __die(str, regs, file, func, line);
276}
277
278extern const struct exception_table_entry __start___dbe_table[];
279extern const struct exception_table_entry __stop___dbe_table[];
280
281void __declare_dbe_table(void)
282{
283 __asm__ __volatile__(
284 ".section\t__dbe_table,\"a\"\n\t"
285 ".previous"
286 );
287}
288
289/* Given an address, look for it in the exception tables. */
290static const struct exception_table_entry *search_dbe_tables(unsigned long addr)
291{
292 const struct exception_table_entry *e;
293
294 e = search_extable(__start___dbe_table, __stop___dbe_table - 1, addr);
295 if (!e)
296 e = search_module_dbetables(addr);
297 return e;
298}
299
300asmlinkage void do_be(struct pt_regs *regs)
301{
302 const int field = 2 * sizeof(unsigned long);
303 const struct exception_table_entry *fixup = NULL;
304 int data = regs->cp0_cause & 4;
305 int action = MIPS_BE_FATAL;
306
307 /* XXX For now. Fixme, this searches the wrong table ... */
308 if (data && !user_mode(regs))
309 fixup = search_dbe_tables(exception_epc(regs));
310
311 if (fixup)
312 action = MIPS_BE_FIXUP;
313
314 if (board_be_handler)
315 action = board_be_handler(regs, fixup != 0);
316
317 switch (action) {
318 case MIPS_BE_DISCARD:
319 return;
320 case MIPS_BE_FIXUP:
321 if (fixup) {
322 regs->cp0_epc = fixup->nextinsn;
323 return;
324 }
325 break;
326 default:
327 break;
328 }
329
330 /*
331 * Assume it would be too dangerous to continue ...
332 */
333 printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
334 data ? "Data" : "Instruction",
335 field, regs->cp0_epc, field, regs->regs[31]);
336 die_if_kernel("Oops", regs);
337 force_sig(SIGBUS, current);
338}
339
340static inline int get_insn_opcode(struct pt_regs *regs, unsigned int *opcode)
341{
342 unsigned int *epc;
343
344 epc = (unsigned int *) regs->cp0_epc +
345 ((regs->cp0_cause & CAUSEF_BD) != 0);
346 if (!get_user(*opcode, epc))
347 return 0;
348
349 force_sig(SIGSEGV, current);
350 return 1;
351}
352
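One subtlety in get_insn_opcode() above: adding "(cause & CAUSEF_BD) != 0" to an unsigned int pointer advances by one whole 4-byte instruction, so when the exception hit in a branch delay slot the opcode is fetched from EPC + 4 rather than EPC. A stand-alone sketch of that pointer arithmetic (CAUSEF_BD_DEMO stands in for the real mask):

        #include <stdint.h>
        #include <stdio.h>

        #define CAUSEF_BD_DEMO (1u << 31)   /* stand-in for the real CAUSEF_BD */

        static uintptr_t faulting_insn_addr(uintptr_t epc, uint32_t cause)
        {
                unsigned int *p = (unsigned int *)epc;

                /* Pointer arithmetic: +1 means +sizeof(unsigned int) bytes. */
                return (uintptr_t)(p + ((cause & CAUSEF_BD_DEMO) != 0));
        }

        int main(void)
        {
                printf("not in delay slot: %#lx\n",
                       (unsigned long)faulting_insn_addr(0x80001000, 0));
                printf("in delay slot:     %#lx\n",
                       (unsigned long)faulting_insn_addr(0x80001000,
                                                         CAUSEF_BD_DEMO));
                return 0;
        }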
353/*
354 * ll/sc emulation
355 */
356
357#define OPCODE 0xfc000000
358#define BASE 0x03e00000
359#define RT 0x001f0000
360#define OFFSET 0x0000ffff
361#define LL 0xc0000000
362#define SC 0xe0000000
363
364/*
365 * The ll_bit is cleared by r*_switch.S
366 */
367
368unsigned long ll_bit;
369
370static struct task_struct *ll_task = NULL;
371
372static inline void simulate_ll(struct pt_regs *regs, unsigned int opcode)
373{
374 unsigned long value, *vaddr;
375 long offset;
376 int signal = 0;
377
378 /*
379 * analyse the ll instruction that just caused a ri exception
380 * and put the referenced address to addr.
381 */
382
383 /* sign extend offset */
384 offset = opcode & OFFSET;
385 offset <<= 16;
386 offset >>= 16;
387
388 vaddr = (unsigned long *)((long)(regs->regs[(opcode & BASE) >> 21]) + offset);
389
390 if ((unsigned long)vaddr & 3) {
391 signal = SIGBUS;
392 goto sig;
393 }
394 if (get_user(value, vaddr)) {
395 signal = SIGSEGV;
396 goto sig;
397 }
398
399 preempt_disable();
400
401 if (ll_task == NULL || ll_task == current) {
402 ll_bit = 1;
403 } else {
404 ll_bit = 0;
405 }
406 ll_task = current;
407
408 preempt_enable();
409
410 regs->regs[(opcode & RT) >> 16] = value;
411
412 compute_return_epc(regs);
413 return;
414
415sig:
416 force_sig(signal, current);
417}
418
419static inline void simulate_sc(struct pt_regs *regs, unsigned int opcode)
420{
421 unsigned long *vaddr, reg;
422 long offset;
423 int signal = 0;
424
425 /*
426 * analyse the sc instruction that just caused a ri exception
427 * and put the referenced address to addr.
428 */
429
430 /* sign extend offset */
431 offset = opcode & OFFSET;
432 offset <<= 16;
433 offset >>= 16;
434
435 vaddr = (unsigned long *)((long)(regs->regs[(opcode & BASE) >> 21]) + offset);
436 reg = (opcode & RT) >> 16;
437
438 if ((unsigned long)vaddr & 3) {
439 signal = SIGBUS;
440 goto sig;
441 }
442
443 preempt_disable();
444
445 if (ll_bit == 0 || ll_task != current) {
446 regs->regs[reg] = 0;
447 preempt_enable();
448 compute_return_epc(regs);
449 return;
450 }
451
452 preempt_enable();
453
454 if (put_user(regs->regs[reg], vaddr)) {
455 signal = SIGSEGV;
456 goto sig;
457 }
458
459 regs->regs[reg] = 1;
460
461 compute_return_epc(regs);
462 return;
463
464sig:
465 force_sig(signal, current);
466}
467
468/*
469 * ll uses the opcode of lwc0 and sc uses the opcode of swc0. That is both
470 * opcodes are supposed to result in coprocessor unusable exceptions if
471 * executed on ll/sc-less processors. That's the theory. In practice a
472 * few processors such as NEC's VR4100 throw reserved instruction exceptions
473 * instead, so we're doing the emulation thing in both exception handlers.
474 */
475static inline int simulate_llsc(struct pt_regs *regs)
476{
477 unsigned int opcode;
478
479 if (unlikely(get_insn_opcode(regs, &opcode)))
480 return -EFAULT;
481
482 if ((opcode & OPCODE) == LL) {
483 simulate_ll(regs, opcode);
484 return 0;
485 }
486 if ((opcode & OPCODE) == SC) {
487 simulate_sc(regs, opcode);
488 return 0;
489 }
490
491 return -EFAULT; /* Strange things going on ... */
492}
493
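simulate_llsc() only has to pick the instruction word apart: the top six bits distinguish ll from sc, and the base register, target register and signed 16-bit offset sit in the fixed fields of the I-type encoding, which is what the OPCODE/BASE/RT/OFFSET masks above select. A small stand-alone decoder using the same masks (the sample instruction word is made up and is meant to read as "ll $2, -8($29)"):

        #include <stdint.h>
        #include <stdio.h>

        #define OPCODE 0xfc000000
        #define BASE   0x03e00000
        #define RT     0x001f0000
        #define OFFSET 0x0000ffff
        #define LL     0xc0000000
        #define SC     0xe0000000

        int main(void)
        {
                uint32_t insn = LL | (29u << 21) | (2u << 16) | (0xfff8u & OFFSET);

                /* Sign-extend the 16-bit immediate; the kernel does the same
                 * thing with a shift-left/shift-right pair on a long. */
                int32_t offset = (int16_t)(insn & OFFSET);

                printf("opcode %s, base $%u, rt $%u, offset %d\n",
                       (insn & OPCODE) == LL ? "ll" : "sc",
                       (insn & BASE) >> 21, (insn & RT) >> 16, offset);
                return 0;
        }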
494asmlinkage void do_ov(struct pt_regs *regs)
495{
496 siginfo_t info;
497
498 info.si_code = FPE_INTOVF;
499 info.si_signo = SIGFPE;
500 info.si_errno = 0;
501 info.si_addr = (void *)regs->cp0_epc;
502 force_sig_info(SIGFPE, &info, current);
503}
504
505/*
506 * XXX Delayed fp exceptions when doing a lazy ctx switch XXX
507 */
508asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
509{
510 if (fcr31 & FPU_CSR_UNI_X) {
511 int sig;
512
513 preempt_disable();
514
515 /*
516 * Unimplemented operation exception. If we've got the full
517 * software emulator on-board, let's use it...
518 *
519 * Force FPU to dump state into task/thread context. We're
520 * moving a lot of data here for what is probably a single
521 * instruction, but the alternative is to pre-decode the FP
522 * register operands before invoking the emulator, which seems
523 * a bit extreme for what should be an infrequent event.
524 */
525 save_fp(current);
526
527 /* Run the emulator */
528 sig = fpu_emulator_cop1Handler (0, regs,
529 &current->thread.fpu.soft);
530
531 /*
532 * We can't allow the emulated instruction to leave any of
533 * the cause bit set in $fcr31.
534 */
535 current->thread.fpu.soft.fcr31 &= ~FPU_CSR_ALL_X;
536
537 /* Restore the hardware register state */
538 restore_fp(current);
539
540 preempt_enable();
541
542 /* If something went wrong, signal */
543 if (sig)
544 force_sig(sig, current);
545
546 return;
547 }
548
549 force_sig(SIGFPE, current);
550}
551
552asmlinkage void do_bp(struct pt_regs *regs)
553{
554 unsigned int opcode, bcode;
555 siginfo_t info;
556
557 die_if_kernel("Break instruction in kernel code", regs);
558
559 if (get_insn_opcode(regs, &opcode))
560 return;
561
562 /*
563	 * There is an ancient bug in MIPS assemblers: the break
564	 * code starts at bit 16 instead of at bit 6 in the opcode.
565	 * Gas is bug-compatible, but not always, grrr...
566	 * We handle both cases with a simple heuristic. --macro
567 */
568 bcode = ((opcode >> 6) & ((1 << 20) - 1));
569 if (bcode < (1 << 10))
570 bcode <<= 10;
571
572 /*
573 * (A short test says that IRIX 5.3 sends SIGTRAP for all break
574 * insns, even for break codes that indicate arithmetic failures.
575 * Weird ...)
576 * But should we continue the brokenness??? --macro
577 */
578 switch (bcode) {
579 case BRK_OVERFLOW << 10:
580 case BRK_DIVZERO << 10:
581 if (bcode == (BRK_DIVZERO << 10))
582 info.si_code = FPE_INTDIV;
583 else
584 info.si_code = FPE_INTOVF;
585 info.si_signo = SIGFPE;
586 info.si_errno = 0;
587 info.si_addr = (void *)regs->cp0_epc;
588 force_sig_info(SIGFPE, &info, current);
589 break;
590 default:
591 force_sig(SIGTRAP, current);
592 }
593}
594
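The switch in do_bp() therefore always sees the break code as "code << 10", whichever of the two historical encodings the assembler used. A compact demonstration of the normalization (BRK_DIVZERO_DEMO stands in for the kernel's BRK_DIVZERO constant):

        #include <stdint.h>
        #include <stdio.h>

        #define BRK_DIVZERO_DEMO 7      /* stand-in for the real constant */

        /* The heuristic from do_bp(): extract the 20-bit field at bit 6 and
         * shift small (old-convention) codes up to where gas puts them. */
        static unsigned int break_code(uint32_t opcode)
        {
                unsigned int bcode = (opcode >> 6) & ((1 << 20) - 1);

                if (bcode < (1 << 10))
                        bcode <<= 10;
                return bcode;
        }

        int main(void)
        {
                /* "break" is SPECIAL function 0xd; encode code 7 both ways. */
                uint32_t old_style = 0x0000000du | (BRK_DIVZERO_DEMO << 6);
                uint32_t gas_style = 0x0000000du | (BRK_DIVZERO_DEMO << 16);

                printf("old-style -> %#x, gas-style -> %#x\n",
                       break_code(old_style), break_code(gas_style));
                return 0;
        }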
595asmlinkage void do_tr(struct pt_regs *regs)
596{
597 unsigned int opcode, tcode = 0;
598 siginfo_t info;
599
600 die_if_kernel("Trap instruction in kernel code", regs);
601
602 if (get_insn_opcode(regs, &opcode))
603 return;
604
605 /* Immediate versions don't provide a code. */
606 if (!(opcode & OPCODE))
607 tcode = ((opcode >> 6) & ((1 << 10) - 1));
608
609 /*
610 * (A short test says that IRIX 5.3 sends SIGTRAP for all trap
611 * insns, even for trap codes that indicate arithmetic failures.
612 * Weird ...)
613 * But should we continue the brokenness??? --macro
614 */
615 switch (tcode) {
616 case BRK_OVERFLOW:
617 case BRK_DIVZERO:
618 if (tcode == BRK_DIVZERO)
619 info.si_code = FPE_INTDIV;
620 else
621 info.si_code = FPE_INTOVF;
622 info.si_signo = SIGFPE;
623 info.si_errno = 0;
624 info.si_addr = (void *)regs->cp0_epc;
625 force_sig_info(SIGFPE, &info, current);
626 break;
627 default:
628 force_sig(SIGTRAP, current);
629 }
630}
631
632asmlinkage void do_ri(struct pt_regs *regs)
633{
634 die_if_kernel("Reserved instruction in kernel code", regs);
635
636 if (!cpu_has_llsc)
637 if (!simulate_llsc(regs))
638 return;
639
640 force_sig(SIGILL, current);
641}
642
643asmlinkage void do_cpu(struct pt_regs *regs)
644{
645 unsigned int cpid;
646
647 die_if_kernel("do_cpu invoked from kernel context!", regs);
648
649 cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;
650
651 switch (cpid) {
652 case 0:
653 if (cpu_has_llsc)
654 break;
655
656 if (!simulate_llsc(regs))
657 return;
658 break;
659
660 case 1:
661 preempt_disable();
662
663 own_fpu();
664 if (used_math()) { /* Using the FPU again. */
665 restore_fp(current);
666 } else { /* First time FPU user. */
667 init_fpu();
668 set_used_math();
669 }
670
671 if (!cpu_has_fpu) {
672 int sig = fpu_emulator_cop1Handler(0, regs,
673 &current->thread.fpu.soft);
674 if (sig)
675 force_sig(sig, current);
676 }
677
678 preempt_enable();
679
680 return;
681
682 case 2:
683 case 3:
684 break;
685 }
686
687 force_sig(SIGILL, current);
688}
689
690asmlinkage void do_mdmx(struct pt_regs *regs)
691{
692 force_sig(SIGILL, current);
693}
694
695asmlinkage void do_watch(struct pt_regs *regs)
696{
697 /*
698 * We use the watch exception where available to detect stack
699 * overflows.
700 */
701 dump_tlb_all();
702 show_regs(regs);
703 panic("Caught WATCH exception - probably caused by stack overflow.");
704}
705
706asmlinkage void do_mcheck(struct pt_regs *regs)
707{
708 show_regs(regs);
709 dump_tlb_all();
710 /*
711 * Some chips may have other causes of machine check (e.g. SB1
712 * graduation timer)
713 */
714 panic("Caught Machine Check exception - %scaused by multiple "
715 "matching entries in the TLB.",
716 (regs->cp0_status & ST0_TS) ? "" : "not ");
717}
718
719asmlinkage void do_reserved(struct pt_regs *regs)
720{
721 /*
722 * Game over - no way to handle this if it ever occurs. Most probably
723 * caused by a new unknown CPU type or by another deadly
724 * hardware/software error.
725 */
726 show_regs(regs);
727 panic("Caught reserved exception %ld - should not happen.",
728 (regs->cp0_cause & 0x7f) >> 2);
729}
730
731/*
732 * Some MIPS CPUs can enable/disable cache parity detection, but they
733 * do it in different ways.
734 */
735static inline void parity_protection_init(void)
736{
737 switch (current_cpu_data.cputype) {
738 case CPU_24K:
739 /* 24K cache parity not currently implemented in FPGA */
740 printk(KERN_INFO "Disable cache parity protection for "
741 "MIPS 24K CPU.\n");
742 write_c0_ecc(read_c0_ecc() & ~0x80000000);
743 break;
744 case CPU_5KC:
745 /* Set the PE bit (bit 31) in the c0_ecc register. */
746 printk(KERN_INFO "Enable cache parity protection for "
747 "MIPS 5KC/24K CPUs.\n");
748 write_c0_ecc(read_c0_ecc() | 0x80000000);
749 break;
750 case CPU_20KC:
751 case CPU_25KF:
752 /* Clear the DE bit (bit 16) in the c0_status register. */
753 printk(KERN_INFO "Enable cache parity protection for "
754 "MIPS 20KC/25KF CPUs.\n");
755 clear_c0_status(ST0_DE);
756 break;
757 default:
758 break;
759 }
760}
761
762asmlinkage void cache_parity_error(void)
763{
764 const int field = 2 * sizeof(unsigned long);
765 unsigned int reg_val;
766
767 /* For the moment, report the problem and hang. */
768 printk("Cache error exception:\n");
769 printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
770 reg_val = read_c0_cacheerr();
771 printk("c0_cacheerr == %08x\n", reg_val);
772
773 printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
774 reg_val & (1<<30) ? "secondary" : "primary",
775 reg_val & (1<<31) ? "data" : "insn");
776 printk("Error bits: %s%s%s%s%s%s%s\n",
777 reg_val & (1<<29) ? "ED " : "",
778 reg_val & (1<<28) ? "ET " : "",
779 reg_val & (1<<26) ? "EE " : "",
780 reg_val & (1<<25) ? "EB " : "",
781 reg_val & (1<<24) ? "EI " : "",
782 reg_val & (1<<23) ? "E1 " : "",
783 reg_val & (1<<22) ? "E0 " : "");
784 printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));
785
786#if defined(CONFIG_CPU_MIPS32) || defined (CONFIG_CPU_MIPS64)
787 if (reg_val & (1<<22))
788 printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0());
789
790 if (reg_val & (1<<23))
791 printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1());
792#endif
793
794 panic("Can't handle the cache error!");
795}
796
797/*
798 * SDBBP EJTAG debug exception handler.
799 * We skip the instruction and return to the next instruction.
800 */
801void ejtag_exception_handler(struct pt_regs *regs)
802{
803 const int field = 2 * sizeof(unsigned long);
804 unsigned long depc, old_epc;
805 unsigned int debug;
806
807 printk("SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
808 depc = read_c0_depc();
809 debug = read_c0_debug();
810 printk("c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug);
811 if (debug & 0x80000000) {
812 /*
813 * In branch delay slot.
814 * We cheat a little bit here and use EPC to calculate the
815 * debug return address (DEPC). EPC is restored after the
816 * calculation.
817 */
818 old_epc = regs->cp0_epc;
819 regs->cp0_epc = depc;
820 __compute_return_epc(regs);
821 depc = regs->cp0_epc;
822 regs->cp0_epc = old_epc;
823 } else
824 depc += 4;
825 write_c0_depc(depc);
826
827#if 0
828 printk("\n\n----- Enable EJTAG single stepping ----\n\n");
829 write_c0_debug(debug | 0x100);
830#endif
831}
832
833/*
834 * NMI exception handler.
835 */
836void nmi_exception_handler(struct pt_regs *regs)
837{
838 printk("NMI taken!!!!\n");
839 die("NMI", regs);
840 while(1) ;
841}
842
843unsigned long exception_handlers[32];
844
845/*
846 * As a side effect of the way this is implemented we're limited
847 * to interrupt handlers in the address range from
848 * KSEG0 <= x < KSEG0 + 256MB on the Nevada. Oh well ...
849 */
850void *set_except_vector(int n, void *addr)
851{
852 unsigned long handler = (unsigned long) addr;
853 unsigned long old_handler = exception_handlers[n];
854
855 exception_handlers[n] = handler;
856 if (n == 0 && cpu_has_divec) {
857 *(volatile u32 *)(CAC_BASE + 0x200) = 0x08000000 |
858 (0x03ffffff & (handler >> 2));
859 flush_icache_range(CAC_BASE + 0x200, CAC_BASE + 0x204);
860 }
861 return (void *)old_handler;
862}
863
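The magic constant written into CAC_BASE + 0x200 is simply a MIPS jump instruction assembled by hand, which is also where the "KSEG0 + 256MB" restriction mentioned above comes from: a j instruction only carries 26 bits of word address. A sketch of the encoding (the example handler address is invented):

        #include <stdint.h>
        #include <stdio.h>

        /* "j handler": opcode 000010 in the top six bits, the word address
         * (handler >> 2) in the low 26 bits, as in set_except_vector(). */
        static uint32_t encode_j(uint32_t handler)
        {
                return 0x08000000u | (0x03ffffffu & (handler >> 2));
        }

        int main(void)
        {
                printf("j 0x80102040 encodes as %#010x\n",
                       encode_j(0x80102040u));
                return 0;
        }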
864/*
865 * This is used by native signal handling
866 */
867asmlinkage int (*save_fp_context)(struct sigcontext *sc);
868asmlinkage int (*restore_fp_context)(struct sigcontext *sc);
869
870extern asmlinkage int _save_fp_context(struct sigcontext *sc);
871extern asmlinkage int _restore_fp_context(struct sigcontext *sc);
872
873extern asmlinkage int fpu_emulator_save_context(struct sigcontext *sc);
874extern asmlinkage int fpu_emulator_restore_context(struct sigcontext *sc);
875
876static inline void signal_init(void)
877{
878 if (cpu_has_fpu) {
879 save_fp_context = _save_fp_context;
880 restore_fp_context = _restore_fp_context;
881 } else {
882 save_fp_context = fpu_emulator_save_context;
883 restore_fp_context = fpu_emulator_restore_context;
884 }
885}
886
887#ifdef CONFIG_MIPS32_COMPAT
888
889/*
890 * This is used by 32-bit signal stuff on the 64-bit kernel
891 */
892asmlinkage int (*save_fp_context32)(struct sigcontext32 *sc);
893asmlinkage int (*restore_fp_context32)(struct sigcontext32 *sc);
894
895extern asmlinkage int _save_fp_context32(struct sigcontext32 *sc);
896extern asmlinkage int _restore_fp_context32(struct sigcontext32 *sc);
897
898extern asmlinkage int fpu_emulator_save_context32(struct sigcontext32 *sc);
899extern asmlinkage int fpu_emulator_restore_context32(struct sigcontext32 *sc);
900
901static inline void signal32_init(void)
902{
903 if (cpu_has_fpu) {
904 save_fp_context32 = _save_fp_context32;
905 restore_fp_context32 = _restore_fp_context32;
906 } else {
907 save_fp_context32 = fpu_emulator_save_context32;
908 restore_fp_context32 = fpu_emulator_restore_context32;
909 }
910}
911#endif
912
913extern void cpu_cache_init(void);
914extern void tlb_init(void);
915
916void __init per_cpu_trap_init(void)
917{
918 unsigned int cpu = smp_processor_id();
919 unsigned int status_set = ST0_CU0;
920
921 /*
922 * Disable coprocessors and select 32-bit or 64-bit addressing
923 * and the 16/32 or 32/32 FPR register model. Reset the BEV
924 * flag that some firmware may have left set and the TS bit (for
925 * IP27). Set XX for ISA IV code to work.
926 */
927#ifdef CONFIG_MIPS64
928 status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
929#endif
930 if (current_cpu_data.isa_level == MIPS_CPU_ISA_IV)
931 status_set |= ST0_XX;
932 change_c0_status(ST0_CU|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
933 status_set);
934
935 /*
936 * Some MIPS CPUs have a dedicated interrupt vector which reduces the
937 * interrupt processing overhead. Use it where available.
938 */
939 if (cpu_has_divec)
940 set_c0_cause(CAUSEF_IV);
941
942 cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
943 TLBMISS_HANDLER_SETUP();
944
945 atomic_inc(&init_mm.mm_count);
946 current->active_mm = &init_mm;
947 BUG_ON(current->mm);
948 enter_lazy_tlb(&init_mm, current);
949
950 cpu_cache_init();
951 tlb_init();
952}
953
954void __init trap_init(void)
955{
956 extern char except_vec3_generic, except_vec3_r4000;
957 extern char except_vec_ejtag_debug;
958 extern char except_vec4;
959 unsigned long i;
960
961 per_cpu_trap_init();
962
963 /*
964 * Copy the generic exception handlers to their final destination.
965 * This will be overridden later as suitable for a particular
966 * configuration.
967 */
968 memcpy((void *)(CAC_BASE + 0x180), &except_vec3_generic, 0x80);
969
970 /*
971 * Setup default vectors
972 */
973 for (i = 0; i <= 31; i++)
974 set_except_vector(i, handle_reserved);
975
976 /*
977 * Copy the EJTAG debug exception vector handler code to its final
978 * destination.
979 */
980 if (cpu_has_ejtag)
981 memcpy((void *)(CAC_BASE + 0x300), &except_vec_ejtag_debug, 0x80);
982
983 /*
984 * Only some CPUs have the watch exceptions.
985 */
986 if (cpu_has_watch)
987 set_except_vector(23, handle_watch);
988
989 /*
990 * Some MIPS CPUs have a dedicated interrupt vector which reduces the
991 * interrupt processing overhead. Use it where available.
992 */
993 if (cpu_has_divec)
994 memcpy((void *)(CAC_BASE + 0x200), &except_vec4, 0x8);
995
996 /*
997 * Some CPUs can enable/disable cache parity detection, but they do
998 * it in different ways.
999 */
1000 parity_protection_init();
1001
1002 /*
1003 * The Data Bus Errors / Instruction Bus Errors are signaled
1004 * by external hardware. Therefore these two exceptions
1005 * may have board specific handlers.
1006 */
1007 if (board_be_init)
1008 board_be_init();
1009
1010 set_except_vector(1, handle_tlbm);
1011 set_except_vector(2, handle_tlbl);
1012 set_except_vector(3, handle_tlbs);
1013
1014 set_except_vector(4, handle_adel);
1015 set_except_vector(5, handle_ades);
1016
1017 set_except_vector(6, handle_ibe);
1018 set_except_vector(7, handle_dbe);
1019
1020 set_except_vector(8, handle_sys);
1021 set_except_vector(9, handle_bp);
1022 set_except_vector(10, handle_ri);
1023 set_except_vector(11, handle_cpu);
1024 set_except_vector(12, handle_ov);
1025 set_except_vector(13, handle_tr);
1026 set_except_vector(22, handle_mdmx);
1027
1028 if (cpu_has_fpu && !cpu_has_nofpuex)
1029 set_except_vector(15, handle_fpe);
1030
1031 if (cpu_has_mcheck)
1032 set_except_vector(24, handle_mcheck);
1033
1034 if (cpu_has_vce)
1035 /* Special exception: R4[04]00 uses also the divec space. */
1036 memcpy((void *)(CAC_BASE + 0x180), &except_vec3_r4000, 0x100);
1037 else if (cpu_has_4kex)
1038 memcpy((void *)(CAC_BASE + 0x180), &except_vec3_generic, 0x80);
1039 else
1040 memcpy((void *)(CAC_BASE + 0x080), &except_vec3_generic, 0x80);
1041
1042 if (current_cpu_data.cputype == CPU_R6000 ||
1043 current_cpu_data.cputype == CPU_R6000A) {
1044 /*
1045 * The R6000 is the only R-series CPU that features a machine
1046 * check exception (similar to the R4000 cache error) and
1047 * unaligned ldc1/sdc1 exception. The handlers have not been
1048 * written yet. Well, anyway there is no R6000 machine on the
1049 * current list of targets for Linux/MIPS.
1050 * (Duh, crap, there is someone with a triple R6k machine)
1051 */
1052 //set_except_vector(14, handle_mc);
1053 //set_except_vector(15, handle_ndc);
1054 }
1055
1056 signal_init();
1057#ifdef CONFIG_MIPS32_COMPAT
1058 signal32_init();
1059#endif
1060
1061 flush_icache_range(CAC_BASE, CAC_BASE + 0x400);
1062}
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
new file mode 100644
index 000000000000..3f24a1d45865
--- /dev/null
+++ b/arch/mips/kernel/unaligned.c
@@ -0,0 +1,550 @@
1/*
2 * Handle unaligned accesses by emulation.
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle
9 * Copyright (C) 1999 Silicon Graphics, Inc.
10 *
11 * This file contains exception handler for address error exception with the
12 * special capability to execute faulting instructions in software. The
13 * handler does not try to handle the case when the program counter points
14 * to an address not aligned to a word boundary.
15 *
16 * Putting data at unaligned addresses is bad practice even on Intel, where
17 * only performance is affected. Much worse is that such code is non-
18 * portable. Because several programs die on MIPS due to alignment
19 * problems, I decided to implement this handler anyway, though I originally
20 * didn't intend to do this at all for user code.
21 *
22 * For now I enable fixing of address errors by default to make life easier.
23 * However, I intend to disable this sometime in the future when the alignment
24 * problems with user programs have been fixed. For programmers this is the
25 * right way to go.
26 *
27 * Fixing address errors is a per-process option. The option is inherited
28 * across fork(2) and execve(2) calls. If you really want to use the
29 * option in your user programs - I strongly discourage the use of the
30 * software emulation - use the following code in your userland stuff:
31 *
32 * #include <sys/sysmips.h>
33 *
34 * ...
35 * sysmips(MIPS_FIXADE, x);
36 * ...
37 *
38 * The argument x is 0 to disable software emulation and nonzero to enable it.
39 *
40 * Below is a little program to play around with this feature.
41 *
42 * #include <stdio.h>
43 * #include <sys/sysmips.h>
44 *
45 * struct foo {
46 * unsigned char bar[8];
47 * };
48 *
49 * main(int argc, char *argv[])
50 * {
51 * struct foo x = {0, 1, 2, 3, 4, 5, 6, 7};
52 * unsigned int *p = (unsigned int *) (x.bar + 3);
53 * int i;
54 *
55 * if (argc > 1)
56 * sysmips(MIPS_FIXADE, atoi(argv[1]));
57 *
58 * printf("*p = %08lx\n", *p);
59 *
60 * *p = 0xdeadface;
61 *
62 * for(i = 0; i <= 7; i++)
63 * printf("%02x ", x.bar[i]);
64 * printf("\n");
65 * }
66 *
67 * Coprocessor loads are not supported; I think this case is unimportant
68 * in practice.
69 *
70 * TODO: Handle ndc (attempted store to doubleword in uncached memory)
71 * exception for the R6000.
72 * A store crossing a page boundary might be executed only partially.
73 * Undo the partial store in this case.
74 */
75#include <linux/config.h>
76#include <linux/mm.h>
77#include <linux/module.h>
78#include <linux/signal.h>
79#include <linux/smp.h>
80#include <linux/smp_lock.h>
81
82#include <asm/asm.h>
83#include <asm/branch.h>
84#include <asm/byteorder.h>
85#include <asm/inst.h>
86#include <asm/uaccess.h>
87#include <asm/system.h>
88
89#define STR(x) __STR(x)
90#define __STR(x) #x
91
92#ifdef CONFIG_PROC_FS
93unsigned long unaligned_instructions;
94#endif
95
96static inline int emulate_load_store_insn(struct pt_regs *regs,
97 void *addr, unsigned long pc,
98 unsigned long **regptr, unsigned long *newvalue)
99{
100 union mips_instruction insn;
101 unsigned long value;
102 unsigned int res;
103
104 regs->regs[0] = 0;
105 *regptr=NULL;
106
107 /*
108 * This load never faults.
109 */
110 __get_user(insn.word, (unsigned int *)pc);
111
112 switch (insn.i_format.opcode) {
113 /*
114 * These are instructions that a compiler doesn't generate. We
115 * can assume therefore that the code is MIPS-aware and
116 * really buggy. Emulating these instructions would break the
117 * semantics anyway.
118 */
119 case ll_op:
120 case lld_op:
121 case sc_op:
122 case scd_op:
123
124 /*
125 * For these instructions the only way to create an address
126 * error is an attempted access to kernel/supervisor address
127 * space.
128 */
129 case ldl_op:
130 case ldr_op:
131 case lwl_op:
132 case lwr_op:
133 case sdl_op:
134 case sdr_op:
135 case swl_op:
136 case swr_op:
137 case lb_op:
138 case lbu_op:
139 case sb_op:
140 goto sigbus;
141
142 /*
143 * The remaining opcodes are the ones that are really of interest.
144 */
145 case lh_op:
146 if (!access_ok(VERIFY_READ, addr, 2))
147 goto sigbus;
148
149 __asm__ __volatile__ (".set\tnoat\n"
150#ifdef __BIG_ENDIAN
151 "1:\tlb\t%0, 0(%2)\n"
152 "2:\tlbu\t$1, 1(%2)\n\t"
153#endif
154#ifdef __LITTLE_ENDIAN
155 "1:\tlb\t%0, 1(%2)\n"
156 "2:\tlbu\t$1, 0(%2)\n\t"
157#endif
158 "sll\t%0, 0x8\n\t"
159 "or\t%0, $1\n\t"
160 "li\t%1, 0\n"
161 "3:\t.set\tat\n\t"
162 ".section\t.fixup,\"ax\"\n\t"
163 "4:\tli\t%1, %3\n\t"
164 "j\t3b\n\t"
165 ".previous\n\t"
166 ".section\t__ex_table,\"a\"\n\t"
167 STR(PTR)"\t1b, 4b\n\t"
168 STR(PTR)"\t2b, 4b\n\t"
169 ".previous"
170 : "=&r" (value), "=r" (res)
171 : "r" (addr), "i" (-EFAULT));
172 if (res)
173 goto fault;
174 *newvalue = value;
175 *regptr = &regs->regs[insn.i_format.rt];
176 break;
177
178 case lw_op:
179 if (!access_ok(VERIFY_READ, addr, 4))
180 goto sigbus;
181
182 __asm__ __volatile__ (
183#ifdef __BIG_ENDIAN
184 "1:\tlwl\t%0, (%2)\n"
185 "2:\tlwr\t%0, 3(%2)\n\t"
186#endif
187#ifdef __LITTLE_ENDIAN
188 "1:\tlwl\t%0, 3(%2)\n"
189 "2:\tlwr\t%0, (%2)\n\t"
190#endif
191 "li\t%1, 0\n"
192 "3:\t.section\t.fixup,\"ax\"\n\t"
193 "4:\tli\t%1, %3\n\t"
194 "j\t3b\n\t"
195 ".previous\n\t"
196 ".section\t__ex_table,\"a\"\n\t"
197 STR(PTR)"\t1b, 4b\n\t"
198 STR(PTR)"\t2b, 4b\n\t"
199 ".previous"
200 : "=&r" (value), "=r" (res)
201 : "r" (addr), "i" (-EFAULT));
202 if (res)
203 goto fault;
204 *newvalue = value;
205 *regptr = &regs->regs[insn.i_format.rt];
206 break;
207
208 case lhu_op:
209 if (!access_ok(VERIFY_READ, addr, 2))
210 goto sigbus;
211
212 __asm__ __volatile__ (
213 ".set\tnoat\n"
214#ifdef __BIG_ENDIAN
215 "1:\tlbu\t%0, 0(%2)\n"
216 "2:\tlbu\t$1, 1(%2)\n\t"
217#endif
218#ifdef __LITTLE_ENDIAN
219 "1:\tlbu\t%0, 1(%2)\n"
220 "2:\tlbu\t$1, 0(%2)\n\t"
221#endif
222 "sll\t%0, 0x8\n\t"
223 "or\t%0, $1\n\t"
224 "li\t%1, 0\n"
225 "3:\t.set\tat\n\t"
226 ".section\t.fixup,\"ax\"\n\t"
227 "4:\tli\t%1, %3\n\t"
228 "j\t3b\n\t"
229 ".previous\n\t"
230 ".section\t__ex_table,\"a\"\n\t"
231 STR(PTR)"\t1b, 4b\n\t"
232 STR(PTR)"\t2b, 4b\n\t"
233 ".previous"
234 : "=&r" (value), "=r" (res)
235 : "r" (addr), "i" (-EFAULT));
236 if (res)
237 goto fault;
238 *newvalue = value;
239 *regptr = &regs->regs[insn.i_format.rt];
240 break;
241
242 case lwu_op:
243#ifdef CONFIG_MIPS64
244 /*
245 * A 32-bit kernel might be running on a 64-bit processor. But
246 * if we're on a 32-bit processor and an i-cache incoherency
247 * or race makes us see a 64-bit instruction here the sdl/sdr
248 * would blow up, so for now we don't handle unaligned 64-bit
249 * instructions on 32-bit kernels.
250 */
251 if (!access_ok(VERIFY_READ, addr, 4))
252 goto sigbus;
253
254 __asm__ __volatile__ (
255#ifdef __BIG_ENDIAN
256 "1:\tlwl\t%0, (%2)\n"
257 "2:\tlwr\t%0, 3(%2)\n\t"
258#endif
259#ifdef __LITTLE_ENDIAN
260 "1:\tlwl\t%0, 3(%2)\n"
261 "2:\tlwr\t%0, (%2)\n\t"
262#endif
263 "dsll\t%0, %0, 32\n\t"
264 "dsrl\t%0, %0, 32\n\t"
265 "li\t%1, 0\n"
266 "3:\t.section\t.fixup,\"ax\"\n\t"
267 "4:\tli\t%1, %3\n\t"
268 "j\t3b\n\t"
269 ".previous\n\t"
270 ".section\t__ex_table,\"a\"\n\t"
271 STR(PTR)"\t1b, 4b\n\t"
272 STR(PTR)"\t2b, 4b\n\t"
273 ".previous"
274 : "=&r" (value), "=r" (res)
275 : "r" (addr), "i" (-EFAULT));
276 if (res)
277 goto fault;
278 *newvalue = value;
279 *regptr = &regs->regs[insn.i_format.rt];
280 break;
281#endif /* CONFIG_MIPS64 */
282
283 /* Cannot handle 64-bit instructions in 32-bit kernel */
284 goto sigill;
285
286 case ld_op:
287#ifdef CONFIG_MIPS64
288 /*
289 * A 32-bit kernel might be running on a 64-bit processor. But
290 * if we're on a 32-bit processor and an i-cache incoherency
291 * or race makes us see a 64-bit instruction here the sdl/sdr
292 * would blow up, so for now we don't handle unaligned 64-bit
293 * instructions on 32-bit kernels.
294 */
295 if (!access_ok(VERIFY_READ, addr, 8))
296 goto sigbus;
297
298 __asm__ __volatile__ (
299#ifdef __BIG_ENDIAN
300 "1:\tldl\t%0, (%2)\n"
301 "2:\tldr\t%0, 7(%2)\n\t"
302#endif
303#ifdef __LITTLE_ENDIAN
304 "1:\tldl\t%0, 7(%2)\n"
305 "2:\tldr\t%0, (%2)\n\t"
306#endif
307 "li\t%1, 0\n"
308 "3:\t.section\t.fixup,\"ax\"\n\t"
309 "4:\tli\t%1, %3\n\t"
310 "j\t3b\n\t"
311 ".previous\n\t"
312 ".section\t__ex_table,\"a\"\n\t"
313 STR(PTR)"\t1b, 4b\n\t"
314 STR(PTR)"\t2b, 4b\n\t"
315 ".previous"
316 : "=&r" (value), "=r" (res)
317 : "r" (addr), "i" (-EFAULT));
318 if (res)
319 goto fault;
320 *newvalue = value;
321 *regptr = &regs->regs[insn.i_format.rt];
322 break;
323#endif /* CONFIG_MIPS64 */
324
325 /* Cannot handle 64-bit instructions in 32-bit kernel */
326 goto sigill;
327
328 case sh_op:
329 if (!access_ok(VERIFY_WRITE, addr, 2))
330 goto sigbus;
331
332 value = regs->regs[insn.i_format.rt];
333 __asm__ __volatile__ (
334#ifdef __BIG_ENDIAN
335 ".set\tnoat\n"
336 "1:\tsb\t%1, 1(%2)\n\t"
337 "srl\t$1, %1, 0x8\n"
338 "2:\tsb\t$1, 0(%2)\n\t"
339 ".set\tat\n\t"
340#endif
341#ifdef __LITTLE_ENDIAN
342 ".set\tnoat\n"
343 "1:\tsb\t%1, 0(%2)\n\t"
344 "srl\t$1,%1, 0x8\n"
345 "2:\tsb\t$1, 1(%2)\n\t"
346 ".set\tat\n\t"
347#endif
348 "li\t%0, 0\n"
349 "3:\n\t"
350 ".section\t.fixup,\"ax\"\n\t"
351 "4:\tli\t%0, %3\n\t"
352 "j\t3b\n\t"
353 ".previous\n\t"
354 ".section\t__ex_table,\"a\"\n\t"
355 STR(PTR)"\t1b, 4b\n\t"
356 STR(PTR)"\t2b, 4b\n\t"
357 ".previous"
358 : "=r" (res)
359 : "r" (value), "r" (addr), "i" (-EFAULT));
360 if (res)
361 goto fault;
362 break;
363
364 case sw_op:
365 if (!access_ok(VERIFY_WRITE, addr, 4))
366 goto sigbus;
367
368 value = regs->regs[insn.i_format.rt];
369 __asm__ __volatile__ (
370#ifdef __BIG_ENDIAN
371 "1:\tswl\t%1,(%2)\n"
372 "2:\tswr\t%1, 3(%2)\n\t"
373#endif
374#ifdef __LITTLE_ENDIAN
375 "1:\tswl\t%1, 3(%2)\n"
376 "2:\tswr\t%1, (%2)\n\t"
377#endif
378 "li\t%0, 0\n"
379 "3:\n\t"
380 ".section\t.fixup,\"ax\"\n\t"
381 "4:\tli\t%0, %3\n\t"
382 "j\t3b\n\t"
383 ".previous\n\t"
384 ".section\t__ex_table,\"a\"\n\t"
385 STR(PTR)"\t1b, 4b\n\t"
386 STR(PTR)"\t2b, 4b\n\t"
387 ".previous"
388 : "=r" (res)
389 : "r" (value), "r" (addr), "i" (-EFAULT));
390 if (res)
391 goto fault;
392 break;
393
394 case sd_op:
395#ifdef CONFIG_MIPS64
396 /*
397 * A 32-bit kernel might be running on a 64-bit processor. But
398 * if we're on a 32-bit processor and an i-cache incoherency
399 * or race makes us see a 64-bit instruction here the sdl/sdr
400 * would blow up, so for now we don't handle unaligned 64-bit
401 * instructions on 32-bit kernels.
402 */
403 if (!access_ok(VERIFY_WRITE, addr, 8))
404 goto sigbus;
405
406 value = regs->regs[insn.i_format.rt];
407 __asm__ __volatile__ (
408#ifdef __BIG_ENDIAN
409 "1:\tsdl\t%1,(%2)\n"
410 "2:\tsdr\t%1, 7(%2)\n\t"
411#endif
412#ifdef __LITTLE_ENDIAN
413 "1:\tsdl\t%1, 7(%2)\n"
414 "2:\tsdr\t%1, (%2)\n\t"
415#endif
416 "li\t%0, 0\n"
417 "3:\n\t"
418 ".section\t.fixup,\"ax\"\n\t"
419 "4:\tli\t%0, %3\n\t"
420 "j\t3b\n\t"
421 ".previous\n\t"
422 ".section\t__ex_table,\"a\"\n\t"
423 STR(PTR)"\t1b, 4b\n\t"
424 STR(PTR)"\t2b, 4b\n\t"
425 ".previous"
426 : "=r" (res)
427 : "r" (value), "r" (addr), "i" (-EFAULT));
428 if (res)
429 goto fault;
430 break;
431#endif /* CONFIG_MIPS64 */
432
433 /* Cannot handle 64-bit instructions in 32-bit kernel */
434 goto sigill;
435
436 case lwc1_op:
437 case ldc1_op:
438 case swc1_op:
439 case sdc1_op:
440 /*
441 * I herewith declare: this does not happen. So send SIGBUS.
442 */
443 goto sigbus;
444
445 case lwc2_op:
446 case ldc2_op:
447 case swc2_op:
448 case sdc2_op:
449 /*
450 * These are the coprocessor 2 load/stores. The current
451 * implementations don't use cp2 and cp2 should always be
452 * disabled in c0_status. So send SIGILL.
453 * (No longer true: the Sony PlayStation uses cp2 for
454 * 3D matrix operations. Dunno if that thingy has an MMU ...)
455 */
456 default:
457 /*
458 * Pheeee... We encountered an as yet unknown instruction or
459 * cache coherence problem. Die sucker, die ...
460 */
461 goto sigill;
462 }
463
464#ifdef CONFIG_PROC_FS
465 unaligned_instructions++;
466#endif
467
468 return 0;
469
470fault:
471 /* Did we have an exception handler installed? */
472 if (fixup_exception(regs))
473 return 1;
474
475 die_if_kernel ("Unhandled kernel unaligned access", regs);
476 send_sig(SIGSEGV, current, 1);
477
478 return 0;
479
480sigbus:
481 die_if_kernel("Unhandled kernel unaligned access", regs);
482 send_sig(SIGBUS, current, 1);
483
484 return 0;
485
486sigill:
487 die_if_kernel("Unhandled kernel unaligned access or invalid instruction", regs);
488 send_sig(SIGILL, current, 1);
489
490 return 0;
491}
492
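The per-opcode asm blocks above use the lwl/lwr (and ldl/ldr, swl/swr, sdl/sdr) pairs so the hardware gathers or scatters the value in two accesses that each stay within an aligned word. For comparison, a portable user-space equivalent of the same "fetch a word from any byte address" operation (the buffer and offset are arbitrary; the printed value depends on host endianness):

        #include <stdint.h>
        #include <stdio.h>
        #include <string.h>

        /* Assemble a 32-bit value from a possibly unaligned address; memcpy()
         * is the standard-C way to express a byte-wise, alignment-free load. */
        static uint32_t load_unaligned_u32(const void *addr)
        {
                uint32_t value;

                memcpy(&value, addr, sizeof(value));
                return value;
        }

        int main(void)
        {
                unsigned char buf[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };

                /* Load from buf + 3, which is not 4-byte aligned. */
                printf("unaligned word: %#010x\n", load_unaligned_u32(buf + 3));
                return 0;
        }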
493asmlinkage void do_ade(struct pt_regs *regs)
494{
495 unsigned long *regptr, newval;
496 extern int do_dsemulret(struct pt_regs *);
497 mm_segment_t seg;
498 unsigned long pc;
499
500 /*
501 * Address errors may be deliberately induced by the FPU emulator to
502 * retake control of the CPU after executing the instruction in the
503 * delay slot of an emulated branch.
504 */
505 /* Terminate if exception was recognized as a delay slot return */
506 if (do_dsemulret(regs))
507 return;
508
509 /* Otherwise handle as normal */
510
511 /*
512 * Did we catch a fault trying to load an instruction?
513 * Or are we running in MIPS16 mode?
514 */
515 if ((regs->cp0_badvaddr == regs->cp0_epc) || (regs->cp0_epc & 0x1))
516 goto sigbus;
517
518 pc = exception_epc(regs);
519 if ((current->thread.mflags & MF_FIXADE) == 0)
520 goto sigbus;
521
522 /*
523 * Do branch emulation only if we didn't forward the exception.
524 * This is all rather ugly ...
525 */
526 seg = get_fs();
527 if (!user_mode(regs))
528 set_fs(KERNEL_DS);
529 if (!emulate_load_store_insn(regs, (void *)regs->cp0_badvaddr, pc,
530 &regptr, &newval)) {
531 compute_return_epc(regs);
532 /*
533 * Now that the branch is evaluated, update the destination
534 * register if necessary.
535 */
536 if (regptr)
537 *regptr = newval;
538 }
539 set_fs(seg);
540
541 return;
542
543sigbus:
544 die_if_kernel("Kernel unaligned instruction access", regs);
545 force_sig(SIGBUS, current);
546
547 /*
548 * XXX On return from the signal handler we should advance the epc
549 */
550}
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
new file mode 100644
index 000000000000..e830d788c106
--- /dev/null
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -0,0 +1,183 @@
1#include <linux/config.h>
2#include <asm-generic/vmlinux.lds.h>
3
4#undef mips /* CPP really sucks for this job */
5#define mips mips
6OUTPUT_ARCH(mips)
7ENTRY(kernel_entry)
8jiffies = JIFFIES;
9SECTIONS
10{
11#ifdef CONFIG_BOOT_ELF64
12 /* Read-only sections, merged into text segment: */
13 /* . = 0xc000000000000000; */
14
15 /* This is the value for an Origin kernel, taken from an IRIX kernel. */
16 /* . = 0xc00000000001c000; */
17
18 /* Set the vaddr for the text segment to a value
19 >= 0xa800 0000 0001 9000 if no symmon is going to be configured
20 >= 0xa800 0000 0030 0000 otherwise */
21
22 /* . = 0xa800000000300000; */
23 /* . = 0xa800000000300000; */
24 . = 0xffffffff80300000;
25#endif
26 . = LOADADDR;
27 /* read-only */
28 _text = .; /* Text and read-only data */
29 .text : {
30 *(.text)
31 SCHED_TEXT
32 LOCK_TEXT
33 *(.fixup)
34 *(.gnu.warning)
35 } =0
36
37 _etext = .; /* End of text section */
38
39 . = ALIGN(16); /* Exception table */
40 __start___ex_table = .;
41 __ex_table : { *(__ex_table) }
42 __stop___ex_table = .;
43
44 __start___dbe_table = .; /* Exception table for data bus errors */
45 __dbe_table : { *(__dbe_table) }
46 __stop___dbe_table = .;
47
48 RODATA
49
50 /* writeable */
51 .data : { /* Data */
52 . = . + DATAOFFSET; /* for CONFIG_MAPPED_KERNEL */
53 *(.data.init_task)
54
55 *(.data)
56
57 /* Align the initial ramdisk image (INITRD) on page boundaries. */
58 . = ALIGN(4096);
59 __rd_start = .;
60 *(.initrd)
61 . = ALIGN(4096);
62 __rd_end = .;
63
64 CONSTRUCTORS
65 }
66 _gp = . + 0x8000;
67 .lit8 : { *(.lit8) }
68 .lit4 : { *(.lit4) }
69 /* We want the small data sections together, so single-instruction offsets
70 can access them all, and initialized data all before uninitialized, so
71 we can shorten the on-disk segment size. */
72 .sdata : { *(.sdata) }
73
74 . = ALIGN(4096);
75 __nosave_begin = .;
76 .data_nosave : { *(.data.nosave) }
77 . = ALIGN(4096);
78 __nosave_end = .;
79
80 . = ALIGN(32);
81 .data.cacheline_aligned : { *(.data.cacheline_aligned) }
82
83 _edata = .; /* End of data section */
84
85 /* will be freed after init */
86 . = ALIGN(4096); /* Init code and data */
87 __init_begin = .;
88 .init.text : {
89 _sinittext = .;
90 *(.init.text)
91 _einittext = .;
92 }
93 .init.data : { *(.init.data) }
94 . = ALIGN(16);
95 __setup_start = .;
96 .init.setup : { *(.init.setup) }
97 __setup_end = .;
98
99 .early_initcall.init : {
100 __earlyinitcall_start = .;
101 *(.initcall.early1.init)
102 }
103 __earlyinitcall_end = .;
104
105 __initcall_start = .;
106 .initcall.init : {
107 *(.initcall1.init)
108 *(.initcall2.init)
109 *(.initcall3.init)
110 *(.initcall4.init)
111 *(.initcall5.init)
112 *(.initcall6.init)
113 *(.initcall7.init)
114 }
115 __initcall_end = .;
116
117 __con_initcall_start = .;
118 .con_initcall.init : { *(.con_initcall.init) }
119 __con_initcall_end = .;
120 SECURITY_INIT
121 . = ALIGN(4096);
122 __initramfs_start = .;
123 .init.ramfs : { *(.init.ramfs) }
124 __initramfs_end = .;
125 . = ALIGN(32);
126 __per_cpu_start = .;
127 .data.percpu : { *(.data.percpu) }
128 __per_cpu_end = .;
129 . = ALIGN(4096);
130 __init_end = .;
131 /* freed after init ends here */
132
133 __bss_start = .; /* BSS */
134 .sbss : {
135 *(.sbss)
136 *(.scommon)
137 }
138 .bss : {
139 *(.bss)
140 *(COMMON)
141 }
142 __bss_stop = .;
143
144 _end = . ;
145
146 /* Sections to be discarded */
147 /DISCARD/ : {
148 *(.exit.text)
149 *(.exit.data)
150 *(.exitcall.exit)
151
152 /* ABI crap starts here */
153 *(.comment)
154 *(.MIPS.options)
155 *(.note)
156 *(.options)
157 *(.pdr)
158 *(.reginfo)
159 *(.mdebug*)
160 }
161
162 /* This is the MIPS specific mdebug section. */
163 .mdebug : { *(.mdebug) }
164 /* These are needed for ELF backends which have not yet been
165 converted to the new style linker. */
166 .stab 0 : { *(.stab) }
167 .stabstr 0 : { *(.stabstr) }
168 /* DWARF debug sections.
169 Symbols in the .debug DWARF section are relative to the beginning of the
170 section so we begin .debug at 0. It's not clear yet what needs to happen
171 for the others. */
172 .debug 0 : { *(.debug) }
173 .debug_srcinfo 0 : { *(.debug_srcinfo) }
174 .debug_aranges 0 : { *(.debug_aranges) }
175 .debug_pubnames 0 : { *(.debug_pubnames) }
176 .debug_sfnames 0 : { *(.debug_sfnames) }
177 .line 0 : { *(.line) }
178 /* These must appear regardless of . */
179 .gptab.sdata : { *(.gptab.data) *(.gptab.sdata) }
180 .gptab.sbss : { *(.gptab.bss) *(.gptab.sbss) }
181 .comment : { *(.comment) }
182 .note : { *(.note) }
183}