author     Adrian Bunk <bunk@stusta.de>    2007-07-31 03:38:19 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>    2007-07-31 18:39:39 -0400
commit     99eb8a550dbccc0e1f6c7e866fe421810e0585f6 (patch)
tree       130c6e3338a0655ba74355eba83afab9261e1ed0 /arch/arm26/kernel
parent     0d0ed42e5ca2e22465c591341839c18025748fe8 (diff)
Remove the arm26 port
The arm26 port has been in a state where it was far from even compiling
for quite some time.

Ian Molton agreed with the removal.

Signed-off-by: Adrian Bunk <bunk@stusta.de>
Cc: Ian Molton <spyro@f2s.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch/arm26/kernel')
-rw-r--r--  arch/arm26/kernel/Makefile                  |  17
-rw-r--r--  arch/arm26/kernel/armksyms.c                | 204
-rw-r--r--  arch/arm26/kernel/asm-offsets.c             |  55
-rw-r--r--  arch/arm26/kernel/calls.S                   | 265
-rw-r--r--  arch/arm26/kernel/compat.c                  | 173
-rw-r--r--  arch/arm26/kernel/dma.c                     | 273
-rw-r--r--  arch/arm26/kernel/ecard.c                   | 847
-rw-r--r--  arch/arm26/kernel/entry.S                   | 951
-rw-r--r--  arch/arm26/kernel/fiq.c                     | 201
-rw-r--r--  arch/arm26/kernel/head.S                    | 112
-rw-r--r--  arch/arm26/kernel/init_task.c               |  49
-rw-r--r--  arch/arm26/kernel/irq.c                     | 722
-rw-r--r--  arch/arm26/kernel/process.c                 | 392
-rw-r--r--  arch/arm26/kernel/ptrace.c                  | 670
-rw-r--r--  arch/arm26/kernel/ptrace.h                  |  13
-rw-r--r--  arch/arm26/kernel/semaphore.c               | 222
-rw-r--r--  arch/arm26/kernel/setup.c                   | 572
-rw-r--r--  arch/arm26/kernel/signal.c                  | 538
-rw-r--r--  arch/arm26/kernel/sys_arm.c                 | 323
-rw-r--r--  arch/arm26/kernel/time.c                    | 210
-rw-r--r--  arch/arm26/kernel/traps.c                   | 548
-rw-r--r--  arch/arm26/kernel/vmlinux-arm26-xip.lds.in  | 136
-rw-r--r--  arch/arm26/kernel/vmlinux-arm26.lds.in      | 129
-rw-r--r--  arch/arm26/kernel/vmlinux.lds.S             |  11
24 files changed, 0 insertions, 7633 deletions
diff --git a/arch/arm26/kernel/Makefile b/arch/arm26/kernel/Makefile
deleted file mode 100644
index ee9fb49fdb78..000000000000
--- a/arch/arm26/kernel/Makefile
+++ /dev/null
@@ -1,17 +0,0 @@
1#
2# Makefile for the linux kernel.
3#
4
5# Object file lists.
6
7AFLAGS_head.o := -DTEXTADDR=$(TEXTADDR)
8
9obj-y := compat.o dma.o entry.o irq.o process.o ptrace.o \
10 semaphore.o setup.o signal.o sys_arm.o time.o traps.o \
11 ecard.o dma.o ecard.o fiq.o time.o
12
13extra-y := head.o init_task.o vmlinux.lds
14
15obj-$(CONFIG_FIQ) += fiq.o
16obj-$(CONFIG_MODULES) += armksyms.o
17
diff --git a/arch/arm26/kernel/armksyms.c b/arch/arm26/kernel/armksyms.c
deleted file mode 100644
index fe1e3ceed7cb..000000000000
--- a/arch/arm26/kernel/armksyms.c
+++ /dev/null
@@ -1,204 +0,0 @@
1/*
2 * linux/arch/arm26/kernel/armksyms.c
3 *
4 * Copyright (C) 2003 Ian Molton
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#include <linux/module.h>
11#include <linux/module.h>
12#include <linux/user.h>
13#include <linux/string.h>
14#include <linux/fs.h>
15#include <linux/mm.h>
16#include <linux/mman.h>
17#include <linux/delay.h>
18#include <linux/in6.h>
19#include <linux/interrupt.h>
20#include <linux/pm.h>
21#include <linux/tty.h>
22#include <linux/vt_kern.h>
23#include <linux/syscalls.h>
24
25#include <asm/byteorder.h>
26#include <asm/elf.h>
27#include <asm/io.h>
28#include <asm/irq.h>
29#include <asm/processor.h>
30#include <asm/semaphore.h>
31#include <asm/system.h>
32#include <asm/uaccess.h>
33#include <asm/checksum.h>
34#include <asm/mach-types.h>
35
36extern int dump_fpu(struct pt_regs *, struct user_fp_struct *);
37extern void inswb(unsigned int port, void *to, int len);
38extern void outswb(unsigned int port, const void *to, int len);
39
40extern void __bad_xchg(volatile void *ptr, int size);
41
42/*
43 * libgcc functions - functions that are used internally by the
44 * compiler... (prototypes are not correct though, but that
45 * doesn't really matter since they're not versioned).
46 */
47extern void __ashldi3(void);
48extern void __ashrdi3(void);
49extern void __divsi3(void);
50extern void __lshrdi3(void);
51extern void __modsi3(void);
52extern void __muldi3(void);
53extern void __ucmpdi2(void);
54extern void __udivdi3(void);
55extern void __umoddi3(void);
56extern void __udivmoddi4(void);
57extern void __udivsi3(void);
58extern void __umodsi3(void);
59extern void abort(void);
60
61extern void ret_from_exception(void);
62extern void fpundefinstr(void);
63extern void fp_enter(void);
64
65/*
66 * This has a special calling convention; it doesn't
67 * modify any of the usual registers, except for LR.
68 * FIXME - we used to use our own local version - looks to be in kernel/softirq now
69 */
70//extern void __do_softirq(void);
71
72#define EXPORT_SYMBOL_ALIAS(sym,orig) \
73 const char __kstrtab_##sym[] \
74 __attribute__((section(".kstrtab"))) = \
75 __MODULE_STRING(sym); \
76 const struct module_symbol __ksymtab_##sym \
77 __attribute__((section("__ksymtab"))) = \
78 { (unsigned long)&orig, __kstrtab_##sym };
79
80/*
81 * floating point math emulator support.
82 * These symbols will never change their calling convention...
83 */
84EXPORT_SYMBOL_ALIAS(kern_fp_enter,fp_enter);
85EXPORT_SYMBOL_ALIAS(fp_printk,printk);
86EXPORT_SYMBOL_ALIAS(fp_send_sig,send_sig);
87
88EXPORT_SYMBOL(fpundefinstr);
89EXPORT_SYMBOL(ret_from_exception);
90
91#ifdef CONFIG_VT
92EXPORT_SYMBOL(kd_mksound);
93#endif
94
95//EXPORT_SYMBOL(__do_softirq);
96
97 /* platform dependent support */
98EXPORT_SYMBOL(dump_thread);
99EXPORT_SYMBOL(dump_fpu);
100EXPORT_SYMBOL(udelay);
101EXPORT_SYMBOL(kernel_thread);
102EXPORT_SYMBOL(system_rev);
103EXPORT_SYMBOL(system_serial_low);
104EXPORT_SYMBOL(system_serial_high);
105#ifdef CONFIG_DEBUG_BUGVERBOSE
106EXPORT_SYMBOL(__bug);
107#endif
108EXPORT_SYMBOL(__bad_xchg);
109EXPORT_SYMBOL(__readwrite_bug);
110EXPORT_SYMBOL(set_irq_type);
111EXPORT_SYMBOL(pm_idle);
112EXPORT_SYMBOL(pm_power_off);
113
114 /* processor dependencies */
115EXPORT_SYMBOL(__machine_arch_type);
116
117 /* networking */
118EXPORT_SYMBOL(csum_partial_copy_nocheck);
119EXPORT_SYMBOL(__csum_ipv6_magic);
120
121 /* io */
122#ifndef __raw_readsb
123EXPORT_SYMBOL(__raw_readsb);
124#endif
125#ifndef __raw_readsw
126EXPORT_SYMBOL(__raw_readsw);
127#endif
128#ifndef __raw_readsl
129EXPORT_SYMBOL(__raw_readsl);
130#endif
131#ifndef __raw_writesb
132EXPORT_SYMBOL(__raw_writesb);
133#endif
134#ifndef __raw_writesw
135EXPORT_SYMBOL(__raw_writesw);
136#endif
137#ifndef __raw_writesl
138EXPORT_SYMBOL(__raw_writesl);
139#endif
140
141 /* string / mem functions */
142EXPORT_SYMBOL(strcpy);
143EXPORT_SYMBOL(strncpy);
144EXPORT_SYMBOL(strcat);
145EXPORT_SYMBOL(strncat);
146EXPORT_SYMBOL(strcmp);
147EXPORT_SYMBOL(strncmp);
148EXPORT_SYMBOL(strchr);
149EXPORT_SYMBOL(strlen);
150EXPORT_SYMBOL(strnlen);
151EXPORT_SYMBOL(strrchr);
152EXPORT_SYMBOL(strstr);
153EXPORT_SYMBOL(memset);
154EXPORT_SYMBOL(memcpy);
155EXPORT_SYMBOL(memmove);
156EXPORT_SYMBOL(memcmp);
157EXPORT_SYMBOL(memscan);
158EXPORT_SYMBOL(__memzero);
159
160 /* user mem (segment) */
161EXPORT_SYMBOL(uaccess_kernel);
162EXPORT_SYMBOL(uaccess_user);
163
164EXPORT_SYMBOL(__get_user_1);
165EXPORT_SYMBOL(__get_user_2);
166EXPORT_SYMBOL(__get_user_4);
167EXPORT_SYMBOL(__get_user_8);
168
169EXPORT_SYMBOL(__put_user_1);
170EXPORT_SYMBOL(__put_user_2);
171EXPORT_SYMBOL(__put_user_4);
172EXPORT_SYMBOL(__put_user_8);
173
174 /* gcc lib functions */
175EXPORT_SYMBOL(__ashldi3);
176EXPORT_SYMBOL(__ashrdi3);
177EXPORT_SYMBOL(__divsi3);
178EXPORT_SYMBOL(__lshrdi3);
179EXPORT_SYMBOL(__modsi3);
180EXPORT_SYMBOL(__muldi3);
181EXPORT_SYMBOL(__ucmpdi2);
182EXPORT_SYMBOL(__udivdi3);
183EXPORT_SYMBOL(__umoddi3);
184EXPORT_SYMBOL(__udivmoddi4);
185EXPORT_SYMBOL(__udivsi3);
186EXPORT_SYMBOL(__umodsi3);
187
188 /* bitops */
189EXPORT_SYMBOL(_set_bit_le);
190EXPORT_SYMBOL(_test_and_set_bit_le);
191EXPORT_SYMBOL(_clear_bit_le);
192EXPORT_SYMBOL(_test_and_clear_bit_le);
193EXPORT_SYMBOL(_change_bit_le);
194EXPORT_SYMBOL(_test_and_change_bit_le);
195EXPORT_SYMBOL(_find_first_zero_bit_le);
196EXPORT_SYMBOL(_find_next_zero_bit_le);
197
198 /* elf */
199EXPORT_SYMBOL(elf_platform);
200EXPORT_SYMBOL(elf_hwcap);
201
202#ifdef CONFIG_PREEMPT
203EXPORT_SYMBOL(kernel_flag);
204#endif
diff --git a/arch/arm26/kernel/asm-offsets.c b/arch/arm26/kernel/asm-offsets.c
deleted file mode 100644
index 76d9d7d489a8..000000000000
--- a/arch/arm26/kernel/asm-offsets.c
+++ /dev/null
@@ -1,55 +0,0 @@
1/*
2 * Copyright (C) 1995-2001 Russell King
3 * 2001-2002 Keith Owens
4 * 2003 Ian Molton
5 *
6 * Generate definitions needed by assembly language modules.
7 * This code generates raw asm output which is post-processed to extract
8 * and format the required data.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 */
14
15#include <linux/sched.h>
16#include <linux/mm.h>
17
18#include <asm/pgtable.h>
19#include <asm/uaccess.h>
20
21/*
22 * Make sure that the compiler and target are compatible.
23 */
24#if defined(__APCS_32__) && defined(CONFIG_CPU_26)
25#error Sorry, your compiler targets APCS-32 but this kernel requires APCS-26
26#endif
27
28/* Use marker if you need to separate the values later */
29
30#define DEFINE(sym, val) \
31 asm volatile("\n->" #sym " %0 " #val : : "i" (val))
32
33#define BLANK() asm volatile("\n->" : : )
34
35int main(void)
36{
37 DEFINE(TSK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
38 BLANK();
39 DEFINE(VMA_VM_MM, offsetof(struct vm_area_struct, vm_mm));
40 DEFINE(VMA_VM_FLAGS, offsetof(struct vm_area_struct, vm_flags));
41 BLANK();
42 DEFINE(VM_EXEC, VM_EXEC);
43 BLANK();
44 BLANK();
45 DEFINE(PAGE_PRESENT, _PAGE_PRESENT);
46 DEFINE(PAGE_READONLY, _PAGE_READONLY);
47 DEFINE(PAGE_NOT_USER, _PAGE_NOT_USER);
48 DEFINE(PAGE_OLD, _PAGE_OLD);
49 DEFINE(PAGE_CLEAN, _PAGE_CLEAN);
50 BLANK();
51 DEFINE(PAGE_SZ, PAGE_SIZE);
52 BLANK();
53 DEFINE(SYS_ERROR0, 0x9f0000);
54 return 0;
55}
diff --git a/arch/arm26/kernel/calls.S b/arch/arm26/kernel/calls.S
deleted file mode 100644
index e3d276827c84..000000000000
--- a/arch/arm26/kernel/calls.S
+++ /dev/null
@@ -1,265 +0,0 @@
1/*
2 * linux/arch/arm26/kernel/calls.S
3 *
4 * Copyright (C) 2003 Ian Molton
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * FIXME
11 * This file is included twice in entry.S which may not be necessary
12 */
13
14//FIXME - clearly NR_syscalls is never defined here
15
16#ifndef NR_syscalls
17#define NR_syscalls 256
18#else
19
20__syscall_start:
21/* 0 */ .long sys_ni_syscall
22 .long sys_exit
23 .long sys_fork_wrapper
24 .long sys_read
25 .long sys_write
26/* 5 */ .long sys_open
27 .long sys_close
28 .long sys_ni_syscall /* was sys_waitpid */
29 .long sys_creat
30 .long sys_link
31/* 10 */ .long sys_unlink
32 .long sys_execve_wrapper
33 .long sys_chdir
34 .long sys_time /* used by libc4 */
35 .long sys_mknod
36/* 15 */ .long sys_chmod
37 .long sys_lchown16
38 .long sys_ni_syscall /* was sys_break */
39 .long sys_ni_syscall /* was sys_stat */
40 .long sys_lseek
41/* 20 */ .long sys_getpid
42 .long sys_mount
43 .long sys_oldumount /* used by libc4 */
44 .long sys_setuid16
45 .long sys_getuid16
46/* 25 */ .long sys_stime
47 .long sys_ptrace
48 .long sys_alarm /* used by libc4 */
49 .long sys_ni_syscall /* was sys_fstat */
50 .long sys_pause
51/* 30 */ .long sys_utime /* used by libc4 */
52 .long sys_ni_syscall /* was sys_stty */
53 .long sys_ni_syscall /* was sys_getty */
54 .long sys_access
55 .long sys_nice
56/* 35 */ .long sys_ni_syscall /* was sys_ftime */
57 .long sys_sync
58 .long sys_kill
59 .long sys_rename
60 .long sys_mkdir
61/* 40 */ .long sys_rmdir
62 .long sys_dup
63 .long sys_pipe
64 .long sys_times
65 .long sys_ni_syscall /* was sys_prof */
66/* 45 */ .long sys_brk
67 .long sys_setgid16
68 .long sys_getgid16
69 .long sys_ni_syscall /* was sys_signal */
70 .long sys_geteuid16
71/* 50 */ .long sys_getegid16
72 .long sys_acct
73 .long sys_umount
74 .long sys_ni_syscall /* was sys_lock */
75 .long sys_ioctl
76/* 55 */ .long sys_fcntl
77 .long sys_ni_syscall /* was sys_mpx */
78 .long sys_setpgid
79 .long sys_ni_syscall /* was sys_ulimit */
80 .long sys_ni_syscall /* was sys_olduname */
81/* 60 */ .long sys_umask
82 .long sys_chroot
83 .long sys_ustat
84 .long sys_dup2
85 .long sys_getppid
86/* 65 */ .long sys_getpgrp
87 .long sys_setsid
88 .long sys_sigaction
89 .long sys_ni_syscall /* was sys_sgetmask */
90 .long sys_ni_syscall /* was sys_ssetmask */
91/* 70 */ .long sys_setreuid16
92 .long sys_setregid16
93 .long sys_sigsuspend_wrapper
94 .long sys_sigpending
95 .long sys_sethostname
96/* 75 */ .long sys_setrlimit
97 .long sys_old_getrlimit /* used by libc4 */
98 .long sys_getrusage
99 .long sys_gettimeofday
100 .long sys_settimeofday
101/* 80 */ .long sys_getgroups16
102 .long sys_setgroups16
103 .long old_select /* used by libc4 */
104 .long sys_symlink
105 .long sys_ni_syscall /* was sys_lstat */
106/* 85 */ .long sys_readlink
107 .long sys_uselib
108 .long sys_swapon
109 .long sys_reboot
110 .long old_readdir /* used by libc4 */
111/* 90 */ .long old_mmap /* used by libc4 */
112 .long sys_munmap
113 .long sys_truncate
114 .long sys_ftruncate
115 .long sys_fchmod
116/* 95 */ .long sys_fchown16
117 .long sys_getpriority
118 .long sys_setpriority
119 .long sys_ni_syscall /* was sys_profil */
120 .long sys_statfs
121/* 100 */ .long sys_fstatfs
122 .long sys_ni_syscall
123 .long sys_socketcall
124 .long sys_syslog
125 .long sys_setitimer
126/* 105 */ .long sys_getitimer
127 .long sys_newstat
128 .long sys_newlstat
129 .long sys_newfstat
130 .long sys_ni_syscall /* was sys_uname */
131/* 110 */ .long sys_ni_syscall /* was sys_iopl */
132 .long sys_vhangup
133 .long sys_ni_syscall
134 .long sys_syscall /* call a syscall */
135 .long sys_wait4
136/* 115 */ .long sys_swapoff
137 .long sys_sysinfo
138 .long sys_ipc
139 .long sys_fsync
140 .long sys_sigreturn_wrapper
141/* 120 */ .long sys_clone_wapper
142 .long sys_setdomainname
143 .long sys_newuname
144 .long sys_ni_syscall
145 .long sys_adjtimex
146/* 125 */ .long sys_mprotect
147 .long sys_sigprocmask
148 .long sys_ni_syscall /* WAS: sys_create_module */
149 .long sys_init_module
150 .long sys_delete_module
151/* 130 */ .long sys_ni_syscall /* WAS: sys_get_kernel_syms */
152 .long sys_quotactl
153 .long sys_getpgid
154 .long sys_fchdir
155 .long sys_bdflush
156/* 135 */ .long sys_sysfs
157 .long sys_personality
158 .long sys_ni_syscall /* .long _sys_afs_syscall */
159 .long sys_setfsuid16
160 .long sys_setfsgid16
161/* 140 */ .long sys_llseek
162 .long sys_getdents
163 .long sys_select
164 .long sys_flock
165 .long sys_msync
166/* 145 */ .long sys_readv
167 .long sys_writev
168 .long sys_getsid
169 .long sys_fdatasync
170 .long sys_sysctl
171/* 150 */ .long sys_mlock
172 .long sys_munlock
173 .long sys_mlockall
174 .long sys_munlockall
175 .long sys_sched_setparam
176/* 155 */ .long sys_sched_getparam
177 .long sys_sched_setscheduler
178 .long sys_sched_getscheduler
179 .long sys_sched_yield
180 .long sys_sched_get_priority_max
181/* 160 */ .long sys_sched_get_priority_min
182 .long sys_sched_rr_get_interval
183 .long sys_nanosleep
184 .long sys_arm_mremap
185 .long sys_setresuid16
186/* 165 */ .long sys_getresuid16
187 .long sys_ni_syscall
188 .long sys_ni_syscall /* WAS: sys_query_module */
189 .long sys_poll
190 .long sys_nfsservctl
191/* 170 */ .long sys_setresgid16
192 .long sys_getresgid16
193 .long sys_prctl
194 .long sys_rt_sigreturn_wrapper
195 .long sys_rt_sigaction
196/* 175 */ .long sys_rt_sigprocmask
197 .long sys_rt_sigpending
198 .long sys_rt_sigtimedwait
199 .long sys_rt_sigqueueinfo
200 .long sys_rt_sigsuspend_wrapper
201/* 180 */ .long sys_pread64
202 .long sys_pwrite64
203 .long sys_chown16
204 .long sys_getcwd
205 .long sys_capget
206/* 185 */ .long sys_capset
207 .long sys_sigaltstack_wrapper
208 .long sys_sendfile
209 .long sys_ni_syscall
210 .long sys_ni_syscall
211/* 190 */ .long sys_vfork_wrapper
212 .long sys_getrlimit
213 .long sys_mmap2
214 .long sys_truncate64
215 .long sys_ftruncate64
216/* 195 */ .long sys_stat64
217 .long sys_lstat64
218 .long sys_fstat64
219 .long sys_lchown
220 .long sys_getuid
221/* 200 */ .long sys_getgid
222 .long sys_geteuid
223 .long sys_getegid
224 .long sys_setreuid
225 .long sys_setregid
226/* 205 */ .long sys_getgroups
227 .long sys_setgroups
228 .long sys_fchown
229 .long sys_setresuid
230 .long sys_getresuid
231/* 210 */ .long sys_setresgid
232 .long sys_getresgid
233 .long sys_chown
234 .long sys_setuid
235 .long sys_setgid
236/* 215 */ .long sys_setfsuid
237 .long sys_setfsgid
238 .long sys_getdents64
239 .long sys_pivot_root
240 .long sys_mincore
241/* 220 */ .long sys_madvise
242 .long sys_fcntl64
243 .long sys_ni_syscall /* TUX */
244 .long sys_ni_syscall /* WAS: sys_security */
245 .long sys_gettid
246/* 225 */ .long sys_readahead
247 .long sys_setxattr
248 .long sys_lsetxattr
249 .long sys_fsetxattr
250 .long sys_getxattr
251/* 230 */ .long sys_lgetxattr
252 .long sys_fgetxattr
253 .long sys_listxattr
254 .long sys_llistxattr
255 .long sys_flistxattr
256/* 235 */ .long sys_removexattr
257 .long sys_lremovexattr
258 .long sys_fremovexattr
259 .long sys_tkill
260__syscall_end:
261
262 .rept NR_syscalls - (__syscall_end - __syscall_start) / 4
263 .long sys_ni_syscall
264 .endr
265#endif
diff --git a/arch/arm26/kernel/compat.c b/arch/arm26/kernel/compat.c
deleted file mode 100644
index 21e966ff0aa7..000000000000
--- a/arch/arm26/kernel/compat.c
+++ /dev/null
@@ -1,173 +0,0 @@
1/*
2 * linux/arch/arm26/kernel/compat.c
3 *
4 * Copyright (C) 2001 Russell King
5 * 2003 Ian Molton
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * We keep the old params compatibility cruft in one place (here)
12 * so we don't end up with lots of mess around other places.
13 *
14 * NOTE:
15 * The old struct param_struct is deprecated, but it will be kept in
16 * the kernel for 5 years from now (2001). This will allow boot loaders
17 * to convert to the new struct tag way.
18 */
19#include <linux/types.h>
20#include <linux/kernel.h>
21#include <linux/string.h>
22#include <linux/init.h>
23
24#include <asm/setup.h>
25#include <asm/mach-types.h>
26#include <asm/page.h>
27
28//#include <asm/arch.h>
29//#include <asm/mach/irq.h>
30
31/*
32 * Usage:
33 * - do not go blindly adding fields, add them at the end
34 * - when adding fields, don't rely on the address until
35 * a patch from me has been released
36 * - unused fields should be zero (for future expansion)
37 * - this structure is relatively short-lived - only
38 * guaranteed to contain useful data in setup_arch()
39 *
40 * This is the old deprecated way to pass parameters to the kernel
41 */
42struct param_struct {
43 union {
44 struct {
45 unsigned long page_size; /* 0 */
46 unsigned long nr_pages; /* 4 */
47 unsigned long ramdisk_size; /* 8 */
48 unsigned long flags; /* 12 */
49#define FLAG_READONLY 1
50#define FLAG_RDLOAD 4
51#define FLAG_RDPROMPT 8
52 unsigned long rootdev; /* 16 */
53 unsigned long video_num_cols; /* 20 */
54 unsigned long video_num_rows; /* 24 */
55 unsigned long video_x; /* 28 */
56 unsigned long video_y; /* 32 */
57 unsigned long memc_control_reg; /* 36 */
58 unsigned char sounddefault; /* 40 */
59 unsigned char adfsdrives; /* 41 */
60 unsigned char bytes_per_char_h; /* 42 */
61 unsigned char bytes_per_char_v; /* 43 */
62 unsigned long pages_in_bank[4]; /* 44 */
63 unsigned long pages_in_vram; /* 60 */
64 unsigned long initrd_start; /* 64 */
65 unsigned long initrd_size; /* 68 */
66 unsigned long rd_start; /* 72 */
67 unsigned long system_rev; /* 76 */
68 unsigned long system_serial_low; /* 80 */
69 unsigned long system_serial_high; /* 84 */
70 unsigned long mem_fclk_21285; /* 88 */
71 } s;
72 char unused[256];
73 } u1;
74 union {
75 char paths[8][128];
76 struct {
77 unsigned long magic;
78 char n[1024 - sizeof(unsigned long)];
79 } s;
80 } u2;
81 char commandline[COMMAND_LINE_SIZE];
82};
83
84static struct tag * __init memtag(struct tag *tag, unsigned long start, unsigned long size)
85{
86 tag = tag_next(tag);
87 tag->hdr.tag = ATAG_MEM;
88 tag->hdr.size = tag_size(tag_mem32);
89 tag->u.mem.size = size;
90 tag->u.mem.start = start;
91
92 return tag;
93}
94
95static void __init build_tag_list(struct param_struct *params, void *taglist)
96{
97 struct tag *tag = taglist;
98
99 if (params->u1.s.page_size != PAGE_SIZE) {
100 printk(KERN_WARNING "Warning: bad configuration page, "
101 "trying to continue\n");
102 return;
103 }
104
105 printk(KERN_DEBUG "Converting old-style param struct to taglist\n");
106
107 tag->hdr.tag = ATAG_CORE;
108 tag->hdr.size = tag_size(tag_core);
109 tag->u.core.flags = params->u1.s.flags & FLAG_READONLY;
110 tag->u.core.pagesize = params->u1.s.page_size;
111 tag->u.core.rootdev = params->u1.s.rootdev;
112
113 tag = tag_next(tag);
114 tag->hdr.tag = ATAG_RAMDISK;
115 tag->hdr.size = tag_size(tag_ramdisk);
116 tag->u.ramdisk.flags = (params->u1.s.flags & FLAG_RDLOAD ? 1 : 0) |
117 (params->u1.s.flags & FLAG_RDPROMPT ? 2 : 0);
118 tag->u.ramdisk.size = params->u1.s.ramdisk_size;
119 tag->u.ramdisk.start = params->u1.s.rd_start;
120
121 tag = tag_next(tag);
122 tag->hdr.tag = ATAG_INITRD;
123 tag->hdr.size = tag_size(tag_initrd);
124 tag->u.initrd.start = params->u1.s.initrd_start;
125 tag->u.initrd.size = params->u1.s.initrd_size;
126
127 tag = tag_next(tag);
128 tag->hdr.tag = ATAG_SERIAL;
129 tag->hdr.size = tag_size(tag_serialnr);
130 tag->u.serialnr.low = params->u1.s.system_serial_low;
131 tag->u.serialnr.high = params->u1.s.system_serial_high;
132
133 tag = tag_next(tag);
134 tag->hdr.tag = ATAG_REVISION;
135 tag->hdr.size = tag_size(tag_revision);
136 tag->u.revision.rev = params->u1.s.system_rev;
137
138 tag = memtag(tag, PHYS_OFFSET, params->u1.s.nr_pages * PAGE_SIZE);
139
140 tag = tag_next(tag);
141 tag->hdr.tag = ATAG_ACORN;
142 tag->hdr.size = tag_size(tag_acorn);
143 tag->u.acorn.memc_control_reg = params->u1.s.memc_control_reg;
144 tag->u.acorn.vram_pages = params->u1.s.pages_in_vram;
145 tag->u.acorn.sounddefault = params->u1.s.sounddefault;
146 tag->u.acorn.adfsdrives = params->u1.s.adfsdrives;
147
148 tag = tag_next(tag);
149 tag->hdr.tag = ATAG_CMDLINE;
150 tag->hdr.size = (strlen(params->commandline) + 3 +
151 sizeof(struct tag_header)) >> 2;
152 strcpy(tag->u.cmdline.cmdline, params->commandline);
153
154 tag = tag_next(tag);
155 tag->hdr.tag = ATAG_NONE;
156 tag->hdr.size = 0;
157
158 memmove(params, taglist, ((int)tag) - ((int)taglist) +
159 sizeof(struct tag_header));
160}
161
162void __init convert_to_tag_list(struct tag *tags)
163{
164 struct param_struct *params = (struct param_struct *)tags;
165 build_tag_list(params, &params->u2);
166}
167
168void __init squash_mem_tags(struct tag *tag)
169{
170 for (; tag->hdr.size; tag = tag_next(tag))
171 if (tag->hdr.tag == ATAG_MEM)
172 tag->hdr.tag = ATAG_NONE;
173}
diff --git a/arch/arm26/kernel/dma.c b/arch/arm26/kernel/dma.c
deleted file mode 100644
index 80b5a774d905..000000000000
--- a/arch/arm26/kernel/dma.c
+++ /dev/null
@@ -1,273 +0,0 @@
1/*
2 * linux/arch/arm26/kernel/dma.c
3 *
4 * Copyright (C) 1995-2000 Russell King
5 * 2003 Ian Molton
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * Front-end to the DMA handling. This handles the allocation/freeing
12 * of DMA channels, and provides a unified interface to the machines
13 * DMA facilities.
14 */
15#include <linux/module.h>
16#include <linux/slab.h>
17#include <linux/sched.h>
18#include <linux/mman.h>
19#include <linux/init.h>
20#include <linux/spinlock.h>
21#include <linux/errno.h>
22
23#include <asm/dma.h>
24
25DEFINE_SPINLOCK(dma_spin_lock);
26
27static dma_t dma_chan[MAX_DMA_CHANNELS];
28
29/*
30 * Get dma list for /proc/dma
31 */
32int get_dma_list(char *buf)
33{
34 dma_t *dma;
35 char *p = buf;
36 int i;
37
38 for (i = 0, dma = dma_chan; i < MAX_DMA_CHANNELS; i++, dma++)
39 if (dma->lock)
40 p += sprintf(p, "%2d: %14s %s\n", i,
41 dma->d_ops->type, dma->device_id);
42
43 return p - buf;
44}
45
46/*
47 * Request DMA channel
48 *
49 * On certain platforms, we have to allocate an interrupt as well...
50 */
51int request_dma(dmach_t channel, const char *device_id)
52{
53 dma_t *dma = dma_chan + channel;
54 int ret;
55
56 if (channel >= MAX_DMA_CHANNELS || !dma->d_ops)
57 goto bad_dma;
58
59 if (xchg(&dma->lock, 1) != 0)
60 goto busy;
61
62 dma->device_id = device_id;
63 dma->active = 0;
64 dma->invalid = 1;
65
66 ret = 0;
67 if (dma->d_ops->request)
68 ret = dma->d_ops->request(channel, dma);
69
70 if (ret)
71 xchg(&dma->lock, 0);
72
73 return ret;
74
75bad_dma:
76 printk(KERN_ERR "dma: trying to allocate DMA%d\n", channel);
77 return -EINVAL;
78
79busy:
80 return -EBUSY;
81}
82
83/*
84 * Free DMA channel
85 *
86 * On certain platforms, we have to free interrupt as well...
87 */
88void free_dma(dmach_t channel)
89{
90 dma_t *dma = dma_chan + channel;
91
92 if (channel >= MAX_DMA_CHANNELS || !dma->d_ops)
93 goto bad_dma;
94
95 if (dma->active) {
96 printk(KERN_ERR "dma%d: freeing active DMA\n", channel);
97 dma->d_ops->disable(channel, dma);
98 dma->active = 0;
99 }
100
101 if (xchg(&dma->lock, 0) != 0) {
102 if (dma->d_ops->free)
103 dma->d_ops->free(channel, dma);
104 return;
105 }
106
107 printk(KERN_ERR "dma%d: trying to free free DMA\n", channel);
108 return;
109
110bad_dma:
111 printk(KERN_ERR "dma: trying to free DMA%d\n", channel);
112}
113
114/* Set DMA Scatter-Gather list
115 */
116void set_dma_sg (dmach_t channel, struct scatterlist *sg, int nr_sg)
117{
118 dma_t *dma = dma_chan + channel;
119
120 if (dma->active)
121 printk(KERN_ERR "dma%d: altering DMA SG while "
122 "DMA active\n", channel);
123
124 dma->sg = sg;
125 dma->sgcount = nr_sg;
126 dma->using_sg = 1;
127 dma->invalid = 1;
128}
129
130/* Set DMA address
131 *
132 * Copy address to the structure, and set the invalid bit
133 */
134void set_dma_addr (dmach_t channel, unsigned long physaddr)
135{
136 dma_t *dma = dma_chan + channel;
137
138 if (dma->active)
139 printk(KERN_ERR "dma%d: altering DMA address while "
140 "DMA active\n", channel);
141
142 dma->sg = &dma->buf;
143 dma->sgcount = 1;
144 dma->buf.__address = (char *)physaddr;//FIXME - not pretty
145 dma->using_sg = 0;
146 dma->invalid = 1;
147}
148
149/* Set DMA byte count
150 *
151 * Copy address to the structure, and set the invalid bit
152 */
153void set_dma_count (dmach_t channel, unsigned long count)
154{
155 dma_t *dma = dma_chan + channel;
156
157 if (dma->active)
158 printk(KERN_ERR "dma%d: altering DMA count while "
159 "DMA active\n", channel);
160
161 dma->sg = &dma->buf;
162 dma->sgcount = 1;
163 dma->buf.length = count;
164 dma->using_sg = 0;
165 dma->invalid = 1;
166}
167
168/* Set DMA direction mode
169 */
170void set_dma_mode (dmach_t channel, dmamode_t mode)
171{
172 dma_t *dma = dma_chan + channel;
173
174 if (dma->active)
175 printk(KERN_ERR "dma%d: altering DMA mode while "
176 "DMA active\n", channel);
177
178 dma->dma_mode = mode;
179 dma->invalid = 1;
180}
181
182/* Enable DMA channel
183 */
184void enable_dma (dmach_t channel)
185{
186 dma_t *dma = dma_chan + channel;
187
188 if (!dma->lock)
189 goto free_dma;
190
191 if (dma->active == 0) {
192 dma->active = 1;
193 dma->d_ops->enable(channel, dma);
194 }
195 return;
196
197free_dma:
198 printk(KERN_ERR "dma%d: trying to enable free DMA\n", channel);
199 BUG();
200}
201
202/* Disable DMA channel
203 */
204void disable_dma (dmach_t channel)
205{
206 dma_t *dma = dma_chan + channel;
207
208 if (!dma->lock)
209 goto free_dma;
210
211 if (dma->active == 1) {
212 dma->active = 0;
213 dma->d_ops->disable(channel, dma);
214 }
215 return;
216
217free_dma:
218 printk(KERN_ERR "dma%d: trying to disable free DMA\n", channel);
219 BUG();
220}
221
222/*
223 * Is the specified DMA channel active?
224 */
225int dma_channel_active(dmach_t channel)
226{
227 return dma_chan[channel].active;
228}
229
230void set_dma_page(dmach_t channel, char pagenr)
231{
232 printk(KERN_ERR "dma%d: trying to set_dma_page\n", channel);
233}
234
235void set_dma_speed(dmach_t channel, int cycle_ns)
236{
237 dma_t *dma = dma_chan + channel;
238 int ret = 0;
239
240 if (dma->d_ops->setspeed)
241 ret = dma->d_ops->setspeed(channel, dma, cycle_ns);
242 dma->speed = ret;
243}
244
245int get_dma_residue(dmach_t channel)
246{
247 dma_t *dma = dma_chan + channel;
248 int ret = 0;
249
250 if (dma->d_ops->residue)
251 ret = dma->d_ops->residue(channel, dma);
252
253 return ret;
254}
255
256void __init init_dma(void)
257{
258 arch_dma_init(dma_chan);
259}
260
261EXPORT_SYMBOL(request_dma);
262EXPORT_SYMBOL(free_dma);
263EXPORT_SYMBOL(enable_dma);
264EXPORT_SYMBOL(disable_dma);
265EXPORT_SYMBOL(set_dma_addr);
266EXPORT_SYMBOL(set_dma_count);
267EXPORT_SYMBOL(set_dma_mode);
268EXPORT_SYMBOL(set_dma_page);
269EXPORT_SYMBOL(get_dma_residue);
270EXPORT_SYMBOL(set_dma_sg);
271EXPORT_SYMBOL(set_dma_speed);
272
273EXPORT_SYMBOL(dma_spin_lock);
diff --git a/arch/arm26/kernel/ecard.c b/arch/arm26/kernel/ecard.c
deleted file mode 100644
index e2bcefc91cc3..000000000000
--- a/arch/arm26/kernel/ecard.c
+++ /dev/null
@@ -1,847 +0,0 @@
1/*
2 * linux/arch/arm26/kernel/ecard.c
3 *
4 * Copyright 1995-2001 Russell King
5 * Copyright 2003 Ian Molton
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * Find all installed expansion cards, and handle interrupts from them.
12 *
13 * Created from information from Acorns RiscOS3 PRMs
14 * 15-Jun-2003 IM Modified from ARM32 (RiscPC capable) version
15 * 10-Jan-1999 RMK Run loaders in a simulated RISC OS environment.
16 * 06-May-1997 RMK Added blacklist for cards whose loader doesn't work.
17 * 12-Sep-1997 RMK Created new handling of interrupt enables/disables
18 * - cards can now register their own routine to control
19 * interrupts (recommended).
20 * 29-Sep-1997 RMK Expansion card interrupt hardware not being re-enabled
21 * on reset from Linux. (Caused cards not to respond
22 * under RiscOS without hard reset).
23 *
24 */
25#define ECARD_C
26
27#include <linux/module.h>
28#include <linux/kernel.h>
29#include <linux/types.h>
30#include <linux/sched.h>
31#include <linux/interrupt.h>
32#include <linux/reboot.h>
33#include <linux/mm.h>
34#include <linux/slab.h>
35#include <linux/proc_fs.h>
36#include <linux/device.h>
37#include <linux/init.h>
38
39#include <asm/dma.h>
40#include <asm/ecard.h>
41#include <asm/hardware.h>
42#include <asm/io.h>
43#include <asm/irq.h>
44#include <asm/mmu_context.h>
45#include <asm/irqchip.h>
46#include <asm/tlbflush.h>
47
48enum req {
49 req_readbytes,
50 req_reset
51};
52
53struct ecard_request {
54 enum req req;
55 ecard_t *ec;
56 unsigned int address;
57 unsigned int length;
58 unsigned int use_loader;
59 void *buffer;
60};
61
62struct expcard_blacklist {
63 unsigned short manufacturer;
64 unsigned short product;
65 const char *type;
66};
67
68static ecard_t *cards;
69static ecard_t *slot_to_expcard[MAX_ECARDS];
70static unsigned int ectcr;
71
72/* List of descriptions of cards which don't have an extended
73 * identification, or chunk directories containing a description.
74 */
75static struct expcard_blacklist __initdata blacklist[] = {
76 { MANU_ACORN, PROD_ACORN_ETHER1, "Acorn Ether1" }
77};
78
79asmlinkage extern int
80ecard_loader_reset(volatile unsigned char *pa, loader_t loader);
81asmlinkage extern int
82ecard_loader_read(int off, volatile unsigned char *pa, loader_t loader);
83
84static const struct ecard_id *
85ecard_match_device(const struct ecard_id *ids, struct expansion_card *ec);
86
87static inline unsigned short
88ecard_getu16(unsigned char *v)
89{
90 return v[0] | v[1] << 8;
91}
92
93static inline signed long
94ecard_gets24(unsigned char *v)
95{
96 return v[0] | v[1] << 8 | v[2] << 16 | ((v[2] & 0x80) ? 0xff000000 : 0);
97}
98
99static inline ecard_t *
100slot_to_ecard(unsigned int slot)
101{
102 return slot < MAX_ECARDS ? slot_to_expcard[slot] : NULL;
103}
104
105/* ===================== Expansion card daemon ======================== */
106/*
107 * Since the loader programs on the expansion cards need to be run
108 * in a specific environment, create a separate task with this
109 * environment up, and pass requests to this task as and when we
110 * need to.
111 *
112 * This should allow 99% of loaders to be called from Linux.
113 *
114 * From a security standpoint, we trust the card vendors. This
115 * may be a misplaced trust.
116 */
117#define BUS_ADDR(x) ((((unsigned long)(x)) << 2) + IO_BASE)
118#define POD_INT_ADDR(x) ((volatile unsigned char *)\
119 ((BUS_ADDR((x)) - IO_BASE) + IO_START))
120
121static inline void ecard_task_reset(struct ecard_request *req)
122{
123 struct expansion_card *ec = req->ec;
124 if (ec->loader)
125 ecard_loader_reset(POD_INT_ADDR(ec->podaddr), ec->loader);
126}
127
128static void
129ecard_task_readbytes(struct ecard_request *req)
130{
131 unsigned char *buf = (unsigned char *)req->buffer;
132 volatile unsigned char *base_addr =
133 (volatile unsigned char *)POD_INT_ADDR(req->ec->podaddr);
134 unsigned int len = req->length;
135 unsigned int off = req->address;
136
137 if (!req->use_loader || !req->ec->loader) {
138 off *= 4;
139 while (len--) {
140 *buf++ = base_addr[off];
141 off += 4;
142 }
143 } else {
144 while(len--) {
145 /*
146 * The following is required by some
147 * expansion card loader programs.
148 */
149 *(unsigned long *)0x108 = 0;
150 *buf++ = ecard_loader_read(off++, base_addr,
151 req->ec->loader);
152 }
153 }
154}
155
156static void ecard_do_request(struct ecard_request *req)
157{
158 switch (req->req) {
159 case req_readbytes:
160 ecard_task_readbytes(req);
161 break;
162
163 case req_reset:
164 ecard_task_reset(req);
165 break;
166 }
167}
168
169/*
170 * On 26-bit processors, we don't need the kcardd thread to access the
171 * expansion card loaders. We do it directly.
172 */
173#define ecard_call(req) ecard_do_request(req)
174
175/* ======================= Mid-level card control ===================== */
176
177static void
178ecard_readbytes(void *addr, ecard_t *ec, int off, int len, int useld)
179{
180 struct ecard_request req;
181
182 req.req = req_readbytes;
183 req.ec = ec;
184 req.address = off;
185 req.length = len;
186 req.use_loader = useld;
187 req.buffer = addr;
188
189 ecard_call(&req);
190}
191
192int ecard_readchunk(struct in_chunk_dir *cd, ecard_t *ec, int id, int num)
193{
194 struct ex_chunk_dir excd;
195 int index = 16;
196 int useld = 0;
197
198 if (!ec->cid.cd)
199 return 0;
200
201 while(1) {
202 ecard_readbytes(&excd, ec, index, 8, useld);
203 index += 8;
204 if (c_id(&excd) == 0) {
205 if (!useld && ec->loader) {
206 useld = 1;
207 index = 0;
208 continue;
209 }
210 return 0;
211 }
212 if (c_id(&excd) == 0xf0) { /* link */
213 index = c_start(&excd);
214 continue;
215 }
216 if (c_id(&excd) == 0x80) { /* loader */
217 if (!ec->loader) {
218 ec->loader = kmalloc(c_len(&excd),
219 GFP_KERNEL);
220 if (ec->loader)
221 ecard_readbytes(ec->loader, ec,
222 (int)c_start(&excd),
223 c_len(&excd), useld);
224 else
225 return 0;
226 }
227 continue;
228 }
229 if (c_id(&excd) == id && num-- == 0)
230 break;
231 }
232
233 if (c_id(&excd) & 0x80) {
234 switch (c_id(&excd) & 0x70) {
235 case 0x70:
236 ecard_readbytes((unsigned char *)excd.d.string, ec,
237 (int)c_start(&excd), c_len(&excd),
238 useld);
239 break;
240 case 0x00:
241 break;
242 }
243 }
244 cd->start_offset = c_start(&excd);
245 memcpy(cd->d.string, excd.d.string, 256);
246 return 1;
247}
248
249/* ======================= Interrupt control ============================ */
250
251static void ecard_def_irq_enable(ecard_t *ec, int irqnr)
252{
253}
254
255static void ecard_def_irq_disable(ecard_t *ec, int irqnr)
256{
257}
258
259static int ecard_def_irq_pending(ecard_t *ec)
260{
261 return !ec->irqmask || ec->irqaddr[0] & ec->irqmask;
262}
263
264static void ecard_def_fiq_enable(ecard_t *ec, int fiqnr)
265{
266 panic("ecard_def_fiq_enable called - impossible");
267}
268
269static void ecard_def_fiq_disable(ecard_t *ec, int fiqnr)
270{
271 panic("ecard_def_fiq_disable called - impossible");
272}
273
274static int ecard_def_fiq_pending(ecard_t *ec)
275{
276 return !ec->fiqmask || ec->fiqaddr[0] & ec->fiqmask;
277}
278
279static expansioncard_ops_t ecard_default_ops = {
280 ecard_def_irq_enable,
281 ecard_def_irq_disable,
282 ecard_def_irq_pending,
283 ecard_def_fiq_enable,
284 ecard_def_fiq_disable,
285 ecard_def_fiq_pending
286};
287
288/*
289 * Enable and disable interrupts from expansion cards.
290 * (interrupts are disabled for these functions).
291 *
292 * They are not meant to be called directly, but via enable/disable_irq.
293 */
294static void ecard_irq_unmask(unsigned int irqnr)
295{
296 ecard_t *ec = slot_to_ecard(irqnr - 32);
297
298 if (ec) {
299 if (!ec->ops)
300 ec->ops = &ecard_default_ops;
301
302 if (ec->claimed && ec->ops->irqenable)
303 ec->ops->irqenable(ec, irqnr);
304 else
305 printk(KERN_ERR "ecard: rejecting request to "
306 "enable IRQs for %d\n", irqnr);
307 }
308}
309
310static void ecard_irq_mask(unsigned int irqnr)
311{
312 ecard_t *ec = slot_to_ecard(irqnr - 32);
313
314 if (ec) {
315 if (!ec->ops)
316 ec->ops = &ecard_default_ops;
317
318 if (ec->ops && ec->ops->irqdisable)
319 ec->ops->irqdisable(ec, irqnr);
320 }
321}
322
323static struct irqchip ecard_chip = {
324 .ack = ecard_irq_mask,
325 .mask = ecard_irq_mask,
326 .unmask = ecard_irq_unmask,
327};
328
329void ecard_enablefiq(unsigned int fiqnr)
330{
331 ecard_t *ec = slot_to_ecard(fiqnr);
332
333 if (ec) {
334 if (!ec->ops)
335 ec->ops = &ecard_default_ops;
336
337 if (ec->claimed && ec->ops->fiqenable)
338 ec->ops->fiqenable(ec, fiqnr);
339 else
340 printk(KERN_ERR "ecard: rejecting request to "
341 "enable FIQs for %d\n", fiqnr);
342 }
343}
344
345void ecard_disablefiq(unsigned int fiqnr)
346{
347 ecard_t *ec = slot_to_ecard(fiqnr);
348
349 if (ec) {
350 if (!ec->ops)
351 ec->ops = &ecard_default_ops;
352
353 if (ec->ops->fiqdisable)
354 ec->ops->fiqdisable(ec, fiqnr);
355 }
356}
357
358static void
359ecard_dump_irq_state(ecard_t *ec)
360{
361 printk(" %d: %sclaimed, ",
362 ec->slot_no,
363 ec->claimed ? "" : "not ");
364
365 if (ec->ops && ec->ops->irqpending &&
366 ec->ops != &ecard_default_ops)
367 printk("irq %spending\n",
368 ec->ops->irqpending(ec) ? "" : "not ");
369 else
370 printk("irqaddr %p, mask = %02X, status = %02X\n",
371 ec->irqaddr, ec->irqmask, *ec->irqaddr);
372}
373
374static void ecard_check_lockup(struct irqdesc *desc)
375{
376 static int last, lockup;
377 ecard_t *ec;
378
379 /*
380 * If the timer interrupt has not run since the last million
381 * unrecognised expansion card interrupts, then there is
382 * something seriously wrong. Disable the expansion card
383 * interrupts so at least we can continue.
384 *
385 * Maybe we ought to start a timer to re-enable them some time
386 * later?
387 */
388 if (last == jiffies) {
389 lockup += 1;
390 if (lockup > 1000000) {
391 printk(KERN_ERR "\nInterrupt lockup detected - "
392 "disabling all expansion card interrupts\n");
393
394 desc->chip->mask(IRQ_EXPANSIONCARD);
395
396 printk("Expansion card IRQ state:\n");
397
398 for (ec = cards; ec; ec = ec->next)
399 ecard_dump_irq_state(ec);
400 }
401 } else
402 lockup = 0;
403
404 /*
405 * If we did not recognise the source of this interrupt,
406 * warn the user, but don't flood the user with these messages.
407 */
408 if (!last || time_after(jiffies, (unsigned long)(last + 5*HZ))) {
409 last = jiffies;
410 printk(KERN_WARNING "Unrecognised interrupt from backplane\n");
411 }
412}
413
414static void
415ecard_irq_handler(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
416{
417 ecard_t *ec;
418 int called = 0;
419
420 desc->chip->mask(irq);
421 for (ec = cards; ec; ec = ec->next) {
422 int pending;
423
424 if (!ec->claimed || ec->irq == NO_IRQ)
425 continue;
426
427 if (ec->ops && ec->ops->irqpending)
428 pending = ec->ops->irqpending(ec);
429 else
430 pending = ecard_default_ops.irqpending(ec);
431
432 if (pending) {
433 struct irqdesc *d = irq_desc + ec->irq;
434 d->handle(ec->irq, d, regs);
435 called ++;
436 }
437 }
438 desc->chip->unmask(irq);
439
440 if (called == 0)
441 ecard_check_lockup(desc);
442}
443
444#define ecard_irqexp_handler NULL
445#define ecard_probeirqhw() (0)
446
447unsigned int ecard_address(ecard_t *ec, card_type_t type, card_speed_t speed)
448{
449 unsigned long address = 0;
450 int slot = ec->slot_no;
451
452 ectcr &= ~(1 << slot);
453
454 switch (type) {
455 case ECARD_MEMC:
456 address = IO_EC_MEMC_BASE + (slot << 12);
457 break;
458
459 case ECARD_IOC:
460 address = IO_EC_IOC_BASE + (slot << 12) + (speed << 17);
461 break;
462
463 default:
464 break;
465 }
466
467 return address;
468}
469
470static int ecard_prints(char *buffer, ecard_t *ec)
471{
472 char *start = buffer;
473
474 buffer += sprintf(buffer, " %d: ", ec->slot_no);
475
476 if (ec->cid.id == 0) {
477 struct in_chunk_dir incd;
478
479 buffer += sprintf(buffer, "[%04X:%04X] ",
480 ec->cid.manufacturer, ec->cid.product);
481
482 if (!ec->card_desc && ec->cid.cd &&
483 ecard_readchunk(&incd, ec, 0xf5, 0)) {
484 ec->card_desc = kmalloc(strlen(incd.d.string)+1, GFP_KERNEL);
485
486 if (ec->card_desc)
487 strcpy((char *)ec->card_desc, incd.d.string);
488 }
489
490 buffer += sprintf(buffer, "%s\n", ec->card_desc ? ec->card_desc : "*unknown*");
491 } else
492 buffer += sprintf(buffer, "Simple card %d\n", ec->cid.id);
493
494 return buffer - start;
495}
496
497static int get_ecard_dev_info(char *buf, char **start, off_t pos, int count)
498{
499 ecard_t *ec = cards;
500 off_t at = 0;
501 int len, cnt;
502
503 cnt = 0;
504 while (ec && count > cnt) {
505 len = ecard_prints(buf, ec);
506 at += len;
507 if (at >= pos) {
508 if (!*start) {
509 *start = buf + (pos - (at - len));
510 cnt = at - pos;
511 } else
512 cnt += len;
513 buf += len;
514 }
515 ec = ec->next;
516 }
517 return (count > cnt) ? cnt : count;
518}
519
520static struct proc_dir_entry *proc_bus_ecard_dir = NULL;
521
522static void ecard_proc_init(void)
523{
524 proc_bus_ecard_dir = proc_mkdir("ecard", proc_bus);
525 create_proc_info_entry("devices", 0, proc_bus_ecard_dir,
526 get_ecard_dev_info);
527}
528
529#define ec_set_resource(ec,nr,st,sz,flg) \
530 do { \
531 (ec)->resource[nr].name = ec->dev.bus_id; \
532 (ec)->resource[nr].start = st; \
533 (ec)->resource[nr].end = (st) + (sz) - 1; \
534 (ec)->resource[nr].flags = flg; \
535 } while (0)
536
537static void __init ecard_init_resources(struct expansion_card *ec)
538{
539 unsigned long base = PODSLOT_IOC0_BASE;
540 unsigned int slot = ec->slot_no;
541 int i;
542
543 ec_set_resource(ec, ECARD_RES_MEMC,
544 PODSLOT_MEMC_BASE + (slot << 14),
545 PODSLOT_MEMC_SIZE, IORESOURCE_MEM);
546
547 for (i = 0; i < ECARD_RES_IOCSYNC - ECARD_RES_IOCSLOW; i++) {
548 ec_set_resource(ec, i + ECARD_RES_IOCSLOW,
549 base + (slot << 14) + (i << 19),
550 PODSLOT_IOC_SIZE, IORESOURCE_MEM);
551 }
552
553 for (i = 0; i < ECARD_NUM_RESOURCES; i++) {
554 if (ec->resource[i].start &&
555 request_resource(&iomem_resource, &ec->resource[i])) {
556 printk(KERN_ERR "%s: resource(s) not available\n",
557 ec->dev.bus_id);
558 ec->resource[i].end -= ec->resource[i].start;
559 ec->resource[i].start = 0;
560 }
561 }
562}
563
564static ssize_t ecard_show_irq(struct device *dev, struct device_attribute *attr, char *buf)
565{
566 struct expansion_card *ec = ECARD_DEV(dev);
567 return sprintf(buf, "%u\n", ec->irq);
568}
569
570static ssize_t ecard_show_vendor(struct device *dev, struct device_attribute *attr, char *buf)
571{
572 struct expansion_card *ec = ECARD_DEV(dev);
573 return sprintf(buf, "%u\n", ec->cid.manufacturer);
574}
575
576static ssize_t ecard_show_device(struct device *dev, struct device_attribute *attr, char *buf)
577{
578 struct expansion_card *ec = ECARD_DEV(dev);
579 return sprintf(buf, "%u\n", ec->cid.product);
580}
581
582static ssize_t ecard_show_dma(struct device *dev, struct device_attribute *attr, char *buf)
583{
584 struct expansion_card *ec = ECARD_DEV(dev);
585 return sprintf(buf, "%u\n", ec->dma);
586}
587
588static ssize_t ecard_show_resources(struct device *dev, struct device_attribute *attr, char *buf)
589{
590 struct expansion_card *ec = ECARD_DEV(dev);
591 char *str = buf;
592 int i;
593
594 for (i = 0; i < ECARD_NUM_RESOURCES; i++)
595 str += sprintf(str, "%08lx %08lx %08lx\n",
596 ec->resource[i].start,
597 ec->resource[i].end,
598 ec->resource[i].flags);
599
600 return str - buf;
601}
602
603static DEVICE_ATTR(irq, S_IRUGO, ecard_show_irq, NULL);
604static DEVICE_ATTR(vendor, S_IRUGO, ecard_show_vendor, NULL);
605static DEVICE_ATTR(device, S_IRUGO, ecard_show_device, NULL);
606static DEVICE_ATTR(dma, S_IRUGO, ecard_show_dma, NULL);
607static DEVICE_ATTR(resource, S_IRUGO, ecard_show_resources, NULL);
608
609/*
610 * Probe for an expansion card.
611 *
612 * If bit 1 of the first byte of the card is set, then the
613 * card does not exist.
614 */
615static int __init
616ecard_probe(int slot, card_type_t type)
617{
618 ecard_t **ecp;
619 ecard_t *ec;
620 struct ex_ecid cid;
621 int i, rc = -ENOMEM;
622
623 ec = kzalloc(sizeof(ecard_t), GFP_KERNEL);
624 if (!ec)
625 goto nomem;
626
627 ec->slot_no = slot;
628 ec->type = type;
629 ec->irq = NO_IRQ;
630 ec->fiq = NO_IRQ;
631 ec->dma = NO_DMA;
632 ec->card_desc = NULL;
633 ec->ops = &ecard_default_ops;
634
635 rc = -ENODEV;
636 if ((ec->podaddr = ecard_address(ec, type, ECARD_SYNC)) == 0)
637 goto nodev;
638
639 cid.r_zero = 1;
640 ecard_readbytes(&cid, ec, 0, 16, 0);
641 if (cid.r_zero)
642 goto nodev;
643
644 ec->cid.id = cid.r_id;
645 ec->cid.cd = cid.r_cd;
646 ec->cid.is = cid.r_is;
647 ec->cid.w = cid.r_w;
648 ec->cid.manufacturer = ecard_getu16(cid.r_manu);
649 ec->cid.product = ecard_getu16(cid.r_prod);
650 ec->cid.country = cid.r_country;
651 ec->cid.irqmask = cid.r_irqmask;
652 ec->cid.irqoff = ecard_gets24(cid.r_irqoff);
653 ec->cid.fiqmask = cid.r_fiqmask;
654 ec->cid.fiqoff = ecard_gets24(cid.r_fiqoff);
655 ec->fiqaddr =
656 ec->irqaddr = (unsigned char *)ioaddr(ec->podaddr);
657
658 if (ec->cid.is) {
659 ec->irqmask = ec->cid.irqmask;
660 ec->irqaddr += ec->cid.irqoff;
661 ec->fiqmask = ec->cid.fiqmask;
662 ec->fiqaddr += ec->cid.fiqoff;
663 } else {
664 ec->irqmask = 1;
665 ec->fiqmask = 4;
666 }
667
668 for (i = 0; i < ARRAY_SIZE(blacklist); i++)
669 if (blacklist[i].manufacturer == ec->cid.manufacturer &&
670 blacklist[i].product == ec->cid.product) {
671 ec->card_desc = blacklist[i].type;
672 break;
673 }
674
675 snprintf(ec->dev.bus_id, sizeof(ec->dev.bus_id), "ecard%d", slot);
676 ec->dev.parent = NULL;
677 ec->dev.bus = &ecard_bus_type;
678 ec->dev.dma_mask = &ec->dma_mask;
679 ec->dma_mask = (u64)0xffffffff;
680
681 ecard_init_resources(ec);
682
683 /*
684 * hook the interrupt handlers
685 */
686 ec->irq = 32 + slot;
687 set_irq_chip(ec->irq, &ecard_chip);
688 set_irq_handler(ec->irq, do_level_IRQ);
689 set_irq_flags(ec->irq, IRQF_VALID);
690
691 for (ecp = &cards; *ecp; ecp = &(*ecp)->next);
692
693 *ecp = ec;
694 slot_to_expcard[slot] = ec;
695
696 device_register(&ec->dev);
697 device_create_file(&ec->dev, &dev_attr_dma);
698 device_create_file(&ec->dev, &dev_attr_irq);
699 device_create_file(&ec->dev, &dev_attr_resource);
700 device_create_file(&ec->dev, &dev_attr_vendor);
701 device_create_file(&ec->dev, &dev_attr_device);
702
703 return 0;
704
705nodev:
706 kfree(ec);
707nomem:
708 return rc;
709}
710
711/*
712 * Initialise the expansion card system.
713 * Locate all hardware - interrupt management and
714 * actual cards.
715 */
716static int __init ecard_init(void)
717{
718 int slot, irqhw;
719
720 printk("Probing expansion cards\n");
721
722 for (slot = 0; slot < MAX_ECARDS; slot ++) {
723 ecard_probe(slot, ECARD_IOC);
724 }
725
726 irqhw = ecard_probeirqhw();
727
728 set_irq_chained_handler(IRQ_EXPANSIONCARD,
729 irqhw ? ecard_irqexp_handler : ecard_irq_handler);
730
731 ecard_proc_init();
732
733 return 0;
734}
735
736subsys_initcall(ecard_init);
737
738/*
739 * ECARD "bus"
740 */
741static const struct ecard_id *
742ecard_match_device(const struct ecard_id *ids, struct expansion_card *ec)
743{
744 int i;
745
746 for (i = 0; ids[i].manufacturer != 65535; i++)
747 if (ec->cid.manufacturer == ids[i].manufacturer &&
748 ec->cid.product == ids[i].product)
749 return ids + i;
750
751 return NULL;
752}
753
754static int ecard_drv_probe(struct device *dev)
755{
756 struct expansion_card *ec = ECARD_DEV(dev);
757 struct ecard_driver *drv = ECARD_DRV(dev->driver);
758 const struct ecard_id *id;
759 int ret;
760
761 id = ecard_match_device(drv->id_table, ec);
762
763 ecard_claim(ec);
764 ret = drv->probe(ec, id);
765 if (ret)
766 ecard_release(ec);
767 return ret;
768}
769
770static int ecard_drv_remove(struct device *dev)
771{
772 struct expansion_card *ec = ECARD_DEV(dev);
773 struct ecard_driver *drv = ECARD_DRV(dev->driver);
774
775 drv->remove(ec);
776 ecard_release(ec);
777
778 return 0;
779}
780
781/*
782 * Before rebooting, we must make sure that the expansion card is in a
783 * sensible state, so it can be re-detected. This means that the first
784 * page of the ROM must be visible. We call the expansion cards reset
785 * handler, if any.
786 */
787static void ecard_drv_shutdown(struct device *dev)
788{
789 struct expansion_card *ec = ECARD_DEV(dev);
790 struct ecard_driver *drv = ECARD_DRV(dev->driver);
791 struct ecard_request req;
792
793 if (drv->shutdown)
794 drv->shutdown(ec);
795 ecard_release(ec);
796 req.req = req_reset;
797 req.ec = ec;
798 ecard_call(&req);
799}
800
801int ecard_register_driver(struct ecard_driver *drv)
802{
803 drv->drv.bus = &ecard_bus_type;
804 drv->drv.probe = ecard_drv_probe;
805 drv->drv.remove = ecard_drv_remove;
806 drv->drv.shutdown = ecard_drv_shutdown;
807
808 return driver_register(&drv->drv);
809}
810
811void ecard_remove_driver(struct ecard_driver *drv)
812{
813 driver_unregister(&drv->drv);
814}
815
816static int ecard_match(struct device *_dev, struct device_driver *_drv)
817{
818 struct expansion_card *ec = ECARD_DEV(_dev);
819 struct ecard_driver *drv = ECARD_DRV(_drv);
820 int ret;
821
822 if (drv->id_table) {
823 ret = ecard_match_device(drv->id_table, ec) != NULL;
824 } else {
825 ret = ec->cid.id == drv->id;
826 }
827
828 return ret;
829}
830
831struct bus_type ecard_bus_type = {
832 .name = "ecard",
833 .match = ecard_match,
834};
835
836static int ecard_bus_init(void)
837{
838 return bus_register(&ecard_bus_type);
839}
840
841postcore_initcall(ecard_bus_init);
842
843EXPORT_SYMBOL(ecard_readchunk);
844EXPORT_SYMBOL(ecard_address);
845EXPORT_SYMBOL(ecard_register_driver);
846EXPORT_SYMBOL(ecard_remove_driver);
847EXPORT_SYMBOL(ecard_bus_type);
diff --git a/arch/arm26/kernel/entry.S b/arch/arm26/kernel/entry.S
deleted file mode 100644
index 7ffcc6e4770e..000000000000
--- a/arch/arm26/kernel/entry.S
+++ /dev/null
@@ -1,951 +0,0 @@
1/* arch/arm26/kernel/entry.S
2 *
3 * Assembled from chunks of code in arch/arm
4 *
5 * Copyright (C) 2003 Ian Molton
6 * Based on the work of RMK.
7 *
8 */
9
10#include <linux/linkage.h>
11
12#include <asm/assembler.h>
13#include <asm/asm-offsets.h>
14#include <asm/errno.h>
15#include <asm/hardware.h>
16#include <asm/sysirq.h>
17#include <asm/thread_info.h>
18#include <asm/page.h>
19#include <asm/ptrace.h>
20
21 .macro zero_fp
22#ifndef CONFIG_NO_FRAME_POINTER
23 mov fp, #0
24#endif
25 .endm
26
27 .text
28
29@ Bad Abort numbers
30@ -----------------
31@
32#define BAD_PREFETCH 0
33#define BAD_DATA 1
34#define BAD_ADDREXCPTN 2
35#define BAD_IRQ 3
36#define BAD_UNDEFINSTR 4
37
38@ OS version number used in SWIs
39@ RISC OS is 0
40@ RISC iX is 8
41@
42#define OS_NUMBER 9
43#define ARMSWI_OFFSET 0x000f0000
44
45@
46@ Stack format (ensured by USER_* and SVC_*)
47@ PSR and PC are combined on arm26
48@
49
50#define S_OFF 8
51
52#define S_OLD_R0 64
53#define S_PC 60
54#define S_LR 56
55#define S_SP 52
56#define S_IP 48
57#define S_FP 44
58#define S_R10 40
59#define S_R9 36
60#define S_R8 32
61#define S_R7 28
62#define S_R6 24
63#define S_R5 20
64#define S_R4 16
65#define S_R3 12
66#define S_R2 8
67#define S_R1 4
68#define S_R0 0
69
70 .macro save_user_regs
71 str r0, [sp, #-4]! @ Store SVC r0
72 str lr, [sp, #-4]! @ Store user mode PC
73 sub sp, sp, #15*4
74 stmia sp, {r0 - lr}^ @ Store the other user-mode regs
75 mov r0, r0
76 .endm
77
78 .macro slow_restore_user_regs
79 ldmia sp, {r0 - lr}^ @ restore the user regs not including PC
80 mov r0, r0
81 ldr lr, [sp, #15*4] @ get user PC
82 add sp, sp, #15*4+8 @ free stack
83 movs pc, lr @ return
84 .endm
85
86 .macro fast_restore_user_regs
87 add sp, sp, #S_OFF
88 ldmib sp, {r1 - lr}^
89 mov r0, r0
90 ldr lr, [sp, #15*4]
91 add sp, sp, #15*4+8
92 movs pc, lr
93 .endm
94
95 .macro save_svc_regs
96 str sp, [sp, #-16]!
97 str lr, [sp, #8]
98 str lr, [sp, #4]
99 stmfd sp!, {r0 - r12}
100 mov r0, #-1
101 str r0, [sp, #S_OLD_R0]
102 zero_fp
103 .endm
104
105 .macro save_svc_regs_irq
106 str sp, [sp, #-16]!
107 str lr, [sp, #4]
108 ldr lr, .LCirq
109 ldr lr, [lr]
110 str lr, [sp, #8]
111 stmfd sp!, {r0 - r12}
112 mov r0, #-1
113 str r0, [sp, #S_OLD_R0]
114 zero_fp
115 .endm
116
117 .macro restore_svc_regs
118 ldmfd sp, {r0 - pc}^
119 .endm
120
121 .macro mask_pc, rd, rm
122 bic \rd, \rm, #PCMASK
123 .endm
124
125 .macro disable_irqs, temp
126 mov \temp, pc
127 orr \temp, \temp, #PSR_I_BIT
128 teqp \temp, #0
129 .endm
130
131 .macro enable_irqs, temp
132 mov \temp, pc
133 and \temp, \temp, #~PSR_I_BIT
134 teqp \temp, #0
135 .endm
136
137 .macro initialise_traps_extra
138 .endm
139
140 .macro get_thread_info, rd
141 mov \rd, sp, lsr #13
142 mov \rd, \rd, lsl #13
143 .endm
144
145/*
146 * These are the registers used in the syscall handler, and allow us to
147 * have in theory up to 7 arguments to a function - r0 to r6.
148 *
149 * Note that tbl == why is intentional.
150 *
151 * We must set at least "tsk" and "why" when calling ret_with_reschedule.
152 */
153scno .req r7 @ syscall number
154tbl .req r8 @ syscall table pointer
155why .req r8 @ Linux syscall (!= 0)
156tsk .req r9 @ current thread_info
157
158/*
159 * Get the system call number.
160 */
161 .macro get_scno
162 mask_pc lr, lr
163 ldr scno, [lr, #-4] @ get SWI instruction
164 .endm
165/*
166 * -----------------------------------------------------------------------
167 */
168
169/*
170 * We rely on the fact that R0 is at the bottom of the stack (due to
171 * slow/fast restore user regs).
172 */
173#if S_R0 != 0
174#error "Please fix"
175#endif
176
177/*
178 * This is the fast syscall return path. We do as little as
179 * possible here, and this includes saving r0 back into the SVC
180 * stack.
181 */
182ret_fast_syscall:
183 disable_irqs r1 @ disable interrupts
184 ldr r1, [tsk, #TI_FLAGS]
185 tst r1, #_TIF_WORK_MASK
186 bne fast_work_pending
187 fast_restore_user_regs
188
189/*
190 * Ok, we need to do extra processing, enter the slow path.
191 */
192fast_work_pending:
193 str r0, [sp, #S_R0+S_OFF]! @ returned r0
194work_pending:
195 tst r1, #_TIF_NEED_RESCHED
196 bne work_resched
197 tst r1, #_TIF_SIGPENDING
198 beq no_work_pending
199 mov r0, sp @ 'regs'
200 mov r2, why @ 'syscall'
201 bl do_notify_resume
202 disable_irqs r1 @ disable interrupts
203 b no_work_pending
204
205work_resched:
206 bl schedule
207/*
208 * "slow" syscall return path. "why" tells us if this was a real syscall.
209 */
210ENTRY(ret_to_user)
211ret_slow_syscall:
212 disable_irqs r1 @ disable interrupts
213 ldr r1, [tsk, #TI_FLAGS]
214 tst r1, #_TIF_WORK_MASK
215 bne work_pending
216no_work_pending:
217 slow_restore_user_regs
218
219/*
220 * This is how we return from a fork.
221 */
222ENTRY(ret_from_fork)
223 bl schedule_tail
224 get_thread_info tsk
225 ldr r1, [tsk, #TI_FLAGS] @ check for syscall tracing
226 mov why, #1
227 tst r1, #_TIF_SYSCALL_TRACE @ are we tracing syscalls?
228 beq ret_slow_syscall
229 mov r1, sp
230 mov r0, #1 @ trace exit [IP = 1]
231 bl syscall_trace
232 b ret_slow_syscall
233
234// FIXME - is this strictly necessary?
235#include "calls.S"
236
237/*=============================================================================
238 * SWI handler
239 *-----------------------------------------------------------------------------
240 */
241
242 .align 5
243ENTRY(vector_swi)
244 save_user_regs
245 zero_fp
246 get_scno
247
248 enable_irqs ip
249
250 str r4, [sp, #-S_OFF]! @ push fifth arg
251
252 get_thread_info tsk
253 ldr ip, [tsk, #TI_FLAGS] @ check for syscall tracing
254 bic scno, scno, #0xff000000 @ mask off SWI op-code
255 eor scno, scno, #OS_NUMBER << 20 @ check OS number
256 adr tbl, sys_call_table @ load syscall table pointer
257 tst ip, #_TIF_SYSCALL_TRACE @ are we tracing syscalls?
258 bne __sys_trace
259
260 adral lr, ret_fast_syscall @ set return address
261 orral lr, lr, #PSR_I_BIT | MODE_SVC26 @ Force SVC mode on return
262 cmp scno, #NR_syscalls @ check upper syscall limit
263 ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine
264
265 add r1, sp, #S_OFF
2662: mov why, #0 @ no longer a real syscall
267 cmp scno, #ARMSWI_OFFSET
268 eor r0, scno, #OS_NUMBER << 20 @ put OS number back
269 bcs arm_syscall
270 b sys_ni_syscall @ not private func
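vector_swi above recovers the syscall number from the SWI instruction word itself: the opcode byte is masked off, the RISC OS-style OS number (OS_NUMBER << 20) is XORed out, and the result indexes sys_call_table; anything out of range falls through to arm_syscall or sys_ni_syscall. A standalone C sketch of that decode; OS_NUMBER and the syscall limit are illustrative placeholders, not values quoted from the arm26 headers:

#include <stdint.h>
#include <stdio.h>

#define OS_NUMBER    9      /* placeholder for the value in the arm26 headers */
#define NR_SYSCALLS  256    /* placeholder upper limit */

/* Turn a 32-bit SWI instruction word into a sys_call_table index, or -1. */
static int swi_to_syscall(uint32_t swi_insn)
{
        uint32_t scno = swi_insn & 0x00ffffff;  /* bic scno, scno, #0xff000000 */

        scno ^= OS_NUMBER << 20;                /* eor scno, scno, #OS_NUMBER << 20 */
        if (scno >= NR_SYSCALLS)
                return -1;                      /* handled by the special-case path above */
        return (int)scno;
}

int main(void)
{
        uint32_t insn = 0xef000000 | (OS_NUMBER << 20) | 4;  /* a SWI for syscall 4 */
        printf("syscall index: %d\n", swi_to_syscall(insn));
        return 0;
}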
271
272 /*
273 * This is the really slow path. We're going to be doing
274 * context switches, and waiting for our parent to respond.
275 */
276__sys_trace:
277 add r1, sp, #S_OFF
278 mov r0, #0 @ trace entry [IP = 0]
279 bl syscall_trace
280
281 adral lr, __sys_trace_return @ set return address
282 orral lr, lr, #PSR_I_BIT | MODE_SVC26 @ Force SVC mode on return
283 add r1, sp, #S_R0 + S_OFF @ pointer to regs
284 cmp scno, #NR_syscalls @ check upper syscall limit
285 ldmccia r1, {r0 - r3} @ have to reload r0 - r3
286 ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine
287 b 2b
288
289__sys_trace_return:
290 str r0, [sp, #S_R0 + S_OFF]! @ save returned r0
291 mov r1, sp
292 mov r0, #1 @ trace exit [IP = 1]
293 bl syscall_trace
294 b ret_slow_syscall
295
296 .align 5
297
298 .type sys_call_table, #object
299ENTRY(sys_call_table)
300#include "calls.S"
301
302/*============================================================================
303 * Special system call wrappers
304 */
305@ r0 = syscall number
306@ r5 = syscall table
307 .type sys_syscall, #function
308sys_syscall:
309 eor scno, r0, #OS_NUMBER << 20
310 cmp scno, #NR_syscalls @ check range
311 stmleia sp, {r5, r6} @ shuffle args
312 movle r0, r1
313 movle r1, r2
314 movle r2, r3
315 movle r3, r4
316 ldrle pc, [tbl, scno, lsl #2]
317 b sys_ni_syscall
318
319sys_fork_wrapper:
320 add r0, sp, #S_OFF
321 b sys_fork
322
323sys_vfork_wrapper:
324 add r0, sp, #S_OFF
325 b sys_vfork
326
327sys_execve_wrapper:
328 add r3, sp, #S_OFF
329 b sys_execve
330
331sys_clone_wapper:
332 add r2, sp, #S_OFF
333 b sys_clone
334
335sys_sigsuspend_wrapper:
336 add r3, sp, #S_OFF
337 b sys_sigsuspend
338
339sys_rt_sigsuspend_wrapper:
340 add r2, sp, #S_OFF
341 b sys_rt_sigsuspend
342
343sys_sigreturn_wrapper:
344 add r0, sp, #S_OFF
345 b sys_sigreturn
346
347sys_rt_sigreturn_wrapper:
348 add r0, sp, #S_OFF
349 b sys_rt_sigreturn
350
351sys_sigaltstack_wrapper:
352 ldr r2, [sp, #S_OFF + S_SP]
353 b do_sigaltstack
354
355/*
356 * Note: off_4k (r5) is always in units of 4K. If we can't do the requested
357 * offset, we return -EINVAL. FIXME - this lost some stuff from arm32 due to
358 * ifdefs; check it out.
359 */
360sys_mmap2:
361 tst r5, #((1 << (PAGE_SHIFT - 12)) - 1)
362 moveq r5, r5, lsr #PAGE_SHIFT - 12
363 streq r5, [sp, #4]
364 beq do_mmap2
365 mov r0, #-EINVAL
366 RETINSTR(mov,pc, lr)
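sys_mmap2 above takes its file offset in 4KB units and has to hand do_mmap2 an offset in page units, rejecting anything that is not a whole number of pages. A standalone C sketch of that conversion; PAGE_SHIFT is assumed to be 15 (32KB pages) purely for illustration:

#include <errno.h>
#include <stdio.h>

#define PAGE_SHIFT 15   /* assumption: 32KB pages; the real value comes from asm/page.h */

/* Convert an offset given in 4KB units into page units, as sys_mmap2 does. */
static long off4k_to_pages(unsigned long off_4k)
{
        unsigned long mask = (1UL << (PAGE_SHIFT - 12)) - 1;

        if (off_4k & mask)                      /* not page aligned */
                return -EINVAL;
        return (long)(off_4k >> (PAGE_SHIFT - 12));
}

int main(void)
{
        printf("%ld\n", off4k_to_pages(16));    /* 64KB -> 2 pages of 32KB */
        printf("%ld\n", off4k_to_pages(3));     /* 12KB -> -EINVAL */
        return 0;
}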
367
368/*
369 * Design issues:
370 * - We have several modes that each vector can be called from,
371 * each with its own set of registers. On entry to any vector,
372 * we *must* save the registers used in *that* mode.
373 *
374 * - This code must be as fast as possible.
375 *
376 * There are a few restrictions on the vectors:
377 * - the SWI vector cannot be called from *any* non-user mode
378 *
379 * - the FP emulator is *never* called from *any* non-user mode undefined
380 * instruction.
381 *
382 */
383
384 .text
385
386 .macro handle_irq
3871: mov r4, #IOC_BASE
388 ldrb r6, [r4, #0x24] @ get high priority first
389 adr r5, irq_prio_h
390 teq r6, #0
391 ldreqb r6, [r4, #0x14] @ get low priority
392 adreq r5, irq_prio_l
393
394 teq r6, #0 @ If an IRQ happened...
395 ldrneb r0, [r5, r6] @ get IRQ number
396 movne r1, sp @ get struct pt_regs
397 adrne lr, 1b @ Set return address to 1b
398 orrne lr, lr, #PSR_I_BIT | MODE_SVC26 @ (and force SVC mode)
399 bne asm_do_IRQ @ process IRQ (if asserted)
400 .endm
401
402
403/*
404 * Interrupt table (incorporates priority)
405 */
406 .macro irq_prio_table
407irq_prio_l: .byte 0, 0, 1, 0, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3
408 .byte 4, 0, 1, 0, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3
409 .byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
410 .byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
411 .byte 6, 6, 6, 6, 6, 6, 6, 6, 3, 3, 3, 3, 3, 3, 3, 3
412 .byte 6, 6, 6, 6, 6, 6, 6, 6, 3, 3, 3, 3, 3, 3, 3, 3
413 .byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
414 .byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
415 .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
416 .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
417 .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
418 .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
419 .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
420 .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
421 .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
422 .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
423irq_prio_h: .byte 0, 8, 9, 8,10,10,10,10,11,11,11,11,10,10,10,10
424 .byte 12, 8, 9, 8,10,10,10,10,11,11,11,11,10,10,10,10
425 .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
426 .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
427 .byte 14,14,14,14,10,10,10,10,11,11,11,11,10,10,10,10
428 .byte 14,14,14,14,10,10,10,10,11,11,11,11,10,10,10,10
429 .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
430 .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
431 .byte 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10
432 .byte 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10
433 .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
434 .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
435 .byte 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10
436 .byte 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10
437 .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
438 .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
439 .endm
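handle_irq above reads a pending-IRQ status byte from the IOC (the high-priority register at IOC_BASE + 0x24 first, then the low-priority one at + 0x14) and uses that byte to index the 256-entry priority tables, so the highest-priority pending source comes out of a single table load. A standalone C sketch of the lookup; the status reads are stubbed and the tables are tiny stand-ins for the full tables above:

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the reads at IOC_BASE + 0x24 (high) and + 0x14 (low). */
static uint8_t read_ioc_high(void) { return 0x00; }    /* nothing high-priority pending */
static uint8_t read_ioc_low(void)  { return 0x04; }    /* bit 2 asserted */

static const uint8_t irq_prio_h[256] = { 0 };
static const uint8_t irq_prio_l[256] = { [0x04] = 2 }; /* mirrors the first bytes above */

/* Mirror of handle_irq: prefer the high-priority status byte, else fall back. */
static int decode_pending_irq(void)
{
        const uint8_t *table = irq_prio_h;
        uint8_t status = read_ioc_high();

        if (!status) {
                status = read_ioc_low();
                table = irq_prio_l;
        }
        if (!status)
                return -1;              /* no IRQ asserted */
        return table[status];           /* IRQ number of the highest-priority source */
}

int main(void)
{
        printf("pending irq: %d\n", decode_pending_irq());
        return 0;
}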
440
441#if 1
442/*
443 * Uncomment these if you wish to get more debugging info about data aborts.
444 * FIXME - I bet we can find a way to encode these and keep performance.
445 */
446#define FAULT_CODE_LDRSTRPOST 0x80
447#define FAULT_CODE_LDRSTRPRE 0x40
448#define FAULT_CODE_LDRSTRREG 0x20
449#define FAULT_CODE_LDMSTM 0x10
450#define FAULT_CODE_LDCSTC 0x08
451#endif
452#define FAULT_CODE_PREFETCH 0x04
453#define FAULT_CODE_WRITE 0x02
454#define FAULT_CODE_FORCECOW 0x01
455
456/*=============================================================================
457 * Undefined FIQs
458 *-----------------------------------------------------------------------------
459 */
460_unexp_fiq: ldr sp, .LCfiq
461 mov r12, #IOC_BASE
462 strb r12, [r12, #0x38] @ Disable FIQ register
463 teqp pc, #PSR_I_BIT | PSR_F_BIT | MODE_SVC26
464 mov r0, r0
465 stmfd sp!, {r0 - r3, ip, lr}
466 adr r0, Lfiqmsg
467 bl printk
468 ldmfd sp!, {r0 - r3, ip, lr}
469 teqp pc, #PSR_I_BIT | PSR_F_BIT | MODE_FIQ26
470 mov r0, r0
471 movs pc, lr
472
473Lfiqmsg: .ascii "*** Unexpected FIQ\n\0"
474 .align
475
476.LCfiq: .word __temp_fiq
477.LCirq: .word __temp_irq
478
479/*=============================================================================
480 * Undefined instruction handler
481 *-----------------------------------------------------------------------------
482 * Handles floating point instructions
483 */
484vector_undefinstr:
485 tst lr, #MODE_SVC26 @ did we come from a non-user mode?
486 bne __und_svc @ yes - deal with it.
487/* Otherwise, fall through for the user-space (common) case. */
488 save_user_regs
489 zero_fp @ zero frame pointer
490 teqp pc, #PSR_I_BIT | MODE_SVC26 @ disable IRQs
491.Lbug_undef:
492 ldr r4, .LC2
493 ldr pc, [r4] @ Call FP module entry point
494/* FIXME - should we trap for a null pointer here? */
495
496/* The SVC mode case */
497__und_svc: save_svc_regs @ Non-user mode
498 mask_pc r0, lr
499 and r2, lr, #3
500 sub r0, r0, #4
501 mov r1, sp
502 bl do_undefinstr
503 restore_svc_regs
504
505/* We get here if the FP emulator doesn't handle the undef instr.
506 * If the insn WAS handled, the emulator jumps to ret_from_exception by itself.
507 */
508 .globl fpundefinstr
509fpundefinstr:
510 mov r0, lr
511 mov r1, sp
512 teqp pc, #MODE_SVC26
513 bl do_undefinstr
514 b ret_from_exception @ Normal FP exit
515
516#if defined CONFIG_FPE_NWFPE || defined CONFIG_FPE_FASTFPE
517 /* The FPE is always present */
518 .equ fpe_not_present, 0
519#else
520/* We get here if an undefined instruction happens and the floating
521 * point emulator is not present. If the offending instruction was
522 * a WFS, we just perform a normal return as if we had emulated the
523 * operation. This is a hack to allow some basic userland binaries
524 * to run so that the emulator module proper can be loaded. --philb
525 * FIXME - probably a broken useless hack...
526 */
527fpe_not_present:
528 adr r10, wfs_mask_data
529 ldmia r10, {r4, r5, r6, r7, r8}
530 ldr r10, [sp, #S_PC] @ Load PC
531 sub r10, r10, #4
532 mask_pc r10, r10
533 ldrt r10, [r10] @ get instruction
534 and r5, r10, r5
535 teq r5, r4 @ Is it WFS?
536 beq ret_from_exception
537 and r5, r10, r8
538 teq r5, r6 @ Is it LDF/STF on sp or fp?
539 teqne r5, r7
540 bne fpundefinstr
541 tst r10, #0x00200000 @ Does it have WB
542 beq ret_from_exception
543 and r4, r10, #255 @ get offset
544 and r6, r10, #0x000f0000
545 tst r10, #0x00800000 @ +/-
546 ldr r5, [sp, r6, lsr #14] @ Load reg
547 rsbeq r4, r4, #0
548 add r5, r5, r4, lsl #2
549 str r5, [sp, r6, lsr #14] @ Save reg
550 b ret_from_exception
551
552wfs_mask_data: .word 0x0e200110 @ WFS/RFS
553 .word 0x0fef0fff
554 .word 0x0d0d0100 @ LDF [sp]/STF [sp]
555 .word 0x0d0b0100 @ LDF [fp]/STF [fp]
556 .word 0x0f0f0f00
557#endif
558
559.LC2: .word fp_enter
560
561/*=============================================================================
562 * Prefetch abort handler
563 *-----------------------------------------------------------------------------
564 */
565#define DEBUG_UNDEF
566/* remember: lr = USR pc */
567vector_prefetch:
568 sub lr, lr, #4
569 tst lr, #MODE_SVC26
570 bne __pabt_invalid
571 save_user_regs
572 teqp pc, #MODE_SVC26 @ Enable IRQs...
573 mask_pc r0, lr @ Address of abort
574 mov r1, sp @ Tasks registers
575 bl do_PrefetchAbort
576 teq r0, #0 @ If non-zero, we believe this abort..
577 bne ret_from_exception
578#ifdef DEBUG_UNDEF
579 adr r0, t
580 bl printk
581#endif
582 ldr lr, [sp,#S_PC] @ FIXME program to test this on. I think it's
583 b .Lbug_undef @ broken at the moment though!
584
585__pabt_invalid: save_svc_regs
586 mov r0, sp @ Prefetch aborts are definitely *not*
587 mov r1, #BAD_PREFETCH @ allowed in non-user modes. We can't
588 and r2, lr, #3 @ recover from this problem.
589 b bad_mode
590
591#ifdef DEBUG_UNDEF
592t: .ascii "*** undef ***\r\n\0"
593 .align
594#endif
595
596/*=============================================================================
597 * Address exception handler
598 *-----------------------------------------------------------------------------
599 * These aren't too critical.
600 * (they're not supposed to happen).
601 * In order to debug the reason for address exceptions in non-user modes,
602 * we have to obtain all the registers so that we can see what's going on.
603 */
604
605vector_addrexcptn:
606 sub lr, lr, #8
607 tst lr, #3
608 bne Laddrexcptn_not_user
609 save_user_regs
610 teq pc, #MODE_SVC26
611 mask_pc r0, lr @ Point to instruction
612 mov r1, sp @ Point to registers
613 mov r2, #0x400
614 mov lr, pc
615 bl do_excpt
616 b ret_from_exception
617
618Laddrexcptn_not_user:
619 save_svc_regs
620 and r2, lr, #3
621 teq r2, #3
622 bne Laddrexcptn_illegal_mode
623 teqp pc, #MODE_SVC26
624 mask_pc r0, lr
625 mov r1, sp
626 orr r2, r2, #0x400
627 bl do_excpt
628 ldmia sp, {r0 - lr} @ I can't remember the reason I changed this...
629 add sp, sp, #15*4
630 movs pc, lr
631
632Laddrexcptn_illegal_mode:
633 mov r0, sp
634 str lr, [sp, #-4]!
635 orr r1, r2, #PSR_I_BIT | PSR_F_BIT
636 teqp r1, #0 @ change into mode (won't be user mode)
637 mov r0, r0
638 mov r1, r8 @ Any register from r8 - r14 can be banked
639 mov r2, r9
640 mov r3, r10
641 mov r4, r11
642 mov r5, r12
643 mov r6, r13
644 mov r7, r14
645 teqp pc, #PSR_F_BIT | MODE_SVC26 @ back to svc
646 mov r0, r0
647 stmfd sp!, {r1-r7}
648 ldmia r0, {r0-r7}
649 stmfd sp!, {r0-r7}
650 mov r0, sp
651 mov r1, #BAD_ADDREXCPTN
652 b bad_mode
653
654/*=============================================================================
655 * Interrupt (IRQ) handler
656 *-----------------------------------------------------------------------------
657 * Note: if the IRQ was taken whilst in user mode, then *no* kernel routine
658 * is running, so we do not have to save the SVC lr.
659 *
660 * Entered in IRQ mode.
661 */
662
663vector_IRQ: ldr sp, .LCirq @ Setup some temporary stack
664 sub lr, lr, #4
665 str lr, [sp] @ push return address
666
667 tst lr, #3
668 bne __irq_non_usr
669
670__irq_usr: teqp pc, #PSR_I_BIT | MODE_SVC26 @ Enter SVC mode
671 mov r0, r0
672
673 ldr lr, .LCirq
674 ldr lr, [lr] @ Restore lr for jump back to USR
675
676 save_user_regs
677
678 handle_irq
679
680 mov why, #0
681 get_thread_info tsk
682 b ret_to_user
683
684@ Place the IRQ priority table here so that the handle_irq macros above
685@ and below here can access it.
686
687 irq_prio_table
688
689__irq_non_usr: teqp pc, #PSR_I_BIT | MODE_SVC26 @ Enter SVC mode
690 mov r0, r0
691
692 save_svc_regs_irq
693
694 and r2, lr, #3
695 teq r2, #3
696 bne __irq_invalid @ IRQ not from SVC mode
697
698 handle_irq
699
700 restore_svc_regs
701
702__irq_invalid: mov r0, sp
703 mov r1, #BAD_IRQ
704 b bad_mode
705
706/*=============================================================================
707 * Data abort handler code
708 *-----------------------------------------------------------------------------
709 *
710 * This handles both exceptions from user and SVC modes, computes the address
711 * range of the problem, and does any correction that is required. It then
712 * calls the kernel data abort routine.
713 *
714 * This is where I wish that the ARM would tell you which address aborted.
715 */
716
717vector_data: sub lr, lr, #8 @ Correct lr
718 tst lr, #3
719 bne Ldata_not_user
720 save_user_regs
721 teqp pc, #MODE_SVC26
722 mask_pc r0, lr
723 bl Ldata_do
724 b ret_from_exception
725
726Ldata_not_user:
727 save_svc_regs
728 and r2, lr, #3
729 teq r2, #3
730 bne Ldata_illegal_mode
731 tst lr, #PSR_I_BIT
732 teqeqp pc, #MODE_SVC26
733 mask_pc r0, lr
734 bl Ldata_do
735 restore_svc_regs
736
737Ldata_illegal_mode:
738 mov r0, sp
739 mov r1, #BAD_DATA
740 b bad_mode
741
742Ldata_do: mov r3, sp
743 ldr r4, [r0] @ Get instruction
744 mov r2, #0
745 tst r4, #1 << 20 @ Check to see if it is a write instruction
746 orreq r2, r2, #FAULT_CODE_WRITE @ Indicate write instruction
747 mov r1, r4, lsr #22 @ Now branch to the relevant processing routine
748 and r1, r1, #15 << 2
749 add pc, pc, r1
750 movs pc, lr
751 b Ldata_unknown
752 b Ldata_unknown
753 b Ldata_unknown
754 b Ldata_unknown
755 b Ldata_ldrstr_post @ ldr rd, [rn], #m
756 b Ldata_ldrstr_numindex @ ldr rd, [rn, #m] @ RegVal
757 b Ldata_ldrstr_post @ ldr rd, [rn], rm
758 b Ldata_ldrstr_regindex @ ldr rd, [rn, rm]
759 b Ldata_ldmstm @ ldm*a rn, <rlist>
760 b Ldata_ldmstm @ ldm*b rn, <rlist>
761 b Ldata_unknown
762 b Ldata_unknown
763 b Ldata_ldrstr_post @ ldc rd, [rn], #m @ Same as ldr rd, [rn], #m
764 b Ldata_ldcstc_pre @ ldc rd, [rn, #m]
765 b Ldata_unknown
766Ldata_unknown: @ Part of jumptable
767 mov r0, r1
768 mov r1, r4
769 mov r2, r3
770 b baddataabort
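Ldata_do above dispatches on bits 27:24 of the aborting instruction: the lsr #22 / and #15 << 2 pair leaves that field already scaled to a word offset into the 16-entry branch table. A short C equivalent of the index, for reference only:

#include <stdint.h>
#include <stdio.h>

/* Index into Ldata_do's 16-entry jump table. */
static unsigned int abort_class(uint32_t insn)
{
        return (insn >> 24) & 0xf;      /* same as ((insn >> 22) & (15 << 2)) / 4 */
}

int main(void)
{
        printf("%u\n", abort_class(0xe5910000));  /* ldr r0, [r1] -> class 5, numindex */
        return 0;
}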
771
772Ldata_ldrstr_post:
773 mov r0, r4, lsr #14 @ Get Rn
774 and r0, r0, #15 << 2 @ Mask out reg.
775 teq r0, #15 << 2
776 ldr r0, [r3, r0] @ Get register
777 biceq r0, r0, #PCMASK
778 mov r1, r0
779#ifdef FAULT_CODE_LDRSTRPOST
780 orr r2, r2, #FAULT_CODE_LDRSTRPOST
781#endif
782 b do_DataAbort
783
784Ldata_ldrstr_numindex:
785 mov r0, r4, lsr #14 @ Get Rn
786 and r0, r0, #15 << 2 @ Mask out reg.
787 teq r0, #15 << 2
788 ldr r0, [r3, r0] @ Get register
789 mov r1, r4, lsl #20
790 biceq r0, r0, #PCMASK
791 tst r4, #1 << 23
792 addne r0, r0, r1, lsr #20
793 subeq r0, r0, r1, lsr #20
794 mov r1, r0
795#ifdef FAULT_CODE_LDRSTRPRE
796 orr r2, r2, #FAULT_CODE_LDRSTRPRE
797#endif
798 b do_DataAbort
799
800Ldata_ldrstr_regindex:
801 mov r0, r4, lsr #14 @ Get Rn
802 and r0, r0, #15 << 2 @ Mask out reg.
803 teq r0, #15 << 2
804 ldr r0, [r3, r0] @ Get register
805 and r7, r4, #15
806 biceq r0, r0, #PCMASK
807 teq r7, #15 @ Check for PC
808 ldr r7, [r3, r7, lsl #2] @ Get Rm
809 and r8, r4, #0x60 @ Get shift types
810 biceq r7, r7, #PCMASK
811 mov r9, r4, lsr #7 @ Get shift amount
812 and r9, r9, #31
813 teq r8, #0
814 moveq r7, r7, lsl r9
815 teq r8, #0x20 @ LSR shift
816 moveq r7, r7, lsr r9
817 teq r8, #0x40 @ ASR shift
818 moveq r7, r7, asr r9
819 teq r8, #0x60 @ ROR shift
820 moveq r7, r7, ror r9
821 tst r4, #1 << 23
822 addne r0, r0, r7
823 subeq r0, r0, r7 @ Apply correction
824 mov r1, r0
825#ifdef FAULT_CODE_LDRSTRREG
826 orr r2, r2, #FAULT_CODE_LDRSTRREG
827#endif
828 b do_DataAbort
829
830Ldata_ldmstm:
831 mov r7, #0x11
832 orr r7, r7, r7, lsl #8
833 and r0, r4, r7
834 and r1, r4, r7, lsl #1
835 add r0, r0, r1, lsr #1
836 and r1, r4, r7, lsl #2
837 add r0, r0, r1, lsr #2
838 and r1, r4, r7, lsl #3
839 add r0, r0, r1, lsr #3
840 add r0, r0, r0, lsr #8
841 add r0, r0, r0, lsr #4
842 and r7, r0, #15 @ r7 = no. of registers to transfer.
843 mov r5, r4, lsr #14 @ Get Rn
844 and r5, r5, #15 << 2
845 ldr r0, [r3, r5] @ Get reg
846 eor r6, r4, r4, lsl #2
847 tst r6, #1 << 23 @ Check inc/dec ^ writeback
848 rsbeq r7, r7, #0
849 add r7, r0, r7, lsl #2 @ Do correction (signed)
850 subne r1, r7, #1
851 subeq r1, r0, #1
852 moveq r0, r7
853 tst r4, #1 << 21 @ Check writeback
854 strne r7, [r3, r5]
855 eor r6, r4, r4, lsl #1
856 tst r6, #1 << 24 @ Check Pre/Post ^ inc/dec
857 addeq r0, r0, #4
858 addeq r1, r1, #4
859 teq r5, #15*4 @ CHECK FOR PC
860 biceq r1, r1, #PCMASK
861 biceq r0, r0, #PCMASK
862#ifdef FAULT_CODE_LDMSTM
863 orr r2, r2, #FAULT_CODE_LDMSTM
864#endif
865 b do_DataAbort
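Ldata_ldmstm above needs the number of registers in the LDM/STM register list, and counts them with a parallel bit count: the 16-bit list is masked with 0x1111 and its shifted copies, giving per-nibble counts, which are then folded down into the low nibble. The same trick as standalone C:

#include <stdint.h>
#include <stdio.h>

/* Count the registers named in a 16-bit LDM/STM register list. */
static unsigned int count_regs(uint16_t rlist)
{
        uint32_t n;

        n  =  rlist & 0x1111;           /* and r0, r4, r7          */
        n += (rlist & 0x2222) >> 1;     /* add r0, r0, r1, lsr #1  */
        n += (rlist & 0x4444) >> 2;     /* add r0, r0, r1, lsr #2  */
        n += (rlist & 0x8888) >> 3;     /* add r0, r0, r1, lsr #3  */
        n += n >> 8;                    /* fold the nibble counts together */
        n += n >> 4;
        return n & 0x1f;                /* the asm masks with #15, which is fine
                                           for lists of fewer than 16 registers */
}

int main(void)
{
        printf("%u\n", count_regs(0x00ff));     /* r0-r7      -> 8 */
        printf("%u\n", count_regs(0x8005));     /* r0, r2, pc -> 3 */
        return 0;
}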
866
867Ldata_ldcstc_pre:
868 mov r0, r4, lsr #14 @ Get Rn
869 and r0, r0, #15 << 2 @ Mask out reg.
870 teq r0, #15 << 2
871 ldr r0, [r3, r0] @ Get register
872 mov r1, r4, lsl #24 @ Get offset
873 biceq r0, r0, #PCMASK
874 tst r4, #1 << 23
875 addne r0, r0, r1, lsr #24
876 subeq r0, r0, r1, lsr #24
877 mov r1, r0
878#ifdef FAULT_CODE_LDCSTC
879 orr r2, r2, #FAULT_CODE_LDCSTC
880#endif
881 b do_DataAbort
882
883
884/*
885 * This is the return code to user mode for abort handlers
886 */
887ENTRY(ret_from_exception)
888 get_thread_info tsk
889 mov why, #0
890 b ret_to_user
891
892 .data
893ENTRY(fp_enter)
894 .word fpe_not_present
895 .text
896/*
897 * Register switch for older 26-bit only ARMs
898 */
899ENTRY(__switch_to)
900 add r0, r0, #TI_CPU_SAVE
901 stmia r0, {r4 - sl, fp, sp, lr}
902 add r1, r1, #TI_CPU_SAVE
903 ldmia r1, {r4 - sl, fp, sp, pc}^
904
905/*
906 *=============================================================================
907 * Low-level interface code
908 *-----------------------------------------------------------------------------
909 * Trap initialisation
910 *-----------------------------------------------------------------------------
911 *
912 * Note - FIQ code has changed. The default is a couple of words at 0x1c, 0x20
913 * that call _unexp_fiq. However, we now copy the FIQ routine to 0x1c (removes
914 * some excess cycles).
915 *
916 * What we need to put into 0-0x1c are branches that jump into the kernel.
917 */
918
919 .section ".init.text",#alloc,#execinstr
920
921.Ljump_addresses:
922 swi SYS_ERROR0
923 .word vector_undefinstr - 12
924 .word vector_swi - 16
925 .word vector_prefetch - 20
926 .word vector_data - 24
927 .word vector_addrexcptn - 28
928 .word vector_IRQ - 32
929 .word _unexp_fiq - 36
930 b . + 8
931/*
932 * initialise the trap system
933 */
934ENTRY(__trap_init)
935 stmfd sp!, {r4 - r7, lr}
936 adr r1, .Ljump_addresses
937 ldmia r1, {r1 - r7, ip, lr}
938 orr r2, lr, r2, lsr #2
939 orr r3, lr, r3, lsr #2
940 orr r4, lr, r4, lsr #2
941 orr r5, lr, r5, lsr #2
942 orr r6, lr, r6, lsr #2
943 orr r7, lr, r7, lsr #2
944 orr ip, lr, ip, lsr #2
945 mov r0, #0
946 stmia r0, {r1 - r7, ip}
947 ldmfd sp!, {r4 - r7, pc}^
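__trap_init above turns the words in .Ljump_addresses into branch instructions: each stored word is already target minus vector minus 8, so shifting it right by 2 and ORing it into the opcode template taken from the trailing "b . + 8" yields a valid B instruction for that vector slot. A standalone C sketch of the same encoding, with illustrative addresses:

#include <stdint.h>
#include <stdio.h>

/* Build the ARM "B target" word for a low vector: 0xEA000000 is the opcode
 * supplied by the "b . + 8" template, and the 24-bit field is the word offset
 * (target - vector - 8) >> 2, exactly as the orr ..., lsr #2 sequence produces. */
static uint32_t arm_branch(uint32_t vector, uint32_t target)
{
        return 0xea000000u | (((target - vector - 8) >> 2) & 0x00ffffffu);
}

int main(void)
{
        /* illustrative addresses only */
        printf("word for vector 0x08: %#010x\n", arm_branch(0x08, 0x02081234));
        return 0;
}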
948
949 .bss
950__temp_irq: .space 4 @ saved lr_irq
951__temp_fiq: .space 128
diff --git a/arch/arm26/kernel/fiq.c b/arch/arm26/kernel/fiq.c
deleted file mode 100644
index c4776c96be6b..000000000000
--- a/arch/arm26/kernel/fiq.c
+++ /dev/null
@@ -1,201 +0,0 @@
1/*
2 * linux/arch/arm26/kernel/fiq.c
3 *
4 * Copyright (C) 1998 Russell King
5 * Copyright (C) 1998, 1999 Phil Blundell
6 * Copyright (C) 2003 Ian Molton
7 *
8 * FIQ support written by Philip Blundell <philb@gnu.org>, 1998.
9 *
10 * FIQ support re-written by Russell King to be more generic
11 *
12 * We now properly support a method by which the FIQ handlers can
13 * be stacked onto the vector. We still do not support sharing
14 * the FIQ vector itself.
15 *
16 * Operation is as follows:
17 * 1. Owner A claims FIQ:
18 * - default_fiq relinquishes control.
19 * 2. Owner A:
20 * - inserts code.
21 * - sets any registers,
22 * - enables FIQ.
23 * 3. Owner B claims FIQ:
24 * - if owner A has a relinquish function.
25 * - disable FIQs.
26 * - saves any registers.
27 * - returns zero.
28 * 4. Owner B:
29 * - inserts code.
30 * - sets any registers,
31 * - enables FIQ.
32 * 5. Owner B releases FIQ:
33 * - Owner A is asked to reacquire FIQ:
34 * - inserts code.
35 * - restores saved registers.
36 * - enables FIQ.
37 * 6. Goto 3
38 */
39#include <linux/module.h>
40#include <linux/mm.h>
41#include <linux/mman.h>
42#include <linux/init.h>
43#include <linux/seq_file.h>
44
45#include <asm/fiq.h>
46#include <asm/io.h>
47#include <asm/irq.h>
48#include <asm/pgalloc.h>
49#include <asm/system.h>
50#include <asm/uaccess.h>
51
52#define FIQ_VECTOR (vectors_base() + 0x1c)
53
54static unsigned long no_fiq_insn;
55
56#define unprotect_page_0()
57#define protect_page_0()
58
59/* Default reacquire function
60 * - we always relinquish FIQ control
61 * - we always reacquire FIQ control
62 */
63static int fiq_def_op(void *ref, int relinquish)
64{
65 if (!relinquish) {
66 unprotect_page_0();
67 *(unsigned long *)FIQ_VECTOR = no_fiq_insn;
68 protect_page_0();
69 }
70
71 return 0;
72}
73
74static struct fiq_handler default_owner = {
75 .name = "default",
76 .fiq_op = fiq_def_op,
77};
78
79static struct fiq_handler *current_fiq = &default_owner;
80
81int show_fiq_list(struct seq_file *p, void *v)
82{
83 if (current_fiq != &default_owner)
84 seq_printf(p, "FIQ: %s\n", current_fiq->name);
85
86 return 0;
87}
88
89void set_fiq_handler(void *start, unsigned int length)
90{
91 unprotect_page_0();
92
93 memcpy((void *)FIQ_VECTOR, start, length);
94
95 protect_page_0();
96}
97
98/*
99 * Taking an interrupt in FIQ mode is death, so both these functions
100 * disable irqs for the duration.
101 */
102void set_fiq_regs(struct pt_regs *regs)
103{
104 register unsigned long tmp, tmp2;
105 __asm__ volatile (
106 "mov %0, pc \n"
107 "bic %1, %0, #0x3 \n"
108 "orr %1, %1, %3 \n"
109 "teqp %1, #0 @ select FIQ mode \n"
110 "mov r0, r0 \n"
111 "ldmia %2, {r8 - r14} \n"
112 "teqp %0, #0 @ return to SVC mode \n"
113 "mov r0, r0 "
114 : "=&r" (tmp), "=&r" (tmp2)
115 : "r" (&regs->ARM_r8), "I" (PSR_I_BIT | PSR_F_BIT | MODE_FIQ26)
116 /* These registers aren't modified by the above code in a way
117 visible to the compiler, but we mark them as clobbers anyway
118 so that GCC won't put any of the input or output operands in
119 them. */
120 : "r8", "r9", "r10", "r11", "r12", "r13", "r14");
121}
122
123void get_fiq_regs(struct pt_regs *regs)
124{
125 register unsigned long tmp, tmp2;
126 __asm__ volatile (
127 "mov %0, pc \n"
128 "bic %1, %0, #0x3 \n"
129 "orr %1, %1, %3 \n"
130 "teqp %1, #0 @ select FIQ mode \n"
131 "mov r0, r0 \n"
132 "stmia %2, {r8 - r14} \n"
133 "teqp %0, #0 @ return to SVC mode \n"
134 "mov r0, r0 "
135 : "=&r" (tmp), "=&r" (tmp2)
136 : "r" (&regs->ARM_r8), "I" (PSR_I_BIT | PSR_F_BIT | MODE_FIQ26)
137 /* These registers aren't modified by the above code in a way
138 visible to the compiler, but we mark them as clobbers anyway
139 so that GCC won't put any of the input or output operands in
140 them. */
141 : "r8", "r9", "r10", "r11", "r12", "r13", "r14");
142}
143
144int claim_fiq(struct fiq_handler *f)
145{
146 int ret = 0;
147
148 if (current_fiq) {
149 ret = -EBUSY;
150
151 if (current_fiq->fiq_op != NULL)
152 ret = current_fiq->fiq_op(current_fiq->dev_id, 1);
153 }
154
155 if (!ret) {
156 f->next = current_fiq;
157 current_fiq = f;
158 }
159
160 return ret;
161}
162
163void release_fiq(struct fiq_handler *f)
164{
165 if (current_fiq != f) {
166 printk(KERN_ERR "%s FIQ trying to release %s FIQ\n",
167 f->name, current_fiq->name);
168#ifdef CONFIG_DEBUG_ERRORS
169 __backtrace();
170#endif
171 return;
172 }
173
174 do
175 current_fiq = current_fiq->next;
176 while (current_fiq->fiq_op(current_fiq->dev_id, 0));
177}
178
179void enable_fiq(int fiq)
180{
181 enable_irq(fiq + FIQ_START);
182}
183
184void disable_fiq(int fiq)
185{
186 disable_irq(fiq + FIQ_START);
187}
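The handover protocol described at the top of this file is driven through claim_fiq(), set_fiq_handler(), set_fiq_regs(), enable_fiq() and release_fiq(), with the fiq_op callback deciding whether the current owner will relinquish. A hedged driver-side sketch of steps 1-2 (claim, insert code, set registers, enable); the mydrv_* names and handler symbols are illustrative, and the struct layout is only inferred from this file:

#include <linux/errno.h>
#include <asm/fiq.h>
#include <asm/ptrace.h>

/* Relinquish/reacquire hook (step 3 of the protocol): refuse to hand over. */
static int mydrv_fiq_op(void *dev_id, int relinquish)
{
        return relinquish ? -EBUSY : 0;
}

static struct fiq_handler mydrv_fh = {
        .name   = "mydrv",
        .fiq_op = mydrv_fiq_op,
};

extern unsigned char mydrv_fiq_start, mydrv_fiq_end;    /* assembled FIQ routine */

static int mydrv_claim_fiq(struct pt_regs *fiq_regs, int fiq)
{
        int ret = claim_fiq(&mydrv_fh);                  /* step 1: claim */

        if (ret)
                return ret;
        set_fiq_handler(&mydrv_fiq_start,                /* step 2: insert code, */
                        &mydrv_fiq_end - &mydrv_fiq_start);
        set_fiq_regs(fiq_regs);                          /* set banked registers, */
        enable_fiq(fiq);                                 /* and enable the source */
        return 0;
}

Releasing is the mirror image: disable the source, then call release_fiq(&mydrv_fh) so the previous owner (or the default handler) is asked to reacquire.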
188
189EXPORT_SYMBOL(set_fiq_handler);
190EXPORT_SYMBOL(set_fiq_regs);
191EXPORT_SYMBOL(get_fiq_regs);
192EXPORT_SYMBOL(claim_fiq);
193EXPORT_SYMBOL(release_fiq);
194EXPORT_SYMBOL(enable_fiq);
195EXPORT_SYMBOL(disable_fiq);
196
197void __init init_FIQ(void)
198{
199 no_fiq_insn = *(unsigned long *)FIQ_VECTOR;
200 set_fs(get_fs());
201}
diff --git a/arch/arm26/kernel/head.S b/arch/arm26/kernel/head.S
deleted file mode 100644
index 93575e0e58fe..000000000000
--- a/arch/arm26/kernel/head.S
+++ /dev/null
@@ -1,112 +0,0 @@
1/*
2 * linux/arch/arm26/kernel/head.S
3 *
4 * Copyright (C) 1994-2000 Russell King
5 * Copyright (C) 2003 Ian Molton
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * 26-bit kernel startup code
12 */
13#include <linux/linkage.h>
14#include <asm/mach-types.h>
15
16 .globl swapper_pg_dir
17 .equ swapper_pg_dir, 0x0207d000
18
19/*
20 * Entry point.
21 */
22 .section ".init.text",#alloc,#execinstr
23ENTRY(stext)
24
25__entry:
26 cmp pc, #0x02000000
27 ldrlt pc, LC0 @ if 0x01800000, call at 0x02080000
28 teq r0, #0 @ Check for old calling method
29 blne oldparams @ Move page if old
30
31 adr r0, LC0
32 ldmib r0, {r2-r5, sp} @ Setup stack (and fetch other values)
33
34 mov r0, #0 @ Clear BSS
351: cmp r2, r3
36 strcc r0, [r2], #4
37 bcc 1b
38
39 bl detect_proc_type
40 str r0, [r4]
41 bl detect_arch_type
42 str r0, [r5]
43
44#ifdef CONFIG_XIP_KERNEL
45 ldr r3, ETEXT @ data section copy
46 ldr r4, SDATA
47 ldr r5, EDATA
481:
49 ldr r6, [r3], #4
50 str r6, [r4], #4
51 cmp r4, r5
52 blt 1b
53#endif
54 mov fp, #0
55 b start_kernel
56
57LC0: .word _stext
58 .word __bss_start @ r2
59 .word _end @ r3
60 .word processor_id @ r4
61 .word __machine_arch_type @ r5
62 .word init_thread_union+8192 @ sp
63#ifdef CONFIG_XIP_KERNEL
64ETEXT: .word _endtext
65SDATA: .word _sdata
66EDATA: .word __bss_start
67#endif
68
69arm2_id: .long 0x41560200 @ ARM2 and 250 don't have a CPUID
70arm250_id: .long 0x41560250 @ So we create some after probing for them
71 .align
72
73oldparams: mov r4, #0x02000000
74 add r3, r4, #0x00080000
75 add r4, r4, #0x0007c000
761: ldmia r0!, {r5 - r12}
77 stmia r4!, {r5 - r12}
78 cmp r4, r3
79 blt 1b
80 mov pc, lr
81
82/*
83 * We need some way to automatically detect the difference between
84 * these two machines. Unfortunately, it is not possible to detect
85 * the presence of the SuperIO chip, because that will hang the old
86 * Archimedes machines solid.
87 */
88/* DAG: Outdated, these have been combined !!!!!!! */
89detect_arch_type:
90#if defined(CONFIG_ARCH_ARC)
91 mov r0, #MACH_TYPE_ARCHIMEDES
92#elif defined(CONFIG_ARCH_A5K)
93 mov r0, #MACH_TYPE_A5K
94#endif
95 mov pc, lr
96
97detect_proc_type:
98 mov ip, lr
99 mov r2, #0xea000000 @ Point undef instr to continuation
100 adr r0, continue - 12
101 orr r0, r2, r0, lsr #2
102 mov r1, #0
103 str r0, [r1, #4]
104 ldr r0, arm2_id
105 swp r2, r2, [r1] @ check for swp (ARM2 can't)
106 ldr r0, arm250_id
107 mrc 15, 0, r3, c0, c0 @ check for CP#15 (ARM250 can't)
108 mov r0, r3
109continue: mov r2, #0xeb000000 @ Make undef vector loop
110 sub r2, r2, #2
111 str r2, [r1, #4]
112 mov pc, ip
diff --git a/arch/arm26/kernel/init_task.c b/arch/arm26/kernel/init_task.c
deleted file mode 100644
index 4191565b889b..000000000000
--- a/arch/arm26/kernel/init_task.c
+++ /dev/null
@@ -1,49 +0,0 @@
1/*
2 * linux/arch/arm26/kernel/init_task.c
3 *
4 * Copyright (C) 2003 Ian Molton
5 *
6 */
7#include <linux/mm.h>
8#include <linux/module.h>
9#include <linux/fs.h>
10#include <linux/sched.h>
11#include <linux/init.h>
12#include <linux/init_task.h>
13#include <linux/mqueue.h>
14
15#include <asm/uaccess.h>
16#include <asm/pgtable.h>
17
18static struct fs_struct init_fs = INIT_FS;
19static struct files_struct init_files = INIT_FILES;
20static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
21static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
22struct mm_struct init_mm = INIT_MM(init_mm);
23
24EXPORT_SYMBOL(init_mm);
25
26/*
27 * Initial thread structure.
28 *
29 * We need to make sure that this is 8192-byte aligned due to the
30 * way process stacks are handled. This is done by making sure
31 * the linker maps this in the .text segment right after head.S,
32 * and making the linker scripts ensure the proper alignment.
33 *
34 * FIXME - should this be 32K alignment on arm26?
35 *
36 * The things we do for performance...
37 */
38union thread_union init_thread_union
39 __attribute__((__section__(".init.task"))) =
40 { INIT_THREAD_INFO(init_task) };
41
42/*
43 * Initial task structure.
44 *
45 * All other task structs will be allocated on slabs in fork.c
46 */
47struct task_struct init_task = INIT_TASK(init_task);
48
49EXPORT_SYMBOL(init_task);
diff --git a/arch/arm26/kernel/irq.c b/arch/arm26/kernel/irq.c
deleted file mode 100644
index 2ffe695b062e..000000000000
--- a/arch/arm26/kernel/irq.c
+++ /dev/null
@@ -1,722 +0,0 @@
1/*
2 * linux/arch/arm/kernel/irq.c
3 *
4 * Copyright (C) 1992 Linus Torvalds
5 * Modifications for ARM processor Copyright (C) 1995-2000 Russell King.
6 * 'Borrowed' for ARM26 and (C) 2003 Ian Molton.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * This file contains the code used by various IRQ handling routines:
13 * asking for different IRQ's should be done through these routines
14 * instead of just grabbing them. Thus setups with different IRQ numbers
15 * shouldn't result in any weird surprises, and installing new handlers
16 * should be easier.
17 *
18 * IRQ's are in fact implemented a bit like signal handlers for the kernel.
19 * Naturally it's not a 1:1 relation, but there are similarities.
20 */
21#include <linux/module.h>
22#include <linux/ptrace.h>
23#include <linux/kernel_stat.h>
24#include <linux/signal.h>
25#include <linux/sched.h>
26#include <linux/ioport.h>
27#include <linux/interrupt.h>
28#include <linux/slab.h>
29#include <linux/random.h>
30#include <linux/smp.h>
31#include <linux/init.h>
32#include <linux/seq_file.h>
33#include <linux/errno.h>
34
35#include <asm/irq.h>
36#include <asm/system.h>
37#include <asm/irqchip.h>
38
39//FIXME - this ought to be in a header IMO
40void __init arc_init_irq(void);
41
42/*
43 * Maximum IRQ count. Currently, this is arbitrary. However, it should
44 * not be set too low, to prevent false triggering. Conversely, if it
45 * is set too high, then you could miss a stuck IRQ.
46 *
47 * FIXME Maybe we ought to set a timer and re-enable the IRQ at a later time?
48 */
49#define MAX_IRQ_CNT 100000
50
51static volatile unsigned long irq_err_count;
52static DEFINE_SPINLOCK(irq_controller_lock);
53
54struct irqdesc irq_desc[NR_IRQS];
55
56/*
57 * Dummy mask/unmask handler
58 */
59void dummy_mask_unmask_irq(unsigned int irq)
60{
61}
62
63void do_bad_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
64{
65 irq_err_count += 1;
66 printk(KERN_ERR "IRQ: spurious interrupt %d\n", irq);
67}
68
69static struct irqchip bad_chip = {
70 .ack = dummy_mask_unmask_irq,
71 .mask = dummy_mask_unmask_irq,
72 .unmask = dummy_mask_unmask_irq,
73};
74
75static struct irqdesc bad_irq_desc = {
76 .chip = &bad_chip,
77 .handle = do_bad_IRQ,
78 .depth = 1,
79};
80
81/**
82 * disable_irq - disable an irq and wait for completion
83 * @irq: Interrupt to disable
84 *
85 * Disable the selected interrupt line. We do this lazily.
86 *
87 * This function may be called from IRQ context.
88 */
89void disable_irq(unsigned int irq)
90{
91 struct irqdesc *desc = irq_desc + irq;
92 unsigned long flags;
93 spin_lock_irqsave(&irq_controller_lock, flags);
94 if (!desc->depth++)
95 desc->enabled = 0;
96 spin_unlock_irqrestore(&irq_controller_lock, flags);
97}
98EXPORT_SYMBOL(disable_irq);
99
100void disable_irq_nosync(unsigned int irq) __attribute__((alias("disable_irq")));
101
102EXPORT_SYMBOL(disable_irq_nosync);
103
104/**
105 * enable_irq - enable interrupt handling on an irq
106 * @irq: Interrupt to enable
107 *
108 * Re-enables the processing of interrupts on this IRQ line.
109 * Note that this may call the interrupt handler, so you may
110 * get unexpected results if you hold IRQs disabled.
111 *
112 * This function may be called from IRQ context.
113 */
114void enable_irq(unsigned int irq)
115{
116 struct irqdesc *desc = irq_desc + irq;
117 unsigned long flags;
118 int pending = 0;
119
120 spin_lock_irqsave(&irq_controller_lock, flags);
121 if (unlikely(!desc->depth)) {
122 printk("enable_irq(%u) unbalanced from %p\n", irq,
123 __builtin_return_address(0)); //FIXME bum addresses reported - why?
124 } else if (!--desc->depth) {
125 desc->probing = 0;
126 desc->enabled = 1;
127 desc->chip->unmask(irq);
128 pending = desc->pending;
129 desc->pending = 0;
130 /*
131 * If the interrupt was waiting to be processed,
132 * retrigger it.
133 */
134 if (pending)
135 desc->chip->rerun(irq);
136 }
137 spin_unlock_irqrestore(&irq_controller_lock, flags);
138}
139EXPORT_SYMBOL(enable_irq);
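disable_irq()/enable_irq() above keep a depth count, so the calls nest: the line is only unmasked again (and any pending, delayed interrupt rerun) once the count returns to zero. A short usage sketch of that property; the IRQ number is illustrative and the header carrying the prototypes may vary by tree:

#include <linux/interrupt.h>

static void mydev_irq_nesting_demo(unsigned int irq)
{
        disable_irq(irq);       /* depth 0 -> 1: line masked (lazily) */
        disable_irq(irq);       /* depth 1 -> 2: still masked */
        enable_irq(irq);        /* depth 2 -> 1: still masked */
        enable_irq(irq);        /* depth 1 -> 0: unmasked, pending IRQ rerun */
}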
140
141int show_interrupts(struct seq_file *p, void *v)
142{
143 int i = *(loff_t *) v;
144 struct irqaction * action;
145
146 if (i < NR_IRQS) {
147 action = irq_desc[i].action;
148 if (!action)
149 goto out;
150 seq_printf(p, "%3d: %10u ", i, kstat_irqs(i));
151 seq_printf(p, " %s", action->name);
152 for (action = action->next; action; action = action->next) {
153 seq_printf(p, ", %s", action->name);
154 }
155 seq_putc(p, '\n');
156 } else if (i == NR_IRQS) {
157 show_fiq_list(p, v);
158 seq_printf(p, "Err: %10lu\n", irq_err_count);
159 }
160out:
161 return 0;
162}
163
164/*
165 * IRQ lock detection.
166 *
167 * Hopefully, this should get us out of a few locked situations.
168 * However, it may take a while for this to happen, since we need
169 * a large number of IRQs to appear in the same jiffy with the
170 * same instruction pointer (or within 2 instructions).
171 */
172static int check_irq_lock(struct irqdesc *desc, int irq, struct pt_regs *regs)
173{
174 unsigned long instr_ptr = instruction_pointer(regs);
175
176 if (desc->lck_jif == jiffies &&
177 desc->lck_pc >= instr_ptr && desc->lck_pc < instr_ptr + 8) {
178 desc->lck_cnt += 1;
179
180 if (desc->lck_cnt > MAX_IRQ_CNT) {
181 printk(KERN_ERR "IRQ LOCK: IRQ%d is locking the system, disabled\n", irq);
182 return 1;
183 }
184 } else {
185 desc->lck_cnt = 0;
186 desc->lck_pc = instruction_pointer(regs);
187 desc->lck_jif = jiffies;
188 }
189 return 0;
190}
191
192static void
193__do_irq(unsigned int irq, struct irqaction *action, struct pt_regs *regs)
194{
195 unsigned int status;
196 int ret;
197
198 spin_unlock(&irq_controller_lock);
199 if (!(action->flags & IRQF_DISABLED))
200 local_irq_enable();
201
202 status = 0;
203 do {
204 ret = action->handler(irq, action->dev_id, regs);
205 if (ret == IRQ_HANDLED)
206 status |= action->flags;
207 action = action->next;
208 } while (action);
209
210 if (status & IRQF_SAMPLE_RANDOM)
211 add_interrupt_randomness(irq);
212
213 spin_lock_irq(&irq_controller_lock);
214}
215
216/*
217 * This is for software-decoded IRQs. The caller is expected to
218 * handle the ack, clear, mask and unmask issues.
219 */
220void
221do_simple_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
222{
223 struct irqaction *action;
224 const int cpu = smp_processor_id();
225
226 desc->triggered = 1;
227
228 kstat_cpu(cpu).irqs[irq]++;
229
230 action = desc->action;
231 if (action)
232 __do_irq(irq, desc->action, regs);
233}
234
235/*
236 * Most edge-triggered IRQ implementations seem to take a broken
237 * approach to this. Hence the complexity.
238 */
239void
240do_edge_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
241{
242 const int cpu = smp_processor_id();
243
244 desc->triggered = 1;
245
246 /*
247 * If we're currently running this IRQ, or it's disabled,
248 * we shouldn't process the IRQ. Instead, turn on the
249 * hardware masks.
250 */
251 if (unlikely(desc->running || !desc->enabled))
252 goto running;
253
254 /*
255 * Acknowledge and clear the IRQ, but don't mask it.
256 */
257 desc->chip->ack(irq);
258
259 /*
260 * Mark the IRQ currently in progress.
261 */
262 desc->running = 1;
263
264 kstat_cpu(cpu).irqs[irq]++;
265
266 do {
267 struct irqaction *action;
268
269 action = desc->action;
270 if (!action)
271 break;
272
273 if (desc->pending && desc->enabled) {
274 desc->pending = 0;
275 desc->chip->unmask(irq);
276 }
277
278 __do_irq(irq, action, regs);
279 } while (desc->pending);
280
281 desc->running = 0;
282
283 /*
284 * If we were disabled or freed, shut down the handler.
285 */
286 if (likely(desc->action && !check_irq_lock(desc, irq, regs)))
287 return;
288
289 running:
290 /*
291 * We got another IRQ while this one was masked or
292 * currently running. Delay it.
293 */
294 desc->pending = 1;
295 desc->chip->mask(irq);
296 desc->chip->ack(irq);
297}
298
299/*
300 * Level-based IRQ handler. Nice and simple.
301 */
302void
303do_level_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
304{
305 struct irqaction *action;
306 const int cpu = smp_processor_id();
307
308 desc->triggered = 1;
309
310 /*
311 * Acknowledge, clear _AND_ disable the interrupt.
312 */
313 desc->chip->ack(irq);
314
315 if (likely(desc->enabled)) {
316 kstat_cpu(cpu).irqs[irq]++;
317
318 /*
319 * Return with this interrupt masked if no action
320 */
321 action = desc->action;
322 if (action) {
323 __do_irq(irq, desc->action, regs);
324
325 if (likely(desc->enabled &&
326 !check_irq_lock(desc, irq, regs)))
327 desc->chip->unmask(irq);
328 }
329 }
330}
331
332/*
333 * do_IRQ handles all hardware IRQ's. Decoded IRQs should not
334 * come via this function. Instead, they should provide their
335 * own 'handler'
336 */
337asmlinkage void asm_do_IRQ(int irq, struct pt_regs *regs)
338{
339 struct irqdesc *desc = irq_desc + irq;
340
341 /*
342 * Some hardware gives randomly wrong interrupts. Rather
343 * than crashing, do something sensible.
344 */
345 if (irq >= NR_IRQS)
346 desc = &bad_irq_desc;
347
348 irq_enter();
349 spin_lock(&irq_controller_lock);
350 desc->handle(irq, desc, regs);
351 spin_unlock(&irq_controller_lock);
352 irq_exit();
353}
354
355void __set_irq_handler(unsigned int irq, irq_handler_t handle, int is_chained)
356{
357 struct irqdesc *desc;
358 unsigned long flags;
359
360 if (irq >= NR_IRQS) {
361 printk(KERN_ERR "Trying to install handler for IRQ%d\n", irq);
362 return;
363 }
364
365 if (handle == NULL)
366 handle = do_bad_IRQ;
367
368 desc = irq_desc + irq;
369
370 if (is_chained && desc->chip == &bad_chip)
371 printk(KERN_WARNING "Trying to install chained handler for IRQ%d\n", irq);
372
373 spin_lock_irqsave(&irq_controller_lock, flags);
374 if (handle == do_bad_IRQ) {
375 desc->chip->mask(irq);
376 desc->chip->ack(irq);
377 desc->depth = 1;
378 desc->enabled = 0;
379 }
380 desc->handle = handle;
381 if (handle != do_bad_IRQ && is_chained) {
382 desc->valid = 0;
383 desc->probe_ok = 0;
384 desc->depth = 0;
385 desc->chip->unmask(irq);
386 }
387 spin_unlock_irqrestore(&irq_controller_lock, flags);
388}
389
390void set_irq_chip(unsigned int irq, struct irqchip *chip)
391{
392 struct irqdesc *desc;
393 unsigned long flags;
394
395 if (irq >= NR_IRQS) {
396 printk(KERN_ERR "Trying to install chip for IRQ%d\n", irq);
397 return;
398 }
399
400 if (chip == NULL)
401 chip = &bad_chip;
402
403 desc = irq_desc + irq;
404 spin_lock_irqsave(&irq_controller_lock, flags);
405 desc->chip = chip;
406 spin_unlock_irqrestore(&irq_controller_lock, flags);
407}
408
409int set_irq_type(unsigned int irq, unsigned int type)
410{
411 struct irqdesc *desc;
412 unsigned long flags;
413 int ret = -ENXIO;
414
415 if (irq >= NR_IRQS) {
416 printk(KERN_ERR "Trying to set irq type for IRQ%d\n", irq);
417 return -ENODEV;
418 }
419
420 desc = irq_desc + irq;
421 if (desc->chip->type) {
422 spin_lock_irqsave(&irq_controller_lock, flags);
423 ret = desc->chip->type(irq, type);
424 spin_unlock_irqrestore(&irq_controller_lock, flags);
425 }
426
427 return ret;
428}
429
430void set_irq_flags(unsigned int irq, unsigned int iflags)
431{
432 struct irqdesc *desc;
433 unsigned long flags;
434
435 if (irq >= NR_IRQS) {
436 printk(KERN_ERR "Trying to set irq flags for IRQ%d\n", irq);
437 return;
438 }
439
440 desc = irq_desc + irq;
441 spin_lock_irqsave(&irq_controller_lock, flags);
442 desc->valid = (iflags & IRQF_VALID) != 0;
443 desc->probe_ok = (iflags & IRQF_PROBE) != 0;
444 desc->noautoenable = (iflags & IRQF_NOAUTOEN) != 0;
445 spin_unlock_irqrestore(&irq_controller_lock, flags);
446}
447
448int setup_irq(unsigned int irq, struct irqaction *new)
449{
450 int shared = 0;
451 struct irqaction *old, **p;
452 unsigned long flags;
453 struct irqdesc *desc;
454
455 /*
456 * Some drivers like serial.c use request_irq() heavily,
457 * so we have to be careful not to interfere with a
458 * running system.
459 */
460 if (new->flags & IRQF_SAMPLE_RANDOM) {
461 /*
462 * This function might sleep, so we want to call it first,
463 * outside of the atomic block.
464 * Yes, this might clear the entropy pool if the wrong
465 * driver is loaded without actually installing a new
466 * handler, but is this really a problem? Only the
467 * sysadmin is able to do this.
468 */
469 rand_initialize_irq(irq);
470 }
471
472 /*
473 * The following block of code has to be executed atomically
474 */
475 desc = irq_desc + irq;
476 spin_lock_irqsave(&irq_controller_lock, flags);
477 p = &desc->action;
478 if ((old = *p) != NULL) {
479 /* Can't share interrupts unless both agree to */
480 if (!(old->flags & new->flags & IRQF_SHARED)) {
481 spin_unlock_irqrestore(&irq_controller_lock, flags);
482 return -EBUSY;
483 }
484
485 /* add new interrupt at end of irq queue */
486 do {
487 p = &old->next;
488 old = *p;
489 } while (old);
490 shared = 1;
491 }
492
493 *p = new;
494
495 if (!shared) {
496 desc->probing = 0;
497 desc->running = 0;
498 desc->pending = 0;
499 desc->depth = 1;
500 if (!desc->noautoenable) {
501 desc->depth = 0;
502 desc->enabled = 1;
503 desc->chip->unmask(irq);
504 }
505 }
506
507 spin_unlock_irqrestore(&irq_controller_lock, flags);
508 return 0;
509}
510
511/**
512 * request_irq - allocate an interrupt line
513 * @irq: Interrupt line to allocate
514 * @handler: Function to be called when the IRQ occurs
515 * @irqflags: Interrupt type flags
516 * @devname: An ascii name for the claiming device
517 * @dev_id: A cookie passed back to the handler function
518 *
519 * This call allocates interrupt resources and enables the
520 * interrupt line and IRQ handling. From the point this
521 * call is made your handler function may be invoked. Since
522 * your handler function must clear any interrupt the board
523 * raises, you must take care both to initialise your hardware
524 * and to set up the interrupt handler in the right order.
525 *
526 * Dev_id must be globally unique. Normally the address of the
527 * device data structure is used as the cookie. Since the handler
528 * receives this value it makes sense to use it.
529 *
530 * If your interrupt is shared you must pass a non NULL dev_id
531 * as this is required when freeing the interrupt.
532 *
533 * Flags:
534 *
535 * IRQF_SHARED Interrupt is shared
536 *
537 * IRQF_DISABLED Disable local interrupts while processing
538 *
539 * IRQF_SAMPLE_RANDOM The interrupt can be used for entropy
540 *
541 */
542
543//FIXME - handler used to return void - what's the significance of the change?
544int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *),
545 unsigned long irq_flags, const char * devname, void *dev_id)
546{
547 unsigned long retval;
548 struct irqaction *action;
549
550 if (irq >= NR_IRQS || !irq_desc[irq].valid || !handler ||
551 (irq_flags & IRQF_SHARED && !dev_id))
552 return -EINVAL;
553
554 action = kmalloc(sizeof(struct irqaction), GFP_KERNEL);
555 if (!action)
556 return -ENOMEM;
557
558 action->handler = handler;
559 action->flags = irq_flags;
560 cpus_clear(action->mask);
561 action->name = devname;
562 action->next = NULL;
563 action->dev_id = dev_id;
564
565 retval = setup_irq(irq, action);
566
567 if (retval)
568 kfree(action);
569 return retval;
570}
571
572EXPORT_SYMBOL(request_irq);
573
574/**
575 * free_irq - free an interrupt
576 * @irq: Interrupt line to free
577 * @dev_id: Device identity to free
578 *
579 * Remove an interrupt handler. The handler is removed and if the
580 * interrupt line is no longer in use by any driver it is disabled.
581 * On a shared IRQ the caller must ensure the interrupt is disabled
582 * on the card it drives before calling this function.
583 *
584 * This function may be called from interrupt context.
585 */
586void free_irq(unsigned int irq, void *dev_id)
587{
588 struct irqaction * action, **p;
589 unsigned long flags;
590
591 if (irq >= NR_IRQS || !irq_desc[irq].valid) {
592 printk(KERN_ERR "Trying to free IRQ%d\n",irq);
593#ifdef CONFIG_DEBUG_ERRORS
594 __backtrace();
595#endif
596 return;
597 }
598
599 spin_lock_irqsave(&irq_controller_lock, flags);
600 for (p = &irq_desc[irq].action; (action = *p) != NULL; p = &action->next) {
601 if (action->dev_id != dev_id)
602 continue;
603
604 /* Found it - now free it */
605 *p = action->next;
606 kfree(action);
607 goto out;
608 }
609 printk(KERN_ERR "Trying to free free IRQ%d\n",irq);
610#ifdef CONFIG_DEBUG_ERRORS
611 __backtrace();
612#endif
613out:
614 spin_unlock_irqrestore(&irq_controller_lock, flags);
615}
616
617EXPORT_SYMBOL(free_irq);
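The request_irq()/free_irq() kernel-doc above spells out the contract: the handler must be ready before the call because it may fire as soon as the line is enabled, dev_id should normally be the device structure, and a shared line needs a non-NULL dev_id. A hedged driver-side sketch against the signatures in this file; the device, IRQ number and handler body are illustrative only:

#include <linux/interrupt.h>
#include <linux/errno.h>

#define MYDEV_IRQ 14                            /* illustrative IRQ number */

static struct mydev_state { int dummy; } mydev; /* stands in for real device state */

static irqreturn_t mydev_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
        /* acknowledge the board's interrupt here before returning */
        return IRQ_HANDLED;
}

static int mydev_attach(void)
{
        int ret;

        ret = request_irq(MYDEV_IRQ, mydev_interrupt,
                          IRQF_SHARED | IRQF_DISABLED, "mydev", &mydev);
        if (ret)
                return ret;     /* -EINVAL, -EBUSY or -ENOMEM, as above */

        /* ... now program the hardware to start raising interrupts ... */
        return 0;
}

static void mydev_detach(void)
{
        /* quiesce the board first, then give the line back */
        free_irq(MYDEV_IRQ, &mydev);
}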
618
619/* Start the interrupt probing. Unlike other architectures,
620 * we don't return a mask of interrupts from probe_irq_on,
621 * but return the number of interrupts enabled for the probe.
622 * The interrupts which have been enabled for probing are
623 * instead recorded in the irq_desc structure.
624 */
625unsigned long probe_irq_on(void)
626{
627 unsigned int i, irqs = 0;
628 unsigned long delay;
629
630 /*
631 * first snaffle up any unassigned but
632 * probe-able interrupts
633 */
634 spin_lock_irq(&irq_controller_lock);
635 for (i = 0; i < NR_IRQS; i++) {
636 if (!irq_desc[i].probe_ok || irq_desc[i].action)
637 continue;
638
639 irq_desc[i].probing = 1;
640 irq_desc[i].triggered = 0;
641 if (irq_desc[i].chip->type)
642 irq_desc[i].chip->type(i, IRQT_PROBE);
643 irq_desc[i].chip->unmask(i);
644 irqs += 1;
645 }
646 spin_unlock_irq(&irq_controller_lock);
647
648 /*
649 * wait for spurious interrupts to mask themselves out again
650 */
651 for (delay = jiffies + HZ/10; time_before(jiffies, delay); )
652 /* min 100ms delay */;
653
654 /*
655 * now filter out any obviously spurious interrupts
656 */
657 spin_lock_irq(&irq_controller_lock);
658 for (i = 0; i < NR_IRQS; i++) {
659 if (irq_desc[i].probing && irq_desc[i].triggered) {
660 irq_desc[i].probing = 0;
661 irqs -= 1;
662 }
663 }
664 spin_unlock_irq(&irq_controller_lock);
665
666 return irqs;
667}
668
669EXPORT_SYMBOL(probe_irq_on);
670
671/*
672 * Possible return values:
673 * >= 0 - interrupt number
674 * -1 - no interrupt/many interrupts
675 */
676int probe_irq_off(unsigned long irqs)
677{
678 unsigned int i;
679 int irq_found = NO_IRQ;
680
681 /*
682 * look at the interrupts, and find exactly one
683 * that we were probing has been triggered
684 */
685 spin_lock_irq(&irq_controller_lock);
686 for (i = 0; i < NR_IRQS; i++) {
687 if (irq_desc[i].probing &&
688 irq_desc[i].triggered) {
689 if (irq_found != NO_IRQ) {
690 irq_found = NO_IRQ;
691 goto out;
692 }
693 irq_found = i;
694 }
695 }
696
697 if (irq_found == -1)
698 irq_found = NO_IRQ;
699out:
700 spin_unlock_irq(&irq_controller_lock);
701
702 return irq_found;
703}
704
705EXPORT_SYMBOL(probe_irq_off);
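As the comments above note, probe_irq_on() here returns a count rather than a bitmask, but the driver-side idiom is unchanged: start probing, provoke exactly one interrupt from the device, then ask which line fired. A short sketch, with the device poke left as a comment:

#include <linux/interrupt.h>
#include <linux/delay.h>

/* Returns the probed IRQ number, or NO_IRQ if none (or more than one) fired. */
static int mydev_probe_irq(void)
{
        unsigned long probe = probe_irq_on();

        /* ... tell the hardware to generate a single interrupt ... */
        mdelay(10);                     /* give the line a moment to assert */

        return probe_irq_off(probe);    /* NO_IRQ means zero or several triggered */
}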
706
707void __init init_irq_proc(void)
708{
709}
710
711void __init init_IRQ(void)
712{
713 struct irqdesc *desc;
714 extern void init_dma(void);
715 int irq;
716
717 for (irq = 0, desc = irq_desc; irq < NR_IRQS; irq++, desc++)
718 *desc = bad_irq_desc;
719
720 arc_init_irq();
721 init_dma();
722}
diff --git a/arch/arm26/kernel/process.c b/arch/arm26/kernel/process.c
deleted file mode 100644
index dcd81e62ff4e..000000000000
--- a/arch/arm26/kernel/process.c
+++ /dev/null
@@ -1,392 +0,0 @@
1/*
2 * linux/arch/arm26/kernel/process.c
3 *
4 * Copyright (C) 2003 Ian Molton - adapted for ARM26
5 * Copyright (C) 1996-2000 Russell King - Converted to ARM.
6 * Original Copyright (C) 1995 Linus Torvalds
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12#include <stdarg.h>
13
14#include <linux/module.h>
15#include <linux/sched.h>
16#include <linux/kernel.h>
17#include <linux/mm.h>
18#include <linux/stddef.h>
19#include <linux/unistd.h>
20#include <linux/ptrace.h>
21#include <linux/slab.h>
22#include <linux/user.h>
23#include <linux/a.out.h>
24#include <linux/delay.h>
25#include <linux/reboot.h>
26#include <linux/interrupt.h>
27#include <linux/init.h>
28
29#include <asm/system.h>
30#include <asm/io.h>
31#include <asm/leds.h>
32#include <asm/processor.h>
33#include <asm/uaccess.h>
34
35extern const char *processor_modes[];
36extern void setup_mm_for_reboot(char mode);
37
38static volatile int hlt_counter;
39
40void disable_hlt(void)
41{
42 hlt_counter++;
43}
44
45EXPORT_SYMBOL(disable_hlt);
46
47void enable_hlt(void)
48{
49 hlt_counter--;
50}
51
52EXPORT_SYMBOL(enable_hlt);
53
54static int __init nohlt_setup(char *__unused)
55{
56 hlt_counter = 1;
57 return 1;
58}
59
60static int __init hlt_setup(char *__unused)
61{
62 hlt_counter = 0;
63 return 1;
64}
65
66__setup("nohlt", nohlt_setup);
67__setup("hlt", hlt_setup);
68
69/*
70 * This is our default idle handler. We need to disable
71 * interrupts here to ensure we don't miss a wakeup call.
72 */
73void cpu_idle(void)
74{
75 /* endless idle loop with no priority at all */
76 while (1) {
77 while (!need_resched())
78 cpu_relax();
79 preempt_enable_no_resched();
80 schedule();
81 preempt_disable();
82 }
83}
84
85static char reboot_mode = 'h';
86
87int __init reboot_setup(char *str)
88{
89 reboot_mode = str[0];
90 return 1;
91}
92
93__setup("reboot=", reboot_setup);
94
95/* ARM26 can't do these but we still need to define them. */
96void machine_halt(void)
97{
98}
99void machine_power_off(void)
100{
101}
102
103void machine_restart(char * __unused)
104{
105 /*
106 * Clean and disable cache, and turn off interrupts
107 */
108 cpu_proc_fin();
109
110 /*
111 * Tell the mm system that we are going to reboot -
112 * we may need it to insert some 1:1 mappings so that
113 * soft boot works.
114 */
115 setup_mm_for_reboot(reboot_mode);
116
117 /*
118 * copy branch instruction to reset location and call it
119 */
120
121 *(unsigned long *)0 = *(unsigned long *)0x03800000;
122 ((void(*)(void))0)();
123
124 /*
125 * Whoops - the architecture was unable to reboot.
126 * Tell the user! Should never happen...
127 */
128 mdelay(1000);
129 printk("Reboot failed -- System halted\n");
130 while (1);
131}
132
133void show_regs(struct pt_regs * regs)
134{
135 unsigned long flags;
136
137 flags = condition_codes(regs);
138
139 printk("pc : [<%08lx>] lr : [<%08lx>] %s\n"
140 "sp : %08lx ip : %08lx fp : %08lx\n",
141 instruction_pointer(regs),
142 regs->ARM_lr, print_tainted(), regs->ARM_sp,
143 regs->ARM_ip, regs->ARM_fp);
144 printk("r10: %08lx r9 : %08lx r8 : %08lx\n",
145 regs->ARM_r10, regs->ARM_r9,
146 regs->ARM_r8);
147 printk("r7 : %08lx r6 : %08lx r5 : %08lx r4 : %08lx\n",
148 regs->ARM_r7, regs->ARM_r6,
149 regs->ARM_r5, regs->ARM_r4);
150 printk("r3 : %08lx r2 : %08lx r1 : %08lx r0 : %08lx\n",
151 regs->ARM_r3, regs->ARM_r2,
152 regs->ARM_r1, regs->ARM_r0);
153 printk("Flags: %c%c%c%c",
154 flags & PSR_N_BIT ? 'N' : 'n',
155 flags & PSR_Z_BIT ? 'Z' : 'z',
156 flags & PSR_C_BIT ? 'C' : 'c',
157 flags & PSR_V_BIT ? 'V' : 'v');
158 printk(" IRQs o%s FIQs o%s Mode %s Segment %s\n",
159 interrupts_enabled(regs) ? "n" : "ff",
160 fast_interrupts_enabled(regs) ? "n" : "ff",
161 processor_modes[processor_mode(regs)],
162 get_fs() == get_ds() ? "kernel" : "user");
163}
164
165void show_fpregs(struct user_fp *regs)
166{
167 int i;
168
169 for (i = 0; i < 8; i++) {
170 unsigned long *p;
171 char type;
172
173 p = (unsigned long *)(regs->fpregs + i);
174
175 switch (regs->ftype[i]) {
176 case 1: type = 'f'; break;
177 case 2: type = 'd'; break;
178 case 3: type = 'e'; break;
179 default: type = '?'; break;
180 }
181 if (regs->init_flag)
182 type = '?';
183
184 printk(" f%d(%c): %08lx %08lx %08lx%c",
185 i, type, p[0], p[1], p[2], i & 1 ? '\n' : ' ');
186 }
187
188
189 printk("FPSR: %08lx FPCR: %08lx\n",
190 (unsigned long)regs->fpsr,
191 (unsigned long)regs->fpcr);
192}
193
194/*
195 * Task structure and kernel stack allocation.
196 */
197static unsigned long *thread_info_head;
198static unsigned int nr_thread_info;
199
200extern unsigned long get_page_8k(int priority);
201extern void free_page_8k(unsigned long page);
202
203// FIXME - is this valid?
204#define EXTRA_TASK_STRUCT 0
205#define ll_alloc_task_struct() ((struct thread_info *)get_page_8k(GFP_KERNEL))
206#define ll_free_task_struct(p) free_page_8k((unsigned long)(p))
207
208//FIXME - do we use the *task param below? It looks like we don't, which is ok.
209//FIXME - if EXTRA_TASK_STRUCT is zero we can optimise the below away permanently. *IF* it's supposed to be zero.
210struct thread_info *alloc_thread_info(struct task_struct *task)
211{
212 struct thread_info *thread = NULL;
213
214 if (EXTRA_TASK_STRUCT) {
215 unsigned long *p = thread_info_head;
216
217 if (p) {
218 thread_info_head = (unsigned long *)p[0];
219 nr_thread_info -= 1;
220 }
221 thread = (struct thread_info *)p;
222 }
223
224 if (!thread)
225 thread = ll_alloc_task_struct();
226
227#ifdef CONFIG_MAGIC_SYSRQ
228 /*
229 * The stack must be cleared if you want SYSRQ-T to
230 * give sensible stack usage information
231 */
232 if (thread) {
233 char *p = (char *)thread;
234 memzero(p+KERNEL_STACK_SIZE, KERNEL_STACK_SIZE);
235 }
236#endif
237 return thread;
238}
239
240void free_thread_info(struct thread_info *thread)
241{
242 if (EXTRA_TASK_STRUCT && nr_thread_info < EXTRA_TASK_STRUCT) {
243 unsigned long *p = (unsigned long *)thread;
244 p[0] = (unsigned long)thread_info_head;
245 thread_info_head = p;
246 nr_thread_info += 1;
247 } else
248 ll_free_task_struct(thread);
249}
250
251/*
252 * Free current thread data structures etc..
253 */
254void exit_thread(void)
255{
256}
257
258void flush_thread(void)
259{
260 struct thread_info *thread = current_thread_info();
261 struct task_struct *tsk = current;
262
263 memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
264 memset(&thread->fpstate, 0, sizeof(union fp_state));
265
266 clear_used_math();
267}
268
269void release_thread(struct task_struct *dead_task)
270{
271}
272
273asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
274
275int
276copy_thread(int nr, unsigned long clone_flags, unsigned long stack_start,
277 unsigned long unused, struct task_struct *p, struct pt_regs *regs)
278{
279 struct thread_info *thread = task_thread_info(p);
280 struct pt_regs *childregs = task_pt_regs(p);
281
282 *childregs = *regs;
283 childregs->ARM_r0 = 0;
284 childregs->ARM_sp = stack_start;
285
286 memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save));
287 thread->cpu_context.sp = (unsigned long)childregs;
288 thread->cpu_context.pc = (unsigned long)ret_from_fork | MODE_SVC26 | PSR_I_BIT;
289
290 return 0;
291}
292
293/*
294 * fill in the fpe structure for a core dump...
295 */
296int dump_fpu (struct pt_regs *regs, struct user_fp *fp)
297{
298 struct thread_info *thread = current_thread_info();
299 int used_math = !!used_math();
300
301 if (used_math)
302 memcpy(fp, &thread->fpstate.soft, sizeof (*fp));
303
304 return used_math;
305}
306
307/*
308 * fill in the user structure for a core dump..
309 */
310void dump_thread(struct pt_regs * regs, struct user * dump)
311{
312 struct task_struct *tsk = current;
313
314 dump->magic = CMAGIC;
315 dump->start_code = tsk->mm->start_code;
316 dump->start_stack = regs->ARM_sp & ~(PAGE_SIZE - 1);
317
318 dump->u_tsize = (tsk->mm->end_code - tsk->mm->start_code) >> PAGE_SHIFT;
319 dump->u_dsize = (tsk->mm->brk - tsk->mm->start_data + PAGE_SIZE - 1) >> PAGE_SHIFT;
320 dump->u_ssize = 0;
321
322 dump->u_debugreg[0] = tsk->thread.debug.bp[0].address;
323 dump->u_debugreg[1] = tsk->thread.debug.bp[1].address;
324 dump->u_debugreg[2] = tsk->thread.debug.bp[0].insn;
325 dump->u_debugreg[3] = tsk->thread.debug.bp[1].insn;
326 dump->u_debugreg[4] = tsk->thread.debug.nsaved;
327
328 if (dump->start_stack < 0x04000000)
329 dump->u_ssize = (0x04000000 - dump->start_stack) >> PAGE_SHIFT;
330
331 dump->regs = *regs;
332 dump->u_fpvalid = dump_fpu (regs, &dump->u_fp);
333}
334
335/*
336 * Shuffle the argument into the correct register before calling the
337 * thread function. r1 is the thread argument, r2 is the pointer to
338 * the thread function, and r3 points to the exit function.
339 * FIXME - make sure this is right - the older code used to zero fp
340 * and cause the parent to call sys_exit (do_exit in this version)
341 */
342extern void kernel_thread_helper(void);
343
344asm( ".section .text\n"
345" .align\n"
346" .type kernel_thread_helper, #function\n"
347"kernel_thread_helper:\n"
348" mov r0, r1\n"
349" mov lr, r3\n"
350" mov pc, r2\n"
351" .size kernel_thread_helper, . - kernel_thread_helper\n"
352" .previous");
353
354/*
355 * Create a kernel thread.
356 */
357pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
358{
359 struct pt_regs regs;
360
361 memset(&regs, 0, sizeof(regs));
362
363 regs.ARM_r1 = (unsigned long)arg;
364 regs.ARM_r2 = (unsigned long)fn;
365 regs.ARM_r3 = (unsigned long)do_exit;
366 regs.ARM_pc = (unsigned long)kernel_thread_helper | MODE_SVC26;
367
368 return do_fork(flags|CLONE_VM|CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
369}
370EXPORT_SYMBOL(kernel_thread);
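
kernel_thread() and kernel_thread_helper() above amount to a small calling convention: the argument rides in r1, the thread function in r2 and the exit routine in r3, and the helper shuffles them so that fn(arg) runs with the exit routine as its effective return address. A rough standalone C model of that control flow, using made-up demo functions rather than anything from the kernel:

#include <stdio.h>
#include <stdlib.h>

static int demo_fn(void *arg)
{
        printf("thread fn got \"%s\"\n", (const char *)arg);
        return 42;
}

static void demo_exit(int code)
{
        printf("exit routine called with %d\n", code);
        exit(code);
}

/* What kernel_thread_helper does: put the argument where the thread function
 * expects it, make the exit routine the return address, and jump to the
 * function; if fn() returns, its return value flows into the exit path. */
static void helper(void *r1_arg, int (*r2_fn)(void *), void (*r3_exit)(int))
{
        r3_exit(r2_fn(r1_arg));
}

int main(void)
{
        char arg[] = "hello";

        helper(arg, demo_fn, demo_exit);
        return 0;       /* never reached: demo_exit() ends the process */
}
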
371
372
373unsigned long get_wchan(struct task_struct *p)
374{
375 unsigned long fp, lr;
376 unsigned long stack_page;
377 int count = 0;
378 if (!p || p == current || p->state == TASK_RUNNING)
379 return 0;
380
381 stack_page = 4096 + (unsigned long)p;
382 fp = thread_saved_fp(p);
383 do {
384 if (fp < stack_page || fp > 4092+stack_page)
385 return 0;
386 lr = pc_pointer (((unsigned long *)fp)[-1]);
387 if (!in_sched_functions(lr))
388 return lr;
389 fp = *(unsigned long *) (fp - 12);
390 } while (count ++ < 16);
391 return 0;
392}
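
get_wchan() above leans on the APCS frame layout: the saved lr sits one word below the frame pointer and the caller's frame pointer twelve bytes below it. A self-contained sketch of that walk over a fake stack, using word indexing in place of the kernel's byte offsets; the addresses and lr values are invented.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uintptr_t stack[32] = { 0 };
        uintptr_t *outer = &stack[8];           /* outermost frame */
        uintptr_t *inner = &stack[20];          /* what thread_saved_fp() would give */

        inner[-1] = 0x8000;                     /* lr saved by the inner function */
        inner[-3] = (uintptr_t)outer;           /* caller's frame pointer */
        outer[-1] = 0x9000;                     /* lr saved by the outer function */
        outer[-3] = 0;                          /* end of the chain */

        /* Walk the chain the way get_wchan() does, minus the bounds checks. */
        for (uintptr_t *fp = inner; fp; fp = (uintptr_t *)fp[-3]) {
                printf("saved lr = %#lx\n", (unsigned long)fp[-1]);
                if (!fp[-3])
                        break;
        }
        return 0;
}
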
diff --git a/arch/arm26/kernel/ptrace.c b/arch/arm26/kernel/ptrace.c
deleted file mode 100644
index 0fefb86970c6..000000000000
--- a/arch/arm26/kernel/ptrace.c
+++ /dev/null
@@ -1,670 +0,0 @@
1/*
2 * linux/arch/arm26/kernel/ptrace.c
3 *
4 * By Ross Biro 1/23/92
5 * edited by Linus Torvalds
6 * ARM modifications Copyright (C) 2000 Russell King
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12#include <linux/kernel.h>
13#include <linux/sched.h>
14#include <linux/mm.h>
15#include <linux/smp.h>
16#include <linux/ptrace.h>
17#include <linux/user.h>
18#include <linux/security.h>
19#include <linux/signal.h>
20
21#include <asm/uaccess.h>
22#include <asm/pgtable.h>
23#include <asm/system.h>
24//#include <asm/processor.h>
25
26#include "ptrace.h"
27
28#define REG_PC 15
29#define REG_PSR 15
30/*
31 * does not yet catch signals sent when the child dies.
32 * in exit.c or in signal.c.
33 */
34
35/*
36 * Breakpoint SWI instruction: SWI &9F0001
37 */
38#define BREAKINST_ARM 0xef9f0001
39
40/*
41 * this routine will get a word off of the process's privileged stack.
42 * the offset is how far from the base addr as stored in the THREAD.
43 * this routine assumes that all the privileged stacks are in our
44 * data space.
45 */
46static inline long get_user_reg(struct task_struct *task, int offset)
47{
48 return task_pt_regs(task)->uregs[offset];
49}
50
51/*
52 * this routine will put a word on the process's privileged stack.
53 * the offset is how far from the base addr as stored in the THREAD.
54 * this routine assumes that all the privileged stacks are in our
55 * data space.
56 */
57static inline int
58put_user_reg(struct task_struct *task, int offset, long data)
59{
60 struct pt_regs newregs, *regs = task_pt_regs(task);
61 int ret = -EINVAL;
62
63 newregs = *regs;
64 newregs.uregs[offset] = data;
65
66 if (valid_user_regs(&newregs)) {
67 regs->uregs[offset] = data;
68 ret = 0;
69 }
70
71 return ret;
72}
73
74static inline int
75read_u32(struct task_struct *task, unsigned long addr, u32 *res)
76{
77 int ret;
78
79 ret = access_process_vm(task, addr, res, sizeof(*res), 0);
80
81 return ret == sizeof(*res) ? 0 : -EIO;
82}
83
84static inline int
85read_instr(struct task_struct *task, unsigned long addr, u32 *res)
86{
87 int ret;
88 u32 val;
89 ret = access_process_vm(task, addr & ~3, &val, sizeof(val), 0);
90 ret = ret == sizeof(val) ? 0 : -EIO;
91 *res = val;
92 return ret;
93}
94
95/*
96 * Get value of register `rn' (in the instruction)
97 */
98static unsigned long
99ptrace_getrn(struct task_struct *child, unsigned long insn)
100{
101 unsigned int reg = (insn >> 16) & 15;
102 unsigned long val;
103
104 val = get_user_reg(child, reg);
105 if (reg == 15)
106 val = pc_pointer(val + 8); //FIXME - correct for arm26?
107
108 return val;
109}
110
111/*
112 * Get value of operand 2 (in an ALU instruction)
113 */
114static unsigned long
115ptrace_getaluop2(struct task_struct *child, unsigned long insn)
116{
117 unsigned long val;
118 int shift;
119 int type;
120
121 if (insn & 1 << 25) {
122 val = insn & 255;
123 shift = (insn >> 8) & 15;
124 type = 3;
125 } else {
126 val = get_user_reg (child, insn & 15);
127
128 if (insn & (1 << 4))
129 shift = (int)get_user_reg (child, (insn >> 8) & 15);
130 else
131 shift = (insn >> 7) & 31;
132
133 type = (insn >> 5) & 3;
134 }
135
136 switch (type) {
137 case 0: val <<= shift; break;
138 case 1: val >>= shift; break;
139 case 2:
140 val = (((signed long)val) >> shift);
141 break;
142 case 3:
143 val = (val >> shift) | (val << (32 - shift));
144 break;
145 }
146 return val;
147}
148
149/*
150 * Get value of operand 2 (in a LDR instruction)
151 */
152static unsigned long
153ptrace_getldrop2(struct task_struct *child, unsigned long insn)
154{
155 unsigned long val;
156 int shift;
157 int type;
158
159 val = get_user_reg(child, insn & 15);
160 shift = (insn >> 7) & 31;
161 type = (insn >> 5) & 3;
162
163 switch (type) {
164 case 0: val <<= shift; break;
165 case 1: val >>= shift; break;
166 case 2:
167 val = (((signed long)val) >> shift);
168 break;
169 case 3:
170 val = (val >> shift) | (val << (32 - shift));
171 break;
172 }
173 return val;
174}
175
176#define OP_MASK 0x01e00000
177#define OP_AND 0x00000000
178#define OP_EOR 0x00200000
179#define OP_SUB 0x00400000
180#define OP_RSB 0x00600000
181#define OP_ADD 0x00800000
182#define OP_ADC 0x00a00000
183#define OP_SBC 0x00c00000
184#define OP_RSC 0x00e00000
185#define OP_ORR 0x01800000
186#define OP_MOV 0x01a00000
187#define OP_BIC 0x01c00000
188#define OP_MVN 0x01e00000
189
190static unsigned long
191get_branch_address(struct task_struct *child, unsigned long pc, unsigned long insn)
192{
193 u32 alt = 0;
194
195 switch (insn & 0x0e000000) {
196 case 0x00000000:
197 case 0x02000000: {
198 /*
199 * data processing
200 */
201 long aluop1, aluop2, ccbit;
202
203 if ((insn & 0xf000) != 0xf000)
204 break;
205
206 aluop1 = ptrace_getrn(child, insn);
207 aluop2 = ptrace_getaluop2(child, insn);
208 ccbit = get_user_reg(child, REG_PSR) & PSR_C_BIT ? 1 : 0;
209
210 switch (insn & OP_MASK) {
211 case OP_AND: alt = aluop1 & aluop2; break;
212 case OP_EOR: alt = aluop1 ^ aluop2; break;
213 case OP_SUB: alt = aluop1 - aluop2; break;
214 case OP_RSB: alt = aluop2 - aluop1; break;
215 case OP_ADD: alt = aluop1 + aluop2; break;
216 case OP_ADC: alt = aluop1 + aluop2 + ccbit; break;
217 case OP_SBC: alt = aluop1 - aluop2 + ccbit; break;
218 case OP_RSC: alt = aluop2 - aluop1 + ccbit; break;
219 case OP_ORR: alt = aluop1 | aluop2; break;
220 case OP_MOV: alt = aluop2; break;
221 case OP_BIC: alt = aluop1 & ~aluop2; break;
222 case OP_MVN: alt = ~aluop2; break;
223 }
224 break;
225 }
226
227 case 0x04000000:
228 case 0x06000000:
229 /*
230 * ldr
231 */
232 if ((insn & 0x0010f000) == 0x0010f000) {
233 unsigned long base;
234
235 base = ptrace_getrn(child, insn);
236 if (insn & 1 << 24) {
237 long aluop2;
238
239 if (insn & 0x02000000)
240 aluop2 = ptrace_getldrop2(child, insn);
241 else
242 aluop2 = insn & 0xfff;
243
244 if (insn & 1 << 23)
245 base += aluop2;
246 else
247 base -= aluop2;
248 }
249 if (read_u32(child, base, &alt) == 0)
250 alt = pc_pointer(alt);
251 }
252 break;
253
254 case 0x08000000:
255 /*
256 * ldm
257 */
258 if ((insn & 0x00108000) == 0x00108000) {
259 unsigned long base;
260 unsigned int nr_regs;
261
262 if (insn & (1 << 23)) {
263 nr_regs = hweight16(insn & 65535) << 2;
264
265 if (!(insn & (1 << 24)))
266 nr_regs -= 4;
267 } else {
268 if (insn & (1 << 24))
269 nr_regs = -4;
270 else
271 nr_regs = 0;
272 }
273
274 base = ptrace_getrn(child, insn);
275
276 if (read_u32(child, base + nr_regs, &alt) == 0)
277 alt = pc_pointer(alt);
278 break;
279 }
280 break;
281
282 case 0x0a000000: {
283 /*
284 * bl or b
285 */
286 signed long displ;
287 /* It's a branch/branch link: instead of trying to
288 * figure out whether the branch will be taken or not,
289 * we'll put a breakpoint at both locations. This is
290 * simpler, more reliable, and probably not a whole lot
291 * slower than the alternative approach of emulating the
292 * branch.
293 */
294 displ = (insn & 0x00ffffff) << 8;
295 displ = (displ >> 6) + 8;
296 if (displ != 0 && displ != 4)
297 alt = pc + displ;
298 }
299 break;
300 }
301
302 return alt;
303}
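
The b/bl case above packs sign extension and scaling into two shifts: moving the 24-bit offset field up by 8 bits parks its sign bit at bit 31, and the arithmetic shift back down by 6 both propagates the sign and leaves the word offset multiplied by 4, after which the 8-byte prefetch is added. A quick standalone check of that arithmetic, using made-up instruction words rather than anything traced from a real binary:

#include <stdint.h>
#include <stdio.h>

/* Target of a branch, decoded the same way get_branch_address() does it. */
static uint32_t branch_target(uint32_t pc, uint32_t insn)
{
        int32_t displ = (int32_t)((insn & 0x00ffffff) << 8) >> 6;

        return pc + displ + 8;          /* +8 for the pipeline prefetch */
}

int main(void)
{
        printf("%#x\n", branch_target(0x8000, 0x0a000002));   /* forward:  0x8010 */
        printf("%#x\n", branch_target(0x8000, 0x0afffffc));   /* backward: 0x7ff8 */
        return 0;
}
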
304
305static int
306swap_insn(struct task_struct *task, unsigned long addr,
307 void *old_insn, void *new_insn, int size)
308{
309 int ret;
310
311 ret = access_process_vm(task, addr, old_insn, size, 0);
312 if (ret == size)
313 ret = access_process_vm(task, addr, new_insn, size, 1);
314 return ret;
315}
316
317static void
318add_breakpoint(struct task_struct *task, struct debug_info *dbg, unsigned long addr)
319{
320 int nr = dbg->nsaved;
321
322 if (nr < 2) {
323 u32 new_insn = BREAKINST_ARM;
324 int res;
325
326 res = swap_insn(task, addr, &dbg->bp[nr].insn, &new_insn, 4);
327
328 if (res == 4) {
329 dbg->bp[nr].address = addr;
330 dbg->nsaved += 1;
331 }
332 } else
333 printk(KERN_ERR "ptrace: too many breakpoints\n");
334}
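
add_breakpoint() above and clear_breakpoint() just below form a save-and-patch pair: read the instruction that is about to be displaced, write the breakpoint SWI over it, and put the saved word back when the breakpoint is cleared. A self-contained sketch of that bookkeeping over an ordinary array standing in for access_process_vm() on the child's text; set_bp/clear_bp are illustrative names.

#include <stdint.h>
#include <stdio.h>

#define BREAKINST 0xef9f0001u           /* SWI &9F0001, as defined earlier */

struct soft_bp { uint32_t *addr; uint32_t saved; };

static void set_bp(struct soft_bp *bp, uint32_t *addr)
{
        bp->addr = addr;
        bp->saved = *addr;              /* remember the displaced instruction */
        *addr = BREAKINST;
}

static void clear_bp(struct soft_bp *bp)
{
        if (*bp->addr != BREAKINST)
                fprintf(stderr, "corrupted breakpoint\n");
        *bp->addr = bp->saved;          /* restore the original instruction */
}

int main(void)
{
        uint32_t text[4] = { 0xe1a00000, 0xe1a00000, 0xe1a00000, 0xe1a00000 };
        struct soft_bp bp;

        set_bp(&bp, &text[2]);
        printf("patched:  %#x\n", text[2]);     /* 0xef9f0001 */
        clear_bp(&bp);
        printf("restored: %#x\n", text[2]);     /* 0xe1a00000 (mov r0, r0) */
        return 0;
}
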
335
336/*
337 * Clear one breakpoint in the user program. We copy what the hardware
338 * does and use bit 0 of the address to indicate whether this is a Thumb
339 * breakpoint or an ARM breakpoint.
340 */
341static void clear_breakpoint(struct task_struct *task, struct debug_entry *bp)
342{
343 unsigned long addr = bp->address;
344 u32 old_insn;
345 int ret;
346
347 ret = swap_insn(task, addr & ~3, &old_insn,
348 &bp->insn, 4);
349
350 if (ret != 4 || old_insn != BREAKINST_ARM)
351 printk(KERN_ERR "%s:%d: corrupted ARM breakpoint at "
352 "0x%08lx (0x%08x)\n", task->comm, task->pid,
353 addr, old_insn);
354}
355
356void ptrace_set_bpt(struct task_struct *child)
357{
358 struct pt_regs *regs;
359 unsigned long pc;
360 u32 insn;
361 int res;
362
363 regs = task_pt_regs(child);
364 pc = instruction_pointer(regs);
365
366 res = read_instr(child, pc, &insn);
367 if (!res) {
368 struct debug_info *dbg = &child->thread.debug;
369 unsigned long alt;
370
371 dbg->nsaved = 0;
372
373 alt = get_branch_address(child, pc, insn);
374 if (alt)
375 add_breakpoint(child, dbg, alt);
376
377 /*
378 * Note that we ignore the result of setting the above
379 * breakpoint since it may fail. When it does, this is
380 * not so much an error, but a forewarning that we may
381 * be receiving a prefetch abort shortly.
382 *
383 * If we don't set this breakpoint here, then we can
384 * lose control of the thread during single stepping.
385 */
386 if (!alt || predicate(insn) != PREDICATE_ALWAYS)
387 add_breakpoint(child, dbg, pc + 4);
388 }
389}
390
391/*
392 * Ensure no single-step breakpoint is pending. Returns non-zero
393 * value if child was being single-stepped.
394 */
395void ptrace_cancel_bpt(struct task_struct *child)
396{
397 int i, nsaved = child->thread.debug.nsaved;
398
399 child->thread.debug.nsaved = 0;
400
401 if (nsaved > 2) {
402 printk("ptrace_cancel_bpt: bogus nsaved: %d!\n", nsaved);
403 nsaved = 2;
404 }
405
406 for (i = 0; i < nsaved; i++)
407 clear_breakpoint(child, &child->thread.debug.bp[i]);
408}
409
410/*
411 * Called by kernel/ptrace.c when detaching..
412 *
413 * Make sure the single step bit is not set.
414 */
415void ptrace_disable(struct task_struct *child)
416{
417 child->ptrace &= ~PT_SINGLESTEP;
418 ptrace_cancel_bpt(child);
419}
420
421/*
422 * Handle hitting a breakpoint.
423 */
424void ptrace_break(struct task_struct *tsk, struct pt_regs *regs)
425{
426 siginfo_t info;
427
428 /*
429 * The PC is always left pointing at the next instruction. Fix this.
430 */
431 regs->ARM_pc -= 4;
432
433 if (tsk->thread.debug.nsaved == 0)
434 printk(KERN_ERR "ptrace: bogus breakpoint trap\n");
435
436 ptrace_cancel_bpt(tsk);
437
438 info.si_signo = SIGTRAP;
439 info.si_errno = 0;
440 info.si_code = TRAP_BRKPT;
441 info.si_addr = (void *)instruction_pointer(regs) - 4;
442
443 force_sig_info(SIGTRAP, &info, tsk);
444}
445
446/*
447 * Read the word at offset "off" into the "struct user". We
448 * actually access the pt_regs stored on the kernel stack.
449 */
450static int ptrace_read_user(struct task_struct *tsk, unsigned long off,
451 unsigned long *ret)
452{
453 unsigned long tmp;
454
455 if (off & 3 || off >= sizeof(struct user))
456 return -EIO;
457
458 tmp = 0;
459 if (off < sizeof(struct pt_regs))
460 tmp = get_user_reg(tsk, off >> 2);
461
462 return put_user(tmp, ret);
463}
464
465/*
466 * Write the word at offset "off" into "struct user". We
467 * actually access the pt_regs stored on the kernel stack.
468 */
469static int ptrace_write_user(struct task_struct *tsk, unsigned long off,
470 unsigned long val)
471{
472 if (off & 3 || off >= sizeof(struct user))
473 return -EIO;
474
475 if (off >= sizeof(struct pt_regs))
476 return 0;
477
478 return put_user_reg(tsk, off >> 2, val);
479}
480
481/*
482 * Get all user integer registers.
483 */
484static int ptrace_getregs(struct task_struct *tsk, void *uregs)
485{
486 struct pt_regs *regs = task_pt_regs(tsk);
487
488 return copy_to_user(uregs, regs, sizeof(struct pt_regs)) ? -EFAULT : 0;
489}
490
491/*
492 * Set all user integer registers.
493 */
494static int ptrace_setregs(struct task_struct *tsk, void *uregs)
495{
496 struct pt_regs newregs;
497 int ret;
498
499 ret = -EFAULT;
500 if (copy_from_user(&newregs, uregs, sizeof(struct pt_regs)) == 0) {
501 struct pt_regs *regs = task_pt_regs(tsk);
502
503 ret = -EINVAL;
504 if (valid_user_regs(&newregs)) {
505 *regs = newregs;
506 ret = 0;
507 }
508 }
509
510 return ret;
511}
512
513/*
514 * Get the child FPU state.
515 */
516static int ptrace_getfpregs(struct task_struct *tsk, void *ufp)
517{
518 return copy_to_user(ufp, &task_thread_info(tsk)->fpstate,
519 sizeof(struct user_fp)) ? -EFAULT : 0;
520}
521
522/*
523 * Set the child FPU state.
524 */
525static int ptrace_setfpregs(struct task_struct *tsk, void *ufp)
526{
527 set_stopped_child_used_math(tsk);
528 return copy_from_user(&task_thread_info(tsk)->fpstate, ufp,
529 sizeof(struct user_fp)) ? -EFAULT : 0;
530}
531
532long arch_ptrace(struct task_struct *child, long request, long addr, long data)
533{
534 int ret;
535
536 switch (request) {
537 /*
538 * read word at location "addr" in the child process.
539 */
540 case PTRACE_PEEKTEXT:
541 case PTRACE_PEEKDATA:
542 ret = generic_ptrace_peekdata(child, addr, data);
543 break;
544
545 case PTRACE_PEEKUSR:
546 ret = ptrace_read_user(child, addr, (unsigned long *)data);
547 break;
548
549 /*
550 * write the word at location addr.
551 */
552 case PTRACE_POKETEXT:
553 case PTRACE_POKEDATA:
554 ret = generic_ptrace_pokedata(child, addr, data);
555 break;
556
557 case PTRACE_POKEUSR:
558 ret = ptrace_write_user(child, addr, data);
559 break;
560
561 /*
562 * continue/restart and stop at next (return from) syscall
563 */
564 case PTRACE_SYSCALL:
565 case PTRACE_CONT:
566 ret = -EIO;
567 if (!valid_signal(data))
568 break;
569 if (request == PTRACE_SYSCALL)
570 set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
571 else
572 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
573 child->exit_code = data;
574 /* make sure single-step breakpoint is gone. */
575 child->ptrace &= ~PT_SINGLESTEP;
576 ptrace_cancel_bpt(child);
577 wake_up_process(child);
578 ret = 0;
579 break;
580
581 /*
582 * make the child exit. Best I can do is send it a sigkill.
583 * perhaps it should be put in the status that it wants to
584 * exit.
585 */
586 case PTRACE_KILL:
587 /* make sure single-step breakpoint is gone. */
588 child->ptrace &= ~PT_SINGLESTEP;
589 ptrace_cancel_bpt(child);
590 if (child->exit_state != EXIT_ZOMBIE) {
591 child->exit_code = SIGKILL;
592 wake_up_process(child);
593 }
594 ret = 0;
595 break;
596
597 /*
598 * execute single instruction.
599 */
600 case PTRACE_SINGLESTEP:
601 ret = -EIO;
602 if (!valid_signal(data))
603 break;
604 child->ptrace |= PT_SINGLESTEP;
605 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
606 child->exit_code = data;
607 /* give it a chance to run. */
608 wake_up_process(child);
609 ret = 0;
610 break;
611
612 case PTRACE_DETACH:
613 ret = ptrace_detach(child, data);
614 break;
615
616 case PTRACE_GETREGS:
617 ret = ptrace_getregs(child, (void *)data);
618 break;
619
620 case PTRACE_SETREGS:
621 ret = ptrace_setregs(child, (void *)data);
622 break;
623
624 case PTRACE_GETFPREGS:
625 ret = ptrace_getfpregs(child, (void *)data);
626 break;
627
628 case PTRACE_SETFPREGS:
629 ret = ptrace_setfpregs(child, (void *)data);
630 break;
631
632 default:
633 ret = ptrace_request(child, request, addr, data);
634 break;
635 }
636
637 return ret;
638}
639
640asmlinkage void syscall_trace(int why, struct pt_regs *regs)
641{
642 unsigned long ip;
643
644 if (!test_thread_flag(TIF_SYSCALL_TRACE))
645 return;
646 if (!(current->ptrace & PT_PTRACED))
647 return;
648
649 /*
650 * Save IP. IP is used to denote syscall entry/exit:
651 * IP = 0 -> entry, = 1 -> exit
652 */
653 ip = regs->ARM_ip;
654 regs->ARM_ip = why;
655
656 /* the 0x80 provides a way for the tracing parent to distinguish
657 between a syscall stop and SIGTRAP delivery */
658 ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
659 ? 0x80 : 0));
660 /*
661 * this isn't the same as continuing with a signal, but it will do
662 * for normal use. strace only continues with a signal if the
663 * stopping signal is not SIGTRAP. -brl
664 */
665 if (current->exit_code) {
666 send_sig(current->exit_code, current, 1);
667 current->exit_code = 0;
668 }
669 regs->ARM_ip = ip;
670}
diff --git a/arch/arm26/kernel/ptrace.h b/arch/arm26/kernel/ptrace.h
deleted file mode 100644
index 846c9d8d36ed..000000000000
--- a/arch/arm26/kernel/ptrace.h
+++ /dev/null
@@ -1,13 +0,0 @@
1/*
2 * linux/arch/arm26/kernel/ptrace.h
3 *
4 * Copyright (C) 2000-2003 Russell King
5 * Copyright (C) 2003 Ian Molton
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11extern void ptrace_cancel_bpt(struct task_struct *);
12extern void ptrace_set_bpt(struct task_struct *);
13extern void ptrace_break(struct task_struct *, struct pt_regs *);
diff --git a/arch/arm26/kernel/semaphore.c b/arch/arm26/kernel/semaphore.c
deleted file mode 100644
index 5447a06db3fa..000000000000
--- a/arch/arm26/kernel/semaphore.c
+++ /dev/null
@@ -1,222 +0,0 @@
1/*
2 * ARM semaphore implementation, taken from
3 *
4 * i386 semaphore implementation.
5 *
6 * (C) Copyright 1999 Linus Torvalds
7 * (C) Copyright 2003 Ian Molton (ARM26 mods)
8 *
9 * Modified for ARM by Russell King
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 */
15#include <linux/module.h>
16#include <linux/sched.h>
17#include <linux/errno.h>
18#include <linux/init.h>
19
20#include <asm/semaphore.h>
21
22/*
23 * Semaphores are implemented using a two-way counter:
24 * The "count" variable is decremented for each process
25 * that tries to acquire the semaphore, while the "sleeping"
26 * variable is a count of such acquires.
27 *
28 * Notably, the inline "up()" and "down()" functions can
29 * efficiently test if they need to do any extra work (up
30 * needs to do something only if count was negative before
31 * the increment operation).
32 *
33 * "sleeping" and the contention routine ordering is
34 * protected by the semaphore spinlock.
35 *
36 * Note that these functions are only called when there is
37 * contention on the lock, and as such all this is the
38 * "non-critical" part of the whole semaphore business. The
39 * critical part is the inline stuff in <asm/semaphore.h>
40 * where we want to avoid any extra jumps and calls.
41 */
42
43/*
44 * Logic:
45 * - only on a boundary condition do we need to care. When we go
46 * from a negative count to a non-negative, we wake people up.
47 * - when we go from a non-negative count to a negative do we
48 * (a) synchronize with the "sleeper" count and (b) make sure
49 * that we're on the wakeup list before we synchronize so that
50 * we cannot lose wakeup events.
51 */
52
53void __up(struct semaphore *sem)
54{
55 wake_up(&sem->wait);
56}
57
58static DEFINE_SPINLOCK(semaphore_lock);
59
60void __sched __down(struct semaphore * sem)
61{
62 struct task_struct *tsk = current;
63 DECLARE_WAITQUEUE(wait, tsk);
64 tsk->state = TASK_UNINTERRUPTIBLE;
65 add_wait_queue_exclusive(&sem->wait, &wait);
66
67 spin_lock_irq(&semaphore_lock);
68 sem->sleepers++;
69 for (;;) {
70 int sleepers = sem->sleepers;
71
72 /*
73 * Add "everybody else" into it. They aren't
74 * playing, because we own the spinlock.
75 */
76 if (!atomic_add_negative(sleepers - 1, &sem->count)) {
77 sem->sleepers = 0;
78 break;
79 }
80 sem->sleepers = 1; /* us - see -1 above */
81 spin_unlock_irq(&semaphore_lock);
82
83 schedule();
84 tsk->state = TASK_UNINTERRUPTIBLE;
85 spin_lock_irq(&semaphore_lock);
86 }
87 spin_unlock_irq(&semaphore_lock);
88 remove_wait_queue(&sem->wait, &wait);
89 tsk->state = TASK_RUNNING;
90 wake_up(&sem->wait);
91}
92
93int __sched __down_interruptible(struct semaphore * sem)
94{
95 int retval = 0;
96 struct task_struct *tsk = current;
97 DECLARE_WAITQUEUE(wait, tsk);
98 tsk->state = TASK_INTERRUPTIBLE;
99 add_wait_queue_exclusive(&sem->wait, &wait);
100
101 spin_lock_irq(&semaphore_lock);
102 sem->sleepers ++;
103 for (;;) {
104 int sleepers = sem->sleepers;
105
106 /*
107 * With signals pending, this turns into
108 * the trylock failure case - we won't be
109 * sleeping, and we can't get the lock as
110 * it has contention. Just correct the count
111 * and exit.
112 */
113 if (signal_pending(current)) {
114 retval = -EINTR;
115 sem->sleepers = 0;
116 atomic_add(sleepers, &sem->count);
117 break;
118 }
119
120 /*
121 * Add "everybody else" into it. They aren't
122 * playing, because we own the spinlock. The
123 * "-1" is because we're still hoping to get
124 * the lock.
125 */
126 if (!atomic_add_negative(sleepers - 1, &sem->count)) {
127 sem->sleepers = 0;
128 break;
129 }
130 sem->sleepers = 1; /* us - see -1 above */
131 spin_unlock_irq(&semaphore_lock);
132
133 schedule();
134 tsk->state = TASK_INTERRUPTIBLE;
135 spin_lock_irq(&semaphore_lock);
136 }
137 spin_unlock_irq(&semaphore_lock);
138 tsk->state = TASK_RUNNING;
139 remove_wait_queue(&sem->wait, &wait);
140 wake_up(&sem->wait);
141 return retval;
142}
143
144/*
145 * Trylock failed - make sure we correct for
146 * having decremented the count.
147 *
148 * We could have done the trylock with a
149 * single "cmpxchg" without failure cases,
150 * but then it wouldn't work on a 386.
151 */
152int __down_trylock(struct semaphore * sem)
153{
154 int sleepers;
155 unsigned long flags;
156
157 spin_lock_irqsave(&semaphore_lock, flags);
158 sleepers = sem->sleepers + 1;
159 sem->sleepers = 0;
160
161 /*
162 * Add "everybody else" and us into it. They aren't
163 * playing, because we own the spinlock.
164 */
165 if (!atomic_add_negative(sleepers, &sem->count))
166 wake_up(&sem->wait);
167
168 spin_unlock_irqrestore(&semaphore_lock, flags);
169 return 1;
170}
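
The count/sleepers bookkeeping described at the top of this file is easiest to follow with concrete numbers. Below is a single-threaded sketch that only traces the arithmetic of one contended acquisition (no spinlock, no wait queue; all names are invented): the fast-path decrement drives count negative, the folding step in __down() records the sleeper, and the woken task's second pass balances the counter again.

#include <stdio.h>

static int count = 1;           /* one resource, initially free */
static int sleepers;

static int add_negative(int i)  /* models atomic_add_negative() */
{
        count += i;
        return count < 0;
}

int main(void)
{
        /* Task A: down() fast path, 1 -> 0, no contention. */
        --count;
        printf("A holds it:  count=%d sleepers=%d\n", count, sleepers);

        /* Task B: fast path fails (0 -> -1), so it enters the slow path. */
        if (--count < 0)
                sleepers++;
        /* __down(), first pass: fold "everybody else" (sleepers - 1 = 0) into
         * count; it stays negative, so B notes itself as the sleeper and sleeps. */
        if (!add_negative(sleepers - 1))
                sleepers = 0;
        else
                sleepers = 1;
        printf("B sleeps:    count=%d sleepers=%d\n", count, sleepers);

        /* Task A releases: up() increments and, seeing the old negative count,
         * calls __up() to wake B. */
        count++;

        /* B, woken: the same folding step now leaves count non-negative. */
        if (!add_negative(sleepers - 1))
                sleepers = 0;
        printf("B holds it:  count=%d sleepers=%d\n", count, sleepers);
        return 0;
}
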
171
172/*
173 * The semaphore operations have a special calling sequence that
174 * allow us to do a simpler in-line version of them. These routines
175 * need to convert that sequence back into the C sequence when
176 * there is contention on the semaphore.
177 *
178 * ip contains the semaphore pointer on entry. Save the C-clobbered
179 * registers (r0 to r3 and lr), but not ip, as we use it as a return
180 * value in some cases..
181 */
182asm(" .section .sched.text , #alloc, #execinstr \n\
183 .align 5 \n\
184 .globl __down_failed \n\
185__down_failed: \n\
186 stmfd sp!, {r0 - r3, lr} \n\
187 mov r0, ip \n\
188 bl __down \n\
189 ldmfd sp!, {r0 - r3, pc}^ \n\
190 \n\
191 .align 5 \n\
192 .globl __down_interruptible_failed \n\
193__down_interruptible_failed: \n\
194 stmfd sp!, {r0 - r3, lr} \n\
195 mov r0, ip \n\
196 bl __down_interruptible \n\
197 mov ip, r0 \n\
198 ldmfd sp!, {r0 - r3, pc}^ \n\
199 \n\
200 .align 5 \n\
201 .globl __down_trylock_failed \n\
202__down_trylock_failed: \n\
203 stmfd sp!, {r0 - r3, lr} \n\
204 mov r0, ip \n\
205 bl __down_trylock \n\
206 mov ip, r0 \n\
207 ldmfd sp!, {r0 - r3, pc}^ \n\
208 \n\
209 .align 5 \n\
210 .globl __up_wakeup \n\
211__up_wakeup: \n\
212 stmfd sp!, {r0 - r3, lr} \n\
213 mov r0, ip \n\
214 bl __up \n\
215 ldmfd sp!, {r0 - r3, pc}^ \n\
216 ");
217
218EXPORT_SYMBOL(__down_failed);
219EXPORT_SYMBOL(__down_interruptible_failed);
220EXPORT_SYMBOL(__down_trylock_failed);
221EXPORT_SYMBOL(__up_wakeup);
222
diff --git a/arch/arm26/kernel/setup.c b/arch/arm26/kernel/setup.c
deleted file mode 100644
index 0e006c6cd5a0..000000000000
--- a/arch/arm26/kernel/setup.c
+++ /dev/null
@@ -1,572 +0,0 @@
1/*
2 * linux/arch/arm26/kernel/setup.c
3 *
4 * Copyright (C) 1995-2001 Russell King
5 * Copyright (C) 2003 Ian Molton
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11#include <linux/kernel.h>
12#include <linux/stddef.h>
13#include <linux/ioport.h>
14#include <linux/delay.h>
15#include <linux/utsname.h>
16#include <linux/blkdev.h>
17#include <linux/console.h>
18#include <linux/bootmem.h>
19#include <linux/seq_file.h>
20#include <linux/screen_info.h>
21#include <linux/init.h>
22#include <linux/root_dev.h>
23
24#include <asm/elf.h>
25#include <asm/hardware.h>
26#include <asm/io.h>
27#include <asm/procinfo.h>
28#include <asm/setup.h>
29#include <asm/mach-types.h>
30#include <asm/tlbflush.h>
31
32#include <asm/irqchip.h>
33
34#ifndef MEM_SIZE
35#define MEM_SIZE (16*1024*1024)
36#endif
37
38#ifdef CONFIG_PREEMPT
39DEFINE_SPINLOCK(kernel_flag);
40#endif
41
42#if defined(CONFIG_FPE_NWFPE)
43char fpe_type[8];
44
45static int __init fpe_setup(char *line)
46{
47 memcpy(fpe_type, line, 8);
48 return 1;
49}
50
51__setup("fpe=", fpe_setup);
52#endif
53
54extern void paging_init(struct meminfo *);
55extern void convert_to_tag_list(struct tag *tags);
56extern void squash_mem_tags(struct tag *tag);
57extern void bootmem_init(struct meminfo *);
58extern int root_mountflags;
59extern int _stext, _text, _etext, _edata, _end;
60#ifdef CONFIG_XIP_KERNEL
61extern int _endtext, _sdata;
62#endif
63
64
65unsigned int processor_id;
66unsigned int __machine_arch_type;
67unsigned int system_rev;
68unsigned int system_serial_low;
69unsigned int system_serial_high;
70unsigned int elf_hwcap;
71unsigned int memc_ctrl_reg;
72unsigned int number_mfm_drives;
73
74struct processor processor;
75
76char elf_platform[ELF_PLATFORM_SIZE];
77
78unsigned long phys_initrd_start __initdata = 0;
79unsigned long phys_initrd_size __initdata = 0;
80static struct meminfo meminfo __initdata = { 0, };
81static struct proc_info_item proc_info;
82static const char *machine_name;
83static char __initdata command_line[COMMAND_LINE_SIZE];
84
85static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
86
87/*
88 * Standard memory resources
89 */
90static struct resource mem_res[] = {
91 { "Video RAM", 0, 0, IORESOURCE_MEM },
92 { "Kernel code", 0, 0, IORESOURCE_MEM },
93 { "Kernel data", 0, 0, IORESOURCE_MEM }
94};
95
96#define video_ram mem_res[0]
97#define kernel_code mem_res[1]
98#define kernel_data mem_res[2]
99
100static struct resource io_res[] = {
101 { "reserved", 0x3bc, 0x3be, IORESOURCE_IO | IORESOURCE_BUSY },
102 { "reserved", 0x378, 0x37f, IORESOURCE_IO | IORESOURCE_BUSY },
103 { "reserved", 0x278, 0x27f, IORESOURCE_IO | IORESOURCE_BUSY }
104};
105
106#define lp0 io_res[0]
107#define lp1 io_res[1]
108#define lp2 io_res[2]
109
110#define dump_cpu_info() do { } while (0)
111
112static void __init setup_processor(void)
113{
114 extern struct proc_info_list __proc_info_begin, __proc_info_end;
115 struct proc_info_list *list;
116
117 /*
118 * locate processor in the list of supported processor
119 * types. The linker builds this table for us from the
120 * entries in arch/arm26/mm/proc-*.S
121 */
122 for (list = &__proc_info_begin; list < &__proc_info_end ; list++)
123 if ((processor_id & list->cpu_mask) == list->cpu_val)
124 break;
125
126 /*
127 * If processor type is unrecognised, then we
128 * can do nothing...
129 */
130 if (list >= &__proc_info_end) {
131 printk("CPU configuration botched (ID %08x), unable "
132 "to continue.\n", processor_id);
133 while (1);
134 }
135
136 proc_info = *list->info;
137 processor = *list->proc;
138
139
140 printk("CPU: %s %s revision %d\n",
141 proc_info.manufacturer, proc_info.cpu_name,
142 (int)processor_id & 15);
143
144 dump_cpu_info();
145
146 sprintf(init_utsname()->machine, "%s", list->arch_name);
147 sprintf(elf_platform, "%s", list->elf_name);
148 elf_hwcap = list->elf_hwcap;
149
150 cpu_proc_init();
151}
152
153/*
154 * Initial parsing of the command line. We need to pick out the
155 * memory size. We look for mem=size@start, where start and size
156 * are "size[KkMm]"
157 */
158static void __init
159parse_cmdline(struct meminfo *mi, char **cmdline_p, char *from)
160{
161 char c = ' ', *to = command_line;
162 int usermem = 0, len = 0;
163
164 for (;;) {
165 if (c == ' ' && !memcmp(from, "mem=", 4)) {
166 unsigned long size, start;
167
168 if (to != command_line)
169 to -= 1;
170
171 /*
172 * If the user specifies memory size, we
173 * blow away any automatically generated
174 * size.
175 */
176 if (usermem == 0) {
177 usermem = 1;
178 mi->nr_banks = 0;
179 }
180
181 start = PHYS_OFFSET;
182 size = memparse(from + 4, &from);
183 if (*from == '@')
184 start = memparse(from + 1, &from);
185
186 mi->bank[mi->nr_banks].start = start;
187 mi->bank[mi->nr_banks].size = size;
188 mi->bank[mi->nr_banks].node = PHYS_TO_NID(start);
189 mi->nr_banks += 1;
190 }
191 c = *from++;
192 if (!c)
193 break;
194 if (COMMAND_LINE_SIZE <= ++len)
195 break;
196 *to++ = c;
197 }
198 *to = '\0';
199 *cmdline_p = command_line;
200}
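
memparse(), used by the mem= handling above, is essentially strtoul() plus an optional size suffix, and the option itself is two such numbers separated by '@'. A standalone sketch of that parsing which handles only the K/M suffixes that matter here; memparse_sketch and the default base address are assumptions, not the kernel's memparse().

#include <stdio.h>
#include <stdlib.h>

/* Minimal memparse(): a number with an optional K/M suffix. */
static unsigned long memparse_sketch(const char *s, char **retptr)
{
        unsigned long val = strtoul(s, retptr, 0);

        switch (**retptr) {
        case 'K': case 'k': val <<= 10; (*retptr)++; break;
        case 'M': case 'm': val <<= 20; (*retptr)++; break;
        }
        return val;
}

int main(void)
{
        const char *arg = "mem=12M@0x02000000";
        unsigned long start = 0x02000000;       /* stand-in for PHYS_OFFSET */
        unsigned long size;
        char *p;

        size = memparse_sketch(arg + 4, &p);    /* skip the "mem=" prefix */
        if (*p == '@')
                start = memparse_sketch(p + 1, &p);

        printf("bank: start=%#lx size=%#lx\n", start, size);  /* 0x2000000, 0xc00000 */
        return 0;
}
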
201
202static void __init
203setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
204{
205#ifdef CONFIG_BLK_DEV_RAM
206 extern int rd_size, rd_image_start, rd_prompt, rd_doload;
207
208 rd_image_start = image_start;
209 rd_prompt = prompt;
210 rd_doload = doload;
211
212 if (rd_sz)
213 rd_size = rd_sz;
214#endif
215}
216
217static void __init
218request_standard_resources(struct meminfo *mi)
219{
220 struct resource *res;
221 int i;
222
223 kernel_code.start = init_mm.start_code;
224 kernel_code.end = init_mm.end_code - 1;
225#ifdef CONFIG_XIP_KERNEL
226 kernel_data.start = init_mm.start_data;
227#else
228 kernel_data.start = init_mm.end_code;
229#endif
230 kernel_data.end = init_mm.brk - 1;
231
232 for (i = 0; i < mi->nr_banks; i++) {
233 unsigned long virt_start, virt_end;
234
235 if (mi->bank[i].size == 0)
236 continue;
237
238 virt_start = mi->bank[i].start;
239 virt_end = virt_start + mi->bank[i].size - 1;
240
241 res = alloc_bootmem_low(sizeof(*res));
242 res->name = "System RAM";
243 res->start = virt_start;
244 res->end = virt_end;
245 res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
246
247 request_resource(&iomem_resource, res);
248
249 if (kernel_code.start >= res->start &&
250 kernel_code.end <= res->end)
251 request_resource(res, &kernel_code);
252 if (kernel_data.start >= res->start &&
253 kernel_data.end <= res->end)
254 request_resource(res, &kernel_data);
255 }
256
257/* FIXME - needed? if (mdesc->video_start) {
258 video_ram.start = mdesc->video_start;
259 video_ram.end = mdesc->video_end;
260 request_resource(&iomem_resource, &video_ram);
261 }*/
262
263 /*
264 * Some machines don't have the possibility of ever
265 * possessing lp1 or lp2
266 */
267 if (0) /* FIXME - need to do this for A5k at least */
268 request_resource(&ioport_resource, &lp0);
269}
270
271/*
272 * Tag parsing.
273 *
274 * This is the new way of passing data to the kernel at boot time. Rather
275 * than passing a fixed inflexible structure to the kernel, we pass a list
276 * of variable-sized tags to the kernel. The first tag must be an ATAG_CORE
277 * tag for the list to be recognised (to distinguish the tagged list from
278 * a param_struct). The list is terminated with a zero-length tag (this tag
279 * is not parsed in any way).
280 */
281static int __init parse_tag_core(const struct tag *tag)
282{
283 if (tag->hdr.size > 2) {
284 if ((tag->u.core.flags & 1) == 0)
285 root_mountflags &= ~MS_RDONLY;
286 ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
287 }
288 return 0;
289}
290
291__tagtable(ATAG_CORE, parse_tag_core);
292
293static int __init parse_tag_mem32(const struct tag *tag)
294{
295 if (meminfo.nr_banks >= NR_BANKS) {
296 printk(KERN_WARNING
297 "Ignoring memory bank 0x%08x size %dKB\n",
298 tag->u.mem.start, tag->u.mem.size / 1024);
299 return -EINVAL;
300 }
301 meminfo.bank[meminfo.nr_banks].start = tag->u.mem.start;
302 meminfo.bank[meminfo.nr_banks].size = tag->u.mem.size;
303 meminfo.bank[meminfo.nr_banks].node = PHYS_TO_NID(tag->u.mem.start);
304 meminfo.nr_banks += 1;
305
306 return 0;
307}
308
309__tagtable(ATAG_MEM, parse_tag_mem32);
310
311#if defined(CONFIG_DUMMY_CONSOLE)
312struct screen_info screen_info = {
313 .orig_video_lines = 30,
314 .orig_video_cols = 80,
315 .orig_video_mode = 0,
316 .orig_video_ega_bx = 0,
317 .orig_video_isVGA = 1,
318 .orig_video_points = 8
319};
320
321static int __init parse_tag_videotext(const struct tag *tag)
322{
323 screen_info.orig_x = tag->u.videotext.x;
324 screen_info.orig_y = tag->u.videotext.y;
325 screen_info.orig_video_page = tag->u.videotext.video_page;
326 screen_info.orig_video_mode = tag->u.videotext.video_mode;
327 screen_info.orig_video_cols = tag->u.videotext.video_cols;
328 screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
329 screen_info.orig_video_lines = tag->u.videotext.video_lines;
330 screen_info.orig_video_isVGA = tag->u.videotext.video_isvga;
331 screen_info.orig_video_points = tag->u.videotext.video_points;
332 return 0;
333}
334
335__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
336#endif
337
338static int __init parse_tag_acorn(const struct tag *tag)
339{
340 memc_ctrl_reg = tag->u.acorn.memc_control_reg;
341 number_mfm_drives = tag->u.acorn.adfsdrives;
342 return 0;
343}
344
345__tagtable(ATAG_ACORN, parse_tag_acorn);
346
347static int __init parse_tag_ramdisk(const struct tag *tag)
348{
349 setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
350 (tag->u.ramdisk.flags & 2) == 0,
351 tag->u.ramdisk.start, tag->u.ramdisk.size);
352 return 0;
353}
354
355__tagtable(ATAG_RAMDISK, parse_tag_ramdisk);
356
357static int __init parse_tag_initrd(const struct tag *tag)
358{
359 printk(KERN_WARNING "ATAG_INITRD is deprecated; please update your bootloader.\n");
360 phys_initrd_start = (unsigned long)tag->u.initrd.start;
361 phys_initrd_size = (unsigned long)tag->u.initrd.size;
362 return 0;
363}
364
365__tagtable(ATAG_INITRD, parse_tag_initrd);
366
367static int __init parse_tag_initrd2(const struct tag *tag)
368{
369 printk(KERN_WARNING "ATAG_INITRD is deprecated; please update your bootloader.\n");
370 phys_initrd_start = (unsigned long)tag->u.initrd.start;
371 phys_initrd_size = (unsigned long)tag->u.initrd.size;
372 return 0;
373}
374
375__tagtable(ATAG_INITRD2, parse_tag_initrd2);
376
377static int __init parse_tag_serialnr(const struct tag *tag)
378{
379 system_serial_low = tag->u.serialnr.low;
380 system_serial_high = tag->u.serialnr.high;
381 return 0;
382}
383
384__tagtable(ATAG_SERIAL, parse_tag_serialnr);
385
386static int __init parse_tag_revision(const struct tag *tag)
387{
388 system_rev = tag->u.revision.rev;
389 return 0;
390}
391
392__tagtable(ATAG_REVISION, parse_tag_revision);
393
394static int __init parse_tag_cmdline(const struct tag *tag)
395{
396 strncpy(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE);
397 default_command_line[COMMAND_LINE_SIZE - 1] = '\0';
398 return 0;
399}
400
401__tagtable(ATAG_CMDLINE, parse_tag_cmdline);
402
403/*
404 * Scan the tag table for this tag, and call its parse function.
405 * The tag table is built by the linker from all the __tagtable
406 * declarations.
407 */
408static int __init parse_tag(const struct tag *tag)
409{
410 extern struct tagtable __tagtable_begin, __tagtable_end;
411 struct tagtable *t;
412
413 for (t = &__tagtable_begin; t < &__tagtable_end; t++)
414 if (tag->hdr.tag == t->tag) {
415 t->parse(tag);
416 break;
417 }
418
419 return t < &__tagtable_end;
420}
421
422/*
423 * Parse all tags in the list, checking both the global and architecture
424 * specific tag tables.
425 */
426static void __init parse_tags(const struct tag *t)
427{
428 for (; t->hdr.size; t = tag_next(t))
429 if (!parse_tag(t))
430 printk(KERN_WARNING
431 "Ignoring unrecognised tag 0x%08x\n",
432 t->hdr.tag);
433}
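
The tag list parsed above is a flat sequence of (size, tag) headers in which size is counted in 32-bit words and a zero size terminates the list; that is all tag_next() relies on. A compact userspace sketch of building and walking such a list. The header layout mirrors the real one, and the two tag values happen to match ATAG_CORE and ATAG_MEM, but the buffer and payload sizes are purely illustrative.

#include <stdint.h>
#include <stdio.h>

struct tag_header { uint32_t size; uint32_t tag; };     /* size in 32-bit words */

#define TAG_NEXT(t) ((struct tag_header *)((uint32_t *)(t) + (t)->size))

int main(void)
{
        uint32_t buf[16] = { 0 };
        struct tag_header *t = (struct tag_header *)buf;

        t->size = 5; t->tag = 0x54410001;       /* header + 3 words of payload */
        t = TAG_NEXT(t);
        t->size = 2; t->tag = 0x54410002;       /* header-only tag */
        t = TAG_NEXT(t);
        t->size = 0; t->tag = 0;                /* zero-length terminator */

        for (t = (struct tag_header *)buf; t->size; t = TAG_NEXT(t))
                printf("tag %#x, %u words\n", t->tag, t->size);
        return 0;
}
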
434
435/*
436 * This holds our defaults.
437 */
438static struct init_tags {
439 struct tag_header hdr1;
440 struct tag_core core;
441 struct tag_header hdr2;
442 struct tag_mem32 mem;
443 struct tag_header hdr3;
444} init_tags __initdata = {
445 { tag_size(tag_core), ATAG_CORE },
446 { 1, PAGE_SIZE, 0xff },
447 { tag_size(tag_mem32), ATAG_MEM },
448 { MEM_SIZE, PHYS_OFFSET },
449 { 0, ATAG_NONE }
450};
451
452void __init setup_arch(char **cmdline_p)
453{
454 struct tag *tags = (struct tag *)&init_tags;
455 char *from = default_command_line;
456
457 setup_processor();
458 if (machine_arch_type == MACH_TYPE_A5K)
459 machine_name = "A5000";
460 else if (machine_arch_type == MACH_TYPE_ARCHIMEDES)
461 machine_name = "Archimedes";
462 else
463 machine_name = "UNKNOWN";
464
465 //FIXME - the tag struct is always copied here but this is a block
466 // of RAM that is accidentally reserved along with video RAM. perhaps
467 // it would be a good idea to explicitly reserve this?
468
469 tags = (struct tag *)0x0207c000;
470
471 /*
472 * If we have the old style parameters, convert them to
473 * a tag list.
474 */
475 if (tags->hdr.tag != ATAG_CORE)
476 convert_to_tag_list(tags);
477 if (tags->hdr.tag != ATAG_CORE)
478 tags = (struct tag *)&init_tags;
479 if (tags->hdr.tag == ATAG_CORE) {
480 if (meminfo.nr_banks != 0)
481 squash_mem_tags(tags);
482 parse_tags(tags);
483 }
484
485 init_mm.start_code = (unsigned long) &_text;
486#ifndef CONFIG_XIP_KERNEL
487 init_mm.end_code = (unsigned long) &_etext;
488#else
489 init_mm.end_code = (unsigned long) &_endtext;
490 init_mm.start_data = (unsigned long) &_sdata;
491#endif
492 init_mm.end_data = (unsigned long) &_edata;
493 init_mm.brk = (unsigned long) &_end;
494
495 memcpy(boot_command_line, from, COMMAND_LINE_SIZE);
496 boot_command_line[COMMAND_LINE_SIZE-1] = '\0';
497 parse_cmdline(&meminfo, cmdline_p, from);
498 bootmem_init(&meminfo);
499 paging_init(&meminfo);
500 request_standard_resources(&meminfo);
501
502#ifdef CONFIG_VT
503#if defined(CONFIG_DUMMY_CONSOLE)
504 conswitchp = &dummy_con;
505#endif
506#endif
507}
508
509static const char *hwcap_str[] = {
510 "swp",
511 "half",
512 "thumb",
513 "26bit",
514 "fastmult",
515 "fpa",
516 "vfp",
517 "edsp",
518 NULL
519};
520
521static int c_show(struct seq_file *m, void *v)
522{
523 int i;
524
525 seq_printf(m, "Processor\t: %s %s rev %d (%s)\n",
526 proc_info.manufacturer, proc_info.cpu_name,
527 (int)processor_id & 15, elf_platform);
528
529 seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
530 loops_per_jiffy / (500000/HZ),
531 (loops_per_jiffy / (5000/HZ)) % 100);
532
533 /* dump out the processor features */
534 seq_puts(m, "Features\t: ");
535
536 for (i = 0; hwcap_str[i]; i++)
537 if (elf_hwcap & (1 << i))
538 seq_printf(m, "%s ", hwcap_str[i]);
539
540 seq_puts(m, "\n");
541
542 seq_printf(m, "CPU part\t\t: %07x\n", processor_id >> 4);
543 seq_printf(m, "CPU revision\t: %d\n\n", processor_id & 15);
544 seq_printf(m, "Hardware\t: %s\n", machine_name);
545 seq_printf(m, "Revision\t: %04x\n", system_rev);
546 seq_printf(m, "Serial\t\t: %08x%08x\n",
547 system_serial_high, system_serial_low);
548
549 return 0;
550}
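
The two-step BogoMIPS formatting in c_show() above avoids floating point: dividing loops_per_jiffy by (500000/HZ) yields the integer part, and dividing by (5000/HZ) modulo 100 yields two decimal places. A quick standalone check of that arithmetic; the loops_per_jiffy value and HZ are assumptions for the example.

#include <stdio.h>

#define HZ 100                                   /* assumed tick rate */

int main(void)
{
        unsigned long loops_per_jiffy = 49152;   /* made-up calibration result */

        printf("BogoMIPS: %lu.%02lu\n",
               loops_per_jiffy / (500000 / HZ),
               (loops_per_jiffy / (5000 / HZ)) % 100);   /* prints 9.83 */
        return 0;
}
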
551
552static void *c_start(struct seq_file *m, loff_t *pos)
553{
554 return *pos < 1 ? (void *)1 : NULL;
555}
556
557static void *c_next(struct seq_file *m, void *v, loff_t *pos)
558{
559 ++*pos;
560 return NULL;
561}
562
563static void c_stop(struct seq_file *m, void *v)
564{
565}
566
567struct seq_operations cpuinfo_op = {
568 .start = c_start,
569 .next = c_next,
570 .stop = c_stop,
571 .show = c_show
572};
diff --git a/arch/arm26/kernel/signal.c b/arch/arm26/kernel/signal.c
deleted file mode 100644
index 379b82dc645f..000000000000
--- a/arch/arm26/kernel/signal.c
+++ /dev/null
@@ -1,538 +0,0 @@
1/*
2 * linux/arch/arm26/kernel/signal.c
3 *
4 * Copyright (C) 1995-2002 Russell King
5 * Copyright (C) 2003 Ian Molton (ARM26)
6 *
7 * FIXME!!! This is probably very broken (13/05/2003)
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13#include <linux/sched.h>
14#include <linux/mm.h>
15#include <linux/smp.h>
16#include <linux/kernel.h>
17#include <linux/errno.h>
18#include <linux/signal.h>
19#include <linux/wait.h>
20#include <linux/ptrace.h>
21#include <linux/personality.h>
22#include <linux/tty.h>
23#include <linux/binfmts.h>
24#include <linux/elf.h>
25
26#include <asm/pgalloc.h>
27#include <asm/ucontext.h>
28#include <asm/uaccess.h>
29#include <asm/unistd.h>
30
31#include "ptrace.h"
32
33#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
34
35/*
36 * For ARM syscalls, we encode the syscall number into the instruction.
37 */
38#define SWI_SYS_SIGRETURN (0xef000000|(__NR_sigreturn))
39#define SWI_SYS_RT_SIGRETURN (0xef000000|(__NR_rt_sigreturn))
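
The two defines above lean on the SWI encoding directly: the opcode is the fixed word 0xef000000 and the low 24 bits carry the syscall number, which is why the sigreturn trampolines can be emitted as single words into the signal frame. A tiny sketch of composing and picking apart such a word; the syscall number used is an assumption, not taken from the arm26 headers.

#include <stdint.h>
#include <stdio.h>

#define SWI_OPCODE  0xef000000u
#define SWI_NR_MASK 0x00ffffffu

static uint32_t swi_encode(uint32_t nr)   { return SWI_OPCODE | (nr & SWI_NR_MASK); }
static uint32_t swi_number(uint32_t insn) { return insn & SWI_NR_MASK; }

int main(void)
{
        uint32_t nr = 0x900077;                 /* hypothetical __NR_sigreturn */
        uint32_t insn = swi_encode(nr);

        printf("nr %#x -> insn %#x -> nr %#x\n", nr, insn, swi_number(insn));
        return 0;
}
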
40
41static int do_signal(sigset_t *oldset, struct pt_regs * regs, int syscall);
42
43/*
44 * atomically swap in the new signal mask, and wait for a signal.
45 */
46asmlinkage int sys_sigsuspend(int restart, unsigned long oldmask, old_sigset_t mask, struct pt_regs *regs)
47{
48 sigset_t saveset;
49
50 mask &= _BLOCKABLE;
51 spin_lock_irq(&current->sighand->siglock);
52 saveset = current->blocked;
53 siginitset(&current->blocked, mask);
54 recalc_sigpending();
55 spin_unlock_irq(&current->sighand->siglock);
56 regs->ARM_r0 = -EINTR;
57
58 while (1) {
59 current->state = TASK_INTERRUPTIBLE;
60 schedule();
61 if (do_signal(&saveset, regs, 0))
62 return regs->ARM_r0;
63 }
64}
65
66asmlinkage int
67sys_rt_sigsuspend(sigset_t *unewset, size_t sigsetsize, struct pt_regs *regs)
68{
69 sigset_t saveset, newset;
70
71 /* XXX: Don't preclude handling different sized sigset_t's. */
72 if (sigsetsize != sizeof(sigset_t))
73 return -EINVAL;
74
75 if (copy_from_user(&newset, unewset, sizeof(newset)))
76 return -EFAULT;
77 sigdelsetmask(&newset, ~_BLOCKABLE);
78
79 spin_lock_irq(&current->sighand->siglock);
80 saveset = current->blocked;
81 current->blocked = newset;
82 recalc_sigpending();
83 spin_unlock_irq(&current->sighand->siglock);
84 regs->ARM_r0 = -EINTR;
85
86 while (1) {
87 current->state = TASK_INTERRUPTIBLE;
88 schedule();
89 if (do_signal(&saveset, regs, 0))
90 return regs->ARM_r0;
91 }
92}
93
94asmlinkage int
95sys_sigaction(int sig, const struct old_sigaction *act,
96 struct old_sigaction *oact)
97{
98 struct k_sigaction new_ka, old_ka;
99 int ret;
100
101 if (act) {
102 old_sigset_t mask;
103 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
104 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
105 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
106 return -EFAULT;
107 __get_user(new_ka.sa.sa_flags, &act->sa_flags);
108 __get_user(mask, &act->sa_mask);
109 siginitset(&new_ka.sa.sa_mask, mask);
110 }
111
112 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
113
114 if (!ret && oact) {
115 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
116 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
117 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
118 return -EFAULT;
119 __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
120 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
121 }
122
123 return ret;
124}
125
126/*
127 * Do a signal return; undo the signal stack.
128 */
129struct sigframe
130{
131 struct sigcontext sc;
132 unsigned long extramask[_NSIG_WORDS-1];
133 unsigned long retcode;
134};
135
136struct rt_sigframe
137{
138 struct siginfo *pinfo;
139 void *puc;
140 struct siginfo info;
141 struct ucontext uc;
142 unsigned long retcode;
143};
144
145static int
146restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc)
147{
148 int err = 0;
149
150 __get_user_error(regs->ARM_r0, &sc->arm_r0, err);
151 __get_user_error(regs->ARM_r1, &sc->arm_r1, err);
152 __get_user_error(regs->ARM_r2, &sc->arm_r2, err);
153 __get_user_error(regs->ARM_r3, &sc->arm_r3, err);
154 __get_user_error(regs->ARM_r4, &sc->arm_r4, err);
155 __get_user_error(regs->ARM_r5, &sc->arm_r5, err);
156 __get_user_error(regs->ARM_r6, &sc->arm_r6, err);
157 __get_user_error(regs->ARM_r7, &sc->arm_r7, err);
158 __get_user_error(regs->ARM_r8, &sc->arm_r8, err);
159 __get_user_error(regs->ARM_r9, &sc->arm_r9, err);
160 __get_user_error(regs->ARM_r10, &sc->arm_r10, err);
161 __get_user_error(regs->ARM_fp, &sc->arm_fp, err);
162 __get_user_error(regs->ARM_ip, &sc->arm_ip, err);
163 __get_user_error(regs->ARM_sp, &sc->arm_sp, err);
164 __get_user_error(regs->ARM_lr, &sc->arm_lr, err);
165 __get_user_error(regs->ARM_pc, &sc->arm_pc, err);
166
167 err |= !valid_user_regs(regs);
168
169 return err;
170}
171
172asmlinkage int sys_sigreturn(struct pt_regs *regs)
173{
174 struct sigframe *frame;
175 sigset_t set;
176
177 /*
178 * Since we stacked the signal on a 64-bit boundary,
179 * then 'sp' should be word aligned here. If it's
180 * not, then the user is trying to mess with us.
181 */
182 if (regs->ARM_sp & 7)
183 goto badframe;
184
185 frame = (struct sigframe *)regs->ARM_sp;
186
187 if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
188 goto badframe;
189 if (__get_user(set.sig[0], &frame->sc.oldmask)
190 || (_NSIG_WORDS > 1
191 && __copy_from_user(&set.sig[1], &frame->extramask,
192 sizeof(frame->extramask))))
193 goto badframe;
194
195 sigdelsetmask(&set, ~_BLOCKABLE);
196 spin_lock_irq(&current->sighand->siglock);
197 current->blocked = set;
198 recalc_sigpending();
199 spin_unlock_irq(&current->sighand->siglock);
200
201 if (restore_sigcontext(regs, &frame->sc))
202 goto badframe;
203
204 /* Send SIGTRAP if we're single-stepping */
205 if (current->ptrace & PT_SINGLESTEP) {
206 ptrace_cancel_bpt(current);
207 send_sig(SIGTRAP, current, 1);
208 }
209
210 return regs->ARM_r0;
211
212badframe:
213 force_sig(SIGSEGV, current);
214 return 0;
215}
216
217asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
218{
219 struct rt_sigframe *frame;
220 sigset_t set;
221
222 /*
223 * Since we stacked the signal on a 64-bit boundary,
224 * then 'sp' should be word aligned here. If it's
225 * not, then the user is trying to mess with us.
226 */
227 if (regs->ARM_sp & 7)
228 goto badframe;
229
230 frame = (struct rt_sigframe *)regs->ARM_sp;
231
232 if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
233 goto badframe;
234 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
235 goto badframe;
236
237 sigdelsetmask(&set, ~_BLOCKABLE);
238 spin_lock_irq(&current->sighand->siglock);
239 current->blocked = set;
240 recalc_sigpending();
241 spin_unlock_irq(&current->sighand->siglock);
242
243 if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
244 goto badframe;
245
246 /* Send SIGTRAP if we're single-stepping */
247 if (current->ptrace & PT_SINGLESTEP) {
248 ptrace_cancel_bpt(current);
249 send_sig(SIGTRAP, current, 1);
250 }
251
252 return regs->ARM_r0;
253
254badframe:
255 force_sig(SIGSEGV, current);
256 return 0;
257}
258
259static int
260setup_sigcontext(struct sigcontext *sc, /*struct _fpstate *fpstate,*/
261 struct pt_regs *regs, unsigned long mask)
262{
263 int err = 0;
264
265 __put_user_error(regs->ARM_r0, &sc->arm_r0, err);
266 __put_user_error(regs->ARM_r1, &sc->arm_r1, err);
267 __put_user_error(regs->ARM_r2, &sc->arm_r2, err);
268 __put_user_error(regs->ARM_r3, &sc->arm_r3, err);
269 __put_user_error(regs->ARM_r4, &sc->arm_r4, err);
270 __put_user_error(regs->ARM_r5, &sc->arm_r5, err);
271 __put_user_error(regs->ARM_r6, &sc->arm_r6, err);
272 __put_user_error(regs->ARM_r7, &sc->arm_r7, err);
273 __put_user_error(regs->ARM_r8, &sc->arm_r8, err);
274 __put_user_error(regs->ARM_r9, &sc->arm_r9, err);
275 __put_user_error(regs->ARM_r10, &sc->arm_r10, err);
276 __put_user_error(regs->ARM_fp, &sc->arm_fp, err);
277 __put_user_error(regs->ARM_ip, &sc->arm_ip, err);
278 __put_user_error(regs->ARM_sp, &sc->arm_sp, err);
279 __put_user_error(regs->ARM_lr, &sc->arm_lr, err);
280 __put_user_error(regs->ARM_pc, &sc->arm_pc, err);
281
282 __put_user_error(current->thread.trap_no, &sc->trap_no, err);
283 __put_user_error(current->thread.error_code, &sc->error_code, err);
284 __put_user_error(current->thread.address, &sc->fault_address, err);
285 __put_user_error(mask, &sc->oldmask, err);
286
287 return err;
288}
289
290static inline void *
291get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, int framesize)
292{
293 unsigned long sp = regs->ARM_sp;
294
295 /*
296 * This is the X/Open sanctioned signal stack switching.
297 */
298 if ((ka->sa.sa_flags & SA_ONSTACK) && !sas_ss_flags(sp))
299 sp = current->sas_ss_sp + current->sas_ss_size;
300
301 /*
302 * ATPCS B01 mandates 8-byte alignment
303 */
304 return (void *)((sp - framesize) & ~7);
305}
306
307static int
308setup_return(struct pt_regs *regs, struct k_sigaction *ka,
309 unsigned long *rc, void *frame, int usig)
310{
311 unsigned long handler = (unsigned long)ka->sa.sa_handler;
312 unsigned long retcode;
313
314 if (ka->sa.sa_flags & SA_RESTORER) {
315 retcode = (unsigned long)ka->sa.sa_restorer;
316 } else {
317
318 if (__put_user((ka->sa.sa_flags & SA_SIGINFO)?SWI_SYS_RT_SIGRETURN:SWI_SYS_SIGRETURN, rc))
319 return 1;
320
321 retcode = ((unsigned long)rc);
322 }
323
324 regs->ARM_r0 = usig;
325 regs->ARM_sp = (unsigned long)frame;
326 regs->ARM_lr = retcode;
327 regs->ARM_pc = handler & ~3;
328
329 return 0;
330}
331
332static int
333setup_frame(int usig, struct k_sigaction *ka, sigset_t *set, struct pt_regs *regs)
334{
335 struct sigframe *frame = get_sigframe(ka, regs, sizeof(*frame));
336 int err = 0;
337
338 if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
339 return 1;
340
341 err |= setup_sigcontext(&frame->sc, /*&frame->fpstate,*/ regs, set->sig[0]);
342
343 if (_NSIG_WORDS > 1) {
344 err |= __copy_to_user(frame->extramask, &set->sig[1],
345 sizeof(frame->extramask));
346 }
347
348 if (err == 0)
349 err = setup_return(regs, ka, &frame->retcode, frame, usig);
350
351 return err;
352}
353
354static int
355setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info,
356 sigset_t *set, struct pt_regs *regs)
357{
358 struct rt_sigframe *frame = get_sigframe(ka, regs, sizeof(*frame));
359 int err = 0;
360
361 if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
362 return 1;
363
364 __put_user_error(&frame->info, &frame->pinfo, err);
365 __put_user_error(&frame->uc, &frame->puc, err);
366 err |= copy_siginfo_to_user(&frame->info, info);
367
368 /* Clear all the bits of the ucontext we don't use. */
369 err |= __clear_user(&frame->uc, offsetof(struct ucontext, uc_mcontext));
370
371 err |= setup_sigcontext(&frame->uc.uc_mcontext, /*&frame->fpstate,*/
372 regs, set->sig[0]);
373 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
374
375 if (err == 0)
376 err = setup_return(regs, ka, &frame->retcode, frame, usig);
377
378 if (err == 0) {
379 /*
380 * For realtime signals we must also set the second and third
381 * arguments for the signal handler.
382 * -- Peter Maydell <pmaydell@chiark.greenend.org.uk> 2000-12-06
383 */
384 regs->ARM_r1 = (unsigned long)frame->pinfo;
385 regs->ARM_r2 = (unsigned long)frame->puc;
386 }
387
388 return err;
389}
390
391static inline void restart_syscall(struct pt_regs *regs)
392{
393 regs->ARM_r0 = regs->ARM_ORIG_r0;
394 regs->ARM_pc -= 4;
395}
396
397/*
398 * OK, we're invoking a handler
399 */
400static void
401handle_signal(unsigned long sig, siginfo_t *info, sigset_t *oldset,
402 struct pt_regs * regs, int syscall)
403{
404 struct thread_info *thread = current_thread_info();
405 struct task_struct *tsk = current;
406 struct k_sigaction *ka = &tsk->sighand->action[sig-1];
407 int usig = sig;
408 int ret;
409
410 /*
411 * If we were from a system call, check for system call restarting...
412 */
413 if (syscall) {
414 switch (regs->ARM_r0) {
415 case -ERESTART_RESTARTBLOCK:
416 current_thread_info()->restart_block.fn =
417 do_no_restart_syscall;
418 case -ERESTARTNOHAND:
419 regs->ARM_r0 = -EINTR;
420 break;
421 case -ERESTARTSYS:
422 if (!(ka->sa.sa_flags & SA_RESTART)) {
423 regs->ARM_r0 = -EINTR;
424 break;
425 }
426 /* fallthrough */
427 case -ERESTARTNOINTR:
428 restart_syscall(regs);
429 }
430 }
431
432 /*
433 * translate the signal
434 */
435 if (usig < 32 && thread->exec_domain && thread->exec_domain->signal_invmap)
436 usig = thread->exec_domain->signal_invmap[usig];
437
438 /*
439 * Set up the stack frame
440 */
441 if (ka->sa.sa_flags & SA_SIGINFO)
442 ret = setup_rt_frame(usig, ka, info, oldset, regs);
443 else
444 ret = setup_frame(usig, ka, oldset, regs);
445
446 /*
447 * Check that the resulting registers are actually sane.
448 */
449 ret |= !valid_user_regs(regs);
450
451 if (ret == 0) {
452 if (ka->sa.sa_flags & SA_ONESHOT)
453 ka->sa.sa_handler = SIG_DFL;
454
455 spin_lock_irq(&tsk->sighand->siglock);
456 sigorsets(&tsk->blocked, &tsk->blocked,
457 &ka->sa.sa_mask);
458 if (!(ka->sa.sa_flags & SA_NODEFER))
459 sigaddset(&tsk->blocked, sig);
460 recalc_sigpending();
461 spin_unlock_irq(&tsk->sighand->siglock);
462 return;
463 }
464
465 force_sigsegv(sig, tsk);
466}
467
468/*
469 * Note that 'init' is a special process: it doesn't get signals it doesn't
470 * want to handle. Thus you cannot kill init even with a SIGKILL even by
471 * mistake.
472 *
473 * Note that we go through the signals twice: once to check the signals that
474 * the kernel can handle, and then we build all the user-level signal handling
475 * stack-frames in one go after that.
476 */
477static int do_signal(sigset_t *oldset, struct pt_regs *regs, int syscall)
478{
479 siginfo_t info;
480 int signr;
481 struct k_sigaction ka;
482
483 /*
484 * We want the common case to go fast, which
485 * is why we may in certain cases get here from
486 * kernel mode. Just return without doing anything
487 * if so.
488 */
489 if (!user_mode(regs))
490 return 0;
491
492 if (current->ptrace & PT_SINGLESTEP)
493 ptrace_cancel_bpt(current);
494
495 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
496 if (signr > 0) {
497 handle_signal(signr, &info, oldset, regs, syscall);
498 if (current->ptrace & PT_SINGLESTEP)
499 ptrace_set_bpt(current);
500 return 1;
501 }
502
503 /*
504 * No signal to deliver to the process - restart the syscall.
505 */
506 if (syscall) {
507 if (regs->ARM_r0 == -ERESTART_RESTARTBLOCK) {
508 u32 *usp;
509
510 regs->ARM_sp -= 12;
511 usp = (u32 *)regs->ARM_sp;
512
513 put_user(regs->ARM_pc, &usp[0]);
514 /* swi __NR_restart_syscall */
515 put_user(0xef000000 | __NR_restart_syscall, &usp[1]);
516 /* ldr pc, [sp], #12 */
517// FIXME!!! is #12 correct there?
518 put_user(0xe49df00c, &usp[2]);
519
520 regs->ARM_pc = regs->ARM_sp + 4;
521 }
522 if (regs->ARM_r0 == -ERESTARTNOHAND ||
523 regs->ARM_r0 == -ERESTARTSYS ||
524 regs->ARM_r0 == -ERESTARTNOINTR) {
525 restart_syscall(regs);
526 }
527 }
528 if (current->ptrace & PT_SINGLESTEP)
529 ptrace_set_bpt(current);
530 return 0;
531}
532
533asmlinkage void
534do_notify_resume(struct pt_regs *regs, unsigned int thread_flags, int syscall)
535{
536 if (thread_flags & _TIF_SIGPENDING)
537 do_signal(&current->blocked, regs, syscall);
538}
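
The restart handling in handle_signal() above is easier to see in isolation. The sketch below models just that decision in ordinary userspace C: the -ERESTART* codes, EINTR and SA_RESTART are defined locally with illustrative values (in the kernel they come from the errno and signal headers), struct fake_regs stands in for pt_regs, and the real restart_syscall() additionally restores ARM_r0 from ARM_ORIG_r0, which this model leaves out.

#include <stdio.h>

#define ERESTARTSYS            512   /* illustrative values, not from a header */
#define ERESTARTNOINTR         513
#define ERESTARTNOHAND         514
#define ERESTART_RESTARTBLOCK  516
#define EINTR                    4
#define SA_RESTART      0x10000000

struct fake_regs { long r0; unsigned long pc; };   /* stand-in for pt_regs */

/* Mirror of the switch in handle_signal(): either rewind the PC so the SWI
 * is re-issued after the handler returns, or fail the call with -EINTR.
 * (In the kernel, ERESTART_RESTARTBLOCK also records do_no_restart_syscall
 * in the restart block; that bookkeeping is omitted here.) */
static void check_restart(struct fake_regs *regs, unsigned long sa_flags)
{
	switch (-regs->r0) {
	case ERESTART_RESTARTBLOCK:
	case ERESTARTNOHAND:
		regs->r0 = -EINTR;          /* never restarted across a handler */
		break;
	case ERESTARTSYS:
		if (!(sa_flags & SA_RESTART)) {
			regs->r0 = -EINTR;  /* handler did not ask for restart */
			break;
		}
		/* fallthrough */
	case ERESTARTNOINTR:
		regs->pc -= 4;              /* back up over the SWI instruction */
		break;
	}
}

int main(void)
{
	struct fake_regs r = { .r0 = -ERESTARTSYS, .pc = 0x8074 };

	check_restart(&r, SA_RESTART);
	printf("r0=%ld pc=%#lx\n", r.r0, r.pc);  /* pc rewound to 0x8070 */
	return 0;
}

Under those assumptions, a call returning -ERESTARTSYS is rewound only when the handler was installed with SA_RESTART; otherwise it fails with -EINTR, matching the switch above.
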
diff --git a/arch/arm26/kernel/sys_arm.c b/arch/arm26/kernel/sys_arm.c
deleted file mode 100644
index dc05aba58baf..000000000000
--- a/arch/arm26/kernel/sys_arm.c
+++ /dev/null
@@ -1,323 +0,0 @@
1/*
2 * linux/arch/arm26/kernel/sys_arm.c
3 *
4 * Copyright (C) People who wrote linux/arch/i386/kernel/sys_i386.c
5 * Copyright (C) 1995, 1996 Russell King.
6 * Copyright (C) 2003 Ian Molton.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * This file contains various random system calls that
13 * have a non-standard calling sequence on the Linux/arm
14 * platform.
15 */
16#include <linux/module.h>
17#include <linux/errno.h>
18#include <linux/sched.h>
19#include <linux/slab.h>
20#include <linux/mm.h>
21#include <linux/sem.h>
22#include <linux/msg.h>
23#include <linux/shm.h>
24#include <linux/stat.h>
25#include <linux/syscalls.h>
26#include <linux/mman.h>
27#include <linux/fs.h>
28#include <linux/file.h>
29#include <linux/utsname.h>
30
31#include <asm/uaccess.h>
32#include <asm/ipc.h>
33
34extern unsigned long do_mremap(unsigned long addr, unsigned long old_len,
35 unsigned long new_len, unsigned long flags,
36 unsigned long new_addr);
37
38/*
39 * sys_pipe() is the normal C calling standard for creating
40 * a pipe. It's not the way unix traditionally does this, though.
41 */
42asmlinkage int sys_pipe(unsigned long * fildes)
43{
44 int fd[2];
45 int error;
46
47 error = do_pipe(fd);
48 if (!error) {
49 if (copy_to_user(fildes, fd, 2*sizeof(int)))
50 error = -EFAULT;
51 }
52 return error;
53}
54
55/* common code for old and new mmaps */
56inline long do_mmap2(
57 unsigned long addr, unsigned long len,
58 unsigned long prot, unsigned long flags,
59 unsigned long fd, unsigned long pgoff)
60{
61 int error = -EINVAL;
62 struct file * file = NULL;
63
64 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
65
66 /*
67 * If we are doing a fixed mapping, and address < FIRST_USER_ADDRESS,
68 * then deny it.
69 */
70 if (flags & MAP_FIXED && addr < FIRST_USER_ADDRESS)
71 goto out;
72
73 error = -EBADF;
74 if (!(flags & MAP_ANONYMOUS)) {
75 file = fget(fd);
76 if (!file)
77 goto out;
78 }
79
80 down_write(&current->mm->mmap_sem);
81 error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
82 up_write(&current->mm->mmap_sem);
83
84 if (file)
85 fput(file);
86out:
87 return error;
88}
89
90struct mmap_arg_struct {
91 unsigned long addr;
92 unsigned long len;
93 unsigned long prot;
94 unsigned long flags;
95 unsigned long fd;
96 unsigned long offset;
97};
98
99asmlinkage int old_mmap(struct mmap_arg_struct *arg)
100{
101 int error = -EFAULT;
102 struct mmap_arg_struct a;
103
104 if (copy_from_user(&a, arg, sizeof(a)))
105 goto out;
106
107 error = -EINVAL;
108 if (a.offset & ~PAGE_MASK)
109 goto out;
110
111 error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
112out:
113 return error;
114}
115
116asmlinkage unsigned long
117sys_arm_mremap(unsigned long addr, unsigned long old_len,
118 unsigned long new_len, unsigned long flags,
119 unsigned long new_addr)
120{
121 unsigned long ret = -EINVAL;
122
123 /*
124 * If we are doing a fixed mapping, and address < FIRST_USER_ADDRESS,
125 * then deny it.
126 */
127 if (flags & MREMAP_FIXED && new_addr < FIRST_USER_ADDRESS)
128 goto out;
129
130 down_write(&current->mm->mmap_sem);
131 ret = do_mremap(addr, old_len, new_len, flags, new_addr);
132 up_write(&current->mm->mmap_sem);
133
134out:
135 return ret;
136}
137
138/*
139 * Perform the select(nd, in, out, ex, tv) and mmap() system
140 * calls.
141 */
142
143struct sel_arg_struct {
144 unsigned long n;
145 fd_set *inp, *outp, *exp;
146 struct timeval *tvp;
147};
148
149asmlinkage int old_select(struct sel_arg_struct *arg)
150{
151 struct sel_arg_struct a;
152
153 if (copy_from_user(&a, arg, sizeof(a)))
154 return -EFAULT;
155 /* sys_select() does the appropriate kernel locking */
156 return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
157}
158
159/*
160 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
161 *
162 * This is really horribly ugly.
163 */
164asmlinkage int sys_ipc (uint call, int first, int second, int third, void *ptr, long fifth)
165{
166 int version, ret;
167
168 version = call >> 16; /* hack for backward compatibility */
169 call &= 0xffff;
170
171 switch (call) {
172 case SEMOP:
173 return sys_semop (first, (struct sembuf *)ptr, second);
174 case SEMGET:
175 return sys_semget (first, second, third);
176 case SEMCTL: {
177 union semun fourth;
178 if (!ptr)
179 return -EINVAL;
180 if (get_user(fourth.__pad, (void **) ptr))
181 return -EFAULT;
182 return sys_semctl (first, second, third, fourth);
183 }
184
185 case MSGSND:
186 return sys_msgsnd (first, (struct msgbuf *) ptr,
187 second, third);
188 case MSGRCV:
189 switch (version) {
190 case 0: {
191 struct ipc_kludge tmp;
192 if (!ptr)
193 return -EINVAL;
194 if (copy_from_user(&tmp,(struct ipc_kludge *) ptr,
195 sizeof (tmp)))
196 return -EFAULT;
197 return sys_msgrcv (first, tmp.msgp, second,
198 tmp.msgtyp, third);
199 }
200 default:
201 return sys_msgrcv (first,
202 (struct msgbuf *) ptr,
203 second, fifth, third);
204 }
205 case MSGGET:
206 return sys_msgget ((key_t) first, second);
207 case MSGCTL:
208 return sys_msgctl (first, second, (struct msqid_ds *) ptr);
209
210 case SHMAT:
211 switch (version) {
212 default: {
213 ulong raddr;
214 ret = do_shmat (first, (char *) ptr, second, &raddr);
215 if (ret)
216 return ret;
217 return put_user (raddr, (ulong *) third);
218 }
219 case 1: /* iBCS2 emulator entry point */
220 if (!segment_eq(get_fs(), get_ds()))
221 return -EINVAL;
222 return do_shmat (first, (char *) ptr,
223 second, (ulong *) third);
224 }
225 case SHMDT:
226 return sys_shmdt ((char *)ptr);
227 case SHMGET:
228 return sys_shmget (first, second, third);
229 case SHMCTL:
230 return sys_shmctl (first, second,
231 (struct shmid_ds *) ptr);
232 default:
233 return -EINVAL;
234 }
235}
236
237/* Fork a new task - this creates a new program thread.
238 * This is called indirectly via a small wrapper
239 */
240asmlinkage int sys_fork(struct pt_regs *regs)
241{
242 return do_fork(SIGCHLD, regs->ARM_sp, regs, 0, NULL, NULL);
243}
244
245/* Clone a task - this clones the calling program thread.
246 * This is called indirectly via a small wrapper
247 */
248asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp, struct pt_regs *regs)
249{
250 /*
251 * We don't support SETTID / CLEARTID (FIXME!!! (nicked from arm32))
252 */
253 if (clone_flags & (CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID))
254 return -EINVAL;
255
256 if (!newsp)
257 newsp = regs->ARM_sp;
258
259 return do_fork(clone_flags, newsp, regs, 0, NULL, NULL);
260}
261
262asmlinkage int sys_vfork(struct pt_regs *regs)
263{
264 return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->ARM_sp, regs, 0, NULL, NULL);
265}
266
267/* sys_execve() executes a new program.
268 * This is called indirectly via a small wrapper
269 */
270asmlinkage int sys_execve(char *filenamei, char **argv, char **envp, struct pt_regs *regs)
271{
272 int error;
273 char * filename;
274
275 filename = getname(filenamei);
276 error = PTR_ERR(filename);
277 if (IS_ERR(filename))
278 goto out;
279 error = do_execve(filename, argv, envp, regs);
280 putname(filename);
281out:
282 return error;
283}
284
285/* FIXME - see if this is correct for arm26 */
286int kernel_execve(const char *filename, char *const argv[], char *const envp[])
287{
288 struct pt_regs regs;
289 int ret;
290 memset(&regs, 0, sizeof(struct pt_regs));
291 ret = do_execve((char *)filename, (char __user * __user *)argv, (char __user * __user *)envp, &regs);
292 if (ret < 0)
293 goto out;
294
295 /*
296 * Save argc to the register structure for userspace.
297 */
298 regs.ARM_r0 = ret;
299
300 /*
301 * We were successful. We won't be returning to our caller, but
302 * instead to user space by manipulating the kernel stack.
303 */
304 asm( "add r0, %0, %1\n\t"
305 "mov r1, %2\n\t"
306 "mov r2, %3\n\t"
307 "bl memmove\n\t" /* copy regs to top of stack */
308 "mov r8, #0\n\t" /* not a syscall */
309 "mov r9, %0\n\t" /* thread structure */
310 "mov sp, r0\n\t" /* reposition stack pointer */
311 "b ret_to_user"
312 :
313 : "r" (current_thread_info()),
314 "Ir" (THREAD_SIZE - 8 - sizeof(regs)),
315 "r" (&regs),
316 "Ir" (sizeof(regs))
317 : "r0", "r1", "r2", "r3", "ip", "memory");
318
319 out:
320 return ret;
321}
322
323EXPORT_SYMBOL(kernel_execve);
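
sys_ipc() above packs two things into its first argument: the low 16 bits select the SysV IPC operation and the high 16 bits carry a version number kept for backward compatibility (version 1 selecting the iBCS2 shmat entry point). A small standalone sketch of that decoding follows; the opcode values mirror the classic <linux/ipc.h> numbering but should be treated as illustrative here.

#include <stdio.h>

enum { SEMOP = 1, SEMGET = 2, SEMCTL = 3,
       MSGSND = 11, MSGRCV = 12, MSGGET = 13, MSGCTL = 14,
       SHMAT = 21, SHMDT = 22, SHMGET = 23, SHMCTL = 24 };

/* Split the multiplexed 'call' argument the same way sys_ipc() does:
 * version in the high half, operation in the low half. */
static const char *decode_ipc(unsigned int call, unsigned int *version)
{
	*version = call >> 16;   /* hack for backward compatibility */
	switch (call & 0xffff) {
	case SEMOP:  return "semop";
	case SEMGET: return "semget";
	case SEMCTL: return "semctl";
	case MSGSND: return "msgsnd";
	case MSGRCV: return "msgrcv";
	case MSGGET: return "msgget";
	case MSGCTL: return "msgctl";
	case SHMAT:  return "shmat";
	case SHMDT:  return "shmdt";
	case SHMGET: return "shmget";
	case SHMCTL: return "shmctl";
	default:     return "EINVAL";
	}
}

int main(void)
{
	unsigned int version;
	/* version 1 in the high half would select the iBCS2 shmat path */
	const char *op = decode_ipc((1u << 16) | SHMAT, &version);

	printf("op=%s version=%u\n", op, version);
	return 0;
}
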
diff --git a/arch/arm26/kernel/time.c b/arch/arm26/kernel/time.c
deleted file mode 100644
index 0f1d57fbd3d7..000000000000
--- a/arch/arm26/kernel/time.c
+++ /dev/null
@@ -1,210 +0,0 @@
1/*
2 * linux/arch/arm26/kernel/time.c
3 *
4 * Copyright (C) 1991, 1992, 1995 Linus Torvalds
5 * Modifications for ARM (C) 1994-2001 Russell King
6 * Mods for ARM26 (C) 2003 Ian Molton
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * This file contains the ARM-specific time handling details:
13 * reading the RTC at bootup, etc...
14 *
15 * 1994-07-02 Alan Modra
16 * fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
17 * 1998-12-20 Updated NTP code according to technical memorandum Jan '96
18 * "A Kernel Model for Precision Timekeeping" by Dave Mills
19 */
20
21#include <linux/module.h>
22#include <linux/kernel.h>
23#include <linux/interrupt.h>
24#include <linux/time.h>
25#include <linux/init.h>
26#include <linux/smp.h>
27#include <linux/timex.h>
28#include <linux/errno.h>
29#include <linux/profile.h>
30
31#include <asm/hardware.h>
32#include <asm/io.h>
33#include <asm/irq.h>
34#include <asm/ioc.h>
35
36/* this needs a better home */
37DEFINE_SPINLOCK(rtc_lock);
38
39/* change this if you have some constant time drift */
40#define USECS_PER_JIFFY (1000000/HZ)
41
42static int dummy_set_rtc(void)
43{
44 return 0;
45}
46
47/*
48 * hook for setting the RTC's idea of the current time.
49 */
50int (*set_rtc)(void) = dummy_set_rtc;
51
52/*
53 * Get time offset based on IOCs timer.
54 * FIXME - if this is called with interrupts off, why the shenanigans
55 * below?
56 */
57static unsigned long gettimeoffset(void)
58{
59 unsigned int count1, count2, status;
60 long offset;
61
62 ioc_writeb (0, IOC_T0LATCH);
63 barrier ();
64 count1 = ioc_readb(IOC_T0CNTL) | (ioc_readb(IOC_T0CNTH) << 8);
65 barrier ();
66 status = ioc_readb(IOC_IRQREQA);
67 barrier ();
68 ioc_writeb (0, IOC_T0LATCH);
69 barrier ();
70 count2 = ioc_readb(IOC_T0CNTL) | (ioc_readb(IOC_T0CNTH) << 8);
71
72 offset = count2;
73 if (count2 < count1) {
74 /*
75 * We have not had an interrupt between reading count1
76 * and count2.
77 */
78 if (status & (1 << 5))
79 offset -= LATCH;
80 } else if (count2 > count1) {
81 /*
82 * We have just had another interrupt between reading
83 * count1 and count2.
84 */
85 offset -= LATCH;
86 }
87
88 offset = (LATCH - offset) * (tick_nsec / 1000);
89 return (offset + LATCH/2) / LATCH;
90}
91
92static unsigned long next_rtc_update;
93
94/*
95 * If we have an externally synchronized linux clock, then update
96 * CMOS clock accordingly every ~11 minutes. set_rtc() has to be
97 * called as close as possible to 500 ms before the new second
98 * starts.
99 */
100static inline void do_set_rtc(void)
101{
102 if (!ntp_synced() || set_rtc == NULL)
103 return;
104
105//FIXME - timespec.tv_sec is a time_t not unsigned long
106 if (next_rtc_update &&
107 time_before((unsigned long)xtime.tv_sec, next_rtc_update))
108 return;
109
110	if (xtime.tv_nsec < 500000000 - ((unsigned) tick_nsec >> 1) ||
111	    xtime.tv_nsec >= 500000000 + ((unsigned) tick_nsec >> 1))
112 return;
113
114 if (set_rtc())
115 /*
116 * rtc update failed. Try again in 60s
117 */
118 next_rtc_update = xtime.tv_sec + 60;
119 else
120 next_rtc_update = xtime.tv_sec + 660;
121}
122
123#define do_leds()
124
125void do_gettimeofday(struct timeval *tv)
126{
127 unsigned long flags;
128 unsigned long seq;
129 unsigned long usec, sec;
130
131 do {
132 seq = read_seqbegin_irqsave(&xtime_lock, flags);
133 usec = gettimeoffset();
134 sec = xtime.tv_sec;
135 usec += xtime.tv_nsec / 1000;
136 } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
137
138 /* usec may have gone up a lot: be safe */
139 while (usec >= 1000000) {
140 usec -= 1000000;
141 sec++;
142 }
143
144 tv->tv_sec = sec;
145 tv->tv_usec = usec;
146}
147
148EXPORT_SYMBOL(do_gettimeofday);
149
150int do_settimeofday(struct timespec *tv)
151{
152 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
153 return -EINVAL;
154
155 write_seqlock_irq(&xtime_lock);
156 /*
157 * This is revolting. We need to set "xtime" correctly. However, the
158 * value in this location is the value at the most recent update of
159 * wall time. Discover what correction gettimeofday() would have
160 * done, and then undo it!
161 */
162 tv->tv_nsec -= 1000 * gettimeoffset();
163
164 while (tv->tv_nsec < 0) {
165 tv->tv_nsec += NSEC_PER_SEC;
166 tv->tv_sec--;
167 }
168
169 xtime.tv_sec = tv->tv_sec;
170 xtime.tv_nsec = tv->tv_nsec;
171 ntp_clear();
172 write_sequnlock_irq(&xtime_lock);
173 clock_was_set();
174 return 0;
175}
176
177EXPORT_SYMBOL(do_settimeofday);
178
179static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
180{
181 do_timer(1);
182#ifndef CONFIG_SMP
183 update_process_times(user_mode(regs));
184#endif
185	do_set_rtc(); //FIXME - EVERY timer IRQ?
186 profile_tick(CPU_PROFILING, regs);
187 return IRQ_HANDLED; //FIXME - is this right?
188}
189
190static struct irqaction timer_irq = {
191 .name = "timer",
192 .flags = IRQF_DISABLED,
193 .handler = timer_interrupt,
194};
195
196extern void ioctime_init(void);
197
198/*
199 * Set up timer interrupt.
200 */
201void __init time_init(void)
202{
203 ioc_writeb(LATCH & 255, IOC_T0LTCHL);
204 ioc_writeb(LATCH >> 8, IOC_T0LTCHH);
205 ioc_writeb(0, IOC_T0GO);
206
207
208 setup_irq(IRQ_TIMER, &timer_irq);
209}
210
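
The trickiest part of time.c above is gettimeoffset(): the IOC timer counts down from LATCH, so two reads plus the pending-interrupt status bit are needed to tell whether the counter reloaded in between. The standalone model below reproduces that arithmetic with plain integers; HZ, the timer rate and USECS_PER_JIFFY are illustrative stand-ins for the real LATCH and tick_nsec values, and irq_pending stands in for the IOC_IRQREQA status bit tested above.

#include <stdio.h>

#define HZ              100              /* illustrative */
#define CLOCK_TICK_RATE 2000000          /* illustrative IOC timer rate */
#define LATCH           (CLOCK_TICK_RATE / HZ)
#define USECS_PER_JIFFY (1000000 / HZ)

static unsigned long ticks_to_usecs(unsigned int count1, unsigned int count2,
                                    int irq_pending)
{
	long offset = count2;

	if (count2 < count1) {
		/* No reload between the two reads; a pending timer IRQ means
		 * jiffies has not yet been updated for a tick that already
		 * fired, so account for one extra period. */
		if (irq_pending)
			offset -= LATCH;
	} else if (count2 > count1) {
		/* The counter reloaded between the two reads. */
		offset -= LATCH;
	}

	/* Remaining count -> microseconds elapsed within this jiffy. */
	offset = (LATCH - offset) * USECS_PER_JIFFY;
	return (offset + LATCH / 2) / LATCH;
}

int main(void)
{
	printf("%lu us\n", ticks_to_usecs(15000, 14000, 0));
	printf("%lu us (reload between reads)\n", ticks_to_usecs(1000, 19000, 0));
	return 0;
}
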
diff --git a/arch/arm26/kernel/traps.c b/arch/arm26/kernel/traps.c
deleted file mode 100644
index 2911e2eae80e..000000000000
--- a/arch/arm26/kernel/traps.c
+++ /dev/null
@@ -1,548 +0,0 @@
1/*
2 * linux/arch/arm26/kernel/traps.c
3 *
4 * Copyright (C) 1995-2002 Russell King
5 * Fragments that appear the same as linux/arch/i386/kernel/traps.c (C) Linus Torvalds
6 * Copyright (C) 2003 Ian Molton (ARM26)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * 'traps.c' handles hardware exceptions after we have saved some state in
13 * 'linux/arch/arm26/lib/traps.S'. Mostly a debugging aid, but will probably
14 * kill the offending process.
15 */
16
17#include <linux/module.h>
18#include <linux/types.h>
19#include <linux/kernel.h>
20#include <linux/signal.h>
21#include <linux/sched.h>
22#include <linux/mm.h>
23#include <linux/spinlock.h>
24#include <linux/personality.h>
25#include <linux/ptrace.h>
26#include <linux/elf.h>
27#include <linux/interrupt.h>
28#include <linux/init.h>
29
30#include <asm/atomic.h>
31#include <asm/io.h>
32#include <asm/pgtable.h>
33#include <asm/system.h>
34#include <asm/uaccess.h>
35#include <asm/unistd.h>
36#include <linux/mutex.h>
37
38#include "ptrace.h"
39
40extern void c_backtrace (unsigned long fp, int pmode);
41extern void show_pte(struct mm_struct *mm, unsigned long addr);
42
43const char *processor_modes[] = { "USER_26", "FIQ_26" , "IRQ_26" , "SVC_26" };
44
45static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt", "*bad reason*"};
46
47/*
48 * Stack pointers should always be within the kernels view of
49 * physical memory. If it is not there, then we can't dump
50 * out any information relating to the stack.
51 */
52static int verify_stack(unsigned long sp)
53{
54 if (sp < PAGE_OFFSET || (sp > (unsigned long)high_memory && high_memory != 0))
55 return -EFAULT;
56
57 return 0;
58}
59
60/*
61 * Dump out the contents of some memory nicely...
62 */
63static void dump_mem(const char *str, unsigned long bottom, unsigned long top)
64{
65 unsigned long p = bottom & ~31;
66 mm_segment_t fs;
67 int i;
68
69 /*
70 * We need to switch to kernel mode so that we can use __get_user
71 * to safely read from kernel space. Note that we now dump the
72 * code first, just in case the backtrace kills us.
73 */
74 fs = get_fs();
75 set_fs(KERNEL_DS);
76
77 printk("%s", str);
78 printk("(0x%08lx to 0x%08lx)\n", bottom, top);
79
80 for (p = bottom & ~31; p < top;) {
81 printk("%04lx: ", p & 0xffff);
82
83 for (i = 0; i < 8; i++, p += 4) {
84 unsigned int val;
85
86 if (p < bottom || p >= top)
87 printk(" ");
88 else {
89 __get_user(val, (unsigned long *)p);
90 printk("%08x ", val);
91 }
92 }
93 printk ("\n");
94 }
95
96 set_fs(fs);
97}
98
99static void dump_instr(struct pt_regs *regs)
100{
101 unsigned long addr = instruction_pointer(regs);
102 const int width = 8;
103 mm_segment_t fs;
104 int i;
105
106 /*
107 * We need to switch to kernel mode so that we can use __get_user
108 * to safely read from kernel space. Note that we now dump the
109 * code first, just in case the backtrace kills us.
110 */
111 fs = get_fs();
112 set_fs(KERNEL_DS);
113
114 printk("Code: ");
115 for (i = -4; i < 1; i++) {
116 unsigned int val, bad;
117
118 bad = __get_user(val, &((u32 *)addr)[i]);
119
120 if (!bad)
121 printk(i == 0 ? "(%0*x) " : "%0*x ", width, val);
122 else {
123 printk("bad PC value.");
124 break;
125 }
126 }
127 printk("\n");
128
129 set_fs(fs);
130}
131
132/*static*/ void __dump_stack(struct task_struct *tsk, unsigned long sp)
133{
134 dump_mem("Stack: ", sp, 8192+(unsigned long)task_stack_page(tsk));
135}
136
137void dump_stack(void)
138{
139#ifdef CONFIG_DEBUG_ERRORS
140 __backtrace();
141#endif
142}
143
144EXPORT_SYMBOL(dump_stack);
145
146//FIXME - was a static fn
147void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
148{
149 unsigned int fp;
150 int ok = 1;
151
152 printk("Backtrace: ");
153 fp = regs->ARM_fp;
154 if (!fp) {
155 printk("no frame pointer");
156 ok = 0;
157 } else if (verify_stack(fp)) {
158 printk("invalid frame pointer 0x%08x", fp);
159 ok = 0;
160 } else if (fp < (unsigned long)end_of_stack(tsk))
161 printk("frame pointer underflow");
162 printk("\n");
163
164 if (ok)
165 c_backtrace(fp, processor_mode(regs));
166}
167
168/* FIXME - this is probably wrong.. */
169void show_stack(struct task_struct *task, unsigned long *sp) {
170 dump_mem("Stack: ", (unsigned long)sp, 8192+(unsigned long)task_stack_page(task));
171}
172
173DEFINE_SPINLOCK(die_lock);
174
175/*
176 * This function is protected against re-entrancy.
177 */
178NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
179{
180 struct task_struct *tsk = current;
181
182 console_verbose();
183 spin_lock_irq(&die_lock);
184
185 printk("Internal error: %s: %x\n", str, err);
186 printk("CPU: %d\n", smp_processor_id());
187 show_regs(regs);
188 add_taint(TAINT_DIE);
189 printk("Process %s (pid: %d, stack limit = 0x%p)\n",
190 current->comm, current->pid, end_of_stack(tsk));
191
192 if (!user_mode(regs) || in_interrupt()) {
193 __dump_stack(tsk, (unsigned long)(regs + 1));
194 dump_backtrace(regs, tsk);
195 dump_instr(regs);
196 }
197while(1);
198 spin_unlock_irq(&die_lock);
199 do_exit(SIGSEGV);
200}
201
202void die_if_kernel(const char *str, struct pt_regs *regs, int err)
203{
204 if (user_mode(regs))
205 return;
206
207 die(str, regs, err);
208}
209
210static DEFINE_MUTEX(undef_mutex);
211static int (*undef_hook)(struct pt_regs *);
212
213int request_undef_hook(int (*fn)(struct pt_regs *))
214{
215 int ret = -EBUSY;
216
217 mutex_lock(&undef_mutex);
218 if (undef_hook == NULL) {
219 undef_hook = fn;
220 ret = 0;
221 }
222 mutex_unlock(&undef_mutex);
223
224 return ret;
225}
226
227int release_undef_hook(int (*fn)(struct pt_regs *))
228{
229 int ret = -EINVAL;
230
231 mutex_lock(&undef_mutex);
232 if (undef_hook == fn) {
233 undef_hook = NULL;
234 ret = 0;
235 }
236 mutex_unlock(&undef_mutex);
237
238 return ret;
239}
240
241static int undefined_extension(struct pt_regs *regs, unsigned int op)
242{
243 switch (op) {
244 case 1: /* 0xde01 / 0x?7f001f0 */
245 ptrace_break(current, regs);
246 return 0;
247 }
248 return 1;
249}
250
251asmlinkage void do_undefinstr(struct pt_regs *regs)
252{
253 siginfo_t info;
254 void *pc;
255
256 regs->ARM_pc -= 4;
257
258 pc = (unsigned long *)instruction_pointer(regs); /* strip PSR */
259
260 if (user_mode(regs)) {
261 u32 instr;
262
263 get_user(instr, (u32 *)pc);
264
265 if ((instr & 0x0fff00ff) == 0x07f000f0 &&
266 undefined_extension(regs, (instr >> 8) & 255) == 0) {
267 regs->ARM_pc += 4;
268 return;
269 }
270 } else {
271 if (undef_hook && undef_hook(regs) == 0) {
272 regs->ARM_pc += 4;
273 return;
274 }
275 }
276
277#ifdef CONFIG_DEBUG_USER
278 printk(KERN_INFO "%s (%d): undefined instruction: pc=%p\n",
279 current->comm, current->pid, pc);
280 dump_instr(regs);
281#endif
282
283 current->thread.error_code = 0;
284 current->thread.trap_no = 6;
285
286 info.si_signo = SIGILL;
287 info.si_errno = 0;
288 info.si_code = ILL_ILLOPC;
289 info.si_addr = pc;
290
291 force_sig_info(SIGILL, &info, current);
292
293 die_if_kernel("Oops - undefined instruction", regs, 0);
294}
295
296asmlinkage void do_excpt(unsigned long address, struct pt_regs *regs, int mode)
297{
298 siginfo_t info;
299
300#ifdef CONFIG_DEBUG_USER
301 printk(KERN_INFO "%s (%d): address exception: pc=%08lx\n",
302 current->comm, current->pid, instruction_pointer(regs));
303 dump_instr(regs);
304#endif
305
306 current->thread.error_code = 0;
307 current->thread.trap_no = 11;
308
309 info.si_signo = SIGBUS;
310 info.si_errno = 0;
311 info.si_code = BUS_ADRERR;
312 info.si_addr = (void *)address;
313
314 force_sig_info(SIGBUS, &info, current);
315
316 die_if_kernel("Oops - address exception", regs, mode);
317}
318
319asmlinkage void do_unexp_fiq (struct pt_regs *regs)
320{
321#ifndef CONFIG_IGNORE_FIQ
322 printk("Hmm. Unexpected FIQ received, but trying to continue\n");
323 printk("You may have a hardware problem...\n");
324#endif
325}
326
327/*
328 * bad_mode handles the impossible case in the vectors. If you see one of
329 * these, then it's extremely serious, and could mean you have buggy hardware.
330 * It never returns, and never tries to sync. We hope that we can at least
331 * dump out some state information...
332 */
333asmlinkage void bad_mode(struct pt_regs *regs, int reason, int proc_mode)
334{
335 unsigned int vectors = vectors_base();
336
337 console_verbose();
338
339 printk(KERN_CRIT "Bad mode in %s handler detected: mode %s\n",
340 handler[reason<5?reason:4], processor_modes[proc_mode]);
341
342 /*
343 * Dump out the vectors and stub routines. Maybe a better solution
344 * would be to dump them out only if we detect that they are corrupted.
345 */
346 dump_mem(KERN_CRIT "Vectors: ", vectors, vectors + 0x40);
347 dump_mem(KERN_CRIT "Stubs: ", vectors + 0x200, vectors + 0x4b8);
348
349 die("Oops", regs, 0);
350 local_irq_disable();
351 panic("bad mode");
352}
353
354static int bad_syscall(int n, struct pt_regs *regs)
355{
356 struct thread_info *thread = current_thread_info();
357 siginfo_t info;
358
359 if (current->personality != PER_LINUX && thread->exec_domain->handler) {
360 thread->exec_domain->handler(n, regs);
361 return regs->ARM_r0;
362 }
363
364#ifdef CONFIG_DEBUG_USER
365 printk(KERN_ERR "[%d] %s: obsolete system call %08x.\n",
366 current->pid, current->comm, n);
367 dump_instr(regs);
368#endif
369
370 info.si_signo = SIGILL;
371 info.si_errno = 0;
372 info.si_code = ILL_ILLTRP;
373 info.si_addr = (void *)instruction_pointer(regs) - 4;
374
375 force_sig_info(SIGILL, &info, current);
376 die_if_kernel("Oops", regs, n);
377 return regs->ARM_r0;
378}
379
380static inline void
381do_cache_op(unsigned long start, unsigned long end, int flags)
382{
383 struct vm_area_struct *vma;
384
385 if (end < start)
386 return;
387
388 vma = find_vma(current->active_mm, start);
389 if (vma && vma->vm_start < end) {
390 if (start < vma->vm_start)
391 start = vma->vm_start;
392 if (end > vma->vm_end)
393 end = vma->vm_end;
394 }
395}
396
397/*
398 * Handle all unrecognised system calls.
399 * 0x9f0000 - 0x9fffff are some more esoteric system calls
400 */
401#define NR(x) ((__ARM_NR_##x) - __ARM_NR_BASE)
402asmlinkage int arm_syscall(int no, struct pt_regs *regs)
403{
404 siginfo_t info;
405
406 if ((no >> 16) != 0x9f)
407 return bad_syscall(no, regs);
408
409 switch (no & 0xffff) {
410 case 0: /* branch through 0 */
411 info.si_signo = SIGSEGV;
412 info.si_errno = 0;
413 info.si_code = SEGV_MAPERR;
414 info.si_addr = NULL;
415
416 force_sig_info(SIGSEGV, &info, current);
417
418 die_if_kernel("branch through zero", regs, 0);
419 return 0;
420
421 case NR(breakpoint): /* SWI BREAK_POINT */
422 ptrace_break(current, regs);
423 return regs->ARM_r0;
424
425 case NR(cacheflush):
426 return 0;
427
428 case NR(usr26):
429 break;
430
431 default:
432 /* Calls 9f00xx..9f07ff are defined to return -ENOSYS
433 if not implemented, rather than raising SIGILL. This
434 way the calling program can gracefully determine whether
435 a feature is supported. */
436 if (no <= 0x7ff)
437 return -ENOSYS;
438 break;
439 }
440#ifdef CONFIG_DEBUG_USER
441 /*
442 * experience shows that these seem to indicate that
443 * something catastrophic has happened
444 */
445 printk("[%d] %s: arm syscall %d\n", current->pid, current->comm, no);
446 dump_instr(regs);
447 if (user_mode(regs)) {
448 show_regs(regs);
449 c_backtrace(regs->ARM_fp, processor_mode(regs));
450 }
451#endif
452 info.si_signo = SIGILL;
453 info.si_errno = 0;
454 info.si_code = ILL_ILLTRP;
455 info.si_addr = (void *)instruction_pointer(regs) - 4;
456
457 force_sig_info(SIGILL, &info, current);
458 die_if_kernel("Oops", regs, no);
459 return 0;
460}
461
462void __bad_xchg(volatile void *ptr, int size)
463{
464 printk("xchg: bad data size: pc 0x%p, ptr 0x%p, size %d\n",
465 __builtin_return_address(0), ptr, size);
466 BUG();
467}
468
469/*
470 * A data abort trap was taken, but we did not handle the instruction.
471 * Try to abort the user program, or panic if it was the kernel.
472 */
473asmlinkage void
474baddataabort(int code, unsigned long instr, struct pt_regs *regs)
475{
476 unsigned long addr = instruction_pointer(regs);
477 siginfo_t info;
478
479#ifdef CONFIG_DEBUG_USER
480 printk(KERN_ERR "[%d] %s: bad data abort: code %d instr 0x%08lx\n",
481 current->pid, current->comm, code, instr);
482 dump_instr(regs);
483 show_pte(current->mm, addr);
484#endif
485
486 info.si_signo = SIGILL;
487 info.si_errno = 0;
488 info.si_code = ILL_ILLOPC;
489 info.si_addr = (void *)addr;
490
491 force_sig_info(SIGILL, &info, current);
492 die_if_kernel("unknown data abort code", regs, instr);
493}
494
495volatile void __bug(const char *file, int line, void *data)
496{
497 printk(KERN_CRIT"kernel BUG at %s:%d!", file, line);
498 if (data)
499 printk(KERN_CRIT" - extra data = %p", data);
500 printk("\n");
501 *(int *)0 = 0;
502}
503
504void __readwrite_bug(const char *fn)
505{
506 printk("%s called, but not implemented", fn);
507 BUG();
508}
509
510void __pte_error(const char *file, int line, unsigned long val)
511{
512 printk("%s:%d: bad pte %08lx.\n", file, line, val);
513}
514
515void __pmd_error(const char *file, int line, unsigned long val)
516{
517 printk("%s:%d: bad pmd %08lx.\n", file, line, val);
518}
519
520void __pgd_error(const char *file, int line, unsigned long val)
521{
522 printk("%s:%d: bad pgd %08lx.\n", file, line, val);
523}
524
525asmlinkage void __div0(void)
526{
527 printk("Division by zero in kernel.\n");
528 dump_stack();
529}
530
531void abort(void)
532{
533 BUG();
534
535 /* if that doesn't kill us, halt */
536 panic("Oops failed to kill thread");
537}
538
539void __init trap_init(void)
540{
541 extern void __trap_init(unsigned long);
542 unsigned long base = vectors_base();
543
544 __trap_init(base);
545 if (base != 0)
546 printk(KERN_DEBUG "Relocating machine vectors to 0x%08lx\n",
547 base);
548}
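
dump_mem() above prints memory in 32-byte rows of eight words, blanking out words that fall outside the requested range. The userspace sketch below reproduces that layout, reading from an ordinary buffer with memcpy() instead of __get_user(); the buffer and bounds in main() are arbitrary.

#include <inttypes.h>
#include <stdio.h>
#include <string.h>

/* Same row/word layout as the kernel dump_mem(), but reading from a plain
 * buffer: 'base' is the backing storage, 'bottom'/'top' are offsets into it. */
static void dump_mem(const char *str, const unsigned char *base,
                     unsigned long bottom, unsigned long top)
{
	unsigned long p;
	int i;

	printf("%s(0x%08lx to 0x%08lx)\n", str, bottom, top);

	for (p = bottom & ~31UL; p < top;) {
		printf("%04lx: ", p & 0xffff);
		for (i = 0; i < 8; i++, p += 4) {
			if (p < bottom || p >= top) {
				printf("         ");   /* word outside range */
			} else {
				uint32_t val;
				memcpy(&val, base + p, sizeof(val));
				printf("%08" PRIx32 " ", val);
			}
		}
		printf("\n");
	}
}

int main(void)
{
	unsigned char buf[64];
	unsigned long i;

	for (i = 0; i < sizeof(buf); i++)
		buf[i] = (unsigned char)i;

	dump_mem("Stack: ", buf, 4, 52);   /* deliberately unaligned bounds */
	return 0;
}
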
diff --git a/arch/arm26/kernel/vmlinux-arm26-xip.lds.in b/arch/arm26/kernel/vmlinux-arm26-xip.lds.in
deleted file mode 100644
index 4ec715c25dea..000000000000
--- a/arch/arm26/kernel/vmlinux-arm26-xip.lds.in
+++ /dev/null
@@ -1,136 +0,0 @@
1/* ld script to make ARM Linux kernel
2 * taken from the i386 version by Russell King
3 * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
4 * borrowed from Russell's ARM port by Ian Molton
5 */
6
7#include <asm-generic/vmlinux.lds.h>
8
9OUTPUT_ARCH(arm)
10ENTRY(stext)
11jiffies = jiffies_64;
12SECTIONS
13{
14 . = TEXTADDR;
15 .init : { /* Init code and data */
16 _stext = .;
17 __init_begin = .;
18 _sinittext = .;
19 *(.init.text)
20 _einittext = .;
21 __proc_info_begin = .;
22 *(.proc.info)
23 __proc_info_end = .;
24 __arch_info_begin = .;
25 *(.arch.info)
26 __arch_info_end = .;
27 __tagtable_begin = .;
28 *(.taglist)
29 __tagtable_end = .;
30 . = ALIGN(16);
31 __setup_start = .;
32 *(.init.setup)
33 __setup_end = .;
34 __early_begin = .;
35 *(__early_param)
36 __early_end = .;
37 __initcall_start = .;
38 *(.initcall1.init)
39 *(.initcall2.init)
40 *(.initcall3.init)
41 *(.initcall4.init)
42 *(.initcall5.init)
43 *(.initcall6.init)
44 *(.initcall7.init)
45 __initcall_end = .;
46 __con_initcall_start = .;
47 *(.con_initcall.init)
48 __con_initcall_end = .;
49#ifdef CONFIG_BLK_DEV_INITRD
50 . = ALIGN(32);
51 __initramfs_start = .;
52 usr/built-in.o(.init.ramfs)
53 __initramfs_end = .;
54#endif
55 . = ALIGN(32768);
56 __init_end = .;
57 }
58
59 /DISCARD/ : { /* Exit code and data */
60 *(.exit.text)
61 *(.exit.data)
62 *(.exitcall.exit)
63 }
64
65 .text : { /* Real text segment */
66 _text = .; /* Text and read-only data */
67 TEXT_TEXT
68 SCHED_TEXT
69 LOCK_TEXT /* FIXME - borrowed from arm32 - check*/
70 *(.fixup)
71 *(.gnu.warning)
72 *(.rodata)
73 *(.rodata.*)
74 *(.glue_7)
75 *(.glue_7t)
76 *(.got) /* Global offset table */
77
78 _etext = .; /* End of text section */
79 }
80
81 . = ALIGN(16);
82 __ex_table : { /* Exception table */
83 __start___ex_table = .;
84 *(__ex_table)
85 __stop___ex_table = .;
86 }
87
88 RODATA
89
90 _endtext = .;
91
92 . = DATAADDR;
93
94 _sdata = .;
95
96 .data : {
97 . = ALIGN(8192);
98 /*
99 * first, the init thread union, aligned
100 * to an 8192 byte boundary. (see arm26/kernel/init_task.c)
101 * FIXME - should this be 32K aligned on arm26?
102 */
103 *(.init.task)
104
105 /*
106 * The cacheline aligned data
107 */
108 . = ALIGN(32);
109 *(.data.cacheline_aligned)
110
111 /*
112 * and the usual data section
113 */
114 DATA_DATA
115 CONSTRUCTORS
116
117 *(.init.data)
118
119 _edata = .;
120 }
121
122 .bss : {
123 __bss_start = .; /* BSS */
124 *(.bss)
125 *(COMMON)
126 _end = . ;
127 }
128 /* Stabs debugging sections. */
129 .stab 0 : { *(.stab) }
130 .stabstr 0 : { *(.stabstr) }
131 .stab.excl 0 : { *(.stab.excl) }
132 .stab.exclstr 0 : { *(.stab.exclstr) }
133 .stab.index 0 : { *(.stab.index) }
134 .stab.indexstr 0 : { *(.stab.indexstr) }
135 .comment 0 : { *(.comment) }
136}
diff --git a/arch/arm26/kernel/vmlinux-arm26.lds.in b/arch/arm26/kernel/vmlinux-arm26.lds.in
deleted file mode 100644
index 6c44f6a17bf7..000000000000
--- a/arch/arm26/kernel/vmlinux-arm26.lds.in
+++ /dev/null
@@ -1,129 +0,0 @@
1/* ld script to make ARM Linux kernel
2 * taken from the i386 version by Russell King
3 * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
4 * borrowed from Russell's ARM port by Ian Molton and subsequently modified.
5 */
6
7#include <asm-generic/vmlinux.lds.h>
8
9OUTPUT_ARCH(arm)
10ENTRY(stext)
11jiffies = jiffies_64;
12SECTIONS
13{
14 . = TEXTADDR;
15 .init : { /* Init code and data */
16 _stext = .;
17 __init_begin = .;
18 _sinittext = .;
19 *(.init.text)
20 _einittext = .;
21 __proc_info_begin = .;
22 *(.proc.info)
23 __proc_info_end = .;
24 __arch_info_begin = .;
25 *(.arch.info)
26 __arch_info_end = .;
27 __tagtable_begin = .;
28 *(.taglist)
29 __tagtable_end = .;
30 *(.init.data)
31 . = ALIGN(16);
32 __setup_start = .;
33 *(.init.setup)
34 __setup_end = .;
35 __early_begin = .;
36 *(__early_param)
37 __early_end = .;
38 __initcall_start = .;
39 *(.initcall1.init)
40 *(.initcall2.init)
41 *(.initcall3.init)
42 *(.initcall4.init)
43 *(.initcall5.init)
44 *(.initcall6.init)
45 *(.initcall7.init)
46 __initcall_end = .;
47 __con_initcall_start = .;
48 *(.con_initcall.init)
49 __con_initcall_end = .;
50#ifdef CONFIG_BLK_DEV_INITRD
51 . = ALIGN(32);
52 __initramfs_start = .;
53 usr/built-in.o(.init.ramfs)
54 __initramfs_end = .;
55#endif
56 . = ALIGN(32768);
57 __init_end = .;
58 }
59
60 /DISCARD/ : { /* Exit code and data */
61 *(.exit.text)
62 *(.exit.data)
63 *(.exitcall.exit)
64 }
65
66 .text : { /* Real text segment */
67 _text = .; /* Text and read-only data */
68 TEXT_TEXT
69 SCHED_TEXT
70 LOCK_TEXT
71 *(.fixup)
72 *(.gnu.warning)
73 *(.rodata)
74 *(.rodata.*)
75 *(.glue_7)
76 *(.glue_7t)
77 *(.got) /* Global offset table */
78
79 _etext = .; /* End of text section */
80 }
81
82 . = ALIGN(16);
83 __ex_table : { /* Exception table */
84 __start___ex_table = .;
85 *(__ex_table)
86 __stop___ex_table = .;
87 }
88
89 RODATA
90
91 . = ALIGN(8192);
92
93 .data : {
94 /*
95 * first, the init task union, aligned
96 * to an 8192 byte boundary. (see arm26/kernel/init_task.c)
97 */
98 *(.init.task)
99
100 /*
101 * The cacheline aligned data
102 */
103 . = ALIGN(32);
104 *(.data.cacheline_aligned)
105
106 /*
107 * and the usual data section
108 */
109 DATA_DATA
110 CONSTRUCTORS
111
112 _edata = .;
113 }
114
115 .bss : {
116 __bss_start = .; /* BSS */
117 *(.bss)
118 *(COMMON)
119 _end = . ;
120 }
121 /* Stabs debugging sections. */
122 .stab 0 : { *(.stab) }
123 .stabstr 0 : { *(.stabstr) }
124 .stab.excl 0 : { *(.stab.excl) }
125 .stab.exclstr 0 : { *(.stab.exclstr) }
126 .stab.index 0 : { *(.stab.index) }
127 .stab.indexstr 0 : { *(.stab.indexstr) }
128 .comment 0 : { *(.comment) }
129}
diff --git a/arch/arm26/kernel/vmlinux.lds.S b/arch/arm26/kernel/vmlinux.lds.S
deleted file mode 100644
index 1fa39f02e07c..000000000000
--- a/arch/arm26/kernel/vmlinux.lds.S
+++ /dev/null
@@ -1,11 +0,0 @@
1
2#ifdef CONFIG_XIP_KERNEL
3
4#include "vmlinux-arm26-xip.lds.in"
5
6#else
7
8#include "vmlinux-arm26.lds.in"
9
10#endif
11