aboutsummaryrefslogtreecommitdiffstats
path: root/arch/arm26/kernel
diff options
context:
space:
mode:
Diffstat (limited to 'arch/arm26/kernel')
-rw-r--r--arch/arm26/kernel/Makefile17
-rw-r--r--arch/arm26/kernel/armksyms.c220
-rw-r--r--arch/arm26/kernel/asm-offsets.c63
-rw-r--r--arch/arm26/kernel/calls.S265
-rw-r--r--arch/arm26/kernel/compat.c174
-rw-r--r--arch/arm26/kernel/dma.c273
-rw-r--r--arch/arm26/kernel/ecard.c850
-rw-r--r--arch/arm26/kernel/entry.S961
-rw-r--r--arch/arm26/kernel/fiq.c202
-rw-r--r--arch/arm26/kernel/head.S113
-rw-r--r--arch/arm26/kernel/init_task.c49
-rw-r--r--arch/arm26/kernel/irq.c716
-rw-r--r--arch/arm26/kernel/process.c401
-rw-r--r--arch/arm26/kernel/ptrace.c744
-rw-r--r--arch/arm26/kernel/ptrace.h13
-rw-r--r--arch/arm26/kernel/semaphore.c223
-rw-r--r--arch/arm26/kernel/setup.c573
-rw-r--r--arch/arm26/kernel/signal.c540
-rw-r--r--arch/arm26/kernel/sys_arm.c324
-rw-r--r--arch/arm26/kernel/time.c234
-rw-r--r--arch/arm26/kernel/traps.c548
-rw-r--r--arch/arm26/kernel/vmlinux-arm26-xip.lds.in134
-rw-r--r--arch/arm26/kernel/vmlinux-arm26.lds.in127
-rw-r--r--arch/arm26/kernel/vmlinux.lds.S12
24 files changed, 7776 insertions, 0 deletions
diff --git a/arch/arm26/kernel/Makefile b/arch/arm26/kernel/Makefile
new file mode 100644
index 000000000000..ee9fb49fdb78
--- /dev/null
+++ b/arch/arm26/kernel/Makefile
@@ -0,0 +1,17 @@
1#
2# Makefile for the linux kernel.
3#
4
5# Object file lists.
6
7AFLAGS_head.o := -DTEXTADDR=$(TEXTADDR)
8
9obj-y := compat.o dma.o entry.o irq.o process.o ptrace.o \
10 semaphore.o setup.o signal.o sys_arm.o time.o traps.o \
11 ecard.o dma.o ecard.o fiq.o time.o
12
13extra-y := head.o init_task.o vmlinux.lds
14
15obj-$(CONFIG_FIQ) += fiq.o
16obj-$(CONFIG_MODULES) += armksyms.o
17
diff --git a/arch/arm26/kernel/armksyms.c b/arch/arm26/kernel/armksyms.c
new file mode 100644
index 000000000000..35514b398e2e
--- /dev/null
+++ b/arch/arm26/kernel/armksyms.c
@@ -0,0 +1,220 @@
1/*
2 * linux/arch/arm26/kernel/armksyms.c
3 *
4 * Copyright (C) 2003 Ian Molton
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#include <linux/module.h>
11#include <linux/config.h>
12#include <linux/module.h>
13#include <linux/user.h>
14#include <linux/string.h>
15#include <linux/fs.h>
16#include <linux/mm.h>
17#include <linux/mman.h>
18#include <linux/delay.h>
19#include <linux/in6.h>
20#include <linux/interrupt.h>
21#include <linux/pm.h>
22#include <linux/tty.h>
23#include <linux/vt_kern.h>
24#include <linux/smp_lock.h>
25#include <linux/syscalls.h>
26
27#include <asm/byteorder.h>
28#include <asm/elf.h>
29#include <asm/io.h>
30#include <asm/irq.h>
31#include <asm/processor.h>
32#include <asm/semaphore.h>
33#include <asm/system.h>
34#include <asm/uaccess.h>
35#include <asm/checksum.h>
36#include <asm/mach-types.h>
37
38extern void dump_thread(struct pt_regs *, struct user *);
39extern int dump_fpu(struct pt_regs *, struct user_fp_struct *);
40extern void inswb(unsigned int port, void *to, int len);
41extern void outswb(unsigned int port, const void *to, int len);
42
43extern void __bad_xchg(volatile void *ptr, int size);
44
45/*
46 * libgcc functions - functions that are used internally by the
47 * compiler... (prototypes are not correct though, but that
48 * doesn't really matter since they're not versioned).
49 */
50extern void __ashldi3(void);
51extern void __ashrdi3(void);
52extern void __divsi3(void);
53extern void __lshrdi3(void);
54extern void __modsi3(void);
55extern void __muldi3(void);
56extern void __ucmpdi2(void);
57extern void __udivdi3(void);
58extern void __umoddi3(void);
59extern void __udivmoddi4(void);
60extern void __udivsi3(void);
61extern void __umodsi3(void);
62extern void abort(void);
63
64extern void ret_from_exception(void);
65extern void fpundefinstr(void);
66extern void fp_enter(void);
67
68/*
69 * This has a special calling convention; it doesn't
70 * modify any of the usual registers, except for LR.
71 * FIXME - we used to use our own local version - looks to be in kernel/softirq now
72 */
73//extern void __do_softirq(void);
74
75#define EXPORT_SYMBOL_ALIAS(sym,orig) \
76 const char __kstrtab_##sym[] \
77 __attribute__((section(".kstrtab"))) = \
78 __MODULE_STRING(sym); \
79 const struct module_symbol __ksymtab_##sym \
80 __attribute__((section("__ksymtab"))) = \
81 { (unsigned long)&orig, __kstrtab_##sym };
82
83/*
84 * floating point math emulator support.
85 * These symbols will never change their calling convention...
86 */
87EXPORT_SYMBOL_ALIAS(kern_fp_enter,fp_enter);
88EXPORT_SYMBOL_ALIAS(fp_printk,printk);
89EXPORT_SYMBOL_ALIAS(fp_send_sig,send_sig);
90
91EXPORT_SYMBOL(fpundefinstr);
92EXPORT_SYMBOL(ret_from_exception);
93
94#ifdef CONFIG_VT
95EXPORT_SYMBOL(kd_mksound);
96#endif
97
98//EXPORT_SYMBOL(__do_softirq);
99
100 /* platform dependent support */
101EXPORT_SYMBOL(dump_thread);
102EXPORT_SYMBOL(dump_fpu);
103EXPORT_SYMBOL(udelay);
104EXPORT_SYMBOL(kernel_thread);
105EXPORT_SYMBOL(system_rev);
106EXPORT_SYMBOL(system_serial_low);
107EXPORT_SYMBOL(system_serial_high);
108#ifdef CONFIG_DEBUG_BUGVERBOSE
109EXPORT_SYMBOL(__bug);
110#endif
111EXPORT_SYMBOL(__bad_xchg);
112EXPORT_SYMBOL(__readwrite_bug);
113EXPORT_SYMBOL(enable_irq);
114EXPORT_SYMBOL(disable_irq);
115EXPORT_SYMBOL(set_irq_type);
116EXPORT_SYMBOL(pm_idle);
117EXPORT_SYMBOL(pm_power_off);
118
119 /* processor dependencies */
120EXPORT_SYMBOL(__machine_arch_type);
121
122 /* networking */
123EXPORT_SYMBOL(csum_partial_copy_nocheck);
124EXPORT_SYMBOL(__csum_ipv6_magic);
125
126 /* io */
127#ifndef __raw_readsb
128EXPORT_SYMBOL(__raw_readsb);
129#endif
130#ifndef __raw_readsw
131EXPORT_SYMBOL(__raw_readsw);
132#endif
133#ifndef __raw_readsl
134EXPORT_SYMBOL(__raw_readsl);
135#endif
136#ifndef __raw_writesb
137EXPORT_SYMBOL(__raw_writesb);
138#endif
139#ifndef __raw_writesw
140EXPORT_SYMBOL(__raw_writesw);
141#endif
142#ifndef __raw_writesl
143EXPORT_SYMBOL(__raw_writesl);
144#endif
145
146 /* string / mem functions */
147EXPORT_SYMBOL(strcpy);
148EXPORT_SYMBOL(strncpy);
149EXPORT_SYMBOL(strcat);
150EXPORT_SYMBOL(strncat);
151EXPORT_SYMBOL(strcmp);
152EXPORT_SYMBOL(strncmp);
153EXPORT_SYMBOL(strchr);
154EXPORT_SYMBOL(strlen);
155EXPORT_SYMBOL(strnlen);
156EXPORT_SYMBOL(strpbrk);
157EXPORT_SYMBOL(strrchr);
158EXPORT_SYMBOL(strstr);
159EXPORT_SYMBOL(memset);
160EXPORT_SYMBOL(memcpy);
161EXPORT_SYMBOL(memmove);
162EXPORT_SYMBOL(memcmp);
163EXPORT_SYMBOL(memscan);
164EXPORT_SYMBOL(__memzero);
165
166 /* user mem (segment) */
167EXPORT_SYMBOL(uaccess_kernel);
168EXPORT_SYMBOL(uaccess_user);
169
170EXPORT_SYMBOL(__get_user_1);
171EXPORT_SYMBOL(__get_user_2);
172EXPORT_SYMBOL(__get_user_4);
173EXPORT_SYMBOL(__get_user_8);
174
175EXPORT_SYMBOL(__put_user_1);
176EXPORT_SYMBOL(__put_user_2);
177EXPORT_SYMBOL(__put_user_4);
178EXPORT_SYMBOL(__put_user_8);
179
180 /* gcc lib functions */
181EXPORT_SYMBOL(__ashldi3);
182EXPORT_SYMBOL(__ashrdi3);
183EXPORT_SYMBOL(__divsi3);
184EXPORT_SYMBOL(__lshrdi3);
185EXPORT_SYMBOL(__modsi3);
186EXPORT_SYMBOL(__muldi3);
187EXPORT_SYMBOL(__ucmpdi2);
188EXPORT_SYMBOL(__udivdi3);
189EXPORT_SYMBOL(__umoddi3);
190EXPORT_SYMBOL(__udivmoddi4);
191EXPORT_SYMBOL(__udivsi3);
192EXPORT_SYMBOL(__umodsi3);
193
194 /* bitops */
195EXPORT_SYMBOL(_set_bit_le);
196EXPORT_SYMBOL(_test_and_set_bit_le);
197EXPORT_SYMBOL(_clear_bit_le);
198EXPORT_SYMBOL(_test_and_clear_bit_le);
199EXPORT_SYMBOL(_change_bit_le);
200EXPORT_SYMBOL(_test_and_change_bit_le);
201EXPORT_SYMBOL(_find_first_zero_bit_le);
202EXPORT_SYMBOL(_find_next_zero_bit_le);
203
204 /* elf */
205EXPORT_SYMBOL(elf_platform);
206EXPORT_SYMBOL(elf_hwcap);
207
208 /* syscalls */
209EXPORT_SYMBOL(sys_write);
210EXPORT_SYMBOL(sys_read);
211EXPORT_SYMBOL(sys_lseek);
212EXPORT_SYMBOL(sys_open);
213EXPORT_SYMBOL(sys_exit);
214EXPORT_SYMBOL(sys_wait4);
215
216EXPORT_SYMBOL(get_wchan);
217
218#ifdef CONFIG_PREEMPT
219EXPORT_SYMBOL(kernel_flag);
220#endif
diff --git a/arch/arm26/kernel/asm-offsets.c b/arch/arm26/kernel/asm-offsets.c
new file mode 100644
index 000000000000..4ccacaef94df
--- /dev/null
+++ b/arch/arm26/kernel/asm-offsets.c
@@ -0,0 +1,63 @@
1/*
2 * Copyright (C) 1995-2001 Russell King
3 * 2001-2002 Keith Owens
4 * 2003 Ian Molton
5 *
6 * Generate definitions needed by assembly language modules.
7 * This code generates raw asm output which is post-processed to extract
8 * and format the required data.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 */
14
15#include <linux/config.h>
16#include <linux/sched.h>
17#include <linux/mm.h>
18
19#include <asm/pgtable.h>
20#include <asm/uaccess.h>
21
22/*
23 * Make sure that the compiler and target are compatible.
24 */
25#if defined(__APCS_32__) && defined(CONFIG_CPU_26)
26#error Sorry, your compiler targets APCS-32 but this kernel requires APCS-26
27#endif
28#if __GNUC__ < 2 || (__GNUC__ == 2 && __GNUC_MINOR__ < 95)
29#error Sorry, your compiler is known to miscompile kernels. Only use gcc 2.95.3 and later.
30#endif
31#if __GNUC__ == 2 && __GNUC_MINOR__ == 95
32/* shame we can't detect the .1 or .2 releases */
33#warning GCC 2.95.2 and earlier miscompiles kernels.
34#endif
35
36/* Use marker if you need to separate the values later */
37
38#define DEFINE(sym, val) \
39 asm volatile("\n->" #sym " %0 " #val : : "i" (val))
40
41#define BLANK() asm volatile("\n->" : : )
42
43int main(void)
44{
45 DEFINE(TSK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
46 BLANK();
47 DEFINE(VMA_VM_MM, offsetof(struct vm_area_struct, vm_mm));
48 DEFINE(VMA_VM_FLAGS, offsetof(struct vm_area_struct, vm_flags));
49 BLANK();
50 DEFINE(VM_EXEC, VM_EXEC);
51 BLANK();
52 BLANK();
53 DEFINE(PAGE_PRESENT, _PAGE_PRESENT);
54 DEFINE(PAGE_READONLY, _PAGE_READONLY);
55 DEFINE(PAGE_NOT_USER, _PAGE_NOT_USER);
56 DEFINE(PAGE_OLD, _PAGE_OLD);
57 DEFINE(PAGE_CLEAN, _PAGE_CLEAN);
58 BLANK();
59 DEFINE(PAGE_SZ, PAGE_SIZE);
60 BLANK();
61 DEFINE(SYS_ERROR0, 0x9f0000);
62 return 0;
63}
diff --git a/arch/arm26/kernel/calls.S b/arch/arm26/kernel/calls.S
new file mode 100644
index 000000000000..e3d276827c84
--- /dev/null
+++ b/arch/arm26/kernel/calls.S
@@ -0,0 +1,265 @@
1/*
2 * linux/arch/arm26/kernel/calls.S
3 *
4 * Copyright (C) 2003 Ian Molton
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * FIXME
11 * This file is included twice in entry.S which may not be necessary
12 */
13
14//FIXME - clearly NR_syscalls is never defined here
15
16#ifndef NR_syscalls
17#define NR_syscalls 256
18#else
19
20__syscall_start:
21/* 0 */ .long sys_ni_syscall
22 .long sys_exit
23 .long sys_fork_wrapper
24 .long sys_read
25 .long sys_write
26/* 5 */ .long sys_open
27 .long sys_close
28 .long sys_ni_syscall /* was sys_waitpid */
29 .long sys_creat
30 .long sys_link
31/* 10 */ .long sys_unlink
32 .long sys_execve_wrapper
33 .long sys_chdir
34 .long sys_time /* used by libc4 */
35 .long sys_mknod
36/* 15 */ .long sys_chmod
37 .long sys_lchown16
38 .long sys_ni_syscall /* was sys_break */
39 .long sys_ni_syscall /* was sys_stat */
40 .long sys_lseek
41/* 20 */ .long sys_getpid
42 .long sys_mount
43 .long sys_oldumount /* used by libc4 */
44 .long sys_setuid16
45 .long sys_getuid16
46/* 25 */ .long sys_stime
47 .long sys_ptrace
48 .long sys_alarm /* used by libc4 */
49 .long sys_ni_syscall /* was sys_fstat */
50 .long sys_pause
51/* 30 */ .long sys_utime /* used by libc4 */
52 .long sys_ni_syscall /* was sys_stty */
53 .long sys_ni_syscall /* was sys_getty */
54 .long sys_access
55 .long sys_nice
56/* 35 */ .long sys_ni_syscall /* was sys_ftime */
57 .long sys_sync
58 .long sys_kill
59 .long sys_rename
60 .long sys_mkdir
61/* 40 */ .long sys_rmdir
62 .long sys_dup
63 .long sys_pipe
64 .long sys_times
65 .long sys_ni_syscall /* was sys_prof */
66/* 45 */ .long sys_brk
67 .long sys_setgid16
68 .long sys_getgid16
69 .long sys_ni_syscall /* was sys_signal */
70 .long sys_geteuid16
71/* 50 */ .long sys_getegid16
72 .long sys_acct
73 .long sys_umount
74 .long sys_ni_syscall /* was sys_lock */
75 .long sys_ioctl
76/* 55 */ .long sys_fcntl
77 .long sys_ni_syscall /* was sys_mpx */
78 .long sys_setpgid
79 .long sys_ni_syscall /* was sys_ulimit */
80 .long sys_ni_syscall /* was sys_olduname */
81/* 60 */ .long sys_umask
82 .long sys_chroot
83 .long sys_ustat
84 .long sys_dup2
85 .long sys_getppid
86/* 65 */ .long sys_getpgrp
87 .long sys_setsid
88 .long sys_sigaction
89 .long sys_ni_syscall /* was sys_sgetmask */
90 .long sys_ni_syscall /* was sys_ssetmask */
91/* 70 */ .long sys_setreuid16
92 .long sys_setregid16
93 .long sys_sigsuspend_wrapper
94 .long sys_sigpending
95 .long sys_sethostname
96/* 75 */ .long sys_setrlimit
97 .long sys_old_getrlimit /* used by libc4 */
98 .long sys_getrusage
99 .long sys_gettimeofday
100 .long sys_settimeofday
101/* 80 */ .long sys_getgroups16
102 .long sys_setgroups16
103 .long old_select /* used by libc4 */
104 .long sys_symlink
105 .long sys_ni_syscall /* was sys_lstat */
106/* 85 */ .long sys_readlink
107 .long sys_uselib
108 .long sys_swapon
109 .long sys_reboot
110 .long old_readdir /* used by libc4 */
111/* 90 */ .long old_mmap /* used by libc4 */
112 .long sys_munmap
113 .long sys_truncate
114 .long sys_ftruncate
115 .long sys_fchmod
116/* 95 */ .long sys_fchown16
117 .long sys_getpriority
118 .long sys_setpriority
119 .long sys_ni_syscall /* was sys_profil */
120 .long sys_statfs
121/* 100 */ .long sys_fstatfs
122 .long sys_ni_syscall
123 .long sys_socketcall
124 .long sys_syslog
125 .long sys_setitimer
126/* 105 */ .long sys_getitimer
127 .long sys_newstat
128 .long sys_newlstat
129 .long sys_newfstat
130 .long sys_ni_syscall /* was sys_uname */
131/* 110 */ .long sys_ni_syscall /* was sys_iopl */
132 .long sys_vhangup
133 .long sys_ni_syscall
134 .long sys_syscall /* call a syscall */
135 .long sys_wait4
136/* 115 */ .long sys_swapoff
137 .long sys_sysinfo
138 .long sys_ipc
139 .long sys_fsync
140 .long sys_sigreturn_wrapper
141/* 120 */ .long sys_clone_wapper
142 .long sys_setdomainname
143 .long sys_newuname
144 .long sys_ni_syscall
145 .long sys_adjtimex
146/* 125 */ .long sys_mprotect
147 .long sys_sigprocmask
148 .long sys_ni_syscall /* WAS: sys_create_module */
149 .long sys_init_module
150 .long sys_delete_module
151/* 130 */ .long sys_ni_syscall /* WAS: sys_get_kernel_syms */
152 .long sys_quotactl
153 .long sys_getpgid
154 .long sys_fchdir
155 .long sys_bdflush
156/* 135 */ .long sys_sysfs
157 .long sys_personality
158 .long sys_ni_syscall /* .long _sys_afs_syscall */
159 .long sys_setfsuid16
160 .long sys_setfsgid16
161/* 140 */ .long sys_llseek
162 .long sys_getdents
163 .long sys_select
164 .long sys_flock
165 .long sys_msync
166/* 145 */ .long sys_readv
167 .long sys_writev
168 .long sys_getsid
169 .long sys_fdatasync
170 .long sys_sysctl
171/* 150 */ .long sys_mlock
172 .long sys_munlock
173 .long sys_mlockall
174 .long sys_munlockall
175 .long sys_sched_setparam
176/* 155 */ .long sys_sched_getparam
177 .long sys_sched_setscheduler
178 .long sys_sched_getscheduler
179 .long sys_sched_yield
180 .long sys_sched_get_priority_max
181/* 160 */ .long sys_sched_get_priority_min
182 .long sys_sched_rr_get_interval
183 .long sys_nanosleep
184 .long sys_arm_mremap
185 .long sys_setresuid16
186/* 165 */ .long sys_getresuid16
187 .long sys_ni_syscall
188 .long sys_ni_syscall /* WAS: sys_query_module */
189 .long sys_poll
190 .long sys_nfsservctl
191/* 170 */ .long sys_setresgid16
192 .long sys_getresgid16
193 .long sys_prctl
194 .long sys_rt_sigreturn_wrapper
195 .long sys_rt_sigaction
196/* 175 */ .long sys_rt_sigprocmask
197 .long sys_rt_sigpending
198 .long sys_rt_sigtimedwait
199 .long sys_rt_sigqueueinfo
200 .long sys_rt_sigsuspend_wrapper
201/* 180 */ .long sys_pread64
202 .long sys_pwrite64
203 .long sys_chown16
204 .long sys_getcwd
205 .long sys_capget
206/* 185 */ .long sys_capset
207 .long sys_sigaltstack_wrapper
208 .long sys_sendfile
209 .long sys_ni_syscall
210 .long sys_ni_syscall
211/* 190 */ .long sys_vfork_wrapper
212 .long sys_getrlimit
213 .long sys_mmap2
214 .long sys_truncate64
215 .long sys_ftruncate64
216/* 195 */ .long sys_stat64
217 .long sys_lstat64
218 .long sys_fstat64
219 .long sys_lchown
220 .long sys_getuid
221/* 200 */ .long sys_getgid
222 .long sys_geteuid
223 .long sys_getegid
224 .long sys_setreuid
225 .long sys_setregid
226/* 205 */ .long sys_getgroups
227 .long sys_setgroups
228 .long sys_fchown
229 .long sys_setresuid
230 .long sys_getresuid
231/* 210 */ .long sys_setresgid
232 .long sys_getresgid
233 .long sys_chown
234 .long sys_setuid
235 .long sys_setgid
236/* 215 */ .long sys_setfsuid
237 .long sys_setfsgid
238 .long sys_getdents64
239 .long sys_pivot_root
240 .long sys_mincore
241/* 220 */ .long sys_madvise
242 .long sys_fcntl64
243 .long sys_ni_syscall /* TUX */
244 .long sys_ni_syscall /* WAS: sys_security */
245 .long sys_gettid
246/* 225 */ .long sys_readahead
247 .long sys_setxattr
248 .long sys_lsetxattr
249 .long sys_fsetxattr
250 .long sys_getxattr
251/* 230 */ .long sys_lgetxattr
252 .long sys_fgetxattr
253 .long sys_listxattr
254 .long sys_llistxattr
255 .long sys_flistxattr
256/* 235 */ .long sys_removexattr
257 .long sys_lremovexattr
258 .long sys_fremovexattr
259 .long sys_tkill
260__syscall_end:
261
262 .rept NR_syscalls - (__syscall_end - __syscall_start) / 4
263 .long sys_ni_syscall
264 .endr
265#endif
diff --git a/arch/arm26/kernel/compat.c b/arch/arm26/kernel/compat.c
new file mode 100644
index 000000000000..db0310db8998
--- /dev/null
+++ b/arch/arm26/kernel/compat.c
@@ -0,0 +1,174 @@
1/*
2 * linux/arch/arm26/kernel/compat.c
3 *
4 * Copyright (C) 2001 Russell King
5 * 2003 Ian Molton
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * We keep the old params compatibility cruft in one place (here)
12 * so we don't end up with lots of mess around other places.
13 *
14 * NOTE:
15 * The old struct param_struct is deprecated, but it will be kept in
16 * the kernel for 5 years from now (2001). This will allow boot loaders
17 * to convert to the new struct tag way.
18 */
19#include <linux/config.h>
20#include <linux/types.h>
21#include <linux/kernel.h>
22#include <linux/string.h>
23#include <linux/init.h>
24
25#include <asm/setup.h>
26#include <asm/mach-types.h>
27#include <asm/page.h>
28
29//#include <asm/arch.h>
30//#include <asm/mach/irq.h>
31
32/*
33 * Usage:
34 * - do not go blindly adding fields, add them at the end
35 * - when adding fields, don't rely on the address until
36 * a patch from me has been released
37 * - unused fields should be zero (for future expansion)
38 * - this structure is relatively short-lived - only
39 * guaranteed to contain useful data in setup_arch()
40 *
41 * This is the old deprecated way to pass parameters to the kernel
42 */
43struct param_struct {
44 union {
45 struct {
46 unsigned long page_size; /* 0 */
47 unsigned long nr_pages; /* 4 */
48 unsigned long ramdisk_size; /* 8 */
49 unsigned long flags; /* 12 */
50#define FLAG_READONLY 1
51#define FLAG_RDLOAD 4
52#define FLAG_RDPROMPT 8
53 unsigned long rootdev; /* 16 */
54 unsigned long video_num_cols; /* 20 */
55 unsigned long video_num_rows; /* 24 */
56 unsigned long video_x; /* 28 */
57 unsigned long video_y; /* 32 */
58 unsigned long memc_control_reg; /* 36 */
59 unsigned char sounddefault; /* 40 */
60 unsigned char adfsdrives; /* 41 */
61 unsigned char bytes_per_char_h; /* 42 */
62 unsigned char bytes_per_char_v; /* 43 */
63 unsigned long pages_in_bank[4]; /* 44 */
64 unsigned long pages_in_vram; /* 60 */
65 unsigned long initrd_start; /* 64 */
66 unsigned long initrd_size; /* 68 */
67 unsigned long rd_start; /* 72 */
68 unsigned long system_rev; /* 76 */
69 unsigned long system_serial_low; /* 80 */
70 unsigned long system_serial_high; /* 84 */
71 unsigned long mem_fclk_21285; /* 88 */
72 } s;
73 char unused[256];
74 } u1;
75 union {
76 char paths[8][128];
77 struct {
78 unsigned long magic;
79 char n[1024 - sizeof(unsigned long)];
80 } s;
81 } u2;
82 char commandline[COMMAND_LINE_SIZE];
83};
84
85static struct tag * __init memtag(struct tag *tag, unsigned long start, unsigned long size)
86{
87 tag = tag_next(tag);
88 tag->hdr.tag = ATAG_MEM;
89 tag->hdr.size = tag_size(tag_mem32);
90 tag->u.mem.size = size;
91 tag->u.mem.start = start;
92
93 return tag;
94}
95
96static void __init build_tag_list(struct param_struct *params, void *taglist)
97{
98 struct tag *tag = taglist;
99
100 if (params->u1.s.page_size != PAGE_SIZE) {
101 printk(KERN_WARNING "Warning: bad configuration page, "
102 "trying to continue\n");
103 return;
104 }
105
106 printk(KERN_DEBUG "Converting old-style param struct to taglist\n");
107
108 tag->hdr.tag = ATAG_CORE;
109 tag->hdr.size = tag_size(tag_core);
110 tag->u.core.flags = params->u1.s.flags & FLAG_READONLY;
111 tag->u.core.pagesize = params->u1.s.page_size;
112 tag->u.core.rootdev = params->u1.s.rootdev;
113
114 tag = tag_next(tag);
115 tag->hdr.tag = ATAG_RAMDISK;
116 tag->hdr.size = tag_size(tag_ramdisk);
117 tag->u.ramdisk.flags = (params->u1.s.flags & FLAG_RDLOAD ? 1 : 0) |
118 (params->u1.s.flags & FLAG_RDPROMPT ? 2 : 0);
119 tag->u.ramdisk.size = params->u1.s.ramdisk_size;
120 tag->u.ramdisk.start = params->u1.s.rd_start;
121
122 tag = tag_next(tag);
123 tag->hdr.tag = ATAG_INITRD;
124 tag->hdr.size = tag_size(tag_initrd);
125 tag->u.initrd.start = params->u1.s.initrd_start;
126 tag->u.initrd.size = params->u1.s.initrd_size;
127
128 tag = tag_next(tag);
129 tag->hdr.tag = ATAG_SERIAL;
130 tag->hdr.size = tag_size(tag_serialnr);
131 tag->u.serialnr.low = params->u1.s.system_serial_low;
132 tag->u.serialnr.high = params->u1.s.system_serial_high;
133
134 tag = tag_next(tag);
135 tag->hdr.tag = ATAG_REVISION;
136 tag->hdr.size = tag_size(tag_revision);
137 tag->u.revision.rev = params->u1.s.system_rev;
138
139 tag = memtag(tag, PHYS_OFFSET, params->u1.s.nr_pages * PAGE_SIZE);
140
141 tag = tag_next(tag);
142 tag->hdr.tag = ATAG_ACORN;
143 tag->hdr.size = tag_size(tag_acorn);
144 tag->u.acorn.memc_control_reg = params->u1.s.memc_control_reg;
145 tag->u.acorn.vram_pages = params->u1.s.pages_in_vram;
146 tag->u.acorn.sounddefault = params->u1.s.sounddefault;
147 tag->u.acorn.adfsdrives = params->u1.s.adfsdrives;
148
149 tag = tag_next(tag);
150 tag->hdr.tag = ATAG_CMDLINE;
151 tag->hdr.size = (strlen(params->commandline) + 3 +
152 sizeof(struct tag_header)) >> 2;
153 strcpy(tag->u.cmdline.cmdline, params->commandline);
154
155 tag = tag_next(tag);
156 tag->hdr.tag = ATAG_NONE;
157 tag->hdr.size = 0;
158
159 memmove(params, taglist, ((int)tag) - ((int)taglist) +
160 sizeof(struct tag_header));
161}
162
163void __init convert_to_tag_list(struct tag *tags)
164{
165 struct param_struct *params = (struct param_struct *)tags;
166 build_tag_list(params, &params->u2);
167}
168
169void __init squash_mem_tags(struct tag *tag)
170{
171 for (; tag->hdr.size; tag = tag_next(tag))
172 if (tag->hdr.tag == ATAG_MEM)
173 tag->hdr.tag = ATAG_NONE;
174}
diff --git a/arch/arm26/kernel/dma.c b/arch/arm26/kernel/dma.c
new file mode 100644
index 000000000000..80b5a774d905
--- /dev/null
+++ b/arch/arm26/kernel/dma.c
@@ -0,0 +1,273 @@
1/*
2 * linux/arch/arm26/kernel/dma.c
3 *
4 * Copyright (C) 1995-2000 Russell King
5 * 2003 Ian Molton
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * Front-end to the DMA handling. This handles the allocation/freeing
12 * of DMA channels, and provides a unified interface to the machines
13 * DMA facilities.
14 */
15#include <linux/module.h>
16#include <linux/slab.h>
17#include <linux/sched.h>
18#include <linux/mman.h>
19#include <linux/init.h>
20#include <linux/spinlock.h>
21#include <linux/errno.h>
22
23#include <asm/dma.h>
24
25DEFINE_SPINLOCK(dma_spin_lock);
26
27static dma_t dma_chan[MAX_DMA_CHANNELS];
28
29/*
30 * Get dma list for /proc/dma
31 */
32int get_dma_list(char *buf)
33{
34 dma_t *dma;
35 char *p = buf;
36 int i;
37
38 for (i = 0, dma = dma_chan; i < MAX_DMA_CHANNELS; i++, dma++)
39 if (dma->lock)
40 p += sprintf(p, "%2d: %14s %s\n", i,
41 dma->d_ops->type, dma->device_id);
42
43 return p - buf;
44}
45
46/*
47 * Request DMA channel
48 *
49 * On certain platforms, we have to allocate an interrupt as well...
50 */
51int request_dma(dmach_t channel, const char *device_id)
52{
53 dma_t *dma = dma_chan + channel;
54 int ret;
55
56 if (channel >= MAX_DMA_CHANNELS || !dma->d_ops)
57 goto bad_dma;
58
59 if (xchg(&dma->lock, 1) != 0)
60 goto busy;
61
62 dma->device_id = device_id;
63 dma->active = 0;
64 dma->invalid = 1;
65
66 ret = 0;
67 if (dma->d_ops->request)
68 ret = dma->d_ops->request(channel, dma);
69
70 if (ret)
71 xchg(&dma->lock, 0);
72
73 return ret;
74
75bad_dma:
76 printk(KERN_ERR "dma: trying to allocate DMA%d\n", channel);
77 return -EINVAL;
78
79busy:
80 return -EBUSY;
81}
82
83/*
84 * Free DMA channel
85 *
86 * On certain platforms, we have to free interrupt as well...
87 */
88void free_dma(dmach_t channel)
89{
90 dma_t *dma = dma_chan + channel;
91
92 if (channel >= MAX_DMA_CHANNELS || !dma->d_ops)
93 goto bad_dma;
94
95 if (dma->active) {
96 printk(KERN_ERR "dma%d: freeing active DMA\n", channel);
97 dma->d_ops->disable(channel, dma);
98 dma->active = 0;
99 }
100
101 if (xchg(&dma->lock, 0) != 0) {
102 if (dma->d_ops->free)
103 dma->d_ops->free(channel, dma);
104 return;
105 }
106
107 printk(KERN_ERR "dma%d: trying to free free DMA\n", channel);
108 return;
109
110bad_dma:
111 printk(KERN_ERR "dma: trying to free DMA%d\n", channel);
112}
113
114/* Set DMA Scatter-Gather list
115 */
116void set_dma_sg (dmach_t channel, struct scatterlist *sg, int nr_sg)
117{
118 dma_t *dma = dma_chan + channel;
119
120 if (dma->active)
121 printk(KERN_ERR "dma%d: altering DMA SG while "
122 "DMA active\n", channel);
123
124 dma->sg = sg;
125 dma->sgcount = nr_sg;
126 dma->using_sg = 1;
127 dma->invalid = 1;
128}
129
130/* Set DMA address
131 *
132 * Copy address to the structure, and set the invalid bit
133 */
134void set_dma_addr (dmach_t channel, unsigned long physaddr)
135{
136 dma_t *dma = dma_chan + channel;
137
138 if (dma->active)
139 printk(KERN_ERR "dma%d: altering DMA address while "
140 "DMA active\n", channel);
141
142 dma->sg = &dma->buf;
143 dma->sgcount = 1;
144 dma->buf.__address = (char *)physaddr;//FIXME - not pretty
145 dma->using_sg = 0;
146 dma->invalid = 1;
147}
148
149/* Set DMA byte count
150 *
151 * Copy address to the structure, and set the invalid bit
152 */
153void set_dma_count (dmach_t channel, unsigned long count)
154{
155 dma_t *dma = dma_chan + channel;
156
157 if (dma->active)
158 printk(KERN_ERR "dma%d: altering DMA count while "
159 "DMA active\n", channel);
160
161 dma->sg = &dma->buf;
162 dma->sgcount = 1;
163 dma->buf.length = count;
164 dma->using_sg = 0;
165 dma->invalid = 1;
166}
167
168/* Set DMA direction mode
169 */
170void set_dma_mode (dmach_t channel, dmamode_t mode)
171{
172 dma_t *dma = dma_chan + channel;
173
174 if (dma->active)
175 printk(KERN_ERR "dma%d: altering DMA mode while "
176 "DMA active\n", channel);
177
178 dma->dma_mode = mode;
179 dma->invalid = 1;
180}
181
182/* Enable DMA channel
183 */
184void enable_dma (dmach_t channel)
185{
186 dma_t *dma = dma_chan + channel;
187
188 if (!dma->lock)
189 goto free_dma;
190
191 if (dma->active == 0) {
192 dma->active = 1;
193 dma->d_ops->enable(channel, dma);
194 }
195 return;
196
197free_dma:
198 printk(KERN_ERR "dma%d: trying to enable free DMA\n", channel);
199 BUG();
200}
201
202/* Disable DMA channel
203 */
204void disable_dma (dmach_t channel)
205{
206 dma_t *dma = dma_chan + channel;
207
208 if (!dma->lock)
209 goto free_dma;
210
211 if (dma->active == 1) {
212 dma->active = 0;
213 dma->d_ops->disable(channel, dma);
214 }
215 return;
216
217free_dma:
218 printk(KERN_ERR "dma%d: trying to disable free DMA\n", channel);
219 BUG();
220}
221
222/*
223 * Is the specified DMA channel active?
224 */
225int dma_channel_active(dmach_t channel)
226{
227 return dma_chan[channel].active;
228}
229
230void set_dma_page(dmach_t channel, char pagenr)
231{
232 printk(KERN_ERR "dma%d: trying to set_dma_page\n", channel);
233}
234
235void set_dma_speed(dmach_t channel, int cycle_ns)
236{
237 dma_t *dma = dma_chan + channel;
238 int ret = 0;
239
240 if (dma->d_ops->setspeed)
241 ret = dma->d_ops->setspeed(channel, dma, cycle_ns);
242 dma->speed = ret;
243}
244
245int get_dma_residue(dmach_t channel)
246{
247 dma_t *dma = dma_chan + channel;
248 int ret = 0;
249
250 if (dma->d_ops->residue)
251 ret = dma->d_ops->residue(channel, dma);
252
253 return ret;
254}
255
256void __init init_dma(void)
257{
258 arch_dma_init(dma_chan);
259}
260
261EXPORT_SYMBOL(request_dma);
262EXPORT_SYMBOL(free_dma);
263EXPORT_SYMBOL(enable_dma);
264EXPORT_SYMBOL(disable_dma);
265EXPORT_SYMBOL(set_dma_addr);
266EXPORT_SYMBOL(set_dma_count);
267EXPORT_SYMBOL(set_dma_mode);
268EXPORT_SYMBOL(set_dma_page);
269EXPORT_SYMBOL(get_dma_residue);
270EXPORT_SYMBOL(set_dma_sg);
271EXPORT_SYMBOL(set_dma_speed);
272
273EXPORT_SYMBOL(dma_spin_lock);
diff --git a/arch/arm26/kernel/ecard.c b/arch/arm26/kernel/ecard.c
new file mode 100644
index 000000000000..824c6b571ad9
--- /dev/null
+++ b/arch/arm26/kernel/ecard.c
@@ -0,0 +1,850 @@
1/*
2 * linux/arch/arm26/kernel/ecard.c
3 *
4 * Copyright 1995-2001 Russell King
5 * Copyright 2003 Ian Molton
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * Find all installed expansion cards, and handle interrupts from them.
12 *
13 * Created from information from Acorn's RiscOS3 PRMs
14 * 15-Jun-2003 IM Modified from ARM32 (RiscPC capable) version
15 * 10-Jan-1999 RMK Run loaders in a simulated RISC OS environment.
16 * 06-May-1997 RMK Added blacklist for cards whose loader doesn't work.
17 * 12-Sep-1997 RMK Created new handling of interrupt enables/disables
18 * - cards can now register their own routine to control
19 * interrupts (recommended).
20 * 29-Sep-1997 RMK Expansion card interrupt hardware not being re-enabled
21 * on reset from Linux. (Caused cards not to respond
22 * under RiscOS without hard reset).
23 *
24 */
25#define ECARD_C
26
27#include <linux/config.h>
28#include <linux/module.h>
29#include <linux/kernel.h>
30#include <linux/types.h>
31#include <linux/sched.h>
32#include <linux/interrupt.h>
33#include <linux/reboot.h>
34#include <linux/mm.h>
35#include <linux/slab.h>
36#include <linux/proc_fs.h>
37#include <linux/device.h>
38#include <linux/init.h>
39
40#include <asm/dma.h>
41#include <asm/ecard.h>
42#include <asm/hardware.h>
43#include <asm/io.h>
44#include <asm/irq.h>
45#include <asm/mmu_context.h>
46#include <asm/irqchip.h>
47#include <asm/tlbflush.h>
48
49enum req {
50 req_readbytes,
51 req_reset
52};
53
54struct ecard_request {
55 enum req req;
56 ecard_t *ec;
57 unsigned int address;
58 unsigned int length;
59 unsigned int use_loader;
60 void *buffer;
61};
62
63struct expcard_blacklist {
64 unsigned short manufacturer;
65 unsigned short product;
66 const char *type;
67};
68
69static ecard_t *cards;
70static ecard_t *slot_to_expcard[MAX_ECARDS];
71static unsigned int ectcr;
72
73/* List of descriptions of cards which don't have an extended
74 * identification, or chunk directories containing a description.
75 */
76static struct expcard_blacklist __initdata blacklist[] = {
77 { MANU_ACORN, PROD_ACORN_ETHER1, "Acorn Ether1" }
78};
79
80asmlinkage extern int
81ecard_loader_reset(volatile unsigned char *pa, loader_t loader);
82asmlinkage extern int
83ecard_loader_read(int off, volatile unsigned char *pa, loader_t loader);
84
85static const struct ecard_id *
86ecard_match_device(const struct ecard_id *ids, struct expansion_card *ec);
87
/*
 * Read a little-endian 16-bit value from two consecutive bytes.
 */
static inline unsigned short
ecard_getu16(unsigned char *v)
{
	unsigned short lo = v[0];
	unsigned short hi = v[1];

	return (unsigned short)(lo | (hi << 8));
}
93
/*
 * Read a little-endian 24-bit signed value and sign-extend bit 23
 * into the top byte of a 32-bit quantity.
 */
static inline signed long
ecard_gets24(unsigned char *v)
{
	signed long val = v[0] | (v[1] << 8) | (v[2] << 16);

	if (v[2] & 0x80)
		val |= 0xff000000;

	return val;
}
99
100static inline ecard_t *
101slot_to_ecard(unsigned int slot)
102{
103 return slot < MAX_ECARDS ? slot_to_expcard[slot] : NULL;
104}
105
106/* ===================== Expansion card daemon ======================== */
107/*
108 * Since the loader programs on the expansion cards need to be run
109 * in a specific environment, create a separate task with this
110 * environment up, and pass requests to this task as and when we
111 * need to.
112 *
113 * This should allow 99% of loaders to be called from Linux.
114 *
115 * From a security standpoint, we trust the card vendors. This
116 * may be a misplaced trust.
117 */
118#define BUS_ADDR(x) ((((unsigned long)(x)) << 2) + IO_BASE)
119#define POD_INT_ADDR(x) ((volatile unsigned char *)\
120 ((BUS_ADDR((x)) - IO_BASE) + IO_START))
121
/*
 * Reset one expansion card by running its on-card loader's reset
 * entry; cards without a loader need no action.
 */
static inline void ecard_task_reset(struct ecard_request *req)
{
	struct expansion_card *ec = req->ec;
	if (ec->loader)
		ecard_loader_reset(POD_INT_ADDR(ec->podaddr), ec->loader);
}
128
/*
 * Copy req->length bytes of a card's ROM into req->buffer, starting
 * at byte offset req->address.  Without a loader the ROM is read
 * directly: each ROM byte occupies one 32-bit bus word, hence the *4
 * stride.  With a loader, each byte is fetched through the card's own
 * read routine instead.
 */
static void
ecard_task_readbytes(struct ecard_request *req)
{
	unsigned char *buf = (unsigned char *)req->buffer;
	volatile unsigned char *base_addr =
		(volatile unsigned char *)POD_INT_ADDR(req->ec->podaddr);
	unsigned int len = req->length;
	unsigned int off = req->address;

	if (!req->use_loader || !req->ec->loader) {
		off *= 4;	/* byte index -> bus-word address */
		while (len--) {
			*buf++ = base_addr[off];
			off += 4;
		}
	} else {
		while(len--) {
			/*
			 * The following is required by some
			 * expansion card loader programs.
			 */
			*(unsigned long *)0x108 = 0;
			*buf++ = ecard_loader_read(off++, base_addr,
						   req->ec->loader);
		}
	}
}
156
157static void ecard_do_request(struct ecard_request *req)
158{
159 switch (req->req) {
160 case req_readbytes:
161 ecard_task_readbytes(req);
162 break;
163
164 case req_reset:
165 ecard_task_reset(req);
166 break;
167 }
168}
169
170/*
171 * On 26-bit processors, we don't need the kcardd thread to access the
172 * expansion card loaders. We do it directly.
173 */
174#define ecard_call(req) ecard_do_request(req)
175
176/* ======================= Mid-level card control ===================== */
177
178static void
179ecard_readbytes(void *addr, ecard_t *ec, int off, int len, int useld)
180{
181 struct ecard_request req;
182
183 req.req = req_readbytes;
184 req.ec = ec;
185 req.address = off;
186 req.length = len;
187 req.use_loader = useld;
188 req.buffer = addr;
189
190 ecard_call(&req);
191}
192
/*
 * Walk the card's chunk directory looking for the num'th chunk of
 * type "id"; on success copy its start offset and up to 256 bytes of
 * data into *cd and return 1, otherwise return 0.
 *
 * While scanning: chunk 0xf0 is a link to another directory position;
 * a loader chunk (0x80) found along the way is read and cached in
 * ec->loader; and when the plain ROM directory ends without a match,
 * the scan restarts once through the loader.
 */
int ecard_readchunk(struct in_chunk_dir *cd, ecard_t *ec, int id, int num)
{
	struct ex_chunk_dir excd;
	int index = 16;
	int useld = 0;

	if (!ec->cid.cd)
		return 0;

	while(1) {
		ecard_readbytes(&excd, ec, index, 8, useld);
		index += 8;
		if (c_id(&excd) == 0) {
			/* end of directory: retry once through the loader */
			if (!useld && ec->loader) {
				useld = 1;
				index = 0;
				continue;
			}
			return 0;
		}
		if (c_id(&excd) == 0xf0) {	/* link */
			index = c_start(&excd);
			continue;
		}
		if (c_id(&excd) == 0x80) {	/* loader */
			if (!ec->loader) {
				ec->loader = (loader_t)kmalloc(c_len(&excd),
							       GFP_KERNEL);
				if (ec->loader)
					ecard_readbytes(ec->loader, ec,
							(int)c_start(&excd),
							c_len(&excd), useld);
				else
					return 0;
			}
			continue;
		}
		if (c_id(&excd) == id && num-- == 0)
			break;
	}

	if (c_id(&excd) & 0x80) {
		switch (c_id(&excd) & 0x70) {
		case 0x70:	/* string chunk: fetch the text itself */
			ecard_readbytes((unsigned char *)excd.d.string, ec,
					(int)c_start(&excd), c_len(&excd),
					useld);
			break;
		case 0x00:
			break;
		}
	}
	cd->start_offset = c_start(&excd);
	memcpy(cd->d.string, excd.d.string, 256);
	return 1;
}
249
250/* ======================= Interrupt control ============================ */
251
/* Default operations, used for cards whose driver has not registered
 * its own expansioncard_ops_t. */

/* Default IRQ enable/disable are no-ops: there is no per-card mask,
 * masking happens at the backplane interrupt instead. */
static void ecard_def_irq_enable(ecard_t *ec, int irqnr)
{
}

static void ecard_def_irq_disable(ecard_t *ec, int irqnr)
{
}

/* A card with no irqmask is assumed to always have an IRQ pending;
 * otherwise test the card's status byte against its mask. */
static int ecard_def_irq_pending(ecard_t *ec)
{
	return !ec->irqmask || ec->irqaddr[0] & ec->irqmask;
}

/* FIQs cannot be driven through the default ops. */
static void ecard_def_fiq_enable(ecard_t *ec, int fiqnr)
{
	panic("ecard_def_fiq_enable called - impossible");
}

static void ecard_def_fiq_disable(ecard_t *ec, int fiqnr)
{
	panic("ecard_def_fiq_disable called - impossible");
}

static int ecard_def_fiq_pending(ecard_t *ec)
{
	return !ec->fiqmask || ec->fiqaddr[0] & ec->fiqmask;
}

static expansioncard_ops_t ecard_default_ops = {
	ecard_def_irq_enable,
	ecard_def_irq_disable,
	ecard_def_irq_pending,
	ecard_def_fiq_enable,
	ecard_def_fiq_disable,
	ecard_def_fiq_pending
};
288
/*
 * Enable and disable interrupts from expansion cards.
 * (interrupts are disabled for these functions).
 *
 * They are not meant to be called directly, but via enable/disable_irq.
 */
static void ecard_irq_unmask(unsigned int irqnr)
{
	/* expansion card IRQs are numbered from 32 upwards */
	ecard_t *ec = slot_to_ecard(irqnr - 32);

	if (ec) {
		if (!ec->ops)
			ec->ops = &ecard_default_ops;

		/* only a claimed card may have its IRQ enabled: nobody
		 * would service (and clear) an unclaimed card's IRQ */
		if (ec->claimed && ec->ops->irqenable)
			ec->ops->irqenable(ec, irqnr);
		else
			printk(KERN_ERR "ecard: rejecting request to "
			       "enable IRQs for %d\n", irqnr);
	}
}
310
311static void ecard_irq_mask(unsigned int irqnr)
312{
313 ecard_t *ec = slot_to_ecard(irqnr - 32);
314
315 if (ec) {
316 if (!ec->ops)
317 ec->ops = &ecard_default_ops;
318
319 if (ec->ops && ec->ops->irqdisable)
320 ec->ops->irqdisable(ec, irqnr);
321 }
322}
323
324static struct irqchip ecard_chip = {
325 .ack = ecard_irq_mask,
326 .mask = ecard_irq_mask,
327 .unmask = ecard_irq_unmask,
328};
329
/*
 * Enable FIQs from a card.  Note: unlike the IRQ path, the FIQ number
 * maps directly to the slot (no offset of 32).
 */
void ecard_enablefiq(unsigned int fiqnr)
{
	ecard_t *ec = slot_to_ecard(fiqnr);

	if (ec) {
		if (!ec->ops)
			ec->ops = &ecard_default_ops;

		/* only a claimed card may have its FIQ enabled */
		if (ec->claimed && ec->ops->fiqenable)
			ec->ops->fiqenable(ec, fiqnr);
		else
			printk(KERN_ERR "ecard: rejecting request to "
			       "enable FIQs for %d\n", fiqnr);
	}
}
345
/*
 * Disable FIQs from a card; allowed even for unclaimed cards so a
 * misbehaving card can always be silenced.
 */
void ecard_disablefiq(unsigned int fiqnr)
{
	ecard_t *ec = slot_to_ecard(fiqnr);

	if (ec) {
		if (!ec->ops)
			ec->ops = &ecard_default_ops;

		if (ec->ops->fiqdisable)
			ec->ops->fiqdisable(ec, fiqnr);
	}
}
358
/*
 * Print one card's claim state and whether it has an IRQ pending;
 * used by the interrupt-lockup diagnostic below.
 */
static void
ecard_dump_irq_state(ecard_t *ec)
{
	printk(" %d: %sclaimed, ",
	       ec->slot_no,
	       ec->claimed ? "" : "not ");

	/* prefer the card's own pending test; fall back to dumping the
	 * raw status byte for default-ops cards */
	if (ec->ops && ec->ops->irqpending &&
	    ec->ops != &ecard_default_ops)
		printk("irq %spending\n",
		       ec->ops->irqpending(ec) ? "" : "not ");
	else
		printk("irqaddr %p, mask = %02X, status = %02X\n",
		       ec->irqaddr, ec->irqmask, *ec->irqaddr);
}
374
375static void ecard_check_lockup(struct irqdesc *desc)
376{
377 static int last, lockup;
378 ecard_t *ec;
379
380 /*
381 * If the timer interrupt has not run since the last million
382 * unrecognised expansion card interrupts, then there is
383 * something seriously wrong. Disable the expansion card
384 * interrupts so at least we can continue.
385 *
386 * Maybe we ought to start a timer to re-enable them some time
387 * later?
388 */
389 if (last == jiffies) {
390 lockup += 1;
391 if (lockup > 1000000) {
392 printk(KERN_ERR "\nInterrupt lockup detected - "
393 "disabling all expansion card interrupts\n");
394
395 desc->chip->mask(IRQ_EXPANSIONCARD);
396
397 printk("Expansion card IRQ state:\n");
398
399 for (ec = cards; ec; ec = ec->next)
400 ecard_dump_irq_state(ec);
401 }
402 } else
403 lockup = 0;
404
405 /*
406 * If we did not recognise the source of this interrupt,
407 * warn the user, but don't flood the user with these messages.
408 */
409 if (!last || time_after(jiffies, (unsigned long)(last + 5*HZ))) {
410 last = jiffies;
411 printk(KERN_WARNING "Unrecognised interrupt from backplane\n");
412 }
413}
414
/*
 * Demultiplex the shared backplane interrupt: poll every claimed card
 * for a pending IRQ and invoke that card's handler.  If no card
 * claims the interrupt, run the lockup/unknown-source diagnostics.
 */
static void
ecard_irq_handler(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
{
	ecard_t *ec;
	int called = 0;

	desc->chip->mask(irq);	/* mask the backplane while polling */
	for (ec = cards; ec; ec = ec->next) {
		int pending;

		if (!ec->claimed || ec->irq == NO_IRQ)
			continue;

		if (ec->ops && ec->ops->irqpending)
			pending = ec->ops->irqpending(ec);
		else
			pending = ecard_default_ops.irqpending(ec);

		if (pending) {
			struct irqdesc *d = irq_desc + ec->irq;
			d->handle(ec->irq, d, regs);
			called ++;
		}
	}
	desc->chip->unmask(irq);

	if (called == 0)
		ecard_check_lockup(desc);
}
444
445#define ecard_irqexp_handler NULL
446#define ecard_probeirqhw() (0)
447
/*
 * Return the bus address of a card slot's podule space for the given
 * card type and IOC access speed, or 0 for an unsupported type.  Also
 * clears the slot's bit in the ectcr soft copy.
 *
 * NOTE(review): "address" is unsigned long but the return type is
 * unsigned int -- equivalent on this 32-bit architecture, but the
 * types should agree.
 */
unsigned int ecard_address(ecard_t *ec, card_type_t type, card_speed_t speed)
{
	unsigned long address = 0;
	int slot = ec->slot_no;

	ectcr &= ~(1 << slot);

	switch (type) {
	case ECARD_MEMC:
		address = IO_EC_MEMC_BASE + (slot << 12);
		break;

	case ECARD_IOC:
		/* speed selects one of the IOC timing banks */
		address = IO_EC_IOC_BASE + (slot << 12) + (speed << 17);
		break;

	default:
		break;
	}

	return address;
}
470
/*
 * Format one /proc line describing a card into "buffer"; returns the
 * number of characters written.  For extended-id cards the card's
 * description chunk is read (and cached in card_desc) on first use.
 *
 * NOTE(review): the (char *) cast below writes through card_desc's
 * const, and the kmalloc'd string is never freed -- confirm intended
 * ownership of card_desc.
 */
static int ecard_prints(char *buffer, ecard_t *ec)
{
	char *start = buffer;

	buffer += sprintf(buffer, " %d: ", ec->slot_no);

	if (ec->cid.id == 0) {
		struct in_chunk_dir incd;

		buffer += sprintf(buffer, "[%04X:%04X] ",
			ec->cid.manufacturer, ec->cid.product);

		if (!ec->card_desc && ec->cid.cd &&
		    ecard_readchunk(&incd, ec, 0xf5, 0)) {
			ec->card_desc = kmalloc(strlen(incd.d.string)+1, GFP_KERNEL);

			if (ec->card_desc)
				strcpy((char *)ec->card_desc, incd.d.string);
		}

		buffer += sprintf(buffer, "%s\n", ec->card_desc ? ec->card_desc : "*unknown*");
	} else
		buffer += sprintf(buffer, "Simple card %d\n", ec->cid.id);

	return buffer - start;
}
497
/*
 * Legacy procfs get_info read callback: walk the card list, format
 * each card with ecard_prints(), honouring the requested file offset
 * "pos" and byte budget "count".  *start and the return value follow
 * the get_info protocol (where in buf valid data begins, how much).
 */
static int get_ecard_dev_info(char *buf, char **start, off_t pos, int count)
{
	ecard_t *ec = cards;
	off_t at = 0;
	int len, cnt;

	cnt = 0;
	while (ec && count > cnt) {
		len = ecard_prints(buf, ec);
		at += len;
		if (at >= pos) {
			if (!*start) {
				/* first line reaching "pos": report where
				 * inside buf the caller's data starts */
				*start = buf + (pos - (at - len));
				cnt = at - pos;
			} else
				cnt += len;
			buf += len;
		}
		ec = ec->next;
	}
	return (count > cnt) ? cnt : count;
}
520
/* /proc/bus/ecard directory, created by ecard_proc_init() */
static struct proc_dir_entry *proc_bus_ecard_dir = NULL;

/*
 * Create /proc/bus/ecard/devices, listing all detected cards.
 * NOTE(review): the proc_mkdir() result is not checked -- on failure a
 * NULL parent is passed to create_proc_info_entry(); confirm that is
 * acceptable here.
 */
static void ecard_proc_init(void)
{
	proc_bus_ecard_dir = proc_mkdir("ecard", proc_bus);
	create_proc_info_entry("devices", 0, proc_bus_ecard_dir,
			       get_ecard_dev_info);
}
529
/*
 * Fill in resource slot "nr" of card "ec": range [st, st+sz-1] with
 * flags "flg".  Every macro argument is parenthesised so expression
 * arguments (e.g. "base + (slot << 14)", "i + ECARD_RES_IOCSLOW")
 * expand safely -- the original left ec/nr/st/flg unparenthesised.
 */
#define ec_set_resource(ec,nr,st,sz,flg)			\
	do {							\
		(ec)->resource[(nr)].name = (ec)->dev.bus_id;	\
		(ec)->resource[(nr)].start = (st);		\
		(ec)->resource[(nr)].end = (st) + (sz) - 1;	\
		(ec)->resource[(nr)].flags = (flg);		\
	} while (0)
537
/*
 * Describe and claim the memory regions (the MEMC podule space plus
 * the IOC speed-banked spaces) belonging to one card.  A region that
 * cannot be claimed is zeroed out rather than treated as fatal.
 */
static void __init ecard_init_resources(struct expansion_card *ec)
{
	unsigned long base = PODSLOT_IOC0_BASE;
	unsigned int slot = ec->slot_no;
	int i;

	ec_set_resource(ec, ECARD_RES_MEMC,
			PODSLOT_MEMC_BASE + (slot << 14),
			PODSLOT_MEMC_SIZE, IORESOURCE_MEM);

	/* the IOC speed banks are 512K (1 << 19) apart */
	for (i = 0; i < ECARD_RES_IOCSYNC - ECARD_RES_IOCSLOW; i++) {
		ec_set_resource(ec, i + ECARD_RES_IOCSLOW,
				base + (slot << 14) + (i << 19),
				PODSLOT_IOC_SIZE, IORESOURCE_MEM);
	}

	for (i = 0; i < ECARD_NUM_RESOURCES; i++) {
		if (ec->resource[i].start &&
		    request_resource(&iomem_resource, &ec->resource[i])) {
			printk(KERN_ERR "%s: resource(s) not available\n",
			       ec->dev.bus_id);
			ec->resource[i].end -= ec->resource[i].start;
			ec->resource[i].start = 0;
		}
	}
}
564
/* sysfs attribute accessors: each formats a single card property. */

static ssize_t ecard_show_irq(struct device *dev, char *buf)
{
	struct expansion_card *ec = ECARD_DEV(dev);
	return sprintf(buf, "%u\n", ec->irq);
}

static ssize_t ecard_show_vendor(struct device *dev, char *buf)
{
	struct expansion_card *ec = ECARD_DEV(dev);
	return sprintf(buf, "%u\n", ec->cid.manufacturer);
}

static ssize_t ecard_show_device(struct device *dev, char *buf)
{
	struct expansion_card *ec = ECARD_DEV(dev);
	return sprintf(buf, "%u\n", ec->cid.product);
}

static ssize_t ecard_show_dma(struct device *dev, char *buf)
{
	struct expansion_card *ec = ECARD_DEV(dev);
	return sprintf(buf, "%u\n", ec->dma);
}

/* one "start end flags" line per resource region */
static ssize_t ecard_show_resources(struct device *dev, char *buf)
{
	struct expansion_card *ec = ECARD_DEV(dev);
	char *str = buf;
	int i;

	for (i = 0; i < ECARD_NUM_RESOURCES; i++)
		str += sprintf(str, "%08lx %08lx %08lx\n",
			       ec->resource[i].start,
			       ec->resource[i].end,
			       ec->resource[i].flags);

	return str - buf;
}

static DEVICE_ATTR(irq, S_IRUGO, ecard_show_irq, NULL);
static DEVICE_ATTR(vendor, S_IRUGO, ecard_show_vendor, NULL);
static DEVICE_ATTR(device, S_IRUGO, ecard_show_device, NULL);
static DEVICE_ATTR(dma, S_IRUGO, ecard_show_dma, NULL);
static DEVICE_ATTR(resource, S_IRUGO, ecard_show_resources, NULL);
609
/*
 * Probe for an expansion card.
 *
 * If bit 1 of the first byte of the card is set, then the
 * card does not exist.
 *
 * Reads the card identity, sets up IRQ/FIQ status pointers, claims
 * resources, hooks the card's interrupt, links it onto the global
 * list and registers it with the driver model.  Returns 0 on success,
 * -ENOMEM/-ENODEV on failure.
 */
static int __init
ecard_probe(int slot, card_type_t type)
{
	ecard_t **ecp;
	ecard_t *ec;
	struct ex_ecid cid;
	int i, rc = -ENOMEM;

	ec = kmalloc(sizeof(ecard_t), GFP_KERNEL);
	if (!ec)
		goto nomem;

	memset(ec, 0, sizeof(ecard_t));

	ec->slot_no = slot;
	ec->type = type;
	ec->irq = NO_IRQ;
	ec->fiq = NO_IRQ;
	ec->dma = NO_DMA;
	ec->card_desc = NULL;
	ec->ops = &ecard_default_ops;

	rc = -ENODEV;
	if ((ec->podaddr = ecard_address(ec, type, ECARD_SYNC)) == 0)
		goto nodev;

	/* a present card clears r_zero when its id is read; if it stays
	 * set there is no card in this slot */
	cid.r_zero = 1;
	ecard_readbytes(&cid, ec, 0, 16, 0);
	if (cid.r_zero)
		goto nodev;

	ec->cid.id = cid.r_id;
	ec->cid.cd = cid.r_cd;
	ec->cid.is = cid.r_is;
	ec->cid.w = cid.r_w;
	ec->cid.manufacturer = ecard_getu16(cid.r_manu);
	ec->cid.product = ecard_getu16(cid.r_prod);
	ec->cid.country = cid.r_country;
	ec->cid.irqmask = cid.r_irqmask;
	ec->cid.irqoff = ecard_gets24(cid.r_irqoff);
	ec->cid.fiqmask = cid.r_fiqmask;
	ec->cid.fiqoff = ecard_gets24(cid.r_fiqoff);
	ec->fiqaddr =
	ec->irqaddr = (unsigned char *)ioaddr(ec->podaddr);

	if (ec->cid.is) {
		/* extended id: the card supplies its own status
		 * offsets and masks */
		ec->irqmask = ec->cid.irqmask;
		ec->irqaddr += ec->cid.irqoff;
		ec->fiqmask = ec->cid.fiqmask;
		ec->fiqaddr += ec->cid.fiqoff;
	} else {
		/* simple id: fixed bit positions in the status byte */
		ec->irqmask = 1;
		ec->fiqmask = 4;
	}

	/* NOTE(review): blacklist[] is __initdata, so card_desc ends up
	 * pointing at memory released after boot while ecard_prints()
	 * can dereference it later -- the string should be copied;
	 * confirm and fix. */
	for (i = 0; i < sizeof(blacklist) / sizeof(*blacklist); i++)
		if (blacklist[i].manufacturer == ec->cid.manufacturer &&
		    blacklist[i].product == ec->cid.product) {
			ec->card_desc = blacklist[i].type;
			break;
		}

	snprintf(ec->dev.bus_id, sizeof(ec->dev.bus_id), "ecard%d", slot);
	ec->dev.parent = NULL;
	ec->dev.bus = &ecard_bus_type;
	ec->dev.dma_mask = &ec->dma_mask;
	ec->dma_mask = (u64)0xffffffff;

	ecard_init_resources(ec);

	/*
	 * hook the interrupt handlers
	 */
	ec->irq = 32 + slot;
	set_irq_chip(ec->irq, &ecard_chip);
	set_irq_handler(ec->irq, do_level_IRQ);
	set_irq_flags(ec->irq, IRQF_VALID);

	/* append to the global card list and the slot lookup table */
	for (ecp = &cards; *ecp; ecp = &(*ecp)->next);

	*ecp = ec;
	slot_to_expcard[slot] = ec;

	/* NOTE(review): device_register()/device_create_file() return
	 * values are ignored; confirm failure here is impossible or
	 * benign. */
	device_register(&ec->dev);
	device_create_file(&ec->dev, &dev_attr_dma);
	device_create_file(&ec->dev, &dev_attr_irq);
	device_create_file(&ec->dev, &dev_attr_resource);
	device_create_file(&ec->dev, &dev_attr_vendor);
	device_create_file(&ec->dev, &dev_attr_device);

	return 0;

nodev:
	kfree(ec);
nomem:
	return rc;
}
713
/*
 * Initialise the expansion card system.
 * Locate all hardware - interrupt management and
 * actual cards.
 */
static int __init ecard_init(void)
{
	int slot, irqhw;

	printk("Probing expansion cards\n");

	for (slot = 0; slot < MAX_ECARDS; slot ++) {
		ecard_probe(slot, ECARD_IOC);
	}

	irqhw = ecard_probeirqhw();	/* always 0 on this architecture */

	set_irq_chained_handler(IRQ_EXPANSIONCARD,
				irqhw ? ecard_irqexp_handler : ecard_irq_handler);

	ecard_proc_init();

	return 0;
}
738
739subsys_initcall(ecard_init);
740
741/*
742 * ECARD "bus"
743 */
744static const struct ecard_id *
745ecard_match_device(const struct ecard_id *ids, struct expansion_card *ec)
746{
747 int i;
748
749 for (i = 0; ids[i].manufacturer != 65535; i++)
750 if (ec->cid.manufacturer == ids[i].manufacturer &&
751 ec->cid.product == ids[i].product)
752 return ids + i;
753
754 return NULL;
755}
756
/*
 * Bus probe: look up the matching id-table entry, claim the card and
 * hand it to the driver; the claim is dropped again if probe fails.
 *
 * NOTE(review): "id" may be NULL when the driver matched by simple
 * cid.id rather than an id table -- drivers must cope.
 */
static int ecard_drv_probe(struct device *dev)
{
	struct expansion_card *ec = ECARD_DEV(dev);
	struct ecard_driver *drv = ECARD_DRV(dev->driver);
	const struct ecard_id *id;
	int ret;

	id = ecard_match_device(drv->id_table, ec);

	ecard_claim(ec);
	ret = drv->probe(ec, id);
	if (ret)
		ecard_release(ec);
	return ret;
}
772
/*
 * Bus remove: detach the driver, then release our claim on the card.
 */
static int ecard_drv_remove(struct device *dev)
{
	struct expansion_card *ec = ECARD_DEV(dev);
	struct ecard_driver *drv = ECARD_DRV(dev->driver);

	drv->remove(ec);
	ecard_release(ec);

	return 0;
}
783
/*
 * Before rebooting, we must make sure that the expansion card is in a
 * sensible state, so it can be re-detected.  This means that the first
 * page of the ROM must be visible.  We call the expansion cards reset
 * handler, if any.
 */
static void ecard_drv_shutdown(struct device *dev)
{
	struct expansion_card *ec = ECARD_DEV(dev);
	struct ecard_driver *drv = ECARD_DRV(dev->driver);
	struct ecard_request req;

	if (drv->shutdown)
		drv->shutdown(ec);
	ecard_release(ec);

	/* run the card's loader reset entry so its ROM is visible again */
	req.req = req_reset;
	req.ec = ec;
	ecard_call(&req);
}
803
/*
 * Register an expansion card driver with the ecard bus, wiring the
 * generic driver-model callbacks to the ecard-specific ones.
 */
int ecard_register_driver(struct ecard_driver *drv)
{
	drv->drv.bus = &ecard_bus_type;
	drv->drv.probe = ecard_drv_probe;
	drv->drv.remove = ecard_drv_remove;
	drv->drv.shutdown = ecard_drv_shutdown;

	return driver_register(&drv->drv);
}
813
/* Unregister a driver previously added with ecard_register_driver(). */
void ecard_remove_driver(struct ecard_driver *drv)
{
	driver_unregister(&drv->drv);
}
818
819static int ecard_match(struct device *_dev, struct device_driver *_drv)
820{
821 struct expansion_card *ec = ECARD_DEV(_dev);
822 struct ecard_driver *drv = ECARD_DRV(_drv);
823 int ret;
824
825 if (drv->id_table) {
826 ret = ecard_match_device(drv->id_table, ec) != NULL;
827 } else {
828 ret = ec->cid.id == drv->id;
829 }
830
831 return ret;
832}
833
/* The "ecard" bus: device/driver matching is done by ecard_match(). */
struct bus_type ecard_bus_type = {
	.name	= "ecard",
	.match	= ecard_match,
};

/* Register the bus early (postcore) so card drivers can attach later. */
static int ecard_bus_init(void)
{
	return bus_register(&ecard_bus_type);
}

postcore_initcall(ecard_bus_init);
845
846EXPORT_SYMBOL(ecard_readchunk);
847EXPORT_SYMBOL(ecard_address);
848EXPORT_SYMBOL(ecard_register_driver);
849EXPORT_SYMBOL(ecard_remove_driver);
850EXPORT_SYMBOL(ecard_bus_type);
diff --git a/arch/arm26/kernel/entry.S b/arch/arm26/kernel/entry.S
new file mode 100644
index 000000000000..a231dd88d0e1
--- /dev/null
+++ b/arch/arm26/kernel/entry.S
@@ -0,0 +1,961 @@
1/* arch/arm26/kernel/entry.S
2 *
3 * Assembled from chunks of code in arch/arm
4 *
5 * Copyright (C) 2003 Ian Molton
6 * Based on the work of RMK.
7 *
8 */
9
10#include <linux/linkage.h>
11
12#include <asm/assembler.h>
13#include <asm/asm_offsets.h>
14#include <asm/errno.h>
15#include <asm/hardware.h>
16#include <asm/sysirq.h>
17#include <asm/thread_info.h>
18#include <asm/page.h>
19#include <asm/ptrace.h>
20
21 .macro zero_fp
22#ifndef CONFIG_NO_FRAME_POINTER
23 mov fp, #0
24#endif
25 .endm
26
27 .text
28
29@ Bad Abort numbers
30@ -----------------
31@
32#define BAD_PREFETCH 0
33#define BAD_DATA 1
34#define BAD_ADDREXCPTN 2
35#define BAD_IRQ 3
36#define BAD_UNDEFINSTR 4
37
38@ OS version number used in SWIs
39@ RISC OS is 0
40@ RISC iX is 8
41@
42#define OS_NUMBER 9
43#define ARMSWI_OFFSET 0x000f0000
44
45@
46@ Stack format (ensured by USER_* and SVC_*)
47@ PSR and PC are combined on arm26
48@
49
50#define S_OFF 8
51
52#define S_OLD_R0 64
53#define S_PC 60
54#define S_LR 56
55#define S_SP 52
56#define S_IP 48
57#define S_FP 44
58#define S_R10 40
59#define S_R9 36
60#define S_R8 32
61#define S_R7 28
62#define S_R6 24
63#define S_R5 20
64#define S_R4 16
65#define S_R3 12
66#define S_R2 8
67#define S_R1 4
68#define S_R0 0
69
70 .macro save_user_regs
71 str r0, [sp, #-4]! @ Store SVC r0
72 str lr, [sp, #-4]! @ Store user mode PC
73 sub sp, sp, #15*4
74 stmia sp, {r0 - lr}^ @ Store the other user-mode regs
75 mov r0, r0
76 .endm
77
78 .macro slow_restore_user_regs
79 ldmia sp, {r0 - lr}^ @ restore the user regs not including PC
80 mov r0, r0
81 ldr lr, [sp, #15*4] @ get user PC
82 add sp, sp, #15*4+8 @ free stack
83 movs pc, lr @ return
84 .endm
85
86 .macro fast_restore_user_regs
87 add sp, sp, #S_OFF
88 ldmib sp, {r1 - lr}^
89 mov r0, r0
90 ldr lr, [sp, #15*4]
91 add sp, sp, #15*4+8
92 movs pc, lr
93 .endm
94
95 .macro save_svc_regs
96 str sp, [sp, #-16]!
97 str lr, [sp, #8]
98 str lr, [sp, #4]
99 stmfd sp!, {r0 - r12}
100 mov r0, #-1
101 str r0, [sp, #S_OLD_R0]
102 zero_fp
103 .endm
104
105 .macro save_svc_regs_irq
106 str sp, [sp, #-16]!
107 str lr, [sp, #4]
108 ldr lr, .LCirq
109 ldr lr, [lr]
110 str lr, [sp, #8]
111 stmfd sp!, {r0 - r12}
112 mov r0, #-1
113 str r0, [sp, #S_OLD_R0]
114 zero_fp
115 .endm
116
117 .macro restore_svc_regs
118 ldmfd sp, {r0 - pc}^
119 .endm
120
121 .macro mask_pc, rd, rm
122 bic \rd, \rm, #PCMASK
123 .endm
124
125 .macro disable_irqs, temp
126 mov \temp, pc
127 orr \temp, \temp, #PSR_I_BIT
128 teqp \temp, #0
129 .endm
130
131 .macro enable_irqs, temp
132 mov \temp, pc
133 and \temp, \temp, #~PSR_I_BIT
134 teqp \temp, #0
135 .endm
136
137 .macro initialise_traps_extra
138 .endm
139
140 .macro get_thread_info, rd
141 mov \rd, sp, lsr #13
142 mov \rd, \rd, lsl #13
143 .endm
144
145/*
146 * These are the registers used in the syscall handler, and allow us to
147 * have in theory up to 7 arguments to a function - r0 to r6.
148 *
149 * Note that tbl == why is intentional.
150 *
151 * We must set at least "tsk" and "why" when calling ret_with_reschedule.
152 */
153scno .req r7 @ syscall number
154tbl .req r8 @ syscall table pointer
155why .req r8 @ Linux syscall (!= 0)
156tsk .req r9 @ current thread_info
157
158/*
159 * Get the system call number.
160 */
161 .macro get_scno
162 mask_pc lr, lr
163 ldr scno, [lr, #-4] @ get SWI instruction
164 .endm
165/*
166 * -----------------------------------------------------------------------
167 */
168
169/*
170 * We rely on the fact that R0 is at the bottom of the stack (due to
171 * slow/fast restore user regs).
172 */
173#if S_R0 != 0
174#error "Please fix"
175#endif
176
177/*
178 * This is the fast syscall return path. We do as little as
179 * possible here, and this includes saving r0 back into the SVC
180 * stack.
181 */
182ret_fast_syscall:
183 disable_irqs r1 @ disable interrupts
184 ldr r1, [tsk, #TI_FLAGS]
185 tst r1, #_TIF_WORK_MASK
186 bne fast_work_pending
187 fast_restore_user_regs
188
189/*
190 * Ok, we need to do extra processing, enter the slow path.
191 */
192fast_work_pending:
193 str r0, [sp, #S_R0+S_OFF]! @ returned r0
194work_pending:
195 tst r1, #_TIF_NEED_RESCHED
196 bne work_resched
197 tst r1, #_TIF_NOTIFY_RESUME | _TIF_SIGPENDING
198 beq no_work_pending
199 mov r0, sp @ 'regs'
200 mov r2, why @ 'syscall'
201 bl do_notify_resume
202 disable_irqs r1 @ disable interrupts
203 b no_work_pending
204
205work_resched:
206 bl schedule
207/*
208 * "slow" syscall return path. "why" tells us if this was a real syscall.
209 */
210ENTRY(ret_to_user)
211ret_slow_syscall:
212 disable_irqs r1 @ disable interrupts
213 ldr r1, [tsk, #TI_FLAGS]
214 tst r1, #_TIF_WORK_MASK
215 bne work_pending
216no_work_pending:
217 slow_restore_user_regs
218
219/*
220 * This is how we return from a fork.
221 */
222ENTRY(ret_from_fork)
223 bl schedule_tail
224 get_thread_info tsk
225 ldr r1, [tsk, #TI_FLAGS] @ check for syscall tracing
226 mov why, #1
227 tst r1, #_TIF_SYSCALL_TRACE @ are we tracing syscalls?
228 beq ret_slow_syscall
229 mov r1, sp
230 mov r0, #1 @ trace exit [IP = 1]
231 bl syscall_trace
232 b ret_slow_syscall
233
234// FIXME - is this strictly necessary?
235#include "calls.S"
236
237/*=============================================================================
238 * SWI handler
239 *-----------------------------------------------------------------------------
240 */
241
242 .align 5
243ENTRY(vector_swi)
244 save_user_regs
245 zero_fp
246 get_scno
247
248#ifdef CONFIG_ALIGNMENT_TRAP
249 ldr ip, __cr_alignment
250 ldr ip, [ip]
251 mcr p15, 0, ip, c1, c0 @ update control register
252#endif
253 enable_irqs ip
254
255 str r4, [sp, #-S_OFF]! @ push fifth arg
256
257 get_thread_info tsk
258 ldr ip, [tsk, #TI_FLAGS] @ check for syscall tracing
259 bic scno, scno, #0xff000000 @ mask off SWI op-code
260 eor scno, scno, #OS_NUMBER << 20 @ check OS number
261 adr tbl, sys_call_table @ load syscall table pointer
262 tst ip, #_TIF_SYSCALL_TRACE @ are we tracing syscalls?
263 bne __sys_trace
264
265 adral lr, ret_fast_syscall @ set return address
266 orral lr, lr, #PSR_I_BIT | MODE_SVC26 @ Force SVC mode on return
267 cmp scno, #NR_syscalls @ check upper syscall limit
268 ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine
269
270 add r1, sp, #S_OFF
2712: mov why, #0 @ no longer a real syscall
272 cmp scno, #ARMSWI_OFFSET
273 eor r0, scno, #OS_NUMBER << 20 @ put OS number back
274 bcs arm_syscall
275 b sys_ni_syscall @ not private func
276
277 /*
278 * This is the really slow path. We're going to be doing
279 * context switches, and waiting for our parent to respond.
280 */
281__sys_trace:
282 add r1, sp, #S_OFF
283 mov r0, #0 @ trace entry [IP = 0]
284 bl syscall_trace
285
286 adral lr, __sys_trace_return @ set return address
287 orral lr, lr, #PSR_I_BIT | MODE_SVC26 @ Force SVC mode on return
288 add r1, sp, #S_R0 + S_OFF @ pointer to regs
289 cmp scno, #NR_syscalls @ check upper syscall limit
290 ldmccia r1, {r0 - r3} @ have to reload r0 - r3
291 ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine
292 b 2b
293
294__sys_trace_return:
295 str r0, [sp, #S_R0 + S_OFF]! @ save returned r0
296 mov r1, sp
297 mov r0, #1 @ trace exit [IP = 1]
298 bl syscall_trace
299 b ret_slow_syscall
300
301 .align 5
302#ifdef CONFIG_ALIGNMENT_TRAP
303 .type __cr_alignment, #object
304__cr_alignment:
305 .word cr_alignment
306#endif
307
308 .type sys_call_table, #object
309ENTRY(sys_call_table)
310#include "calls.S"
311
312/*============================================================================
313 * Special system call wrappers
314 */
315@ r0 = syscall number
316@ r5 = syscall table
317 .type sys_syscall, #function
318sys_syscall:
319 eor scno, r0, #OS_NUMBER << 20
320 cmp scno, #NR_syscalls @ check range
321 stmleia sp, {r5, r6} @ shuffle args
322 movle r0, r1
323 movle r1, r2
324 movle r2, r3
325 movle r3, r4
326 ldrle pc, [tbl, scno, lsl #2]
327 b sys_ni_syscall
328
/*
 * Syscall wrappers for calls that need a pointer to the saved user
 * registers as an extra argument; S_OFF skips the scratch words
 * pushed on the SVC stack before the saved register frame.
 */
sys_fork_wrapper:
		add	r0, sp, #S_OFF
		b	sys_fork

sys_vfork_wrapper:
		add	r0, sp, #S_OFF
		b	sys_vfork

sys_execve_wrapper:
		add	r3, sp, #S_OFF
		b	sys_execve

@ NOTE(review): label is misspelled ("wapper"); calls.S presumably
@ references the same spelling, so a rename must change both files.
sys_clone_wapper:
		add	r2, sp, #S_OFF
		b	sys_clone

sys_sigsuspend_wrapper:
		add	r3, sp, #S_OFF
		b	sys_sigsuspend

sys_rt_sigsuspend_wrapper:
		add	r2, sp, #S_OFF
		b	sys_rt_sigsuspend

sys_sigreturn_wrapper:
		add	r0, sp, #S_OFF
		b	sys_sigreturn

sys_rt_sigreturn_wrapper:
		add	r0, sp, #S_OFF
		b	sys_rt_sigreturn

sys_sigaltstack_wrapper:
		ldr	r2, [sp, #S_OFF + S_SP]	@ third arg: saved user sp
		b	do_sigaltstack
364
/*
 * Note: off_4k (r5) is always units of 4K.  If we can't do the requested
 * offset, we return EINVAL.  FIXME - this lost some stuff from arm32 to
 * ifdefs. check it out.
 */
sys_mmap2:
		@ reject offsets that are not page-aligned
		tst	r5, #((1 << (PAGE_SHIFT - 12)) - 1)
		moveq	r5, r5, lsr #PAGE_SHIFT - 12	@ 4K units -> pages
		streq	r5, [sp, #4]			@ store as fifth arg
		beq	do_mmap2
		mov	r0, #-EINVAL
		RETINSTR(mov,pc, lr)
377
378/*
379 * Design issues:
380 * - We have several modes that each vector can be called from,
381 * each with its own set of registers. On entry to any vector,
382 * we *must* save the registers used in *that* mode.
383 *
384 * - This code must be as fast as possible.
385 *
386 * There are a few restrictions on the vectors:
387 * - the SWI vector cannot be called from *any* non-user mode
388 *
389 * - the FP emulator is *never* called from *any* non-user mode undefined
390 * instruction.
391 *
392 */
393
394 .text
395
	.macro	handle_irq
@ Poll the IOC: check the high-priority (IRQ B) request register
@ first, then the low-priority one, translating the request bits to
@ an IRQ number via the matching priority table.  The return address
@ is pointed back at 1b so all asserted IRQs are serviced before the
@ macro falls through.
1:	mov	r4, #IOC_BASE
	ldrb	r6, [r4, #0x24]		@ get high priority first
	adr	r5, irq_prio_h
	teq	r6, #0
	ldreqb	r6, [r4, #0x14]		@ get low priority
	adreq	r5, irq_prio_l

	teq	r6, #0			@ If an IRQ happened...
	ldrneb	r0, [r5, r6]		@ get IRQ number
	movne	r1, sp			@ get struct pt_regs
	adrne	lr, 1b			@ Set return address to 1b
	orrne	lr, lr, #PSR_I_BIT | MODE_SVC26 @ (and force SVC mode)
	bne	asm_do_IRQ		@ process IRQ (if asserted)
	.endm
411
412
413/*
414 * Interrupt table (incorporates priority)
415 */
416 .macro irq_prio_table
417irq_prio_l: .byte 0, 0, 1, 0, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3
418 .byte 4, 0, 1, 0, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3
419 .byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
420 .byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
421 .byte 6, 6, 6, 6, 6, 6, 6, 6, 3, 3, 3, 3, 3, 3, 3, 3
422 .byte 6, 6, 6, 6, 6, 6, 6, 6, 3, 3, 3, 3, 3, 3, 3, 3
423 .byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
424 .byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
425 .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
426 .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
427 .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
428 .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
429 .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
430 .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
431 .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
432 .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
433irq_prio_h: .byte 0, 8, 9, 8,10,10,10,10,11,11,11,11,10,10,10,10
434 .byte 12, 8, 9, 8,10,10,10,10,11,11,11,11,10,10,10,10
435 .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
436 .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
437 .byte 14,14,14,14,10,10,10,10,11,11,11,11,10,10,10,10
438 .byte 14,14,14,14,10,10,10,10,11,11,11,11,10,10,10,10
439 .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
440 .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
441 .byte 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10
442 .byte 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10
443 .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
444 .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
445 .byte 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10
446 .byte 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10
447 .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
448 .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
449 .endm
450
451#if 1
452/*
 453 * Uncomment these if you wish to get more debugging info about data aborts.
454 * FIXME - I bet we can find a way to encode these and keep performance.
455 */
456#define FAULT_CODE_LDRSTRPOST 0x80
457#define FAULT_CODE_LDRSTRPRE 0x40
458#define FAULT_CODE_LDRSTRREG 0x20
459#define FAULT_CODE_LDMSTM 0x10
460#define FAULT_CODE_LDCSTC 0x08
461#endif
462#define FAULT_CODE_PREFETCH 0x04
463#define FAULT_CODE_WRITE 0x02
464#define FAULT_CODE_FORCECOW 0x01
465
466/*=============================================================================
467 * Undefined FIQs
468 *-----------------------------------------------------------------------------
469 */
470_unexp_fiq: ldr sp, .LCfiq
471 mov r12, #IOC_BASE
472 strb r12, [r12, #0x38] @ Disable FIQ register
473 teqp pc, #PSR_I_BIT | PSR_F_BIT | MODE_SVC26
474 mov r0, r0
475 stmfd sp!, {r0 - r3, ip, lr}
476 adr r0, Lfiqmsg
477 bl printk
478 ldmfd sp!, {r0 - r3, ip, lr}
479 teqp pc, #PSR_I_BIT | PSR_F_BIT | MODE_FIQ26
480 mov r0, r0
481 movs pc, lr
482
483Lfiqmsg: .ascii "*** Unexpected FIQ\n\0"
484 .align
485
486.LCfiq: .word __temp_fiq
487.LCirq: .word __temp_irq
488
489/*=============================================================================
490 * Undefined instruction handler
491 *-----------------------------------------------------------------------------
492 * Handles floating point instructions
493 */
494vector_undefinstr:
495 tst lr, #MODE_SVC26 @ did we come from a non-user mode?
496 bne __und_svc @ yes - deal with it.
497/* Otherwise, fall through for the user-space (common) case. */
498 save_user_regs
499 zero_fp @ zero frame pointer
500 teqp pc, #PSR_I_BIT | MODE_SVC26 @ disable IRQs
501.Lbug_undef:
502 ldr r4, .LC2
503 ldr pc, [r4] @ Call FP module entry point
504/* FIXME - should we trap for a null pointer here? */
505
506/* The SVC mode case */
507__und_svc: save_svc_regs @ Non-user mode
508 mask_pc r0, lr
509 and r2, lr, #3
510 sub r0, r0, #4
511 mov r1, sp
512 bl do_undefinstr
513 restore_svc_regs
514
 515/* We get here if the FP emulator doesn't handle the undef instr.
 516 * If the insn WAS handled, the emulator jumps to ret_from_exception by itself.
517 */
518 .globl fpundefinstr
519fpundefinstr:
520 mov r0, lr
521 mov r1, sp
522 teqp pc, #MODE_SVC26
523 bl do_undefinstr
524 b ret_from_exception @ Normal FP exit
525
526#if defined CONFIG_FPE_NWFPE || defined CONFIG_FPE_FASTFPE
527 /* The FPE is always present */
528 .equ fpe_not_present, 0
529#else
530/* We get here if an undefined instruction happens and the floating
531 * point emulator is not present. If the offending instruction was
532 * a WFS, we just perform a normal return as if we had emulated the
533 * operation. This is a hack to allow some basic userland binaries
534 * to run so that the emulator module proper can be loaded. --philb
535 * FIXME - probably a broken useless hack...
536 */
537fpe_not_present:
538 adr r10, wfs_mask_data
539 ldmia r10, {r4, r5, r6, r7, r8}
540 ldr r10, [sp, #S_PC] @ Load PC
541 sub r10, r10, #4
542 mask_pc r10, r10
543 ldrt r10, [r10] @ get instruction
544 and r5, r10, r5
545 teq r5, r4 @ Is it WFS?
546 beq ret_from_exception
547 and r5, r10, r8
548 teq r5, r6 @ Is it LDF/STF on sp or fp?
549 teqne r5, r7
550 bne fpundefinstr
551 tst r10, #0x00200000 @ Does it have WB
552 beq ret_from_exception
553 and r4, r10, #255 @ get offset
554 and r6, r10, #0x000f0000
555 tst r10, #0x00800000 @ +/-
556 ldr r5, [sp, r6, lsr #14] @ Load reg
557 rsbeq r4, r4, #0
558 add r5, r5, r4, lsl #2
559 str r5, [sp, r6, lsr #14] @ Save reg
560 b ret_from_exception
561
562wfs_mask_data: .word 0x0e200110 @ WFS/RFS
563 .word 0x0fef0fff
564 .word 0x0d0d0100 @ LDF [sp]/STF [sp]
565 .word 0x0d0b0100 @ LDF [fp]/STF [fp]
566 .word 0x0f0f0f00
567#endif
568
569.LC2: .word fp_enter
570
571/*=============================================================================
572 * Prefetch abort handler
573 *-----------------------------------------------------------------------------
574 */
575#define DEBUG_UNDEF
576/* remember: lr = USR pc */
577vector_prefetch:
578 sub lr, lr, #4
579 tst lr, #MODE_SVC26
580 bne __pabt_invalid
581 save_user_regs
582 teqp pc, #MODE_SVC26 @ Enable IRQs...
583 mask_pc r0, lr @ Address of abort
584 mov r1, sp @ Tasks registers
585 bl do_PrefetchAbort
586 teq r0, #0 @ If non-zero, we believe this abort..
587 bne ret_from_exception
588#ifdef DEBUG_UNDEF
589 adr r0, t
590 bl printk
591#endif
592 ldr lr, [sp,#S_PC] @ FIXME program to test this on. I think its
593 b .Lbug_undef @ broken at the moment though!)
594
595__pabt_invalid: save_svc_regs
596 mov r0, sp @ Prefetch aborts are definitely *not*
597 mov r1, #BAD_PREFETCH @ allowed in non-user modes. We cant
598 and r2, lr, #3 @ recover from this problem.
599 b bad_mode
600
601#ifdef DEBUG_UNDEF
602t: .ascii "*** undef ***\r\n\0"
603 .align
604#endif
605
606/*=============================================================================
607 * Address exception handler
608 *-----------------------------------------------------------------------------
609 * These aren't too critical.
610 * (they're not supposed to happen).
611 * In order to debug the reason for address exceptions in non-user modes,
612 * we have to obtain all the registers so that we can see what's going on.
613 */
614
615vector_addrexcptn:
616 sub lr, lr, #8
617 tst lr, #3
618 bne Laddrexcptn_not_user
619 save_user_regs
620 teq pc, #MODE_SVC26
621 mask_pc r0, lr @ Point to instruction
622 mov r1, sp @ Point to registers
623 mov r2, #0x400
624 mov lr, pc
625 bl do_excpt
626 b ret_from_exception
627
628Laddrexcptn_not_user:
629 save_svc_regs
630 and r2, lr, #3
631 teq r2, #3
632 bne Laddrexcptn_illegal_mode
633 teqp pc, #MODE_SVC26
634 mask_pc r0, lr
635 mov r1, sp
636 orr r2, r2, #0x400
637 bl do_excpt
638 ldmia sp, {r0 - lr} @ I cant remember the reason I changed this...
639 add sp, sp, #15*4
640 movs pc, lr
641
642Laddrexcptn_illegal_mode:
643 mov r0, sp
644 str lr, [sp, #-4]!
645 orr r1, r2, #PSR_I_BIT | PSR_F_BIT
646 teqp r1, #0 @ change into mode (wont be user mode)
647 mov r0, r0
648 mov r1, r8 @ Any register from r8 - r14 can be banked
649 mov r2, r9
650 mov r3, r10
651 mov r4, r11
652 mov r5, r12
653 mov r6, r13
654 mov r7, r14
655 teqp pc, #PSR_F_BIT | MODE_SVC26 @ back to svc
656 mov r0, r0
657 stmfd sp!, {r1-r7}
658 ldmia r0, {r0-r7}
659 stmfd sp!, {r0-r7}
660 mov r0, sp
661 mov r1, #BAD_ADDREXCPTN
662 b bad_mode
663
664/*=============================================================================
665 * Interrupt (IRQ) handler
666 *-----------------------------------------------------------------------------
667 * Note: if the IRQ was taken whilst in user mode, then *no* kernel routine
668 * is running, so do not have to save svc lr.
669 *
670 * Entered in IRQ mode.
671 */
672
673vector_IRQ: ldr sp, .LCirq @ Setup some temporary stack
674 sub lr, lr, #4
675 str lr, [sp] @ push return address
676
677 tst lr, #3
678 bne __irq_non_usr
679
680__irq_usr: teqp pc, #PSR_I_BIT | MODE_SVC26 @ Enter SVC mode
681 mov r0, r0
682
683 ldr lr, .LCirq
684 ldr lr, [lr] @ Restore lr for jump back to USR
685
686 save_user_regs
687
688 handle_irq
689
690 mov why, #0
691 get_thread_info tsk
692 b ret_to_user
693
694@ Place the IRQ priority table here so that the handle_irq macros above
695@ and below here can access it.
696
697 irq_prio_table
698
699__irq_non_usr: teqp pc, #PSR_I_BIT | MODE_SVC26 @ Enter SVC mode
700 mov r0, r0
701
702 save_svc_regs_irq
703
704 and r2, lr, #3
705 teq r2, #3
706 bne __irq_invalid @ IRQ not from SVC mode
707
708 handle_irq
709
710 restore_svc_regs
711
712__irq_invalid: mov r0, sp
713 mov r1, #BAD_IRQ
714 b bad_mode
715
716/*=============================================================================
717 * Data abort handler code
718 *-----------------------------------------------------------------------------
719 *
720 * This handles both exceptions from user and SVC modes, computes the address
721 * range of the problem, and does any correction that is required. It then
722 * calls the kernel data abort routine.
723 *
724 * This is where I wish that the ARM would tell you which address aborted.
725 */
726
727vector_data: sub lr, lr, #8 @ Correct lr
728 tst lr, #3
729 bne Ldata_not_user
730 save_user_regs
731 teqp pc, #MODE_SVC26
732 mask_pc r0, lr
733 bl Ldata_do
734 b ret_from_exception
735
736Ldata_not_user:
737 save_svc_regs
738 and r2, lr, #3
739 teq r2, #3
740 bne Ldata_illegal_mode
741 tst lr, #PSR_I_BIT
742 teqeqp pc, #MODE_SVC26
743 mask_pc r0, lr
744 bl Ldata_do
745 restore_svc_regs
746
747Ldata_illegal_mode:
748 mov r0, sp
749 mov r1, #BAD_DATA
750 b bad_mode
751
752Ldata_do: mov r3, sp
753 ldr r4, [r0] @ Get instruction
754 mov r2, #0
755 tst r4, #1 << 20 @ Check to see if it is a write instruction
756 orreq r2, r2, #FAULT_CODE_WRITE @ Indicate write instruction
 757	mov	r1, r4, lsr #22				@ Now branch to the relevant processing routine
758 and r1, r1, #15 << 2
759 add pc, pc, r1
760 movs pc, lr
761 b Ldata_unknown
762 b Ldata_unknown
763 b Ldata_unknown
764 b Ldata_unknown
765 b Ldata_ldrstr_post @ ldr rd, [rn], #m
766 b Ldata_ldrstr_numindex @ ldr rd, [rn, #m] @ RegVal
767 b Ldata_ldrstr_post @ ldr rd, [rn], rm
768 b Ldata_ldrstr_regindex @ ldr rd, [rn, rm]
769 b Ldata_ldmstm @ ldm*a rn, <rlist>
770 b Ldata_ldmstm @ ldm*b rn, <rlist>
771 b Ldata_unknown
772 b Ldata_unknown
773 b Ldata_ldrstr_post @ ldc rd, [rn], #m @ Same as ldr rd, [rn], #m
774 b Ldata_ldcstc_pre @ ldc rd, [rn, #m]
775 b Ldata_unknown
776Ldata_unknown: @ Part of jumptable
777 mov r0, r1
778 mov r1, r4
779 mov r2, r3
780 b baddataabort
781
782Ldata_ldrstr_post:
783 mov r0, r4, lsr #14 @ Get Rn
784 and r0, r0, #15 << 2 @ Mask out reg.
785 teq r0, #15 << 2
786 ldr r0, [r3, r0] @ Get register
787 biceq r0, r0, #PCMASK
788 mov r1, r0
789#ifdef FAULT_CODE_LDRSTRPOST
790 orr r2, r2, #FAULT_CODE_LDRSTRPOST
791#endif
792 b do_DataAbort
793
794Ldata_ldrstr_numindex:
795 mov r0, r4, lsr #14 @ Get Rn
796 and r0, r0, #15 << 2 @ Mask out reg.
797 teq r0, #15 << 2
798 ldr r0, [r3, r0] @ Get register
799 mov r1, r4, lsl #20
800 biceq r0, r0, #PCMASK
801 tst r4, #1 << 23
802 addne r0, r0, r1, lsr #20
803 subeq r0, r0, r1, lsr #20
804 mov r1, r0
805#ifdef FAULT_CODE_LDRSTRPRE
806 orr r2, r2, #FAULT_CODE_LDRSTRPRE
807#endif
808 b do_DataAbort
809
810Ldata_ldrstr_regindex:
811 mov r0, r4, lsr #14 @ Get Rn
812 and r0, r0, #15 << 2 @ Mask out reg.
813 teq r0, #15 << 2
814 ldr r0, [r3, r0] @ Get register
815 and r7, r4, #15
816 biceq r0, r0, #PCMASK
817 teq r7, #15 @ Check for PC
818 ldr r7, [r3, r7, lsl #2] @ Get Rm
819 and r8, r4, #0x60 @ Get shift types
820 biceq r7, r7, #PCMASK
821 mov r9, r4, lsr #7 @ Get shift amount
822 and r9, r9, #31
823 teq r8, #0
824 moveq r7, r7, lsl r9
825 teq r8, #0x20 @ LSR shift
826 moveq r7, r7, lsr r9
827 teq r8, #0x40 @ ASR shift
828 moveq r7, r7, asr r9
829 teq r8, #0x60 @ ROR shift
830 moveq r7, r7, ror r9
831 tst r4, #1 << 23
832 addne r0, r0, r7
833 subeq r0, r0, r7 @ Apply correction
834 mov r1, r0
835#ifdef FAULT_CODE_LDRSTRREG
836 orr r2, r2, #FAULT_CODE_LDRSTRREG
837#endif
838 b do_DataAbort
839
840Ldata_ldmstm:
841 mov r7, #0x11
842 orr r7, r7, r7, lsl #8
843 and r0, r4, r7
844 and r1, r4, r7, lsl #1
845 add r0, r0, r1, lsr #1
846 and r1, r4, r7, lsl #2
847 add r0, r0, r1, lsr #2
848 and r1, r4, r7, lsl #3
849 add r0, r0, r1, lsr #3
850 add r0, r0, r0, lsr #8
851 add r0, r0, r0, lsr #4
852 and r7, r0, #15 @ r7 = no. of registers to transfer.
853 mov r5, r4, lsr #14 @ Get Rn
854 and r5, r5, #15 << 2
855 ldr r0, [r3, r5] @ Get reg
856 eor r6, r4, r4, lsl #2
857 tst r6, #1 << 23 @ Check inc/dec ^ writeback
858 rsbeq r7, r7, #0
859 add r7, r0, r7, lsl #2 @ Do correction (signed)
860 subne r1, r7, #1
861 subeq r1, r0, #1
862 moveq r0, r7
863 tst r4, #1 << 21 @ Check writeback
864 strne r7, [r3, r5]
865 eor r6, r4, r4, lsl #1
866 tst r6, #1 << 24 @ Check Pre/Post ^ inc/dec
867 addeq r0, r0, #4
868 addeq r1, r1, #4
869 teq r5, #15*4 @ CHECK FOR PC
870 biceq r1, r1, #PCMASK
871 biceq r0, r0, #PCMASK
872#ifdef FAULT_CODE_LDMSTM
873 orr r2, r2, #FAULT_CODE_LDMSTM
874#endif
875 b do_DataAbort
876
877Ldata_ldcstc_pre:
878 mov r0, r4, lsr #14 @ Get Rn
879 and r0, r0, #15 << 2 @ Mask out reg.
880 teq r0, #15 << 2
881 ldr r0, [r3, r0] @ Get register
882 mov r1, r4, lsl #24 @ Get offset
883 biceq r0, r0, #PCMASK
884 tst r4, #1 << 23
885 addne r0, r0, r1, lsr #24
886 subeq r0, r0, r1, lsr #24
887 mov r1, r0
888#ifdef FAULT_CODE_LDCSTC
889 orr r2, r2, #FAULT_CODE_LDCSTC
890#endif
891 b do_DataAbort
892
893
894/*
895 * This is the return code to user mode for abort handlers
896 */
897ENTRY(ret_from_exception)
898 get_thread_info tsk
899 mov why, #0
900 b ret_to_user
901
902 .data
903ENTRY(fp_enter)
904 .word fpe_not_present
905 .text
906/*
907 * Register switch for older 26-bit only ARMs
908 */
909ENTRY(__switch_to)
910 add r0, r0, #TI_CPU_SAVE
911 stmia r0, {r4 - sl, fp, sp, lr}
912 add r1, r1, #TI_CPU_SAVE
913 ldmia r1, {r4 - sl, fp, sp, pc}^
914
915/*
916 *=============================================================================
917 * Low-level interface code
918 *-----------------------------------------------------------------------------
919 * Trap initialisation
920 *-----------------------------------------------------------------------------
921 *
922 * Note - FIQ code has changed. The default is a couple of words in 0x1c, 0x20
 923 * that call _unexp_fiq. However, we now copy the FIQ routine to 0x1c (removes
924 * some excess cycles).
925 *
926 * What we need to put into 0-0x1c are branches to branch to the kernel.
927 */
928
929 .section ".init.text",#alloc,#execinstr
930
931.Ljump_addresses:
932 swi SYS_ERROR0
933 .word vector_undefinstr - 12
934 .word vector_swi - 16
935 .word vector_prefetch - 20
936 .word vector_data - 24
937 .word vector_addrexcptn - 28
938 .word vector_IRQ - 32
939 .word _unexp_fiq - 36
940 b . + 8
941/*
942 * initialise the trap system
943 */
944ENTRY(__trap_init)
945 stmfd sp!, {r4 - r7, lr}
946 adr r1, .Ljump_addresses
947 ldmia r1, {r1 - r7, ip, lr}
948 orr r2, lr, r2, lsr #2
949 orr r3, lr, r3, lsr #2
950 orr r4, lr, r4, lsr #2
951 orr r5, lr, r5, lsr #2
952 orr r6, lr, r6, lsr #2
953 orr r7, lr, r7, lsr #2
954 orr ip, lr, ip, lsr #2
955 mov r0, #0
956 stmia r0, {r1 - r7, ip}
957 ldmfd sp!, {r4 - r7, pc}^
958
959 .bss
960__temp_irq: .space 4 @ saved lr_irq
961__temp_fiq: .space 128
diff --git a/arch/arm26/kernel/fiq.c b/arch/arm26/kernel/fiq.c
new file mode 100644
index 000000000000..08a97c9498ff
--- /dev/null
+++ b/arch/arm26/kernel/fiq.c
@@ -0,0 +1,202 @@
1/*
2 * linux/arch/arm26/kernel/fiq.c
3 *
4 * Copyright (C) 1998 Russell King
5 * Copyright (C) 1998, 1999 Phil Blundell
6 * Copyright (C) 2003 Ian Molton
7 *
8 * FIQ support written by Philip Blundell <philb@gnu.org>, 1998.
9 *
10 * FIQ support re-written by Russell King to be more generic
11 *
12 * We now properly support a method by which the FIQ handlers can
13 * be stacked onto the vector. We still do not support sharing
14 * the FIQ vector itself.
15 *
16 * Operation is as follows:
17 * 1. Owner A claims FIQ:
18 * - default_fiq relinquishes control.
19 * 2. Owner A:
20 * - inserts code.
21 * - sets any registers,
22 * - enables FIQ.
23 * 3. Owner B claims FIQ:
24 * - if owner A has a relinquish function.
25 * - disable FIQs.
26 * - saves any registers.
27 * - returns zero.
28 * 4. Owner B:
29 * - inserts code.
30 * - sets any registers,
31 * - enables FIQ.
32 * 5. Owner B releases FIQ:
33 * - Owner A is asked to reacquire FIQ:
34 * - inserts code.
35 * - restores saved registers.
36 * - enables FIQ.
37 * 6. Goto 3
38 */
39#include <linux/config.h>
40#include <linux/module.h>
41#include <linux/mm.h>
42#include <linux/mman.h>
43#include <linux/init.h>
44#include <linux/seq_file.h>
45
46#include <asm/fiq.h>
47#include <asm/io.h>
48#include <asm/irq.h>
49#include <asm/pgalloc.h>
50#include <asm/system.h>
51#include <asm/uaccess.h>
52
53#define FIQ_VECTOR (vectors_base() + 0x1c)
54
55static unsigned long no_fiq_insn;
56
57#define unprotect_page_0()
58#define protect_page_0()
59
60/* Default reacquire function
61 * - we always relinquish FIQ control
62 * - we always reacquire FIQ control
63 */
64static int fiq_def_op(void *ref, int relinquish)
65{
66 if (!relinquish) {
67 unprotect_page_0();
68 *(unsigned long *)FIQ_VECTOR = no_fiq_insn;
69 protect_page_0();
70 }
71
72 return 0;
73}
74
75static struct fiq_handler default_owner = {
76 .name = "default",
77 .fiq_op = fiq_def_op,
78};
79
80static struct fiq_handler *current_fiq = &default_owner;
81
82int show_fiq_list(struct seq_file *p, void *v)
83{
84 if (current_fiq != &default_owner)
85 seq_printf(p, "FIQ: %s\n", current_fiq->name);
86
87 return 0;
88}
89
90void set_fiq_handler(void *start, unsigned int length)
91{
92 unprotect_page_0();
93
94 memcpy((void *)FIQ_VECTOR, start, length);
95
96 protect_page_0();
97}
98
99/*
100 * Taking an interrupt in FIQ mode is death, so both these functions
101 * disable irqs for the duration.
102 */
103void set_fiq_regs(struct pt_regs *regs)
104{
105 register unsigned long tmp, tmp2;
106 __asm__ volatile (
107 "mov %0, pc
108 bic %1, %0, #0x3
109 orr %1, %1, %3
110 teqp %1, #0 @ select FIQ mode
111 mov r0, r0
112 ldmia %2, {r8 - r14}
113 teqp %0, #0 @ return to SVC mode
114 mov r0, r0"
115 : "=&r" (tmp), "=&r" (tmp2)
116 : "r" (&regs->ARM_r8), "I" (PSR_I_BIT | PSR_F_BIT | MODE_FIQ26)
117 /* These registers aren't modified by the above code in a way
118 visible to the compiler, but we mark them as clobbers anyway
119 so that GCC won't put any of the input or output operands in
120 them. */
121 : "r8", "r9", "r10", "r11", "r12", "r13", "r14");
122}
123
124void get_fiq_regs(struct pt_regs *regs)
125{
126 register unsigned long tmp, tmp2;
127 __asm__ volatile (
128 "mov %0, pc
129 bic %1, %0, #0x3
130 orr %1, %1, %3
131 teqp %1, #0 @ select FIQ mode
132 mov r0, r0
133 stmia %2, {r8 - r14}
134 teqp %0, #0 @ return to SVC mode
135 mov r0, r0"
136 : "=&r" (tmp), "=&r" (tmp2)
137 : "r" (&regs->ARM_r8), "I" (PSR_I_BIT | PSR_F_BIT | MODE_FIQ26)
138 /* These registers aren't modified by the above code in a way
139 visible to the compiler, but we mark them as clobbers anyway
140 so that GCC won't put any of the input or output operands in
141 them. */
142 : "r8", "r9", "r10", "r11", "r12", "r13", "r14");
143}
144
145int claim_fiq(struct fiq_handler *f)
146{
147 int ret = 0;
148
149 if (current_fiq) {
150 ret = -EBUSY;
151
152 if (current_fiq->fiq_op != NULL)
153 ret = current_fiq->fiq_op(current_fiq->dev_id, 1);
154 }
155
156 if (!ret) {
157 f->next = current_fiq;
158 current_fiq = f;
159 }
160
161 return ret;
162}
163
164void release_fiq(struct fiq_handler *f)
165{
166 if (current_fiq != f) {
167 printk(KERN_ERR "%s FIQ trying to release %s FIQ\n",
168 f->name, current_fiq->name);
169#ifdef CONFIG_DEBUG_ERRORS
170 __backtrace();
171#endif
172 return;
173 }
174
175 do
176 current_fiq = current_fiq->next;
177 while (current_fiq->fiq_op(current_fiq->dev_id, 0));
178}
179
180void enable_fiq(int fiq)
181{
182 enable_irq(fiq + FIQ_START);
183}
184
185void disable_fiq(int fiq)
186{
187 disable_irq(fiq + FIQ_START);
188}
189
190EXPORT_SYMBOL(set_fiq_handler);
191EXPORT_SYMBOL(set_fiq_regs);
192EXPORT_SYMBOL(get_fiq_regs);
193EXPORT_SYMBOL(claim_fiq);
194EXPORT_SYMBOL(release_fiq);
195EXPORT_SYMBOL(enable_fiq);
196EXPORT_SYMBOL(disable_fiq);
197
198void __init init_FIQ(void)
199{
200 no_fiq_insn = *(unsigned long *)FIQ_VECTOR;
201 set_fs(get_fs());
202}
diff --git a/arch/arm26/kernel/head.S b/arch/arm26/kernel/head.S
new file mode 100644
index 000000000000..8bfc62539ba6
--- /dev/null
+++ b/arch/arm26/kernel/head.S
@@ -0,0 +1,113 @@
1/*
2 * linux/arch/arm26/kernel/head.S
3 *
4 * Copyright (C) 1994-2000 Russell King
5 * Copyright (C) 2003 Ian Molton
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * 26-bit kernel startup code
12 */
13#include <linux/config.h>
14#include <linux/linkage.h>
15#include <asm/mach-types.h>
16
17 .globl swapper_pg_dir
18 .equ swapper_pg_dir, 0x0207d000
19
20/*
21 * Entry point.
22 */
23 .section ".init.text",#alloc,#execinstr
24ENTRY(stext)
25
26__entry:
27 cmp pc, #0x02000000
28 ldrlt pc, LC0 @ if 0x01800000, call at 0x02080000
29 teq r0, #0 @ Check for old calling method
30 blne oldparams @ Move page if old
31
32 adr r0, LC0
33 ldmib r0, {r2-r5, sp} @ Setup stack (and fetch other values)
34
35 mov r0, #0 @ Clear BSS
361: cmp r2, r3
37 strcc r0, [r2], #4
38 bcc 1b
39
40 bl detect_proc_type
41 str r0, [r4]
42 bl detect_arch_type
43 str r0, [r5]
44
45#ifdef CONFIG_XIP_KERNEL
46 ldr r3, ETEXT @ data section copy
47 ldr r4, SDATA
48 ldr r5, EDATA
491:
50 ldr r6, [r3], #4
51 str r6, [r4], #4
52 cmp r4, r5
53 blt 1b
54#endif
55 mov fp, #0
56 b start_kernel
57
58LC0: .word _stext
59 .word __bss_start @ r2
60 .word _end @ r3
61 .word processor_id @ r4
62 .word __machine_arch_type @ r5
63 .word init_thread_union+8192 @ sp
64#ifdef CONFIG_XIP_KERNEL
65ETEXT: .word _endtext
66SDATA: .word _sdata
67EDATA: .word __bss_start
68#endif
69
 70arm2_id:	.long	0x41560200 @ ARM2 and 250 don't have a CPUID
71arm250_id: .long 0x41560250 @ So we create some after probing for them
72 .align
73
74oldparams: mov r4, #0x02000000
75 add r3, r4, #0x00080000
76 add r4, r4, #0x0007c000
771: ldmia r0!, {r5 - r12}
78 stmia r4!, {r5 - r12}
79 cmp r4, r3
80 blt 1b
81 mov pc, lr
82
83/*
84 * We need some way to automatically detect the difference between
85 * these two machines. Unfortunately, it is not possible to detect
86 * the presence of the SuperIO chip, because that will hang the old
87 * Archimedes machines solid.
88 */
89/* DAG: Outdated, these have been combined !!!!!!! */
90detect_arch_type:
91#if defined(CONFIG_ARCH_ARC)
92 mov r0, #MACH_TYPE_ARCHIMEDES
93#elif defined(CONFIG_ARCH_A5K)
94 mov r0, #MACH_TYPE_A5K
95#endif
96 mov pc, lr
97
98detect_proc_type:
99 mov ip, lr
100 mov r2, #0xea000000 @ Point undef instr to continuation
101 adr r0, continue - 12
102 orr r0, r2, r0, lsr #2
103 mov r1, #0
104 str r0, [r1, #4]
105 ldr r0, arm2_id
106 swp r2, r2, [r1] @ check for swp (ARM2 cant)
107 ldr r0, arm250_id
108 mrc 15, 0, r3, c0, c0 @ check for CP#15 (ARM250 cant)
109 mov r0, r3
110continue: mov r2, #0xeb000000 @ Make undef vector loop
111 sub r2, r2, #2
112 str r2, [r1, #4]
113 mov pc, ip
diff --git a/arch/arm26/kernel/init_task.c b/arch/arm26/kernel/init_task.c
new file mode 100644
index 000000000000..4191565b889b
--- /dev/null
+++ b/arch/arm26/kernel/init_task.c
@@ -0,0 +1,49 @@
1/*
2 * linux/arch/arm26/kernel/init_task.c
3 *
4 * Copyright (C) 2003 Ian Molton
5 *
6 */
7#include <linux/mm.h>
8#include <linux/module.h>
9#include <linux/fs.h>
10#include <linux/sched.h>
11#include <linux/init.h>
12#include <linux/init_task.h>
13#include <linux/mqueue.h>
14
15#include <asm/uaccess.h>
16#include <asm/pgtable.h>
17
18static struct fs_struct init_fs = INIT_FS;
19static struct files_struct init_files = INIT_FILES;
20static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
21static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
22struct mm_struct init_mm = INIT_MM(init_mm);
23
24EXPORT_SYMBOL(init_mm);
25
26/*
27 * Initial thread structure.
28 *
29 * We need to make sure that this is 8192-byte aligned due to the
30 * way process stacks are handled. This is done by making sure
31 * the linker maps this in the .text segment right after head.S,
32 * and making the linker scripts ensure the proper alignment.
33 *
34 * FIXME - should this be 32K alignment on arm26?
35 *
36 * The things we do for performance...
37 */
38union thread_union init_thread_union
39 __attribute__((__section__(".init.task"))) =
40 { INIT_THREAD_INFO(init_task) };
41
42/*
43 * Initial task structure.
44 *
45 * All other task structs will be allocated on slabs in fork.c
46 */
47struct task_struct init_task = INIT_TASK(init_task);
48
49EXPORT_SYMBOL(init_task);
diff --git a/arch/arm26/kernel/irq.c b/arch/arm26/kernel/irq.c
new file mode 100644
index 000000000000..f3cc1036e5bc
--- /dev/null
+++ b/arch/arm26/kernel/irq.c
@@ -0,0 +1,716 @@
1/*
2 * linux/arch/arm/kernel/irq.c
3 *
4 * Copyright (C) 1992 Linus Torvalds
5 * Modifications for ARM processor Copyright (C) 1995-2000 Russell King.
6 * 'Borrowed' for ARM26 and (C) 2003 Ian Molton.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * This file contains the code used by various IRQ handling routines:
13 * asking for different IRQ's should be done through these routines
14 * instead of just grabbing them. Thus setups with different IRQ numbers
15 * shouldn't result in any weird surprises, and installing new handlers
16 * should be easier.
17 *
18 * IRQ's are in fact implemented a bit like signal handlers for the kernel.
19 * Naturally it's not a 1:1 relation, but there are similarities.
20 */
21#include <linux/config.h>
22#include <linux/module.h>
23#include <linux/ptrace.h>
24#include <linux/kernel_stat.h>
25#include <linux/signal.h>
26#include <linux/sched.h>
27#include <linux/ioport.h>
28#include <linux/interrupt.h>
29#include <linux/slab.h>
30#include <linux/random.h>
31#include <linux/smp.h>
32#include <linux/init.h>
33#include <linux/seq_file.h>
34#include <linux/errno.h>
35
36#include <asm/irq.h>
37#include <asm/system.h>
38#include <asm/irqchip.h>
39
40//FIXME - this ought to be in a header IMO
41void __init arc_init_irq(void);
42
43/*
44 * Maximum IRQ count. Currently, this is arbitary. However, it should
45 * not be set too low to prevent false triggering. Conversely, if it
46 * is set too high, then you could miss a stuck IRQ.
47 *
48 * FIXME Maybe we ought to set a timer and re-enable the IRQ at a later time?
49 */
50#define MAX_IRQ_CNT 100000
51
52static volatile unsigned long irq_err_count;
53static DEFINE_SPINLOCK(irq_controller_lock);
54
55struct irqdesc irq_desc[NR_IRQS];
56
/*
 * Dummy mask/unmask handler: deliberately does nothing.  Installed as
 * the ack/mask/unmask ops of bad_chip so that spurious or unclaimed
 * IRQ numbers never dereference a NULL chip operation.
 */
void dummy_mask_unmask_irq(unsigned int irq)
{
}
63
/*
 * Default flow handler for IRQs nobody claimed: count the event in
 * irq_err_count (reported via /proc/interrupts) and log it.  Also
 * reinstated by __set_irq_handler() when a handler is torn down.
 */
void do_bad_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
{
	irq_err_count += 1;
	printk(KERN_ERR "IRQ: spurious interrupt %d\n", irq);
}
69
/* Placeholder chip whose ops all do nothing; used for unclaimed IRQ
 * numbers so callers never hit a NULL function pointer. */
static struct irqchip bad_chip = {
	.ack = dummy_mask_unmask_irq,
	.mask = dummy_mask_unmask_irq,
	.unmask = dummy_mask_unmask_irq,
};

/* Descriptor substituted by asm_do_IRQ() for out-of-range IRQ numbers;
 * depth = 1 keeps the line logically disabled. */
static struct irqdesc bad_irq_desc = {
	.chip = &bad_chip,
	.handle = do_bad_IRQ,
	.depth = 1,
};
81
82/**
83 * disable_irq - disable an irq and wait for completion
84 * @irq: Interrupt to disable
85 *
86 * Disable the selected interrupt line. We do this lazily.
87 *
88 * This function may be called from IRQ context.
89 */
void disable_irq(unsigned int irq)
{
	struct irqdesc *desc = irq_desc + irq;
	unsigned long flags;

	/* Disables nest: only the first one clears the enabled flag.
	 * The hardware mask itself is applied lazily by the flow
	 * handlers - hence "lazily" in the comment above. */
	spin_lock_irqsave(&irq_controller_lock, flags);
	if (!desc->depth++)
		desc->enabled = 0;
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}
99
100/**
101 * enable_irq - enable interrupt handling on an irq
102 * @irq: Interrupt to enable
103 *
104 * Re-enables the processing of interrupts on this IRQ line.
105 * Note that this may call the interrupt handler, so you may
106 * get unexpected results if you hold IRQs disabled.
107 *
108 * This function may be called from IRQ context.
109 */
void enable_irq(unsigned int irq)
{
	struct irqdesc *desc = irq_desc + irq;
	unsigned long flags;
	int pending = 0;

	spin_lock_irqsave(&irq_controller_lock, flags);
	if (unlikely(!desc->depth)) {
		/* More enables than disables: warn but carry on. */
		printk("enable_irq(%u) unbalanced from %p\n", irq,
			__builtin_return_address(0)); //FIXME bum addresses reported - why?
	} else if (!--desc->depth) {
		/* Last nested disable released: unmask the line and
		 * replay any interrupt that arrived while masked. */
		desc->probing = 0;
		desc->enabled = 1;
		desc->chip->unmask(irq);
		pending = desc->pending;
		desc->pending = 0;
		/*
		 * If the interrupt was waiting to be processed,
		 * retrigger it.
		 */
		if (pending)
			desc->chip->rerun(irq);
	}
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}
135
136int show_interrupts(struct seq_file *p, void *v)
137{
138 int i = *(loff_t *) v;
139 struct irqaction * action;
140
141 if (i < NR_IRQS) {
142 action = irq_desc[i].action;
143 if (!action)
144 continue;
145 seq_printf(p, "%3d: %10u ", i, kstat_irqs(i));
146 seq_printf(p, " %s", action->name);
147 for (action = action->next; action; action = action->next) {
148 seq_printf(p, ", %s", action->name);
149 }
150 seq_putc(p, '\n');
151 } else if (i == NR_IRQS) {
152 show_fiq_list(p, v);
153 seq_printf(p, "Err: %10lu\n", irq_err_count);
154 }
155 return 0;
156}
157
158/*
159 * IRQ lock detection.
160 *
161 * Hopefully, this should get us out of a few locked situations.
162 * However, it may take a while for this to happen, since we need
163 * a large number if IRQs to appear in the same jiffie with the
164 * same instruction pointer (or within 2 instructions).
165 */
166static int check_irq_lock(struct irqdesc *desc, int irq, struct pt_regs *regs)
167{
168 unsigned long instr_ptr = instruction_pointer(regs);
169
170 if (desc->lck_jif == jiffies &&
171 desc->lck_pc >= instr_ptr && desc->lck_pc < instr_ptr + 8) {
172 desc->lck_cnt += 1;
173
174 if (desc->lck_cnt > MAX_IRQ_CNT) {
175 printk(KERN_ERR "IRQ LOCK: IRQ%d is locking the system, disabled\n", irq);
176 return 1;
177 }
178 } else {
179 desc->lck_cnt = 0;
180 desc->lck_pc = instruction_pointer(regs);
181 desc->lck_jif = jiffies;
182 }
183 return 0;
184}
185
/*
 * Run every action chained on this IRQ.  Called with
 * irq_controller_lock held; the lock is dropped for the duration of
 * the handlers and re-taken (with IRQs disabled) before returning.
 */
static void
__do_irq(unsigned int irq, struct irqaction *action, struct pt_regs *regs)
{
	unsigned int status;
	int ret;

	spin_unlock(&irq_controller_lock);
	/* Fast handlers (SA_INTERRUPT) run with IRQs still disabled. */
	if (!(action->flags & SA_INTERRUPT))
		local_irq_enable();

	status = 0;
	do {
		ret = action->handler(irq, action->dev_id, regs);
		if (ret == IRQ_HANDLED)
			status |= action->flags;
		action = action->next;
	} while (action);

	/* Feed the entropy pool if any handled action asked for it. */
	if (status & SA_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);

	spin_lock_irq(&irq_controller_lock);
}
209
210/*
211 * This is for software-decoded IRQs. The caller is expected to
212 * handle the ack, clear, mask and unmask issues.
213 */
214void
215do_simple_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
216{
217 struct irqaction *action;
218 const int cpu = smp_processor_id();
219
220 desc->triggered = 1;
221
222 kstat_cpu(cpu).irqs[irq]++;
223
224 action = desc->action;
225 if (action)
226 __do_irq(irq, desc->action, regs);
227}
228
/*
 * Most edge-triggered IRQ implementations seem to take a broken
 * approach to this. Hence the complexity.
 */
void
do_edge_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
{
	const int cpu = smp_processor_id();

	desc->triggered = 1;

	/*
	 * If we're currently running this IRQ, or its disabled,
	 * we shouldn't process the IRQ. Instead, turn on the
	 * hardware masks.
	 */
	if (unlikely(desc->running || !desc->enabled))
		goto running;

	/*
	 * Acknowledge and clear the IRQ, but don't mask it.
	 */
	desc->chip->ack(irq);

	/*
	 * Mark the IRQ currently in progress.
	 */
	desc->running = 1;

	kstat_cpu(cpu).irqs[irq]++;

	do {
		struct irqaction *action;

		action = desc->action;
		if (!action)
			break;

		/* An edge arrived while we were handling: it was masked
		 * and recorded in ->pending below; unmask and go round
		 * again to service it. */
		if (desc->pending && desc->enabled) {
			desc->pending = 0;
			desc->chip->unmask(irq);
		}

		__do_irq(irq, action, regs);
	} while (desc->pending);

	desc->running = 0;

	/*
	 * If we were disabled or freed, shut down the handler.
	 */
	if (likely(desc->action && !check_irq_lock(desc, irq, regs)))
		return;

 running:
	/*
	 * We got another IRQ while this one was masked or
	 * currently running. Delay it.
	 */
	desc->pending = 1;
	desc->chip->mask(irq);
	desc->chip->ack(irq);
}
292
/*
 * Level-based IRQ handler. Nice and simple.
 */
void
do_level_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
{
	struct irqaction *action;
	const int cpu = smp_processor_id();

	desc->triggered = 1;

	/*
	 * Acknowledge, clear _AND_ disable the interrupt.
	 */
	desc->chip->ack(irq);

	if (likely(desc->enabled)) {
		kstat_cpu(cpu).irqs[irq]++;

		/*
		 * Return with this interrupt masked if no action
		 */
		action = desc->action;
		if (action) {
			__do_irq(irq, desc->action, regs);

			/* Re-enable the source only if it is still
			 * logically enabled and not detected as stuck. */
			if (likely(desc->enabled &&
				   !check_irq_lock(desc, irq, regs)))
				desc->chip->unmask(irq);
		}
	}
}
325
326/*
327 * do_IRQ handles all hardware IRQ's. Decoded IRQs should not
328 * come via this function. Instead, they should provide their
329 * own 'handler'
330 */
331asmlinkage void asm_do_IRQ(int irq, struct pt_regs *regs)
332{
333 struct irqdesc *desc = irq_desc + irq;
334
335 /*
336 * Some hardware gives randomly wrong interrupts. Rather
337 * than crashing, do something sensible.
338 */
339 if (irq >= NR_IRQS)
340 desc = &bad_irq_desc;
341
342 irq_enter();
343 spin_lock(&irq_controller_lock);
344 desc->handle(irq, desc, regs);
345 spin_unlock(&irq_controller_lock);
346 irq_exit();
347}
348
/*
 * Install @handle as the flow handler for @irq.  Passing NULL (or
 * do_bad_IRQ) tears the line down: it is masked, acked and marked
 * disabled.  A chained handler is unmasked immediately and excluded
 * from autoprobing.
 */
void __set_irq_handler(unsigned int irq, irq_handler_t handle, int is_chained)
{
	struct irqdesc *desc;
	unsigned long flags;

	if (irq >= NR_IRQS) {
		printk(KERN_ERR "Trying to install handler for IRQ%d\n", irq);
		return;
	}

	if (handle == NULL)
		handle = do_bad_IRQ;

	desc = irq_desc + irq;

	/* A chained handler on the placeholder chip is almost certainly
	 * a driver bug - the real chip was never registered. */
	if (is_chained && desc->chip == &bad_chip)
		printk(KERN_WARNING "Trying to install chained handler for IRQ%d\n", irq);

	spin_lock_irqsave(&irq_controller_lock, flags);
	if (handle == do_bad_IRQ) {
		/* Tear-down path: quiesce and disable the line. */
		desc->chip->mask(irq);
		desc->chip->ack(irq);
		desc->depth = 1;
		desc->enabled = 0;
	}
	desc->handle = handle;
	if (handle != do_bad_IRQ && is_chained) {
		desc->valid = 0;
		desc->probe_ok = 0;
		desc->depth = 0;
		desc->chip->unmask(irq);
	}
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}
383
384void set_irq_chip(unsigned int irq, struct irqchip *chip)
385{
386 struct irqdesc *desc;
387 unsigned long flags;
388
389 if (irq >= NR_IRQS) {
390 printk(KERN_ERR "Trying to install chip for IRQ%d\n", irq);
391 return;
392 }
393
394 if (chip == NULL)
395 chip = &bad_chip;
396
397 desc = irq_desc + irq;
398 spin_lock_irqsave(&irq_controller_lock, flags);
399 desc->chip = chip;
400 spin_unlock_irqrestore(&irq_controller_lock, flags);
401}
402
403int set_irq_type(unsigned int irq, unsigned int type)
404{
405 struct irqdesc *desc;
406 unsigned long flags;
407 int ret = -ENXIO;
408
409 if (irq >= NR_IRQS) {
410 printk(KERN_ERR "Trying to set irq type for IRQ%d\n", irq);
411 return -ENODEV;
412 }
413
414 desc = irq_desc + irq;
415 if (desc->chip->type) {
416 spin_lock_irqsave(&irq_controller_lock, flags);
417 ret = desc->chip->type(irq, type);
418 spin_unlock_irqrestore(&irq_controller_lock, flags);
419 }
420
421 return ret;
422}
423
424void set_irq_flags(unsigned int irq, unsigned int iflags)
425{
426 struct irqdesc *desc;
427 unsigned long flags;
428
429 if (irq >= NR_IRQS) {
430 printk(KERN_ERR "Trying to set irq flags for IRQ%d\n", irq);
431 return;
432 }
433
434 desc = irq_desc + irq;
435 spin_lock_irqsave(&irq_controller_lock, flags);
436 desc->valid = (iflags & IRQF_VALID) != 0;
437 desc->probe_ok = (iflags & IRQF_PROBE) != 0;
438 desc->noautoenable = (iflags & IRQF_NOAUTOEN) != 0;
439 spin_unlock_irqrestore(&irq_controller_lock, flags);
440}
441
/*
 * Attach @new to @irq's action chain.  Sharing requires SA_SHIRQ on
 * both the existing and the new action.  Returns 0 or -EBUSY.
 */
int setup_irq(unsigned int irq, struct irqaction *new)
{
	int shared = 0;
	struct irqaction *old, **p;
	unsigned long flags;
	struct irqdesc *desc;

	/*
	 * Some drivers like serial.c use request_irq() heavily,
	 * so we have to be careful not to interfere with a
	 * running system.
	 */
	if (new->flags & SA_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, we want to call it first,
		 * outside of the atomic block.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is attempted to be loaded, without actually
		 * installing a new handler, but is this really a problem,
		 * only the sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	/*
	 * The following block of code has to be executed atomically
	 */
	desc = irq_desc + irq;
	spin_lock_irqsave(&irq_controller_lock, flags);
	p = &desc->action;
	if ((old = *p) != NULL) {
		/* Can't share interrupts unless both agree to */
		if (!(old->flags & new->flags & SA_SHIRQ)) {
			spin_unlock_irqrestore(&irq_controller_lock, flags);
			return -EBUSY;
		}

		/* add new interrupt at end of irq queue */
		do {
			p = &old->next;
			old = *p;
		} while (old);
		shared = 1;
	}

	*p = new;

	/* First handler on this line: reset the bookkeeping and, unless
	 * IRQF_NOAUTOEN was requested, enable and unmask it now. */
	if (!shared) {
		desc->probing = 0;
		desc->running = 0;
		desc->pending = 0;
		desc->depth = 1;
		if (!desc->noautoenable) {
			desc->depth = 0;
			desc->enabled = 1;
			desc->chip->unmask(irq);
		}
	}

	spin_unlock_irqrestore(&irq_controller_lock, flags);
	return 0;
}
504
505/**
506 * request_irq - allocate an interrupt line
507 * @irq: Interrupt line to allocate
508 * @handler: Function to be called when the IRQ occurs
509 * @irqflags: Interrupt type flags
510 * @devname: An ascii name for the claiming device
511 * @dev_id: A cookie passed back to the handler function
512 *
513 * This call allocates interrupt resources and enables the
514 * interrupt line and IRQ handling. From the point this
515 * call is made your handler function may be invoked. Since
516 * your handler function must clear any interrupt the board
517 * raises, you must take care both to initialise your hardware
518 * and to set up the interrupt handler in the right order.
519 *
520 * Dev_id must be globally unique. Normally the address of the
521 * device data structure is used as the cookie. Since the handler
522 * receives this value it makes sense to use it.
523 *
524 * If your interrupt is shared you must pass a non NULL dev_id
525 * as this is required when freeing the interrupt.
526 *
527 * Flags:
528 *
529 * SA_SHIRQ Interrupt is shared
530 *
531 * SA_INTERRUPT Disable local interrupts while processing
532 *
533 * SA_SAMPLE_RANDOM The interrupt can be used for entropy
534 *
535 */
536
537//FIXME - handler used to return void - whats the significance of the change?
538int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *),
539 unsigned long irq_flags, const char * devname, void *dev_id)
540{
541 unsigned long retval;
542 struct irqaction *action;
543
544 if (irq >= NR_IRQS || !irq_desc[irq].valid || !handler ||
545 (irq_flags & SA_SHIRQ && !dev_id))
546 return -EINVAL;
547
548 action = (struct irqaction *)kmalloc(sizeof(struct irqaction), GFP_KERNEL);
549 if (!action)
550 return -ENOMEM;
551
552 action->handler = handler;
553 action->flags = irq_flags;
554 cpus_clear(action->mask);
555 action->name = devname;
556 action->next = NULL;
557 action->dev_id = dev_id;
558
559 retval = setup_irq(irq, action);
560
561 if (retval)
562 kfree(action);
563 return retval;
564}
565
566EXPORT_SYMBOL(request_irq);
567
568/**
569 * free_irq - free an interrupt
570 * @irq: Interrupt line to free
571 * @dev_id: Device identity to free
572 *
573 * Remove an interrupt handler. The handler is removed and if the
574 * interrupt line is no longer in use by any driver it is disabled.
575 * On a shared IRQ the caller must ensure the interrupt is disabled
576 * on the card it drives before calling this function.
577 *
578 * This function may be called from interrupt context.
579 */
void free_irq(unsigned int irq, void *dev_id)
{
	struct irqaction * action, **p;
	unsigned long flags;

	if (irq >= NR_IRQS || !irq_desc[irq].valid) {
		printk(KERN_ERR "Trying to free IRQ%d\n",irq);
#ifdef CONFIG_DEBUG_ERRORS
		__backtrace();
#endif
		return;
	}

	spin_lock_irqsave(&irq_controller_lock, flags);
	/* Walk the action chain for the entry matching this dev_id. */
	for (p = &irq_desc[irq].action; (action = *p) != NULL; p = &action->next) {
		if (action->dev_id != dev_id)
			continue;

		/* Found it - now free it */
		*p = action->next;
		kfree(action);
		goto out;
	}
	/* dev_id was not on the chain: double free or wrong cookie. */
	printk(KERN_ERR "Trying to free free IRQ%d\n",irq);
#ifdef CONFIG_DEBUG_ERRORS
	__backtrace();
#endif
out:
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}
610
611EXPORT_SYMBOL(free_irq);
612
613/* Start the interrupt probing. Unlike other architectures,
614 * we don't return a mask of interrupts from probe_irq_on,
615 * but return the number of interrupts enabled for the probe.
616 * The interrupts which have been enabled for probing is
617 * instead recorded in the irq_desc structure.
618 */
unsigned long probe_irq_on(void)
{
	unsigned int i, irqs = 0;
	unsigned long delay;

	/*
	 * first snaffle up any unassigned but
	 * probe-able interrupts
	 */
	spin_lock_irq(&irq_controller_lock);
	for (i = 0; i < NR_IRQS; i++) {
		/* Only lines flagged IRQF_PROBE with no handler attached
		 * take part in probing. */
		if (!irq_desc[i].probe_ok || irq_desc[i].action)
			continue;

		irq_desc[i].probing = 1;
		irq_desc[i].triggered = 0;
		if (irq_desc[i].chip->type)
			irq_desc[i].chip->type(i, IRQT_PROBE);
		irq_desc[i].chip->unmask(i);
		irqs += 1;
	}
	spin_unlock_irq(&irq_controller_lock);

	/*
	 * wait for spurious interrupts to mask themselves out again
	 */
	/* Busy-wait: jiffies advances via the timer interrupt. */
	for (delay = jiffies + HZ/10; time_before(jiffies, delay); )
		/* min 100ms delay */;

	/*
	 * now filter out any obviously spurious interrupts
	 */
	spin_lock_irq(&irq_controller_lock);
	for (i = 0; i < NR_IRQS; i++) {
		if (irq_desc[i].probing && irq_desc[i].triggered) {
			irq_desc[i].probing = 0;
			irqs -= 1;
		}
	}
	spin_unlock_irq(&irq_controller_lock);

	return irqs;
}
662
663EXPORT_SYMBOL(probe_irq_on);
664
665/*
666 * Possible return values:
667 * >= 0 - interrupt number
668 * -1 - no interrupt/many interrupts
669 */
int probe_irq_off(unsigned long irqs)
{
	unsigned int i;
	int irq_found = NO_IRQ;

	/*
	 * look at the interrupts, and find exactly one
	 * that we were probing has been triggered
	 */
	spin_lock_irq(&irq_controller_lock);
	for (i = 0; i < NR_IRQS; i++) {
		if (irq_desc[i].probing &&
		    irq_desc[i].triggered) {
			/* A second triggered probe makes the result
			 * ambiguous - report failure. */
			if (irq_found != NO_IRQ) {
				irq_found = NO_IRQ;
				goto out;
			}
			irq_found = i;
		}
	}

	/* NOTE(review): irq_found can only be NO_IRQ or a non-negative
	 * index at this point, so this -1 normalisation looks like dead
	 * code unless NO_IRQ != -1 somewhere - confirm before removing. */
	if (irq_found == -1)
		irq_found = NO_IRQ;
out:
	spin_unlock_irq(&irq_controller_lock);

	return irq_found;
}
698
699EXPORT_SYMBOL(probe_irq_off);
700
/* No arch-specific /proc/irq entries on arm26; nothing to do. */
void __init init_irq_proc(void)
{
}
704
/*
 * Boot-time IRQ setup: mark every descriptor bad (disabled, no-op
 * chip) before the platform code claims the real ones, then bring up
 * the Archimedes interrupt controller and the DMA engine.
 */
void __init init_IRQ(void)
{
	struct irqdesc *desc;
	extern void init_dma(void);
	int irq;

	for (irq = 0, desc = irq_desc; irq < NR_IRQS; irq++, desc++)
		*desc = bad_irq_desc;

	arc_init_irq();
	init_dma();
}
diff --git a/arch/arm26/kernel/process.c b/arch/arm26/kernel/process.c
new file mode 100644
index 000000000000..46aea6ac194d
--- /dev/null
+++ b/arch/arm26/kernel/process.c
@@ -0,0 +1,401 @@
1/*
2 * linux/arch/arm26/kernel/process.c
3 *
4 * Copyright (C) 2003 Ian Molton - adapted for ARM26
5 * Copyright (C) 1996-2000 Russell King - Converted to ARM.
6 * Origional Copyright (C) 1995 Linus Torvalds
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12#include <stdarg.h>
13
14#include <linux/config.h>
15#include <linux/module.h>
16#include <linux/sched.h>
17#include <linux/kernel.h>
18#include <linux/mm.h>
19#include <linux/stddef.h>
20#include <linux/unistd.h>
21#include <linux/ptrace.h>
22#include <linux/slab.h>
23#include <linux/user.h>
24#include <linux/a.out.h>
25#include <linux/delay.h>
26#include <linux/reboot.h>
27#include <linux/interrupt.h>
28#include <linux/init.h>
29
30#include <asm/system.h>
31#include <asm/io.h>
32#include <asm/leds.h>
33#include <asm/processor.h>
34#include <asm/uaccess.h>
35
36extern const char *processor_modes[];
37extern void setup_mm_for_reboot(char mode);
38
39static volatile int hlt_counter;
40
41void disable_hlt(void)
42{
43 hlt_counter++;
44}
45
46EXPORT_SYMBOL(disable_hlt);
47
48void enable_hlt(void)
49{
50 hlt_counter--;
51}
52
53EXPORT_SYMBOL(enable_hlt);
54
/* "nohlt" boot parameter: start with CPU halting disabled in idle. */
static int __init nohlt_setup(char *__unused)
{
	hlt_counter = 1;
	return 1;
}
60
/* "hlt" boot parameter: (re-)enable CPU halting in the idle loop. */
static int __init hlt_setup(char *__unused)
{
	hlt_counter = 0;
	return 1;
}
66
67__setup("nohlt", nohlt_setup);
68__setup("hlt", hlt_setup);
69
70/*
71 * This is our default idle handler. We need to disable
72 * interrupts here to ensure we don't miss a wakeup call.
73 */
74void cpu_idle(void)
75{
76 /* endless idle loop with no priority at all */
77 preempt_disable();
78 while (1) {
79 while (!need_resched()) {
80 local_irq_disable();
81 if (!need_resched() && !hlt_counter)
82 local_irq_enable();
83 }
84 }
85 schedule();
86}
87
88static char reboot_mode = 'h';
89
90int __init reboot_setup(char *str)
91{
92 reboot_mode = str[0];
93 return 1;
94}
95
96__setup("reboot=", reboot_setup);
97
/* ARM26 cant do these but we still need to define them. */
void machine_halt(void)
{
	/* No way to halt or power down this hardware; just return. */
}
void machine_power_off(void)
{
}

EXPORT_SYMBOL(machine_halt);
EXPORT_SYMBOL(machine_power_off);
108
void machine_restart(char * __unused)
{
	/*
	 * Clean and disable cache, and turn off interrupts
	 */
	cpu_proc_fin();

	/*
	 * Tell the mm system that we are going to reboot -
	 * we may need it to insert some 1:1 mappings so that
	 * soft boot works.
	 */
	setup_mm_for_reboot(reboot_mode);

	/*
	 * copy branch instruction to reset location and call it
	 */

	/* NOTE(review): 0x03800000 is presumably the ROM reset entry;
	 * its first word (a branch) is copied to address 0 and jumped
	 * to - confirm against the Archimedes memory map. */
	*(unsigned long *)0 = *(unsigned long *)0x03800000;
	((void(*)(void))0)();

	/*
	 * Whoops - the architecture was unable to reboot.
	 * Tell the user! Should never happen...
	 */
	mdelay(1000);
	printk("Reboot failed -- System halted\n");
	while (1);
}
138
139EXPORT_SYMBOL(machine_restart);
140
/*
 * Dump the register state in @regs to the console, oops-style:
 * pc/lr/sp/ip/fp, r0-r10, the PSR condition flags, IRQ/FIQ state,
 * the processor mode name and whether the kernel or user segment
 * was active (judged from the current addr_limit).
 */
void show_regs(struct pt_regs * regs)
{
	unsigned long flags;

	flags = condition_codes(regs);

	printk("pc : [<%08lx>] lr : [<%08lx>] %s\n"
	 "sp : %08lx ip : %08lx fp : %08lx\n",
		instruction_pointer(regs),
		regs->ARM_lr, print_tainted(), regs->ARM_sp,
		regs->ARM_ip, regs->ARM_fp);
	printk("r10: %08lx r9 : %08lx r8 : %08lx\n",
		regs->ARM_r10, regs->ARM_r9,
		regs->ARM_r8);
	printk("r7 : %08lx r6 : %08lx r5 : %08lx r4 : %08lx\n",
		regs->ARM_r7, regs->ARM_r6,
		regs->ARM_r5, regs->ARM_r4);
	printk("r3 : %08lx r2 : %08lx r1 : %08lx r0 : %08lx\n",
		regs->ARM_r3, regs->ARM_r2,
		regs->ARM_r1, regs->ARM_r0);
	/* Uppercase letter = flag set, lowercase = clear. */
	printk("Flags: %c%c%c%c",
		flags & PSR_N_BIT ? 'N' : 'n',
		flags & PSR_Z_BIT ? 'Z' : 'z',
		flags & PSR_C_BIT ? 'C' : 'c',
		flags & PSR_V_BIT ? 'V' : 'v');
	printk(" IRQs o%s FIQs o%s Mode %s Segment %s\n",
		interrupts_enabled(regs) ? "n" : "ff",
		fast_interrupts_enabled(regs) ? "n" : "ff",
		processor_modes[processor_mode(regs)],
		get_fs() == get_ds() ? "kernel" : "user");
}
172
/*
 * Print the eight soft-FP registers plus FPSR/FPCR.  Each register is
 * tagged with its stored precision: f=single, d=double, e=extended,
 * ?=unknown or uninitialised state.
 */
void show_fpregs(struct user_fp *regs)
{
	int i;

	for (i = 0; i < 8; i++) {
		unsigned long *p;
		char type;

		p = (unsigned long *)(regs->fpregs + i);

		switch (regs->ftype[i]) {
			case 1: type = 'f'; break;
			case 2: type = 'd'; break;
			case 3: type = 'e'; break;
			default: type = '?'; break;
		}
		/* If the FP state was never initialised, the per-register
		 * tags are meaningless. */
		if (regs->init_flag)
			type = '?';

		printk(" f%d(%c): %08lx %08lx %08lx%c",
			i, type, p[0], p[1], p[2], i & 1 ? '\n' : ' ');
	}


	printk("FPSR: %08lx FPCR: %08lx\n",
		(unsigned long)regs->fpsr,
		(unsigned long)regs->fpcr);
}
201
/*
 * Task structure and kernel stack allocation.
 */
/* Free list of recycled 8k thread_info/stack pages (first word of each
 * page links to the next), and its current length. */
static unsigned long *thread_info_head;
static unsigned int nr_thread_info;

extern unsigned long get_page_8k(int priority);
extern void free_page_8k(unsigned long page);

// FIXME - is this valid?
/* Number of freed stacks to keep on the local free list; 0 disables
 * the cache so every free goes straight back to free_page_8k(). */
#define EXTRA_TASK_STRUCT 0
#define ll_alloc_task_struct() ((struct thread_info *)get_page_8k(GFP_KERNEL))
#define ll_free_task_struct(p) free_page_8k((unsigned long)(p))
215
//FIXME - do we use *task param below looks like we dont, which is ok?
//FIXME - if EXTRA_TASK_STRUCT is zero we can optimise the below away permanently. *IF* its supposed to be zero.
/*
 * Allocate a thread_info + kernel stack (one 8k page).  Tries the
 * local recycle list first when EXTRA_TASK_STRUCT caching is enabled,
 * otherwise falls back to the 8k page allocator.
 */
struct thread_info *alloc_thread_info(struct task_struct *task)
{
	struct thread_info *thread = NULL;

	if (EXTRA_TASK_STRUCT) {
		unsigned long *p = thread_info_head;

		/* Pop the head of the free list; its first word links to
		 * the next cached page. */
		if (p) {
			thread_info_head = (unsigned long *)p[0];
			nr_thread_info -= 1;
		}
		thread = (struct thread_info *)p;
	}

	if (!thread)
		thread = ll_alloc_task_struct();

#ifdef CONFIG_MAGIC_SYSRQ
	/*
	 * The stack must be cleared if you want SYSRQ-T to
	 * give sensible stack usage information
	 */
	if (thread) {
		char *p = (char *)thread;
		memzero(p+KERNEL_STACK_SIZE, KERNEL_STACK_SIZE);
	}
#endif
	return thread;
}
247
248void free_thread_info(struct thread_info *thread)
249{
250 if (EXTRA_TASK_STRUCT && nr_thread_info < EXTRA_TASK_STRUCT) {
251 unsigned long *p = (unsigned long *)thread;
252 p[0] = (unsigned long)thread_info_head;
253 thread_info_head = p;
254 nr_thread_info += 1;
255 } else
256 ll_free_task_struct(thread);
257}
258
/*
 * Free current thread data structures etc..
 * Nothing needs releasing on arm26.
 */
void exit_thread(void)
{
}
265
/*
 * Reset per-thread state for execve(): clear the debug registers and
 * the FP state, and mark the FPU as unused.
 */
void flush_thread(void)
{
	struct thread_info *thread = current_thread_info();
	struct task_struct *tsk = current;

	memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
	memset(&thread->fpstate, 0, sizeof(union fp_state));

	clear_used_math();
}
276
/* Called when a dead task is reaped; no arch-private resources here. */
void release_thread(struct task_struct *dead_task)
{
}
280
281asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
282
/*
 * Set up the child's register and CPU context for fork/clone.  The
 * child's pt_regs are a copy of the parent's with r0 forced to 0
 * (its fork() return value) and sp set to the new user stack; the
 * saved kernel context makes it resume in ret_from_fork, in SVC26
 * mode with IRQs disabled.
 */
int
copy_thread(int nr, unsigned long clone_flags, unsigned long stack_start,
	unsigned long unused, struct task_struct *p, struct pt_regs *regs)
{
	struct thread_info *thread = p->thread_info;
	struct pt_regs *childregs;

	childregs = __get_user_regs(thread);
	*childregs = *regs;
	childregs->ARM_r0 = 0;
	childregs->ARM_sp = stack_start;

	memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save));
	thread->cpu_context.sp = (unsigned long)childregs;
	thread->cpu_context.pc = (unsigned long)ret_from_fork | MODE_SVC26 | PSR_I_BIT;

	return 0;
}
301
302/*
303 * fill in the fpe structure for a core dump...
304 */
305int dump_fpu (struct pt_regs *regs, struct user_fp *fp)
306{
307 struct thread_info *thread = current_thread_info();
308 int used_math = !!used_math();
309
310 if (used_math)
311 memcpy(fp, &thread->fpstate.soft, sizeof (*fp));
312
313 return used_math;
314}
315
316/*
317 * fill in the user structure for a core dump..
318 */
/*
 * Fill in the a.out user structure for a core dump: text/data/stack
 * sizes (in pages), the debug breakpoint state, the full register set
 * and the FP state.
 */
void dump_thread(struct pt_regs * regs, struct user * dump)
{
	struct task_struct *tsk = current;

	dump->magic = CMAGIC;
	dump->start_code = tsk->mm->start_code;
	/* Stack start rounded down to a page boundary. */
	dump->start_stack = regs->ARM_sp & ~(PAGE_SIZE - 1);

	dump->u_tsize = (tsk->mm->end_code - tsk->mm->start_code) >> PAGE_SHIFT;
	dump->u_dsize = (tsk->mm->brk - tsk->mm->start_data + PAGE_SIZE - 1) >> PAGE_SHIFT;
	dump->u_ssize = 0;

	dump->u_debugreg[0] = tsk->thread.debug.bp[0].address;
	dump->u_debugreg[1] = tsk->thread.debug.bp[1].address;
	dump->u_debugreg[2] = tsk->thread.debug.bp[0].insn;
	dump->u_debugreg[3] = tsk->thread.debug.bp[1].insn;
	dump->u_debugreg[4] = tsk->thread.debug.nsaved;

	/* 0x04000000 is the top of the 26-bit user address space. */
	if (dump->start_stack < 0x04000000)
		dump->u_ssize = (0x04000000 - dump->start_stack) >> PAGE_SHIFT;

	dump->regs = *regs;
	dump->u_fpvalid = dump_fpu (regs, &dump->u_fp);
}
343
344/*
345 * Shuffle the argument into the correct register before calling the
346 * thread function. r1 is the thread argument, r2 is the pointer to
347 * the thread function, and r3 points to the exit function.
348 * FIXME - make sure this is right - the older code used to zero fp
349 * and cause the parent to call sys_exit (do_exit in this version)
350 */
/* Trampoline executed as the new kernel thread's first code, entered
 * from ret_from_fork with the register layout kernel_thread() set up:
 * r1 = argument, r2 = thread function, r3 = exit function. */
extern void kernel_thread_helper(void);

asm( ".section .text\n"
" .align\n"
" .type kernel_thread_helper, #function\n"
"kernel_thread_helper:\n"
" mov r0, r1\n"
" mov lr, r3\n"
" mov pc, r2\n"
" .size kernel_thread_helper, . - kernel_thread_helper\n"
" .previous");
362
/*
 * Create a kernel thread.
 *
 * The fake register set makes the child start in kernel_thread_helper
 * (SVC26 mode) with r1 = arg, r2 = fn and r3 = do_exit, so fn(arg)
 * runs and a plain return from it falls into do_exit().
 */
pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	struct pt_regs regs;

	memset(&regs, 0, sizeof(regs));

	regs.ARM_r1 = (unsigned long)arg;
	regs.ARM_r2 = (unsigned long)fn;
	regs.ARM_r3 = (unsigned long)do_exit;
	regs.ARM_pc = (unsigned long)kernel_thread_helper | MODE_SVC26;

	return do_fork(flags|CLONE_VM|CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);
380
381
/*
 * Walk the saved frame-pointer chain of a sleeping task to find the
 * first return address outside the scheduler - the "wait channel"
 * shown in /proc.  Returns 0 for NULL/current/running tasks or when
 * the chain leaves the stack region (depth-limited to 16 frames).
 *
 * NOTE(review): the bounds assume a 4096-byte region above the task
 * struct, but this arch allocates 8k thread stacks (get_page_8k,
 * KERNEL_STACK_SIZE) - confirm whether 4096 here is intentional.
 */
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long fp, lr;
	unsigned long stack_page;
	int count = 0;
	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	stack_page = 4096 + (unsigned long)p;
	fp = thread_saved_fp(p);
	do {
		/* Stop if the frame pointer leaves the stack page. */
		if (fp < stack_page || fp > 4092+stack_page)
			return 0;
		lr = pc_pointer (((unsigned long *)fp)[-1]);
		if (!in_sched_functions(lr))
			return lr;
		/* Follow the caller's saved frame pointer. */
		fp = *(unsigned long *) (fp - 12);
	} while (count ++ < 16);
	return 0;
}
diff --git a/arch/arm26/kernel/ptrace.c b/arch/arm26/kernel/ptrace.c
new file mode 100644
index 000000000000..2a137146a77c
--- /dev/null
+++ b/arch/arm26/kernel/ptrace.c
@@ -0,0 +1,744 @@
1/*
2 * linux/arch/arm26/kernel/ptrace.c
3 *
4 * By Ross Biro 1/23/92
5 * edited by Linus Torvalds
6 * ARM modifications Copyright (C) 2000 Russell King
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12#include <linux/config.h>
13#include <linux/kernel.h>
14#include <linux/sched.h>
15#include <linux/mm.h>
16#include <linux/smp.h>
17#include <linux/smp_lock.h>
18#include <linux/ptrace.h>
19#include <linux/user.h>
20#include <linux/security.h>
21
22#include <asm/uaccess.h>
23#include <asm/pgtable.h>
24#include <asm/system.h>
25//#include <asm/processor.h>
26
27#include "ptrace.h"
28
29#define REG_PC 15
30#define REG_PSR 15
31/*
32 * does not yet catch signals sent when the child dies.
33 * in exit.c or in signal.c.
34 */
35
36/*
37 * Breakpoint SWI instruction: SWI &9F0001
38 */
39#define BREAKINST_ARM 0xef9f0001
40
/*
 * Get the address of the live pt_regs for the specified task.
 * These are saved onto the top kernel stack when the process
 * is not running.
 *
 * Note: if a user thread is execve'd from kernel space, the
 * kernel stack will not be empty on entry to the kernel, so
 * ptracing these tasks will fail.
 */
static inline struct pt_regs *
get_user_regs(struct task_struct *task)
{
	/* __get_user_regs() locates the frame at the top of the task's
	 * kernel stack via its thread_info. */
	return __get_user_regs(task->thread_info);
}
55
/*
 * this routine will get a word off of the processes privileged stack.
 * the offset is how far from the base addr as stored in the THREAD.
 * this routine assumes that all the privileged stacks are in our
 * data space.
 */
/* Read user register @offset (0..15 register index) from the saved frame. */
static inline long get_user_reg(struct task_struct *task, int offset)
{
	return get_user_regs(task)->uregs[offset];
}
66
/*
 * this routine will put a word on the processes privileged stack.
 * the offset is how far from the base addr as stored in the THREAD.
 * this routine assumes that all the privileged stacks are in our
 * data space.
 */
/*
 * Write @data into user register @offset of @task's saved frame, but only
 * if the resulting register set would still be a valid user-mode state
 * (prevents a tracer elevating the child's privilege via PSR bits).
 * Returns 0 on success, -EINVAL if the new state is rejected.
 */
static inline int
put_user_reg(struct task_struct *task, int offset, long data)
{
	struct pt_regs newregs, *regs = get_user_regs(task);
	int ret = -EINVAL;

	/* Validate on a scratch copy before touching the live frame. */
	newregs = *regs;
	newregs.uregs[offset] = data;

	if (valid_user_regs(&newregs)) {
		regs->uregs[offset] = data;
		ret = 0;
	}

	return ret;
}
89
/*
 * Read one 32-bit word from @task's address space at @addr into @res.
 * Returns 0 on success, -EIO if the full word could not be read.
 */
static inline int
read_u32(struct task_struct *task, unsigned long addr, u32 *res)
{
	int ret;

	ret = access_process_vm(task, addr, res, sizeof(*res), 0);

	return ret == sizeof(*res) ? 0 : -EIO;
}
99
/*
 * Fetch the instruction word containing @addr from @task's address space.
 * The address is word-aligned first (addr & ~3) since ARM instructions
 * are always word-aligned.  Returns 0 on success, -EIO on short read
 * (in which case *res is indeterminate).
 */
static inline int
read_instr(struct task_struct *task, unsigned long addr, u32 *res)
{
	int ret;
	u32 val;
	ret = access_process_vm(task, addr & ~3, &val, sizeof(val), 0);
	ret = ret == sizeof(val) ? 0 : -EIO;
	*res = val;
	return ret;
}
110
/*
 * Get value of register `rn' (in the instruction)
 *
 * Extracts the Rn field (bits 19:16) of @insn and returns that register's
 * current value in the child.  When Rn is the PC, the architectural value
 * is the instruction address + 8 (pipeline prefetch), with PSR bits
 * stripped by pc_pointer().
 */
static unsigned long
ptrace_getrn(struct task_struct *child, unsigned long insn)
{
	unsigned int reg = (insn >> 16) & 15;
	unsigned long val;

	val = get_user_reg(child, reg);
	if (reg == 15)
		val = pc_pointer(val + 8); //FIXME - correct for arm26?

	return val;
}
126
/*
 * Get value of operand 2 (in an ALU instruction)
 *
 * Decodes the shifter operand of a data-processing instruction:
 * either a rotated 8-bit immediate (bit 25 set) or a register shifted
 * by an immediate amount or by another register (bit 4).
 * NOTE(review): a rotate amount of 0 takes the `val << 32` path in
 * case 3, and register-specified shifts >= 32 are not special-cased —
 * single-step prediction may be off for those rare encodings.
 */
static unsigned long
ptrace_getaluop2(struct task_struct *child, unsigned long insn)
{
	unsigned long val;
	int shift;
	int type;

	if (insn & 1 << 25) {
		/* Immediate form: 8-bit value rotated right by 2*rot. */
		val = insn & 255;
		shift = (insn >> 8) & 15;
		type = 3;	/* force the rotate-right case below */
	} else {
		/* Register form: Rm shifted by immediate or by Rs. */
		val = get_user_reg (child, insn & 15);

		if (insn & (1 << 4))
			shift = (int)get_user_reg (child, (insn >> 8) & 15);
		else
			shift = (insn >> 7) & 31;

		type = (insn >> 5) & 3;
	}

	switch (type) {
	case 0:	val <<= shift;	break;		/* LSL */
	case 1:	val >>= shift;	break;		/* LSR */
	case 2:					/* ASR */
		val = (((signed long)val) >> shift);
		break;
	case 3:					/* ROR */
		val = (val >> shift) | (val << (32 - shift));
		break;
	}
	return val;
}
164
/*
 * Get value of operand 2 (in a LDR instruction)
 *
 * Register-offset form of a load: Rm shifted by an immediate amount.
 * Unlike the ALU case, LDR offsets never use register-specified shifts.
 */
static unsigned long
ptrace_getldrop2(struct task_struct *child, unsigned long insn)
{
	unsigned long val;
	int shift;
	int type;

	val = get_user_reg(child, insn & 15);	/* Rm */
	shift = (insn >> 7) & 31;		/* immediate shift amount */
	type = (insn >> 5) & 3;			/* shift kind */

	switch (type) {
	case 0:	val <<= shift;	break;		/* LSL */
	case 1:	val >>= shift;	break;		/* LSR */
	case 2:					/* ASR */
		val = (((signed long)val) >> shift);
		break;
	case 3:					/* ROR */
		val = (val >> shift) | (val << (32 - shift));
		break;
	}
	return val;
}
191
192#define OP_MASK 0x01e00000
193#define OP_AND 0x00000000
194#define OP_EOR 0x00200000
195#define OP_SUB 0x00400000
196#define OP_RSB 0x00600000
197#define OP_ADD 0x00800000
198#define OP_ADC 0x00a00000
199#define OP_SBC 0x00c00000
200#define OP_RSC 0x00e00000
201#define OP_ORR 0x01800000
202#define OP_MOV 0x01a00000
203#define OP_BIC 0x01c00000
204#define OP_MVN 0x01e00000
205
/*
 * Predict the alternate control-flow target of the instruction @insn at
 * @pc in @child, for single-step breakpoint placement.  Returns the
 * predicted target address, or 0 if the instruction cannot divert
 * control flow (the caller then only breakpoints pc + 4).
 *
 * Handled cases: data-processing with Rd == PC, LDR into PC, LDM
 * loading PC, and B/BL.
 */
static unsigned long
get_branch_address(struct task_struct *child, unsigned long pc, unsigned long insn)
{
	u32 alt = 0;

	switch (insn & 0x0e000000) {
	case 0x00000000:
	case 0x02000000: {
		/*
		 * data processing
		 */
		long aluop1, aluop2, ccbit;

		/* Only interesting when the destination is the PC. */
		if ((insn & 0xf000) != 0xf000)
			break;

		aluop1 = ptrace_getrn(child, insn);
		aluop2 = ptrace_getaluop2(child, insn);
		ccbit  = get_user_reg(child, REG_PSR) & PSR_C_BIT ? 1 : 0;

		/* Emulate the ALU op to compute the value written to PC. */
		switch (insn & OP_MASK) {
		case OP_AND: alt = aluop1 & aluop2;		break;
		case OP_EOR: alt = aluop1 ^ aluop2;		break;
		case OP_SUB: alt = aluop1 - aluop2;		break;
		case OP_RSB: alt = aluop2 - aluop1;		break;
		case OP_ADD: alt = aluop1 + aluop2;		break;
		case OP_ADC: alt = aluop1 + aluop2 + ccbit;	break;
		case OP_SBC: alt = aluop1 - aluop2 + ccbit;	break;
		case OP_RSC: alt = aluop2 - aluop1 + ccbit;	break;
		case OP_ORR: alt = aluop1 | aluop2;		break;
		case OP_MOV: alt = aluop2;			break;
		case OP_BIC: alt = aluop1 & ~aluop2;		break;
		case OP_MVN: alt = ~aluop2;			break;
		}
		break;
	}

	case 0x04000000:
	case 0x06000000:
		/*
		 * ldr
		 */
		if ((insn & 0x0010f000) == 0x0010f000) {
			/* LDR with Rd == PC: fetch the value that will be
			 * loaded into the PC. */
			unsigned long base;

			base = ptrace_getrn(child, insn);
			if (insn & 1 << 24) {
				/* pre-indexed: apply the offset first */
				long aluop2;

				if (insn & 0x02000000)
					aluop2 = ptrace_getldrop2(child, insn);
				else
					aluop2 = insn & 0xfff;

				if (insn & 1 << 23)
					base += aluop2;	/* up */
				else
					base -= aluop2;	/* down */
			}
			if (read_u32(child, base, &alt) == 0)
				alt = pc_pointer(alt);
		}
		break;

	case 0x08000000:
		/*
		 * ldm
		 */
		if ((insn & 0x00108000) == 0x00108000) {
			/* LDM with PC in the register list: locate the slot
			 * in memory that will be loaded into the PC. */
			unsigned long base;
			unsigned int nr_regs;

			if (insn & (1 << 23)) {
				/* ascending: PC is loaded last */
				nr_regs = hweight16(insn & 65535) << 2;

				if (!(insn & (1 << 24)))
					nr_regs -= 4;
			} else {
				/* descending: PC's slot is at/just below base
				 * (unsigned -4 wraps intentionally to give
				 * base - 4 for the pre-decrement form) */
				if (insn & (1 << 24))
					nr_regs = -4;
				else
					nr_regs = 0;
			}

			base = ptrace_getrn(child, insn);

			if (read_u32(child, base + nr_regs, &alt) == 0)
				alt = pc_pointer(alt);
			break;
		}
		break;

	case 0x0a000000: {
		/*
		 * bl or b
		 */
		signed long displ;
		/* It's a branch/branch link: instead of trying to
		 * figure out whether the branch will be taken or not,
		 * we'll put a breakpoint at both locations.  This is
		 * simpler, more reliable, and probably not a whole lot
		 * slower than the alternative approach of emulating the
		 * branch.
		 */
		/* Sign-extend the 24-bit word offset (<<8 then >>6 scales
		 * by 4 while extending the sign) and add the pipeline +8. */
		displ = (insn & 0x00ffffff) << 8;
		displ = (displ >> 6) + 8;
		if (displ != 0 && displ != 4)
			alt = pc + displ;
	    }
	    break;
	}

	return alt;
}
320
/*
 * Atomically (from the tracer's perspective) read the old instruction at
 * @addr in @task into @old_insn and replace it with @new_insn.
 * Returns the number of bytes written (== @size on success).
 */
static int
swap_insn(struct task_struct *task, unsigned long addr,
	  void *old_insn, void *new_insn, int size)
{
	int ret;

	ret = access_process_vm(task, addr, old_insn, size, 0);
	if (ret == size)
		ret = access_process_vm(task, addr, new_insn, size, 1);
	return ret;
}
332
/*
 * Plant a breakpoint SWI at @addr in @task, saving the displaced
 * instruction in @dbg so ptrace_cancel_bpt() can restore it.  At most
 * two breakpoints are supported (current insn + branch target); a
 * failed write is silently ignored (see comment in ptrace_set_bpt).
 */
static void
add_breakpoint(struct task_struct *task, struct debug_info *dbg, unsigned long addr)
{
	int nr = dbg->nsaved;

	if (nr < 2) {
		u32 new_insn = BREAKINST_ARM;
		int res;

		res = swap_insn(task, addr, &dbg->bp[nr].insn, &new_insn, 4);

		/* Only record the breakpoint if all 4 bytes were written. */
		if (res == 4) {
			dbg->bp[nr].address = addr;
			dbg->nsaved += 1;
		}
	} else
		printk(KERN_ERR "ptrace: too many breakpoints\n");
}
351
/*
 * Clear one breakpoint in the user program: write the saved original
 * instruction back over the breakpoint SWI.  The address is word-aligned
 * before use.  (The Thumb remark below is inherited from the arm32
 * version; arm26 has no Thumb mode.)
 *
 * We copy what the hardware does and use bit 0 of the address to
 * indicate whether this is a Thumb breakpoint or an ARM breakpoint.
 */
static void clear_breakpoint(struct task_struct *task, struct debug_entry *bp)
{
	unsigned long addr = bp->address;
	u32 old_insn;
	int ret;

	ret = swap_insn(task, addr & ~3, &old_insn,
			&bp->insn, 4);

	/* If what we removed wasn't our breakpoint, something else
	 * modified the text — warn loudly. */
	if (ret != 4 || old_insn != BREAKINST_ARM)
		printk(KERN_ERR "%s:%d: corrupted ARM breakpoint at "
			"0x%08lx (0x%08x)\n", task->comm, task->pid,
			addr, old_insn);
}
371
/*
 * Arrange for @child to trap after executing one instruction: decode the
 * instruction at its PC and plant breakpoints at every possible next PC
 * (the fall-through address and/or a predicted branch target).
 */
void ptrace_set_bpt(struct task_struct *child)
{
	struct pt_regs *regs;
	unsigned long pc;
	u32 insn;
	int res;

	regs = get_user_regs(child);
	pc = instruction_pointer(regs);

	res = read_instr(child, pc, &insn);
	if (!res) {
		struct debug_info *dbg = &child->thread.debug;
		unsigned long alt;

		dbg->nsaved = 0;

		/* Possible branch target (0 if the insn can't branch). */
		alt = get_branch_address(child, pc, insn);
		if (alt)
			add_breakpoint(child, dbg, alt);

		/*
		 * Note that we ignore the result of setting the above
		 * breakpoint since it may fail.  When it does, this is
		 * not so much an error, but a forewarning that we may
		 * be receiving a prefetch abort shortly.
		 *
		 * If we don't set this breakpoint here, then we can
		 * lose control of the thread during single stepping.
		 */
		if (!alt || predicate(insn) != PREDICATE_ALWAYS)
			add_breakpoint(child, dbg, pc + 4);
	}
}
406
/*
 * Remove any pending single-step breakpoints from @child, restoring the
 * displaced instructions.  (Despite the comment in some versions, this
 * returns nothing — it only clears the saved-breakpoint state.)
 */
void ptrace_cancel_bpt(struct task_struct *child)
{
	int i, nsaved = child->thread.debug.nsaved;

	/* Clear first so a re-entry can't double-restore. */
	child->thread.debug.nsaved = 0;

	if (nsaved > 2) {
		printk("ptrace_cancel_bpt: bogus nsaved: %d!\n", nsaved);
		nsaved = 2;
	}

	for (i = 0; i < nsaved; i++)
		clear_breakpoint(child, &child->thread.debug.bp[i]);
}
425
/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure the single step bit is not set and any planted
 * single-step breakpoints are removed from the child's text.
 */
void ptrace_disable(struct task_struct *child)
{
	child->ptrace &= ~PT_SINGLESTEP;
	ptrace_cancel_bpt(child);
}
436
/*
 * Handle hitting a breakpoint: rewind the PC to the breakpoint
 * instruction, remove the single-step breakpoints, and deliver
 * SIGTRAP/TRAP_BRKPT to the task.
 */
void ptrace_break(struct task_struct *tsk, struct pt_regs *regs)
{
	siginfo_t info;

	/*
	 * The PC is always left pointing at the next instruction.  Fix this.
	 */
	regs->ARM_pc -= 4;

	if (tsk->thread.debug.nsaved == 0)
		printk(KERN_ERR "ptrace: bogus breakpoint trap\n");

	ptrace_cancel_bpt(tsk);

	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code  = TRAP_BRKPT;
	/* NOTE(review): ARM_pc was already rewound by 4 above, so this
	 * makes si_addr point 4 bytes *before* the breakpoint — confirm
	 * whether the extra "- 4" is intended. */
	info.si_addr  = (void *)instruction_pointer(regs) - 4;

	force_sig_info(SIGTRAP, &info, tsk);
}
461
/*
 * Read the word at offset "off" into the "struct user".  We
 * actually access the pt_regs stored on the kernel stack.
 * Offsets past pt_regs (but within struct user) read as zero.
 * Returns 0 on success, -EIO for a misaligned/out-of-range offset,
 * or -EFAULT from put_user().
 */
static int ptrace_read_user(struct task_struct *tsk, unsigned long off,
			    unsigned long *ret)
{
	unsigned long tmp;

	if (off & 3 || off >= sizeof(struct user))
		return -EIO;

	tmp = 0;
	if (off < sizeof(struct pt_regs))
		tmp = get_user_reg(tsk, off >> 2);

	return put_user(tmp, ret);
}
480
/*
 * Write the word at offset "off" into "struct user".  We
 * actually access the pt_regs stored on the kernel stack.
 * Writes past pt_regs are silently accepted (return 0) but ignored.
 */
static int ptrace_write_user(struct task_struct *tsk, unsigned long off,
			     unsigned long val)
{
	if (off & 3 || off >= sizeof(struct user))
		return -EIO;

	if (off >= sizeof(struct pt_regs))
		return 0;

	/* put_user_reg() validates the resulting register state. */
	return put_user_reg(tsk, off >> 2, val);
}
496
/*
 * Get all user integer registers: copy the child's saved pt_regs to the
 * tracer's buffer @uregs.  Returns 0 or -EFAULT.
 */
static int ptrace_getregs(struct task_struct *tsk, void *uregs)
{
	struct pt_regs *regs = get_user_regs(tsk);

	return copy_to_user(uregs, regs, sizeof(struct pt_regs)) ? -EFAULT : 0;
}
506
/*
 * Set all user integer registers from the tracer's buffer @uregs,
 * rejecting register states that would not be a valid user-mode
 * context.  Returns 0, -EFAULT on copy failure, or -EINVAL if the
 * state is rejected.
 */
static int ptrace_setregs(struct task_struct *tsk, void *uregs)
{
	struct pt_regs newregs;
	int ret;

	ret = -EFAULT;
	if (copy_from_user(&newregs, uregs, sizeof(struct pt_regs)) == 0) {
		struct pt_regs *regs = get_user_regs(tsk);

		ret = -EINVAL;
		if (valid_user_regs(&newregs)) {
			*regs = newregs;
			ret = 0;
		}
	}

	return ret;
}
528
/*
 * Get the child FPU state: copy the saved fpstate from the child's
 * thread_info to the tracer's buffer.  Returns 0 or -EFAULT.
 */
static int ptrace_getfpregs(struct task_struct *tsk, void *ufp)
{
	return copy_to_user(ufp, &tsk->thread_info->fpstate,
			    sizeof(struct user_fp)) ? -EFAULT : 0;
}
537
/*
 * Set the child FPU state from the tracer's buffer, and mark the
 * (stopped) child as having used floating point so the new state is
 * restored when it resumes.  Returns 0 or -EFAULT.
 */
static int ptrace_setfpregs(struct task_struct *tsk, void *ufp)
{
	set_stopped_child_used_math(tsk);
	return copy_from_user(&tsk->thread_info->fpstate, ufp,
			      sizeof(struct user_fp)) ? -EFAULT : 0;
}
547
/*
 * Dispatch one ptrace request against an already-attached, stopped
 * @child.  Called from sys_ptrace() after ptrace_check_attach().
 * Returns 0 or a negative errno.
 */
static int do_ptrace(int request, struct task_struct *child, long addr, long data)
{
	unsigned long tmp;
	int ret;

	switch (request) {
		/*
		 * read word at location "addr" in the child process.
		 */
		case PTRACE_PEEKTEXT:
		case PTRACE_PEEKDATA:
			ret = access_process_vm(child, addr, &tmp,
						sizeof(unsigned long), 0);
			if (ret == sizeof(unsigned long))
				ret = put_user(tmp, (unsigned long *) data);
			else
				ret = -EIO;
			break;

		/* read a word from the child's "struct user" (registers) */
		case PTRACE_PEEKUSR:
			ret = ptrace_read_user(child, addr, (unsigned long *)data);
			break;

		/*
		 * write the word at location addr.
		 */
		case PTRACE_POKETEXT:
		case PTRACE_POKEDATA:
			ret = access_process_vm(child, addr, &data,
						sizeof(unsigned long), 1);
			if (ret == sizeof(unsigned long))
				ret = 0;
			else
				ret = -EIO;
			break;

		/* write a word into the child's "struct user" (registers) */
		case PTRACE_POKEUSR:
			ret = ptrace_write_user(child, addr, data);
			break;

		/*
		 * continue/restart and stop at next (return from) syscall
		 */
		case PTRACE_SYSCALL:
		case PTRACE_CONT:
			ret = -EIO;
			if ((unsigned long) data > _NSIG)
				break;
			if (request == PTRACE_SYSCALL)
				set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
			else
				clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
			/* data is the signal to deliver on resume (0 = none) */
			child->exit_code = data;
			/* make sure single-step breakpoint is gone. */
			child->ptrace &= ~PT_SINGLESTEP;
			ptrace_cancel_bpt(child);
			wake_up_process(child);
			ret = 0;
			break;

		/*
		 * make the child exit.  Best I can do is send it a sigkill.
		 * perhaps it should be put in the status that it wants to
		 * exit.
		 */
		case PTRACE_KILL:
			/* make sure single-step breakpoint is gone. */
			child->ptrace &= ~PT_SINGLESTEP;
			ptrace_cancel_bpt(child);
			if (child->exit_state != EXIT_ZOMBIE) {
				child->exit_code = SIGKILL;
				wake_up_process(child);
			}
			ret = 0;
			break;

		/*
		 * execute single instruction.
		 */
		case PTRACE_SINGLESTEP:
			ret = -EIO;
			if ((unsigned long) data > _NSIG)
				break;
			/* Breakpoints are planted lazily on resume (see
			 * ptrace_set_bpt callers), not here. */
			child->ptrace |= PT_SINGLESTEP;
			clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
			child->exit_code = data;
			/* give it a chance to run. */
			wake_up_process(child);
			ret = 0;
			break;

		case PTRACE_DETACH:
			ret = ptrace_detach(child, data);
			break;

		case PTRACE_GETREGS:
			ret = ptrace_getregs(child, (void *)data);
			break;

		case PTRACE_SETREGS:
			ret = ptrace_setregs(child, (void *)data);
			break;

		case PTRACE_GETFPREGS:
			ret = ptrace_getfpregs(child, (void *)data);
			break;

		case PTRACE_SETFPREGS:
			ret = ptrace_setfpregs(child, (void *)data);
			break;

		/* everything else handled by the generic code */
		default:
			ret = ptrace_request(child, request, addr, data);
			break;
	}

	return ret;
}
666
/*
 * ptrace(2) system call entry point: handles PTRACE_TRACEME and
 * PTRACE_ATTACH itself, looks up and pins the target task, verifies
 * it is attached and stopped, then hands off to do_ptrace().
 */
asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
{
	struct task_struct *child;
	int ret;

	lock_kernel();
	ret = -EPERM;
	if (request == PTRACE_TRACEME) {
		/* are we already being traced? */
		if (current->ptrace & PT_PTRACED)
			goto out;
		ret = security_ptrace(current->parent, current);
		if (ret)
			goto out;
		/* set the ptrace bit in the process flags. */
		current->ptrace |= PT_PTRACED;
		ret = 0;
		goto out;
	}
	ret = -ESRCH;
	read_lock(&tasklist_lock);
	child = find_task_by_pid(pid);
	if (child)
		/* pin the task so it can't vanish once we drop the lock */
		get_task_struct(child);
	read_unlock(&tasklist_lock);
	if (!child)
		goto out;

	ret = -EPERM;
	if (pid == 1)		/* you may not mess with init */
		goto out_tsk;

	if (request == PTRACE_ATTACH) {
		ret = ptrace_attach(child);
		goto out_tsk;
	}
	/* child must be attached and (except for KILL) stopped */
	ret = ptrace_check_attach(child, request == PTRACE_KILL);
	if (ret == 0)
		ret = do_ptrace(request, child, addr, data);

out_tsk:
	put_task_struct(child);
out:
	unlock_kernel();
	return ret;
}
713
/*
 * Called from the syscall entry/exit assembly when TIF_SYSCALL_TRACE is
 * set: notify the tracer (SIGTRAP stop) and deliver any signal the
 * tracer requested via exit_code.  @why is 0 on entry, 1 on exit.
 */
asmlinkage void syscall_trace(int why, struct pt_regs *regs)
{
	unsigned long ip;

	if (!test_thread_flag(TIF_SYSCALL_TRACE))
		return;
	if (!(current->ptrace & PT_PTRACED))
		return;

	/*
	 * Save IP.  IP is used to denote syscall entry/exit:
	 * IP = 0 -> entry, = 1 -> exit
	 */
	ip = regs->ARM_ip;
	regs->ARM_ip = why;

	/* the 0x80 provides a way for the tracing parent to distinguish
	   between a syscall stop and SIGTRAP delivery */
	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
				 ? 0x80 : 0));
	/*
	 * this isn't the same as continuing with a signal, but it will do
	 * for normal use.  strace only continues with a signal if the
	 * stopping signal is not SIGTRAP.  -brl
	 */
	if (current->exit_code) {
		send_sig(current->exit_code, current, 1);
		current->exit_code = 0;
	}
	/* restore the real IP value before returning to the syscall path */
	regs->ARM_ip = ip;
}
diff --git a/arch/arm26/kernel/ptrace.h b/arch/arm26/kernel/ptrace.h
new file mode 100644
index 000000000000..846c9d8d36ed
--- /dev/null
+++ b/arch/arm26/kernel/ptrace.h
@@ -0,0 +1,13 @@
1/*
2 * linux/arch/arm26/kernel/ptrace.h
3 *
4 * Copyright (C) 2000-2003 Russell King
5 * Copyright (C) 2003 Ian Molton
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11extern void ptrace_cancel_bpt(struct task_struct *);
12extern void ptrace_set_bpt(struct task_struct *);
13extern void ptrace_break(struct task_struct *, struct pt_regs *);
diff --git a/arch/arm26/kernel/semaphore.c b/arch/arm26/kernel/semaphore.c
new file mode 100644
index 000000000000..3023a53431ff
--- /dev/null
+++ b/arch/arm26/kernel/semaphore.c
@@ -0,0 +1,223 @@
1/*
2 * ARM semaphore implementation, taken from
3 *
4 * i386 semaphore implementation.
5 *
6 * (C) Copyright 1999 Linus Torvalds
7 * (C) Copyright 2003 Ian Molton (ARM26 mods)
8 *
9 * Modified for ARM by Russell King
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 */
15#include <linux/module.h>
16#include <linux/config.h>
17#include <linux/sched.h>
18#include <linux/errno.h>
19#include <linux/init.h>
20
21#include <asm/semaphore.h>
22
23/*
24 * Semaphores are implemented using a two-way counter:
25 * The "count" variable is decremented for each process
26 * that tries to acquire the semaphore, while the "sleeping"
27 * variable is a count of such acquires.
28 *
29 * Notably, the inline "up()" and "down()" functions can
30 * efficiently test if they need to do any extra work (up
31 * needs to do something only if count was negative before
32 * the increment operation.
33 *
34 * "sleeping" and the contention routine ordering is
35 * protected by the semaphore spinlock.
36 *
37 * Note that these functions are only called when there is
38 * contention on the lock, and as such all this is the
39 * "non-critical" part of the whole semaphore business. The
40 * critical part is the inline stuff in <asm/semaphore.h>
41 * where we want to avoid any extra jumps and calls.
42 */
43
/*
 * Logic:
 *  - only on a boundary condition do we need to care. When we go
 *    from a negative count to a non-negative, we wake people up.
 *  - when we go from a non-negative count to a negative do we
 *    (a) synchronize with the "sleeper" count and (b) make sure
 *    that we're on the wakeup list before we synchronize so that
 *    we cannot lose wakeup events.
 */

/*
 * Slow path of up(): invoked from the __up_wakeup stub only when the
 * fast-path increment found waiters; wakes one exclusive sleeper.
 */
void __up(struct semaphore *sem)
{
	wake_up(&sem->wait);
}
58
/* Protects sem->sleepers and orders the contention paths below. */
static DEFINE_SPINLOCK(semaphore_lock);

/*
 * Slow path of down(): sleep uninterruptibly until the semaphore is
 * acquired.  The "sleepers" bookkeeping folds all other waiters back
 * into count so that exactly one waiter wins each release.
 */
void __sched __down(struct semaphore * sem)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	/* Queue ourselves *before* testing, so a concurrent up() can't
	 * slip a wakeup past us. */
	tsk->state = TASK_UNINTERRUPTIBLE;
	add_wait_queue_exclusive(&sem->wait, &wait);

	spin_lock_irq(&semaphore_lock);
	sem->sleepers++;
	for (;;) {
		int sleepers = sem->sleepers;

		/*
		 * Add "everybody else" into it.  They aren't
		 * playing, because we own the spinlock.
		 */
		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
			sem->sleepers = 0;
			break;		/* acquired */
		}
		sem->sleepers = 1;	/* us - see -1 above */
		spin_unlock_irq(&semaphore_lock);

		schedule();
		tsk->state = TASK_UNINTERRUPTIBLE;
		spin_lock_irq(&semaphore_lock);
	}
	spin_unlock_irq(&semaphore_lock);
	remove_wait_queue(&sem->wait, &wait);
	tsk->state = TASK_RUNNING;
	/* Pass any surplus wakeup on to the next waiter. */
	wake_up(&sem->wait);
}
93
/*
 * Slow path of down_interruptible(): like __down() but a pending signal
 * aborts the wait.  Returns 0 when acquired, -EINTR when interrupted
 * (after undoing this task's contribution to count).
 */
int __sched __down_interruptible(struct semaphore * sem)
{
	int retval = 0;
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	tsk->state = TASK_INTERRUPTIBLE;
	add_wait_queue_exclusive(&sem->wait, &wait);

	spin_lock_irq(&semaphore_lock);
	sem->sleepers ++;
	for (;;) {
		int sleepers = sem->sleepers;

		/*
		 * With signals pending, this turns into
		 * the trylock failure case - we won't be
		 * sleeping, and we* can't get the lock as
		 * it has contention.  Just correct the count
		 * and exit.
		 */
		if (signal_pending(current)) {
			retval = -EINTR;
			sem->sleepers = 0;
			atomic_add(sleepers, &sem->count);
			break;
		}

		/*
		 * Add "everybody else" into it.  They aren't
		 * playing, because we own the spinlock.  The
		 * "-1" is because we're still hoping to get
		 * the lock.
		 */
		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
			sem->sleepers = 0;
			break;		/* acquired */
		}
		sem->sleepers = 1;	/* us - see -1 above */
		spin_unlock_irq(&semaphore_lock);

		schedule();
		tsk->state = TASK_INTERRUPTIBLE;
		spin_lock_irq(&semaphore_lock);
	}
	spin_unlock_irq(&semaphore_lock);
	tsk->state = TASK_RUNNING;
	remove_wait_queue(&sem->wait, &wait);
	/* Pass any surplus wakeup on to the next waiter. */
	wake_up(&sem->wait);
	return retval;
}
144
/*
 * Trylock failed - make sure we correct for
 * having decremented the count.
 *
 * We could have done the trylock with a
 * single "cmpxchg" without failure cases,
 * but then it wouldn't work on a 386.
 *
 * Always returns 1 (= failure) here: the fast path already failed and
 * this slow path only repairs the count it decremented.
 */
int __down_trylock(struct semaphore * sem)
{
	int sleepers;
	unsigned long flags;

	spin_lock_irqsave(&semaphore_lock, flags);
	/* +1 undoes our own fast-path decrement. */
	sleepers = sem->sleepers + 1;
	sem->sleepers = 0;

	/*
	 * Add "everybody else" and us into it.  They aren't
	 * playing, because we own the spinlock.
	 */
	if (!atomic_add_negative(sleepers, &sem->count))
		wake_up(&sem->wait);

	spin_unlock_irqrestore(&semaphore_lock, flags);
	return 1;
}
172
/*
 * The semaphore operations have a special calling sequence that
 * allow us to do a simpler in-line version of them.  These routines
 * need to convert that sequence back into the C sequence when
 * there is contention on the semaphore.
 *
 * ip contains the semaphore pointer on entry.  Save the C-clobbered
 * registers (r0 to r3 and lr), but not ip, as we use it as a return
 * value in some cases..
 */
/* Each stub: save r0-r3/lr, move the semaphore pointer from ip into r0,
 * call the C slow path above, and (where there is a result) return it
 * in ip; the ldmfd ...^ also restores the saved PSR bits on arm26. */
asm("	.section .sched.text , #alloc, #execinstr	\n\
	.align	5				\n\
	.globl	__down_failed			\n\
__down_failed:					\n\
	stmfd	sp!, {r0 - r3, lr}		\n\
	mov	r0, ip				\n\
	bl	__down				\n\
	ldmfd	sp!, {r0 - r3, pc}^		\n\
						\n\
	.align	5				\n\
	.globl	__down_interruptible_failed	\n\
__down_interruptible_failed:			\n\
	stmfd	sp!, {r0 - r3, lr}		\n\
	mov	r0, ip				\n\
	bl	__down_interruptible		\n\
	mov	ip, r0				\n\
	ldmfd	sp!, {r0 - r3, pc}^		\n\
						\n\
	.align	5				\n\
	.globl	__down_trylock_failed		\n\
__down_trylock_failed:				\n\
	stmfd	sp!, {r0 - r3, lr}		\n\
	mov	r0, ip				\n\
	bl	__down_trylock			\n\
	mov	ip, r0				\n\
	ldmfd	sp!, {r0 - r3, pc}^		\n\
						\n\
	.align	5				\n\
	.globl	__up_wakeup			\n\
__up_wakeup:					\n\
	stmfd	sp!, {r0 - r3, lr}		\n\
	mov	r0, ip				\n\
	bl	__up				\n\
	ldmfd	sp!, {r0 - r3, pc}^		\n\
	");

EXPORT_SYMBOL(__down_failed);
EXPORT_SYMBOL(__down_interruptible_failed);
EXPORT_SYMBOL(__down_trylock_failed);
EXPORT_SYMBOL(__up_wakeup);
223
diff --git a/arch/arm26/kernel/setup.c b/arch/arm26/kernel/setup.c
new file mode 100644
index 000000000000..4eb329e3828a
--- /dev/null
+++ b/arch/arm26/kernel/setup.c
@@ -0,0 +1,573 @@
1/*
2 * linux/arch/arm26/kernel/setup.c
3 *
4 * Copyright (C) 1995-2001 Russell King
5 * Copyright (C) 2003 Ian Molton
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11#include <linux/config.h>
12#include <linux/kernel.h>
13#include <linux/stddef.h>
14#include <linux/ioport.h>
15#include <linux/delay.h>
16#include <linux/utsname.h>
17#include <linux/blkdev.h>
18#include <linux/console.h>
19#include <linux/bootmem.h>
20#include <linux/seq_file.h>
21#include <linux/tty.h>
22#include <linux/init.h>
23#include <linux/root_dev.h>
24
25#include <asm/elf.h>
26#include <asm/hardware.h>
27#include <asm/io.h>
28#include <asm/procinfo.h>
29#include <asm/setup.h>
30#include <asm/mach-types.h>
31#include <asm/tlbflush.h>
32
33#include <asm/irqchip.h>
34
35#ifndef MEM_SIZE
36#define MEM_SIZE (16*1024*1024)
37#endif
38
39#ifdef CONFIG_PREEMPT
40DEFINE_SPINLOCK(kernel_flag);
41#endif
42
43#if defined(CONFIG_FPE_NWFPE)
44char fpe_type[8];
45
46static int __init fpe_setup(char *line)
47{
48 memcpy(fpe_type, line, 8);
49 return 1;
50}
51
52__setup("fpe=", fpe_setup);
53#endif
54
55extern void paging_init(struct meminfo *);
56extern void convert_to_tag_list(struct tag *tags);
57extern void squash_mem_tags(struct tag *tag);
58extern void bootmem_init(struct meminfo *);
59extern int root_mountflags;
60extern int _stext, _text, _etext, _edata, _end;
61#ifdef CONFIG_XIP_KERNEL
62extern int _endtext, _sdata;
63#endif
64
65
66unsigned int processor_id;
67unsigned int __machine_arch_type;
68unsigned int system_rev;
69unsigned int system_serial_low;
70unsigned int system_serial_high;
71unsigned int elf_hwcap;
72unsigned int memc_ctrl_reg;
73unsigned int number_mfm_drives;
74
75struct processor processor;
76
77char elf_platform[ELF_PLATFORM_SIZE];
78
79unsigned long phys_initrd_start __initdata = 0;
80unsigned long phys_initrd_size __initdata = 0;
81static struct meminfo meminfo __initdata = { 0, };
82static struct proc_info_item proc_info;
83static const char *machine_name;
84static char command_line[COMMAND_LINE_SIZE];
85
86static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
87
88/*
89 * Standard memory resources
90 */
91static struct resource mem_res[] = {
92 { "Video RAM", 0, 0, IORESOURCE_MEM },
93 { "Kernel code", 0, 0, IORESOURCE_MEM },
94 { "Kernel data", 0, 0, IORESOURCE_MEM }
95};
96
97#define video_ram mem_res[0]
98#define kernel_code mem_res[1]
99#define kernel_data mem_res[2]
100
101static struct resource io_res[] = {
102 { "reserved", 0x3bc, 0x3be, IORESOURCE_IO | IORESOURCE_BUSY },
103 { "reserved", 0x378, 0x37f, IORESOURCE_IO | IORESOURCE_BUSY },
104 { "reserved", 0x278, 0x27f, IORESOURCE_IO | IORESOURCE_BUSY }
105};
106
107#define lp0 io_res[0]
108#define lp1 io_res[1]
109#define lp2 io_res[2]
110
111#define dump_cpu_info() do { } while (0)
112
/*
 * Identify the CPU from processor_id against the linker-collected
 * proc_info table, install its proc_info/processor function vectors,
 * fill in utsname/ELF platform data, and run the per-CPU init hook.
 * Hangs deliberately if the CPU is unknown — nothing sensible can run.
 */
static void __init setup_processor(void)
{
	extern struct proc_info_list __proc_info_begin, __proc_info_end;
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm26/mm/proc-*.S
	 */
	for (list = &__proc_info_begin; list < &__proc_info_end ; list++)
		if ((processor_id & list->cpu_mask) == list->cpu_val)
			break;

	/*
	 * If processor type is unrecognised, then we
	 * can do nothing...
	 */
	if (list >= &__proc_info_end) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", processor_id);
		while (1);
	}

	proc_info = *list->info;
	processor = *list->proc;


	printk("CPU: %s %s revision %d\n",
	       proc_info.manufacturer, proc_info.cpu_name,
	       (int)processor_id & 15);

	dump_cpu_info();

	sprintf(system_utsname.machine, "%s", list->arch_name);
	sprintf(elf_platform, "%s", list->elf_name);
	elf_hwcap = list->elf_hwcap;

	cpu_proc_init();
}
153
154/*
155 * Initial parsing of the command line. We need to pick out the
156 * memory size. We look for mem=size@start, where start and size
157 * are "size[KkMm]"
158 */
159static void __init
160parse_cmdline(struct meminfo *mi, char **cmdline_p, char *from)
161{
162 char c = ' ', *to = command_line;
163 int usermem = 0, len = 0;
164
165 for (;;) {
166 if (c == ' ' && !memcmp(from, "mem=", 4)) {
167 unsigned long size, start;
168
169 if (to != command_line)
170 to -= 1;
171
172 /*
173 * If the user specifies memory size, we
174 * blow away any automatically generated
175 * size.
176 */
177 if (usermem == 0) {
178 usermem = 1;
179 mi->nr_banks = 0;
180 }
181
182 start = PHYS_OFFSET;
183 size = memparse(from + 4, &from);
184 if (*from == '@')
185 start = memparse(from + 1, &from);
186
187 mi->bank[mi->nr_banks].start = start;
188 mi->bank[mi->nr_banks].size = size;
189 mi->bank[mi->nr_banks].node = PHYS_TO_NID(start);
190 mi->nr_banks += 1;
191 }
192 c = *from++;
193 if (!c)
194 break;
195 if (COMMAND_LINE_SIZE <= ++len)
196 break;
197 *to++ = c;
198 }
199 *to = '\0';
200 *cmdline_p = command_line;
201}
202
/*
 * Forward the ramdisk boot parameters (load flag, prompt flag, floppy
 * image start block, size in KB) to the RAM-disk driver's globals.
 * No-op when CONFIG_BLK_DEV_RAM is not configured.
 */
static void __init
setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
{
#ifdef CONFIG_BLK_DEV_RAM
	extern int rd_size, rd_image_start, rd_prompt, rd_doload;

	rd_image_start = image_start;
	rd_prompt = prompt;
	rd_doload = doload;

	/* only override the driver's default size if one was given */
	if (rd_sz)
		rd_size = rd_sz;
#endif
}
217
/*
 * Register the standard resource tree: one "System RAM" iomem resource
 * per memory bank, with the kernel code/data resources claimed inside
 * the bank that contains them.
 */
static void __init
request_standard_resources(struct meminfo *mi)
{
	struct resource *res;
	int i;

	kernel_code.start  = init_mm.start_code;
	kernel_code.end    = init_mm.end_code - 1;
#ifdef CONFIG_XIP_KERNEL
	/* XIP: code lives in ROM, data starts at its own RAM address */
	kernel_data.start  = init_mm.start_data;
#else
	kernel_data.start  = init_mm.end_code;
#endif
	kernel_data.end    = init_mm.brk - 1;

	for (i = 0; i < mi->nr_banks; i++) {
		unsigned long virt_start, virt_end;

		if (mi->bank[i].size == 0)
			continue;

		virt_start = mi->bank[i].start;
		virt_end   = virt_start + mi->bank[i].size - 1;

		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = virt_start;
		res->end   = virt_end;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

/* FIXME - needed? if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}*/

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp1 or lp2
	 */
	if (0) /* FIXME - need to do this for A5k at least */
		request_resource(&ioport_resource, &lp0);
}
271
272/*
273 * Tag parsing.
274 *
275 * This is the new way of passing data to the kernel at boot time. Rather
276 * than passing a fixed inflexible structure to the kernel, we pass a list
277 * of variable-sized tags to the kernel. The first tag must be a ATAG_CORE
278 * tag for the list to be recognised (to distinguish the tagged list from
279 * a param_struct). The list is terminated with a zero-length tag (this tag
280 * is not parsed in any way).
281 */
/*
 * ATAG_CORE: root-mount flags and root device number.  hdr.size <= 2
 * means the tag is only the mandatory list marker with no payload.
 */
static int __init parse_tag_core(const struct tag *tag)
{
	if (tag->hdr.size > 2) {
		/* flags bit 0 clear => mount root read-write */
		if ((tag->u.core.flags & 1) == 0)
			root_mountflags &= ~MS_RDONLY;
		ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
	}
	return 0;
}
291
292__tagtable(ATAG_CORE, parse_tag_core);
293
/*
 * ATAG_MEM: one physical memory bank.  Banks beyond the fixed
 * NR_BANKS table are dropped with a warning rather than truncating
 * or merging.
 */
static int __init parse_tag_mem32(const struct tag *tag)
{
	if (meminfo.nr_banks >= NR_BANKS) {
		printk(KERN_WARNING
		       "Ignoring memory bank 0x%08x size %dKB\n",
			tag->u.mem.start, tag->u.mem.size / 1024);
		return -EINVAL;
	}
	meminfo.bank[meminfo.nr_banks].start = tag->u.mem.start;
	meminfo.bank[meminfo.nr_banks].size = tag->u.mem.size;
	meminfo.bank[meminfo.nr_banks].node = PHYS_TO_NID(tag->u.mem.start);
	meminfo.nr_banks += 1;

	return 0;
}
309
310__tagtable(ATAG_MEM, parse_tag_mem32);
311
312#if defined(CONFIG_DUMMY_CONSOLE)
313struct screen_info screen_info = {
314 .orig_video_lines = 30,
315 .orig_video_cols = 80,
316 .orig_video_mode = 0,
317 .orig_video_ega_bx = 0,
318 .orig_video_isVGA = 1,
319 .orig_video_points = 8
320};
321
322static int __init parse_tag_videotext(const struct tag *tag)
323{
324 screen_info.orig_x = tag->u.videotext.x;
325 screen_info.orig_y = tag->u.videotext.y;
326 screen_info.orig_video_page = tag->u.videotext.video_page;
327 screen_info.orig_video_mode = tag->u.videotext.video_mode;
328 screen_info.orig_video_cols = tag->u.videotext.video_cols;
329 screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
330 screen_info.orig_video_lines = tag->u.videotext.video_lines;
331 screen_info.orig_video_isVGA = tag->u.videotext.video_isvga;
332 screen_info.orig_video_points = tag->u.videotext.video_points;
333 return 0;
334}
335
336__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
337#endif
338
339static int __init parse_tag_acorn(const struct tag *tag)
340{
341 memc_ctrl_reg = tag->u.acorn.memc_control_reg;
342 number_mfm_drives = tag->u.acorn.adfsdrives;
343 return 0;
344}
345
346__tagtable(ATAG_ACORN, parse_tag_acorn);
347
/*
 * ATAG_RAMDISK: forward the settings to setup_ramdisk().  Note the
 * inverted sense of the flag bits: bit 0 *set* suppresses loading,
 * bit 1 *set* suppresses the prompt.
 */
static int __init parse_tag_ramdisk(const struct tag *tag)
{
	setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
		      (tag->u.ramdisk.flags & 2) == 0,
		      tag->u.ramdisk.start, tag->u.ramdisk.size);
	return 0;
}
355
356__tagtable(ATAG_RAMDISK, parse_tag_ramdisk);
357
358static int __init parse_tag_initrd(const struct tag *tag)
359{
360 printk(KERN_WARNING "ATAG_INITRD is deprecated; please update your bootloader. \n");
361 phys_initrd_start = (unsigned long)tag->u.initrd.start;
362 phys_initrd_size = (unsigned long)tag->u.initrd.size;
363 return 0;
364}
365
366__tagtable(ATAG_INITRD, parse_tag_initrd);
367
368static int __init parse_tag_initrd2(const struct tag *tag)
369{
370 printk(KERN_WARNING "ATAG_INITRD is deprecated; please update your bootloader. \n");
371 phys_initrd_start = (unsigned long)tag->u.initrd.start;
372 phys_initrd_size = (unsigned long)tag->u.initrd.size;
373 return 0;
374}
375
376__tagtable(ATAG_INITRD2, parse_tag_initrd2);
377
378static int __init parse_tag_serialnr(const struct tag *tag)
379{
380 system_serial_low = tag->u.serialnr.low;
381 system_serial_high = tag->u.serialnr.high;
382 return 0;
383}
384
385__tagtable(ATAG_SERIAL, parse_tag_serialnr);
386
387static int __init parse_tag_revision(const struct tag *tag)
388{
389 system_rev = tag->u.revision.rev;
390 return 0;
391}
392
393__tagtable(ATAG_REVISION, parse_tag_revision);
394
395static int __init parse_tag_cmdline(const struct tag *tag)
396{
397 strncpy(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE);
398 default_command_line[COMMAND_LINE_SIZE - 1] = '\0';
399 return 0;
400}
401
402__tagtable(ATAG_CMDLINE, parse_tag_cmdline);
403
404/*
405 * Scan the tag table for this tag, and call its parse function.
406 * The tag table is built by the linker from all the __tagtable
407 * declarations.
408 */
/*
 * Look this tag up in the linker-built table of __tagtable entries and
 * run its parser.  Returns non-zero when the tag was recognised (i.e.
 * the scan stopped before the end of the table), zero otherwise.
 */
static int __init parse_tag(const struct tag *tag)
{
	extern struct tagtable __tagtable_begin, __tagtable_end;
	struct tagtable *t;

	for (t = &__tagtable_begin; t < &__tagtable_end; t++)
		if (tag->hdr.tag == t->tag) {
			t->parse(tag);
			break;
		}

	return t < &__tagtable_end;
}
422
423/*
424 * Parse all tags in the list, checking both the global and architecture
425 * specific tag tables.
426 */
/*
 * Walk the whole tag list (terminated by a zero-size tag) and parse
 * each entry; unknown tags are only warned about, never fatal.
 */
static void __init parse_tags(const struct tag *t)
{
	for (; t->hdr.size; t = tag_next(t))
		if (!parse_tag(t))
			printk(KERN_WARNING
				"Ignoring unrecognised tag 0x%08x\n",
				t->hdr.tag);
}
435
436/*
437 * This holds our defaults.
438 */
/*
 * Default tag list used when the bootloader supplied nothing usable:
 * an ATAG_CORE, a single ATAG_MEM covering MEM_SIZE at PHYS_OFFSET,
 * and the ATAG_NONE terminator.
 */
static struct init_tags {
	struct tag_header hdr1;
	struct tag_core   core;
	struct tag_header hdr2;
	struct tag_mem32  mem;
	struct tag_header hdr3;
} init_tags __initdata = {
	{ tag_size(tag_core), ATAG_CORE },
	/* NOTE(review): positional init - presumably flags=1 (read-only),
	 * pagesize=PAGE_SIZE, rootdev=0xff; confirm against struct tag_core */
	{ 1, PAGE_SIZE, 0xff },
	{ tag_size(tag_mem32), ATAG_MEM },
	{ MEM_SIZE, PHYS_OFFSET },
	{ 0, ATAG_NONE }
};
452
/*
 * Architecture-specific boot-time setup: identify the machine, locate
 * and parse the boot tag list, fill in init_mm's section boundaries,
 * capture the command line, and bring up bootmem/paging/resources.
 * Ordering matters throughout - do not reorder the calls.
 */
void __init setup_arch(char **cmdline_p)
{
	struct tag *tags = (struct tag *)&init_tags;
	char *from = default_command_line;

	setup_processor();
	if(machine_arch_type == MACH_TYPE_A5K)
		machine_name = "A5000";
	else if(machine_arch_type == MACH_TYPE_ARCHIMEDES)
		machine_name = "Archimedes";
	else
		machine_name = "UNKNOWN";

	//FIXME - the tag struct is always copied here but this is a block
	// of RAM that is accidentally reserved along with video RAM. perhaps
	// it would be a good idea to explicitly reserve this?

	/* hard-coded physical location where the bootloader leaves the tags */
	tags = (struct tag *)0x0207c000;

	/*
	 * If we have the old style parameters, convert them to
	 * a tag list.
	 */
	if (tags->hdr.tag != ATAG_CORE)
		convert_to_tag_list(tags);
	/* conversion failed too - fall back to the built-in defaults */
	if (tags->hdr.tag != ATAG_CORE)
		tags = (struct tag *)&init_tags;
	if (tags->hdr.tag == ATAG_CORE) {
		/* banks already registered (e.g. by param conversion):
		 * drop duplicate ATAG_MEM entries before parsing */
		if (meminfo.nr_banks != 0)
			squash_mem_tags(tags);
		parse_tags(tags);
	}

	/* section boundaries come from the linker script symbols */
	init_mm.start_code = (unsigned long) &_text;
#ifndef CONFIG_XIP_KERNEL
	init_mm.end_code = (unsigned long) &_etext;
#else
	init_mm.end_code = (unsigned long) &_endtext;
	init_mm.start_data = (unsigned long) &_sdata;
#endif
	init_mm.end_data = (unsigned long) &_edata;
	init_mm.brk = (unsigned long) &_end;

	/* keep an unmodified copy for /proc/cmdline before parsing eats it */
	memcpy(saved_command_line, from, COMMAND_LINE_SIZE);
	saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
	parse_cmdline(&meminfo, cmdline_p, from);
	bootmem_init(&meminfo);
	paging_init(&meminfo);
	request_standard_resources(&meminfo);

#ifdef CONFIG_VT
#if defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
}
509
510static const char *hwcap_str[] = {
511 "swp",
512 "half",
513 "thumb",
514 "26bit",
515 "fastmult",
516 "fpa",
517 "vfp",
518 "edsp",
519 NULL
520};
521
/*
 * seq_file "show" callback for /proc/cpuinfo: dump CPU identification,
 * BogoMIPS, feature flags and machine/serial information.
 */
static int c_show(struct seq_file *m, void *v)
{
	int i;

	seq_printf(m, "Processor\t: %s %s rev %d (%s)\n",
		proc_info.manufacturer, proc_info.cpu_name,
		(int)processor_id & 15, elf_platform);

	seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
		loops_per_jiffy / (500000/HZ),
		(loops_per_jiffy / (5000/HZ)) % 100);

	/* dump out the processor features */
	seq_puts(m, "Features\t: ");

	/* hwcap_str[i] names the capability in elf_hwcap bit i */
	for (i = 0; hwcap_str[i]; i++)
		if (elf_hwcap & (1 << i))
			seq_printf(m, "%s ", hwcap_str[i]);

	seq_puts(m, "\n");

	seq_printf(m, "CPU part\t\t: %07x\n", processor_id >> 4);
	seq_printf(m, "CPU revision\t: %d\n\n", processor_id & 15);
	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}
552
553static void *c_start(struct seq_file *m, loff_t *pos)
554{
555 return *pos < 1 ? (void *)1 : NULL;
556}
557
558static void *c_next(struct seq_file *m, void *v, loff_t *pos)
559{
560 ++*pos;
561 return NULL;
562}
563
564static void c_stop(struct seq_file *m, void *v)
565{
566}
567
568struct seq_operations cpuinfo_op = {
569 .start = c_start,
570 .next = c_next,
571 .stop = c_stop,
572 .show = c_show
573};
diff --git a/arch/arm26/kernel/signal.c b/arch/arm26/kernel/signal.c
new file mode 100644
index 000000000000..356d9809cc0b
--- /dev/null
+++ b/arch/arm26/kernel/signal.c
@@ -0,0 +1,540 @@
1/*
2 * linux/arch/arm26/kernel/signal.c
3 *
4 * Copyright (C) 1995-2002 Russell King
5 * Copyright (C) 2003 Ian Molton (ARM26)
6 *
7 * FIXME!!! This is probably very broken (13/05/2003)
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13#include <linux/config.h>
14#include <linux/sched.h>
15#include <linux/mm.h>
16#include <linux/smp.h>
17#include <linux/smp_lock.h>
18#include <linux/kernel.h>
19#include <linux/errno.h>
20#include <linux/signal.h>
21#include <linux/wait.h>
22#include <linux/ptrace.h>
23#include <linux/personality.h>
24#include <linux/tty.h>
25#include <linux/binfmts.h>
26#include <linux/elf.h>
27
28#include <asm/pgalloc.h>
29#include <asm/ucontext.h>
30#include <asm/uaccess.h>
31#include <asm/unistd.h>
32
33#include "ptrace.h"
34
35#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
36
37/*
38 * For ARM syscalls, we encode the syscall number into the instruction.
39 */
40#define SWI_SYS_SIGRETURN (0xef000000|(__NR_sigreturn))
41#define SWI_SYS_RT_SIGRETURN (0xef000000|(__NR_rt_sigreturn))
42
43static int do_signal(sigset_t *oldset, struct pt_regs * regs, int syscall);
44
45/*
46 * atomically swap in the new signal mask, and wait for a signal.
47 */
48asmlinkage int sys_sigsuspend(int restart, unsigned long oldmask, old_sigset_t mask, struct pt_regs *regs)
49{
50 sigset_t saveset;
51
52 mask &= _BLOCKABLE;
53 spin_lock_irq(&current->sighand->siglock);
54 saveset = current->blocked;
55 siginitset(&current->blocked, mask);
56 recalc_sigpending();
57 spin_unlock_irq(&current->sighand->siglock);
58 regs->ARM_r0 = -EINTR;
59
60 while (1) {
61 current->state = TASK_INTERRUPTIBLE;
62 schedule();
63 if (do_signal(&saveset, regs, 0))
64 return regs->ARM_r0;
65 }
66}
67
68asmlinkage int
69sys_rt_sigsuspend(sigset_t *unewset, size_t sigsetsize, struct pt_regs *regs)
70{
71 sigset_t saveset, newset;
72
73 /* XXX: Don't preclude handling different sized sigset_t's. */
74 if (sigsetsize != sizeof(sigset_t))
75 return -EINVAL;
76
77 if (copy_from_user(&newset, unewset, sizeof(newset)))
78 return -EFAULT;
79 sigdelsetmask(&newset, ~_BLOCKABLE);
80
81 spin_lock_irq(&current->sighand->siglock);
82 saveset = current->blocked;
83 current->blocked = newset;
84 recalc_sigpending();
85 spin_unlock_irq(&current->sighand->siglock);
86 regs->ARM_r0 = -EINTR;
87
88 while (1) {
89 current->state = TASK_INTERRUPTIBLE;
90 schedule();
91 if (do_signal(&saveset, regs, 0))
92 return regs->ARM_r0;
93 }
94}
95
/*
 * Old-style sigaction syscall: converts between the userspace
 * old_sigaction layout (single-word mask, sa_restorer) and the
 * kernel's k_sigaction before/after calling do_sigaction().
 */
asmlinkage int
sys_sigaction(int sig, const struct old_sigaction *act,
	      struct old_sigaction *oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (act) {
		old_sigset_t mask;
		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
			return -EFAULT;
		/* remaining fields covered by the access_ok() above */
		__get_user(new_ka.sa.sa_flags, &act->sa_flags);
		__get_user(mask, &act->sa_mask);
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
			return -EFAULT;
		__put_user(old_ka.sa.sa_flags, &oact->sa_flags);
		/* old ABI only reports the first word of the mask */
		__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
	}

	return ret;
}
127
128/*
129 * Do a signal return; undo the signal stack.
130 */
131struct sigframe
132{
133 struct sigcontext sc;
134 unsigned long extramask[_NSIG_WORDS-1];
135 unsigned long retcode;
136};
137
138struct rt_sigframe
139{
140 struct siginfo *pinfo;
141 void *puc;
142 struct siginfo info;
143 struct ucontext uc;
144 unsigned long retcode;
145};
146
/*
 * Reload the user register file from a sigcontext saved on the user
 * stack.  Returns non-zero on a user-access fault, or if the restored
 * registers would not form a valid user-mode state (so a handler
 * cannot smuggle in a privileged PSR).
 */
static int
restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc)
{
	int err = 0;

	__get_user_error(regs->ARM_r0, &sc->arm_r0, err);
	__get_user_error(regs->ARM_r1, &sc->arm_r1, err);
	__get_user_error(regs->ARM_r2, &sc->arm_r2, err);
	__get_user_error(regs->ARM_r3, &sc->arm_r3, err);
	__get_user_error(regs->ARM_r4, &sc->arm_r4, err);
	__get_user_error(regs->ARM_r5, &sc->arm_r5, err);
	__get_user_error(regs->ARM_r6, &sc->arm_r6, err);
	__get_user_error(regs->ARM_r7, &sc->arm_r7, err);
	__get_user_error(regs->ARM_r8, &sc->arm_r8, err);
	__get_user_error(regs->ARM_r9, &sc->arm_r9, err);
	__get_user_error(regs->ARM_r10, &sc->arm_r10, err);
	__get_user_error(regs->ARM_fp, &sc->arm_fp, err);
	__get_user_error(regs->ARM_ip, &sc->arm_ip, err);
	__get_user_error(regs->ARM_sp, &sc->arm_sp, err);
	__get_user_error(regs->ARM_lr, &sc->arm_lr, err);
	__get_user_error(regs->ARM_pc, &sc->arm_pc, err);

	err |= !valid_user_regs(regs);

	return err;
}
173
/*
 * Return from a non-RT signal handler: unpick the sigframe that
 * setup_frame() pushed, restore the blocked-signal mask and the saved
 * registers.  Any inconsistency in the frame => SIGSEGV.
 */
asmlinkage int sys_sigreturn(struct pt_regs *regs)
{
	struct sigframe *frame;
	sigset_t set;

	/*
	 * Since we stacked the signal on a 64-bit boundary,
	 * then 'sp' should be word aligned here.  If it's
	 * not, then the user is trying to mess with us.
	 */
	if (regs->ARM_sp & 7)
		goto badframe;

	frame = (struct sigframe *)regs->ARM_sp;

	if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
		goto badframe;
	/* first mask word lives in the sigcontext, the rest in extramask */
	if (__get_user(set.sig[0], &frame->sc.oldmask)
	    || (_NSIG_WORDS > 1
	        && __copy_from_user(&set.sig[1], &frame->extramask,
				    sizeof(frame->extramask))))
		goto badframe;

	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	if (restore_sigcontext(regs, &frame->sc))
		goto badframe;

	/* Send SIGTRAP if we're single-stepping */
	if (current->ptrace & PT_SINGLESTEP) {
		ptrace_cancel_bpt(current);
		send_sig(SIGTRAP, current, 1);
	}

	/* r0 was restored from the frame - return it unchanged to userspace */
	return regs->ARM_r0;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}
218
219asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
220{
221 struct rt_sigframe *frame;
222 sigset_t set;
223
224 /*
225 * Since we stacked the signal on a 64-bit boundary,
226 * then 'sp' should be word aligned here. If it's
227 * not, then the user is trying to mess with us.
228 */
229 if (regs->ARM_sp & 7)
230 goto badframe;
231
232 frame = (struct rt_sigframe *)regs->ARM_sp;
233
234 if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
235 goto badframe;
236 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
237 goto badframe;
238
239 sigdelsetmask(&set, ~_BLOCKABLE);
240 spin_lock_irq(&current->sighand->siglock);
241 current->blocked = set;
242 recalc_sigpending();
243 spin_unlock_irq(&current->sighand->siglock);
244
245 if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
246 goto badframe;
247
248 /* Send SIGTRAP if we're single-stepping */
249 if (current->ptrace & PT_SINGLESTEP) {
250 ptrace_cancel_bpt(current);
251 send_sig(SIGTRAP, current, 1);
252 }
253
254 return regs->ARM_r0;
255
256badframe:
257 force_sig(SIGSEGV, current);
258 return 0;
259}
260
261static int
262setup_sigcontext(struct sigcontext *sc, /*struct _fpstate *fpstate,*/
263 struct pt_regs *regs, unsigned long mask)
264{
265 int err = 0;
266
267 __put_user_error(regs->ARM_r0, &sc->arm_r0, err);
268 __put_user_error(regs->ARM_r1, &sc->arm_r1, err);
269 __put_user_error(regs->ARM_r2, &sc->arm_r2, err);
270 __put_user_error(regs->ARM_r3, &sc->arm_r3, err);
271 __put_user_error(regs->ARM_r4, &sc->arm_r4, err);
272 __put_user_error(regs->ARM_r5, &sc->arm_r5, err);
273 __put_user_error(regs->ARM_r6, &sc->arm_r6, err);
274 __put_user_error(regs->ARM_r7, &sc->arm_r7, err);
275 __put_user_error(regs->ARM_r8, &sc->arm_r8, err);
276 __put_user_error(regs->ARM_r9, &sc->arm_r9, err);
277 __put_user_error(regs->ARM_r10, &sc->arm_r10, err);
278 __put_user_error(regs->ARM_fp, &sc->arm_fp, err);
279 __put_user_error(regs->ARM_ip, &sc->arm_ip, err);
280 __put_user_error(regs->ARM_sp, &sc->arm_sp, err);
281 __put_user_error(regs->ARM_lr, &sc->arm_lr, err);
282 __put_user_error(regs->ARM_pc, &sc->arm_pc, err);
283
284 __put_user_error(current->thread.trap_no, &sc->trap_no, err);
285 __put_user_error(current->thread.error_code, &sc->error_code, err);
286 __put_user_error(current->thread.address, &sc->fault_address, err);
287 __put_user_error(mask, &sc->oldmask, err);
288
289 return err;
290}
291
292static inline void *
293get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, int framesize)
294{
295 unsigned long sp = regs->ARM_sp;
296
297 /*
298 * This is the X/Open sanctioned signal stack switching.
299 */
300 if ((ka->sa.sa_flags & SA_ONSTACK) && !sas_ss_flags(sp))
301 sp = current->sas_ss_sp + current->sas_ss_size;
302
303 /*
304 * ATPCS B01 mandates 8-byte alignment
305 */
306 return (void *)((sp - framesize) & ~7);
307}
308
/*
 * Aim the user registers at the signal handler: r0 = (translated)
 * signal number, sp = new frame, lr = return trampoline, pc = handler.
 * Without SA_RESTORER, a single "swi sigreturn"/"swi rt_sigreturn"
 * instruction is written into the frame's retcode slot and used as
 * the trampoline.  Returns non-zero if that write faults.
 */
static int
setup_return(struct pt_regs *regs, struct k_sigaction *ka,
	     unsigned long *rc, void *frame, int usig)
{
	unsigned long handler = (unsigned long)ka->sa.sa_handler;
	unsigned long retcode;

	if (ka->sa.sa_flags & SA_RESTORER) {
		retcode = (unsigned long)ka->sa.sa_restorer;
	} else {

		if (__put_user((ka->sa.sa_flags & SA_SIGINFO)?SWI_SYS_RT_SIGRETURN:SWI_SYS_SIGRETURN, rc))
			return 1;

		retcode = ((unsigned long)rc);
	}

	regs->ARM_r0 = usig;
	regs->ARM_sp = (unsigned long)frame;
	regs->ARM_lr = retcode;
	/* NOTE(review): low two bits masked off the handler address - on
	 * 26-bit ARM the PC's low bits overlap PSR mode bits; confirm */
	regs->ARM_pc = handler & ~3;

	return 0;
}
333
334static int
335setup_frame(int usig, struct k_sigaction *ka, sigset_t *set, struct pt_regs *regs)
336{
337 struct sigframe *frame = get_sigframe(ka, regs, sizeof(*frame));
338 int err = 0;
339
340 if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
341 return 1;
342
343 err |= setup_sigcontext(&frame->sc, /*&frame->fpstate,*/ regs, set->sig[0]);
344
345 if (_NSIG_WORDS > 1) {
346 err |= __copy_to_user(frame->extramask, &set->sig[1],
347 sizeof(frame->extramask));
348 }
349
350 if (err == 0)
351 err = setup_return(regs, ka, &frame->retcode, frame, usig);
352
353 return err;
354}
355
356static int
357setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info,
358 sigset_t *set, struct pt_regs *regs)
359{
360 struct rt_sigframe *frame = get_sigframe(ka, regs, sizeof(*frame));
361 int err = 0;
362
363 if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
364 return 1;
365
366 __put_user_error(&frame->info, &frame->pinfo, err);
367 __put_user_error(&frame->uc, &frame->puc, err);
368 err |= copy_siginfo_to_user(&frame->info, info);
369
370 /* Clear all the bits of the ucontext we don't use. */
371 err |= __clear_user(&frame->uc, offsetof(struct ucontext, uc_mcontext));
372
373 err |= setup_sigcontext(&frame->uc.uc_mcontext, /*&frame->fpstate,*/
374 regs, set->sig[0]);
375 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
376
377 if (err == 0)
378 err = setup_return(regs, ka, &frame->retcode, frame, usig);
379
380 if (err == 0) {
381 /*
382 * For realtime signals we must also set the second and third
383 * arguments for the signal handler.
384 * -- Peter Maydell <pmaydell@chiark.greenend.org.uk> 2000-12-06
385 */
386 regs->ARM_r1 = (unsigned long)frame->pinfo;
387 regs->ARM_r2 = (unsigned long)frame->puc;
388 }
389
390 return err;
391}
392
393static inline void restart_syscall(struct pt_regs *regs)
394{
395 regs->ARM_r0 = regs->ARM_ORIG_r0;
396 regs->ARM_pc -= 4;
397}
398
399/*
400 * OK, we're invoking a handler
401 */
/*
 * OK, we're invoking a handler: fix up any pending syscall restart,
 * translate the signal number for the exec domain, build the (RT or
 * plain) signal frame, and update the blocked mask.  If frame setup
 * fails or produces an invalid register state, the task gets SIGSEGV.
 */
static void
handle_signal(unsigned long sig, siginfo_t *info, sigset_t *oldset,
	      struct pt_regs * regs, int syscall)
{
	struct thread_info *thread = current_thread_info();
	struct task_struct *tsk = current;
	struct k_sigaction *ka = &tsk->sighand->action[sig-1];
	int usig = sig;
	int ret;

	/*
	 * If we were from a system call, check for system call restarting...
	 */
	if (syscall) {
		switch (regs->ARM_r0) {
		case -ERESTART_RESTARTBLOCK:
			current_thread_info()->restart_block.fn =
				do_no_restart_syscall;
			/* fall through: a handled signal turns the
			 * restart into -EINTR, same as NOHAND */
		case -ERESTARTNOHAND:
			regs->ARM_r0 = -EINTR;
			break;
		case -ERESTARTSYS:
			if (!(ka->sa.sa_flags & SA_RESTART)) {
				regs->ARM_r0 = -EINTR;
				break;
			}
			/* fallthrough */
		case -ERESTARTNOINTR:
			restart_syscall(regs);
		}
	}

	/*
	 * translate the signal
	 */
	if (usig < 32 && thread->exec_domain && thread->exec_domain->signal_invmap)
		usig = thread->exec_domain->signal_invmap[usig];

	/*
	 * Set up the stack frame
	 */
	if (ka->sa.sa_flags & SA_SIGINFO)
		ret = setup_rt_frame(usig, ka, info, oldset, regs);
	else
		ret = setup_frame(usig, ka, oldset, regs);

	/*
	 * Check that the resulting registers are actually sane.
	 */
	ret |= !valid_user_regs(regs);

	if (ret == 0) {
		if (ka->sa.sa_flags & SA_ONESHOT)
			ka->sa.sa_handler = SIG_DFL;

		/* block the signal itself (and the handler's mask)
		 * while the handler runs, unless SA_NODEFER */
		if (!(ka->sa.sa_flags & SA_NODEFER)) {
			spin_lock_irq(&tsk->sighand->siglock);
			sigorsets(&tsk->blocked, &tsk->blocked,
				  &ka->sa.sa_mask);
			sigaddset(&tsk->blocked, sig);
			recalc_sigpending();
			spin_unlock_irq(&tsk->sighand->siglock);
		}
		return;
	}

	force_sigsegv(sig, tsk);
}
470
471/*
472 * Note that 'init' is a special process: it doesn't get signals it doesn't
473 * want to handle. Thus you cannot kill init even with a SIGKILL even by
474 * mistake.
475 *
476 * Note that we go through the signals twice: once to check the signals that
477 * the kernel can handle, and then we build all the user-level signal handling
478 * stack-frames in one go after that.
479 */
/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 *
 * Note that we go through the signals twice: once to check the signals that
 * the kernel can handle, and then we build all the user-level signal handling
 * stack-frames in one go after that.
 *
 * Returns 1 when a handler frame was set up, 0 otherwise.
 */
static int do_signal(sigset_t *oldset, struct pt_regs *regs, int syscall)
{
	siginfo_t info;
	int signr;

	/*
	 * We want the common case to go fast, which
	 * is why we may in certain cases get here from
	 * kernel mode. Just return without doing anything
	 * if so.
	 */
	if (!user_mode(regs))
		return 0;

	/* remove the ptrace breakpoint before touching user state */
	if (current->ptrace & PT_SINGLESTEP)
		ptrace_cancel_bpt(current);

	signr = get_signal_to_deliver(&info, regs, NULL);
	if (signr > 0) {
		handle_signal(signr, &info, oldset, regs, syscall);
		if (current->ptrace & PT_SINGLESTEP)
			ptrace_set_bpt(current);
		return 1;
	}

	/*
	 * No signal to deliver to the process - restart the syscall.
	 */
	if (syscall) {
		if (regs->ARM_r0 == -ERESTART_RESTARTBLOCK) {
			u32 *usp;

			/* push a 3-word trampoline on the user stack:
			 * saved pc, "swi restart_syscall", and a return
			 * instruction - then resume at the swi */
			regs->ARM_sp -= 12;
			usp = (u32 *)regs->ARM_sp;

			put_user(regs->ARM_pc, &usp[0]);
			/* swi __NR_restart_syscall */
			put_user(0xef000000 | __NR_restart_syscall, &usp[1]);
			/* ldr	pc, [sp], #12 */
// FIXME!!! is #12 correct there?
			put_user(0xe49df00c, &usp[2]);

			regs->ARM_pc = regs->ARM_sp + 4;
		}
		if (regs->ARM_r0 == -ERESTARTNOHAND ||
		    regs->ARM_r0 == -ERESTARTSYS ||
		    regs->ARM_r0 == -ERESTARTNOINTR) {
			restart_syscall(regs);
		}
	}
	if (current->ptrace & PT_SINGLESTEP)
		ptrace_set_bpt(current);
	return 0;
}
534
535asmlinkage void
536do_notify_resume(struct pt_regs *regs, unsigned int thread_flags, int syscall)
537{
538 if (thread_flags & _TIF_SIGPENDING)
539 do_signal(&current->blocked, regs, syscall);
540}
diff --git a/arch/arm26/kernel/sys_arm.c b/arch/arm26/kernel/sys_arm.c
new file mode 100644
index 000000000000..e7edd201579a
--- /dev/null
+++ b/arch/arm26/kernel/sys_arm.c
@@ -0,0 +1,324 @@
1/*
2 * linux/arch/arm26/kernel/sys_arm.c
3 *
4 * Copyright (C) People who wrote linux/arch/i386/kernel/sys_i386.c
5 * Copyright (C) 1995, 1996 Russell King.
6 * Copyright (C) 2003 Ian Molton.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * This file contains various random system calls that
13 * have a non-standard calling sequence on the Linux/arm
14 * platform.
15 */
16#include <linux/module.h>
17#include <linux/errno.h>
18#include <linux/sched.h>
19#include <linux/slab.h>
20#include <linux/mm.h>
21#include <linux/sem.h>
22#include <linux/msg.h>
23#include <linux/shm.h>
24#include <linux/stat.h>
25#include <linux/syscalls.h>
26#include <linux/mman.h>
27#include <linux/fs.h>
28#include <linux/file.h>
29#include <linux/utsname.h>
30
31#include <asm/uaccess.h>
32#include <asm/ipc.h>
33
34extern unsigned long do_mremap(unsigned long addr, unsigned long old_len,
35 unsigned long new_len, unsigned long flags,
36 unsigned long new_addr);
37
38/*
39 * sys_pipe() is the normal C calling standard for creating
40 * a pipe. It's not the way unix traditionally does this, though.
41 */
/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way unix traditionally does this, though:
 * the two descriptors are copied out to a user-supplied array
 * instead of being returned in registers.
 */
asmlinkage int sys_pipe(unsigned long * fildes)
{
	int fd[2];
	int error;

	error = do_pipe(fd);
	if (!error) {
		if (copy_to_user(fildes, fd, 2*sizeof(int)))
			error = -EFAULT;
	}
	return error;
}
54
55/* common code for old and new mmaps */
/*
 * Common code for old and new mmaps: validate flags, resolve the file
 * descriptor (unless MAP_ANONYMOUS), and call do_mmap_pgoff() under
 * the mmap semaphore.  pgoff is in page units.
 */
inline long do_mmap2(
	unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags,
	unsigned long fd, unsigned long pgoff)
{
	int error = -EINVAL;
	struct file * file = NULL;

	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

	/*
	 * If we are doing a fixed mapping, and address < PAGE_SIZE,
	 * then deny it (the vector page must stay mapped at 0).
	 */
	if (flags & MAP_FIXED && addr < PAGE_SIZE && vectors_base() == 0)
		goto out;

	error = -EBADF;
	if (!(flags & MAP_ANONYMOUS)) {
		file = fget(fd);
		if (!file)
			goto out;
	}

	down_write(&current->mm->mmap_sem);
	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
	up_write(&current->mm->mmap_sem);

	if (file)
		fput(file);
out:
	return error;
}
89
90struct mmap_arg_struct {
91 unsigned long addr;
92 unsigned long len;
93 unsigned long prot;
94 unsigned long flags;
95 unsigned long fd;
96 unsigned long offset;
97};
98
99asmlinkage int old_mmap(struct mmap_arg_struct *arg)
100{
101 int error = -EFAULT;
102 struct mmap_arg_struct a;
103
104 if (copy_from_user(&a, arg, sizeof(a)))
105 goto out;
106
107 error = -EINVAL;
108 if (a.offset & ~PAGE_MASK)
109 goto out;
110
111 error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
112out:
113 return error;
114}
115
116asmlinkage unsigned long
117sys_arm_mremap(unsigned long addr, unsigned long old_len,
118 unsigned long new_len, unsigned long flags,
119 unsigned long new_addr)
120{
121 unsigned long ret = -EINVAL;
122
123 /*
124 * If we are doing a fixed mapping, and address < PAGE_SIZE,
125 * then deny it.
126 */
127 if (flags & MREMAP_FIXED && new_addr < PAGE_SIZE &&
128 vectors_base() == 0)
129 goto out;
130
131 down_write(&current->mm->mmap_sem);
132 ret = do_mremap(addr, old_len, new_len, flags, new_addr);
133 up_write(&current->mm->mmap_sem);
134
135out:
136 return ret;
137}
138
139/*
140 * Perform the select(nd, in, out, ex, tv) and mmap() system
141 * calls.
142 */
143
144struct sel_arg_struct {
145 unsigned long n;
146 fd_set *inp, *outp, *exp;
147 struct timeval *tvp;
148};
149
150asmlinkage int old_select(struct sel_arg_struct *arg)
151{
152 struct sel_arg_struct a;
153
154 if (copy_from_user(&a, arg, sizeof(a)))
155 return -EFAULT;
156 /* sys_select() does the appropriate kernel locking */
157 return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
158}
159
160/*
161 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
162 *
163 * This is really horribly ugly.
164 */
/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls: the low 16
 * bits of 'call' select the operation, the high 16 bits carry a
 * version used for backward-compatible argument layouts.
 *
 * This is really horribly ugly.
 */
asmlinkage int sys_ipc (uint call, int first, int second, int third, void *ptr, long fifth)
{
	int version, ret;

	version = call >> 16; /* hack for backward compatibility */
	call &= 0xffff;

	switch (call) {
	case SEMOP:
		return sys_semop (first, (struct sembuf *)ptr, second);
	case SEMGET:
		return sys_semget (first, second, third);
	case SEMCTL: {
		union semun fourth;
		if (!ptr)
			return -EINVAL;
		/* the semun argument is passed indirectly via ptr */
		if (get_user(fourth.__pad, (void **) ptr))
			return -EFAULT;
		return sys_semctl (first, second, third, fourth);
	}

	case MSGSND:
		return sys_msgsnd (first, (struct msgbuf *) ptr,
				   second, third);
	case MSGRCV:
		switch (version) {
		case 0: {
			/* old ABI packs msgp+msgtyp in an ipc_kludge */
			struct ipc_kludge tmp;
			if (!ptr)
				return -EINVAL;
			if (copy_from_user(&tmp,(struct ipc_kludge *) ptr,
					   sizeof (tmp)))
				return -EFAULT;
			return sys_msgrcv (first, tmp.msgp, second,
					   tmp.msgtyp, third);
		}
		default:
			return sys_msgrcv (first,
					   (struct msgbuf *) ptr,
					   second, fifth, third);
		}
	case MSGGET:
		return sys_msgget ((key_t) first, second);
	case MSGCTL:
		return sys_msgctl (first, second, (struct msqid_ds *) ptr);

	case SHMAT:
		switch (version) {
		default: {
			ulong raddr;
			ret = do_shmat (first, (char *) ptr, second, &raddr);
			if (ret)
				return ret;
			/* report the attach address through 'third' */
			return put_user (raddr, (ulong *) third);
		}
		case 1: /* iBCS2 emulator entry point */
			if (!segment_eq(get_fs(), get_ds()))
				return -EINVAL;
			return do_shmat (first, (char *) ptr,
					  second, (ulong *) third);
		}
	case SHMDT:
		return sys_shmdt ((char *)ptr);
	case SHMGET:
		return sys_shmget (first, second, third);
	case SHMCTL:
		return sys_shmctl (first, second,
				   (struct shmid_ds *) ptr);
	default:
		return -EINVAL;
	}
}
237
238/* Fork a new task - this creates a new program thread.
239 * This is called indirectly via a small wrapper
240 */
241asmlinkage int sys_fork(struct pt_regs *regs)
242{
243 return do_fork(SIGCHLD, regs->ARM_sp, regs, 0, NULL, NULL);
244}
245
246/* Clone a task - this clones the calling program thread.
247 * This is called indirectly via a small wrapper
248 */
/* Clone a task - this clones the calling program thread.
 * This is called indirectly via a small wrapper.  A NULL newsp means
 * "share the caller's current stack pointer".
 */
asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp, struct pt_regs *regs)
{
	/*
	 * We don't support SETTID / CLEARTID (FIXME!!! (nicked from arm32))
	 */
	if (clone_flags & (CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID))
		return -EINVAL;

	if (!newsp)
		newsp = regs->ARM_sp;

	return do_fork(clone_flags, newsp, regs, 0, NULL, NULL);
}
262
263asmlinkage int sys_vfork(struct pt_regs *regs)
264{
265 return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->ARM_sp, regs, 0, NULL, NULL);
266}
267
268/* sys_execve() executes a new program.
269 * This is called indirectly via a small wrapper
270 */
271asmlinkage int sys_execve(char *filenamei, char **argv, char **envp, struct pt_regs *regs)
272{
273 int error;
274 char * filename;
275
276 filename = getname(filenamei);
277 error = PTR_ERR(filename);
278 if (IS_ERR(filename))
279 goto out;
280 error = do_execve(filename, argv, envp, regs);
281 putname(filename);
282out:
283 return error;
284}
285
286/* FIXME - see if this is correct for arm26 */
/* FIXME - see if this is correct for arm26 */
/*
 * In-kernel execve helper: runs a new user program from kernel
 * context.  On success it never returns - the constructed register
 * frame is copied to the top of the kernel stack and control jumps
 * to ret_to_user.  Only the error path falls through to 'out'.
 */
long execve(const char *filename, char **argv, char **envp)
{
	struct pt_regs regs;
	int ret;
	memset(&regs, 0, sizeof(struct pt_regs));
	ret = do_execve((char *)filename, (char __user * __user *)argv, (char __user * __user *)envp, &regs);
	if (ret < 0)
		goto out;

	/*
	 * Save argc to the register structure for userspace.
	 */
	regs.ARM_r0 = ret;

	/*
	 * We were successful.  We won't be returning to our caller, but
	 * instead to user space by manipulating the kernel stack.
	 */
	asm(	"add	r0, %0, %1\n\t"
		"mov	r1, %2\n\t"
		"mov	r2, %3\n\t"
		"bl	memmove\n\t"	/* copy regs to top of stack */
		"mov	r8, #0\n\t"	/* not a syscall */
		"mov	r9, %0\n\t"	/* thread structure */
		"mov	sp, r0\n\t"	/* reposition stack pointer */
		"b	ret_to_user"
		:
		: "r" (current_thread_info()),
		  "Ir" (THREAD_SIZE - 8 - sizeof(regs)),
		  "r" (&regs),
		  "Ir" (sizeof(regs))
		: "r0", "r1", "r2", "r3", "ip", "memory");

 out:
	return ret;
}
323
324EXPORT_SYMBOL(execve);
diff --git a/arch/arm26/kernel/time.c b/arch/arm26/kernel/time.c
new file mode 100644
index 000000000000..549a6b2e177e
--- /dev/null
+++ b/arch/arm26/kernel/time.c
@@ -0,0 +1,234 @@
1/*
2 * linux/arch/arm26/kernel/time.c
3 *
4 * Copyright (C) 1991, 1992, 1995 Linus Torvalds
5 * Modifications for ARM (C) 1994-2001 Russell King
6 * Mods for ARM26 (C) 2003 Ian Molton
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * This file contains the ARM-specific time handling details:
13 * reading the RTC at bootup, etc...
14 *
15 * 1994-07-02 Alan Modra
16 * fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
17 * 1998-12-20 Updated NTP code according to technical memorandum Jan '96
18 * "A Kernel Model for Precision Timekeeping" by Dave Mills
19 */
20
21#include <linux/config.h>
22#include <linux/module.h>
23#include <linux/kernel.h>
24#include <linux/interrupt.h>
25#include <linux/time.h>
26#include <linux/init.h>
27#include <linux/smp.h>
28#include <linux/timex.h>
29#include <linux/errno.h>
30#include <linux/profile.h>
31
32#include <asm/hardware.h>
33#include <asm/io.h>
34#include <asm/irq.h>
35#include <asm/ioc.h>
36
37u64 jiffies_64 = INITIAL_JIFFIES;
38
39EXPORT_SYMBOL(jiffies_64);
40
41extern unsigned long wall_jiffies;
42
43/* this needs a better home */
44DEFINE_SPINLOCK(rtc_lock);
45
46/* change this if you have some constant time drift */
47#define USECS_PER_JIFFY (1000000/HZ)
48
49static int dummy_set_rtc(void)
50{
51 return 0;
52}
53
54/*
55 * hook for setting the RTC's idea of the current time.
56 */
57int (*set_rtc)(void) = dummy_set_rtc;
58
59/*
60 * Get time offset based on IOCs timer.
61 * FIXME - if this is called with interrupts off, why the shenanigans
62 * below ?
63 */
64static unsigned long gettimeoffset(void)
65{
66 unsigned int count1, count2, status;
67 long offset;
68
69 ioc_writeb (0, IOC_T0LATCH);
70 barrier ();
71 count1 = ioc_readb(IOC_T0CNTL) | (ioc_readb(IOC_T0CNTH) << 8);
72 barrier ();
73 status = ioc_readb(IOC_IRQREQA);
74 barrier ();
75 ioc_writeb (0, IOC_T0LATCH);
76 barrier ();
77 count2 = ioc_readb(IOC_T0CNTL) | (ioc_readb(IOC_T0CNTH) << 8);
78
79 offset = count2;
80 if (count2 < count1) {
81 /*
82 * We have not had an interrupt between reading count1
83 * and count2.
84 */
85 if (status & (1 << 5))
86 offset -= LATCH;
87 } else if (count2 > count1) {
88 /*
89 * We have just had another interrupt between reading
90 * count1 and count2.
91 */
92 offset -= LATCH;
93 }
94
95 offset = (LATCH - offset) * (tick_nsec / 1000);
96 return (offset + LATCH/2) / LATCH;
97}
98
99/*
100 * Scheduler clock - returns current time in nanosec units.
101 */
102unsigned long long sched_clock(void)
103{
104 return (unsigned long long)jiffies * (1000000000 / HZ);
105}
106
107static unsigned long next_rtc_update;
108
109/*
110 * If we have an externally synchronized linux clock, then update
111 * CMOS clock accordingly every ~11 minutes. set_rtc() has to be
112 * called as close as possible to 500 ms before the new second
113 * starts.
114 */
115static inline void do_set_rtc(void)
116{
117	if (time_status & STA_UNSYNC || set_rtc == NULL)
118		return;
119
120//FIXME - timespec.tv_sec is a time_t not unsigned long
121	if (next_rtc_update &&
122	    time_before((unsigned long)xtime.tv_sec, next_rtc_update))
123		return;
124
125	if (xtime.tv_nsec < 500000000 - ((unsigned) tick_nsec >> 1) ||
126	    xtime.tv_nsec >= 500000000 + ((unsigned) tick_nsec >> 1))
127		return;	/* only write the RTC within +/- tick/2 of the 500ms point; '&&' here was never true */
128
129	if (set_rtc())
130		/*
131		 * rtc update failed.  Try again in 60s
132		 */
133		next_rtc_update = xtime.tv_sec + 60;
134	else
135		next_rtc_update = xtime.tv_sec + 660;
136}
137
138#define do_leds()
139
140void do_gettimeofday(struct timeval *tv)
141{
142 unsigned long flags;
143 unsigned long seq;
144 unsigned long usec, sec, lost;
145
146 do {
147 seq = read_seqbegin_irqsave(&xtime_lock, flags);
148 usec = gettimeoffset();
149
150 lost = jiffies - wall_jiffies;
151 if (lost)
152 usec += lost * USECS_PER_JIFFY;
153
154 sec = xtime.tv_sec;
155 usec += xtime.tv_nsec / 1000;
156 } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
157
158 /* usec may have gone up a lot: be safe */
159 while (usec >= 1000000) {
160 usec -= 1000000;
161 sec++;
162 }
163
164 tv->tv_sec = sec;
165 tv->tv_usec = usec;
166}
167
168EXPORT_SYMBOL(do_gettimeofday);
169
170int do_settimeofday(struct timespec *tv)
171{
172 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
173 return -EINVAL;
174
175 write_seqlock_irq(&xtime_lock);
176 /*
177 * This is revolting. We need to set "xtime" correctly. However, the
178 * value in this location is the value at the most recent update of
179 * wall time. Discover what correction gettimeofday() would have
180 * done, and then undo it!
181 */
182 tv->tv_nsec -= 1000 * (gettimeoffset() +
183 (jiffies - wall_jiffies) * USECS_PER_JIFFY);
184
185 while (tv->tv_nsec < 0) {
186 tv->tv_nsec += NSEC_PER_SEC;
187 tv->tv_sec--;
188 }
189
190 xtime.tv_sec = tv->tv_sec;
191 xtime.tv_nsec = tv->tv_nsec;
192 time_adjust = 0; /* stop active adjtime() */
193 time_status |= STA_UNSYNC;
194 time_maxerror = NTP_PHASE_LIMIT;
195 time_esterror = NTP_PHASE_LIMIT;
196 write_sequnlock_irq(&xtime_lock);
197 clock_was_set();
198 return 0;
199}
200
201EXPORT_SYMBOL(do_settimeofday);
202
203static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
204{
205 do_timer(regs);
206#ifndef CONFIG_SMP
207 update_process_times(user_mode(regs));
208#endif
209	do_set_rtc(); //FIXME - EVERY timer IRQ?
210 profile_tick(CPU_PROFILING, regs);
211 return IRQ_HANDLED; //FIXME - is this right?
212}
213
214static struct irqaction timer_irq = {
215 .name = "timer",
216 .flags = SA_INTERRUPT,
217 .handler = timer_interrupt,
218};
219
220extern void ioctime_init(void);
221
222/*
223 * Set up timer interrupt.
224 */
225void __init time_init(void)
226{
227 ioc_writeb(LATCH & 255, IOC_T0LTCHL);
228 ioc_writeb(LATCH >> 8, IOC_T0LTCHH);
229 ioc_writeb(0, IOC_T0GO);
230
231
232 setup_irq(IRQ_TIMER, &timer_irq);
233}
234
diff --git a/arch/arm26/kernel/traps.c b/arch/arm26/kernel/traps.c
new file mode 100644
index 000000000000..f64f59022392
--- /dev/null
+++ b/arch/arm26/kernel/traps.c
@@ -0,0 +1,548 @@
1/*
2 * linux/arch/arm26/kernel/traps.c
3 *
4 * Copyright (C) 1995-2002 Russell King
5 * Fragments that appear the same as linux/arch/i386/kernel/traps.c (C) Linus Torvalds
6 * Copyright (C) 2003 Ian Molton (ARM26)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * 'traps.c' handles hardware exceptions after we have saved some state in
13 * 'linux/arch/arm26/lib/traps.S'. Mostly a debugging aid, but will probably
14 * kill the offending process.
15 */
16
17#include <linux/module.h>
18#include <linux/config.h>
19#include <linux/types.h>
20#include <linux/kernel.h>
21#include <linux/signal.h>
22#include <linux/sched.h>
23#include <linux/mm.h>
24#include <linux/spinlock.h>
25#include <linux/personality.h>
26#include <linux/ptrace.h>
27#include <linux/elf.h>
28#include <linux/interrupt.h>
29#include <linux/init.h>
30
31#include <asm/atomic.h>
32#include <asm/io.h>
33#include <asm/pgtable.h>
34#include <asm/system.h>
35#include <asm/uaccess.h>
36#include <asm/unistd.h>
37#include <asm/semaphore.h>
38
39#include "ptrace.h"
40
41extern void c_backtrace (unsigned long fp, int pmode);
42extern void show_pte(struct mm_struct *mm, unsigned long addr);
43
44const char *processor_modes[] = { "USER_26", "FIQ_26" , "IRQ_26" , "SVC_26" };
45
46static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" "*bad reason*"};
47
48/*
49 * Stack pointers should always be within the kernels view of
50 * physical memory. If it is not there, then we can't dump
51 * out any information relating to the stack.
52 */
53static int verify_stack(unsigned long sp)
54{
55 if (sp < PAGE_OFFSET || (sp > (unsigned long)high_memory && high_memory != 0))
56 return -EFAULT;
57
58 return 0;
59}
60
61/*
62 * Dump out the contents of some memory nicely...
63 */
64static void dump_mem(const char *str, unsigned long bottom, unsigned long top)
65{
66 unsigned long p = bottom & ~31;
67 mm_segment_t fs;
68 int i;
69
70 /*
71 * We need to switch to kernel mode so that we can use __get_user
72 * to safely read from kernel space. Note that we now dump the
73 * code first, just in case the backtrace kills us.
74 */
75 fs = get_fs();
76 set_fs(KERNEL_DS);
77
78 printk("%s", str);
79 printk("(0x%08lx to 0x%08lx)\n", bottom, top);
80
81 for (p = bottom & ~31; p < top;) {
82 printk("%04lx: ", p & 0xffff);
83
84 for (i = 0; i < 8; i++, p += 4) {
85 unsigned int val;
86
87 if (p < bottom || p >= top)
88 printk(" ");
89 else {
90 __get_user(val, (unsigned long *)p);
91 printk("%08x ", val);
92 }
93 }
94 printk ("\n");
95 }
96
97 set_fs(fs);
98}
99
100static void dump_instr(struct pt_regs *regs)
101{
102 unsigned long addr = instruction_pointer(regs);
103 const int width = 8;
104 mm_segment_t fs;
105 int i;
106
107 /*
108 * We need to switch to kernel mode so that we can use __get_user
109 * to safely read from kernel space. Note that we now dump the
110 * code first, just in case the backtrace kills us.
111 */
112 fs = get_fs();
113 set_fs(KERNEL_DS);
114
115 printk("Code: ");
116 for (i = -4; i < 1; i++) {
117 unsigned int val, bad;
118
119 bad = __get_user(val, &((u32 *)addr)[i]);
120
121 if (!bad)
122 printk(i == 0 ? "(%0*x) " : "%0*x ", width, val);
123 else {
124 printk("bad PC value.");
125 break;
126 }
127 }
128 printk("\n");
129
130 set_fs(fs);
131}
132
133/*static*/ void __dump_stack(struct task_struct *tsk, unsigned long sp)
134{
135 dump_mem("Stack: ", sp, 8192+(unsigned long)tsk->thread_info);
136}
137
138void dump_stack(void)
139{
140#ifdef CONFIG_DEBUG_ERRORS
141 __backtrace();
142#endif
143}
144
145EXPORT_SYMBOL(dump_stack);
146
147//FIXME - was a static fn
148void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
149{
150 unsigned int fp;
151 int ok = 1;
152
153 printk("Backtrace: ");
154 fp = regs->ARM_fp;
155 if (!fp) {
156 printk("no frame pointer");
157 ok = 0;
158 } else if (verify_stack(fp)) {
159 printk("invalid frame pointer 0x%08x", fp);
160 ok = 0;
161 } else if (fp < (unsigned long)(tsk->thread_info + 1))
162 printk("frame pointer underflow");
163 printk("\n");
164
165 if (ok)
166 c_backtrace(fp, processor_mode(regs));
167}
168
169/* FIXME - this is probably wrong.. */
170void show_stack(struct task_struct *task, unsigned long *sp) {
171 dump_mem("Stack: ", (unsigned long)sp, 8192+(unsigned long)task->thread_info);
172}
173
174DEFINE_SPINLOCK(die_lock);
175
176/*
177 * This function is protected against re-entrancy.
178 */
179NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
180{
181	struct task_struct *tsk = current;
182
183	console_verbose();
184	spin_lock_irq(&die_lock);
185
186	printk("Internal error: %s: %x\n", str, err);
187	printk("CPU: %d\n", smp_processor_id());
188	show_regs(regs);
189	printk("Process %s (pid: %d, stack limit = 0x%p)\n",
190		current->comm, current->pid, tsk->thread_info + 1);
191
192	if (!user_mode(regs) || in_interrupt()) {
193		__dump_stack(tsk, (unsigned long)(regs + 1));
194		dump_backtrace(regs, tsk);
195		dump_instr(regs);
196	}
197	/* removed stray debug "while(1);" that hung the CPU here and made the unlock and do_exit() below unreachable */
198	spin_unlock_irq(&die_lock);
199	do_exit(SIGSEGV);
200}
201
202void die_if_kernel(const char *str, struct pt_regs *regs, int err)
203{
204 if (user_mode(regs))
205 return;
206
207 die(str, regs, err);
208}
209
210static DECLARE_MUTEX(undef_sem);
211static int (*undef_hook)(struct pt_regs *);
212
213int request_undef_hook(int (*fn)(struct pt_regs *))
214{
215 int ret = -EBUSY;
216
217 down(&undef_sem);
218 if (undef_hook == NULL) {
219 undef_hook = fn;
220 ret = 0;
221 }
222 up(&undef_sem);
223
224 return ret;
225}
226
227int release_undef_hook(int (*fn)(struct pt_regs *))
228{
229 int ret = -EINVAL;
230
231 down(&undef_sem);
232 if (undef_hook == fn) {
233 undef_hook = NULL;
234 ret = 0;
235 }
236 up(&undef_sem);
237
238 return ret;
239}
240
241static int undefined_extension(struct pt_regs *regs, unsigned int op)
242{
243 switch (op) {
244 case 1: /* 0xde01 / 0x?7f001f0 */
245 ptrace_break(current, regs);
246 return 0;
247 }
248 return 1;
249}
250
251asmlinkage void do_undefinstr(struct pt_regs *regs)
252{
253 siginfo_t info;
254 void *pc;
255
256 regs->ARM_pc -= 4;
257
258 pc = (unsigned long *)instruction_pointer(regs); /* strip PSR */
259
260 if (user_mode(regs)) {
261 u32 instr;
262
263 get_user(instr, (u32 *)pc);
264
265 if ((instr & 0x0fff00ff) == 0x07f000f0 &&
266 undefined_extension(regs, (instr >> 8) & 255) == 0) {
267 regs->ARM_pc += 4;
268 return;
269 }
270 } else {
271 if (undef_hook && undef_hook(regs) == 0) {
272 regs->ARM_pc += 4;
273 return;
274 }
275 }
276
277#ifdef CONFIG_DEBUG_USER
278 printk(KERN_INFO "%s (%d): undefined instruction: pc=%p\n",
279 current->comm, current->pid, pc);
280 dump_instr(regs);
281#endif
282
283 current->thread.error_code = 0;
284 current->thread.trap_no = 6;
285
286 info.si_signo = SIGILL;
287 info.si_errno = 0;
288 info.si_code = ILL_ILLOPC;
289 info.si_addr = pc;
290
291 force_sig_info(SIGILL, &info, current);
292
293 die_if_kernel("Oops - undefined instruction", regs, 0);
294}
295
296asmlinkage void do_excpt(unsigned long address, struct pt_regs *regs, int mode)
297{
298 siginfo_t info;
299
300#ifdef CONFIG_DEBUG_USER
301 printk(KERN_INFO "%s (%d): address exception: pc=%08lx\n",
302 current->comm, current->pid, instruction_pointer(regs));
303 dump_instr(regs);
304#endif
305
306 current->thread.error_code = 0;
307 current->thread.trap_no = 11;
308
309 info.si_signo = SIGBUS;
310 info.si_errno = 0;
311 info.si_code = BUS_ADRERR;
312 info.si_addr = (void *)address;
313
314 force_sig_info(SIGBUS, &info, current);
315
316 die_if_kernel("Oops - address exception", regs, mode);
317}
318
319asmlinkage void do_unexp_fiq (struct pt_regs *regs)
320{
321#ifndef CONFIG_IGNORE_FIQ
322 printk("Hmm. Unexpected FIQ received, but trying to continue\n");
323 printk("You may have a hardware problem...\n");
324#endif
325}
326
327/*
328 * bad_mode handles the impossible case in the vectors. If you see one of
329 * these, then it's extremely serious, and could mean you have buggy hardware.
330 * It never returns, and never tries to sync. We hope that we can at least
331 * dump out some state information...
332 */
333asmlinkage void bad_mode(struct pt_regs *regs, int reason, int proc_mode)
334{
335 unsigned int vectors = vectors_base();
336
337 console_verbose();
338
339 printk(KERN_CRIT "Bad mode in %s handler detected: mode %s\n",
340 handler[reason<5?reason:4], processor_modes[proc_mode]);
341
342 /*
343 * Dump out the vectors and stub routines. Maybe a better solution
344 * would be to dump them out only if we detect that they are corrupted.
345 */
346 dump_mem(KERN_CRIT "Vectors: ", vectors, vectors + 0x40);
347 dump_mem(KERN_CRIT "Stubs: ", vectors + 0x200, vectors + 0x4b8);
348
349 die("Oops", regs, 0);
350 local_irq_disable();
351 panic("bad mode");
352}
353
354static int bad_syscall(int n, struct pt_regs *regs)
355{
356 struct thread_info *thread = current_thread_info();
357 siginfo_t info;
358
359 if (current->personality != PER_LINUX && thread->exec_domain->handler) {
360 thread->exec_domain->handler(n, regs);
361 return regs->ARM_r0;
362 }
363
364#ifdef CONFIG_DEBUG_USER
365 printk(KERN_ERR "[%d] %s: obsolete system call %08x.\n",
366 current->pid, current->comm, n);
367 dump_instr(regs);
368#endif
369
370 info.si_signo = SIGILL;
371 info.si_errno = 0;
372 info.si_code = ILL_ILLTRP;
373 info.si_addr = (void *)instruction_pointer(regs) - 4;
374
375 force_sig_info(SIGILL, &info, current);
376 die_if_kernel("Oops", regs, n);
377 return regs->ARM_r0;
378}
379
380static inline void
381do_cache_op(unsigned long start, unsigned long end, int flags)
382{
383 struct vm_area_struct *vma;
384
385 if (end < start)
386 return;
387
388 vma = find_vma(current->active_mm, start);
389 if (vma && vma->vm_start < end) {
390 if (start < vma->vm_start)
391 start = vma->vm_start;
392 if (end > vma->vm_end)
393 end = vma->vm_end;
394 }
395}
396
397/*
398 * Handle all unrecognised system calls.
399 * 0x9f0000 - 0x9fffff are some more esoteric system calls
400 */
401#define NR(x) ((__ARM_NR_##x) - __ARM_NR_BASE)
402asmlinkage int arm_syscall(int no, struct pt_regs *regs)
403{
404 siginfo_t info;
405
406 if ((no >> 16) != 0x9f)
407 return bad_syscall(no, regs);
408
409 switch (no & 0xffff) {
410 case 0: /* branch through 0 */
411 info.si_signo = SIGSEGV;
412 info.si_errno = 0;
413 info.si_code = SEGV_MAPERR;
414 info.si_addr = NULL;
415
416 force_sig_info(SIGSEGV, &info, current);
417
418 die_if_kernel("branch through zero", regs, 0);
419 return 0;
420
421 case NR(breakpoint): /* SWI BREAK_POINT */
422 ptrace_break(current, regs);
423 return regs->ARM_r0;
424
425 case NR(cacheflush):
426 return 0;
427
428 case NR(usr26):
429 break;
430
431 default:
432 /* Calls 9f00xx..9f07ff are defined to return -ENOSYS
433 if not implemented, rather than raising SIGILL. This
434 way the calling program can gracefully determine whether
435 a feature is supported. */
436 if (no <= 0x7ff)
437 return -ENOSYS;
438 break;
439 }
440#ifdef CONFIG_DEBUG_USER
441 /*
442 * experience shows that these seem to indicate that
443 * something catastrophic has happened
444 */
445 printk("[%d] %s: arm syscall %d\n", current->pid, current->comm, no);
446 dump_instr(regs);
447 if (user_mode(regs)) {
448 show_regs(regs);
449 c_backtrace(regs->ARM_fp, processor_mode(regs));
450 }
451#endif
452 info.si_signo = SIGILL;
453 info.si_errno = 0;
454 info.si_code = ILL_ILLTRP;
455 info.si_addr = (void *)instruction_pointer(regs) - 4;
456
457 force_sig_info(SIGILL, &info, current);
458 die_if_kernel("Oops", regs, no);
459 return 0;
460}
461
462void __bad_xchg(volatile void *ptr, int size)
463{
464 printk("xchg: bad data size: pc 0x%p, ptr 0x%p, size %d\n",
465 __builtin_return_address(0), ptr, size);
466 BUG();
467}
468
469/*
470 * A data abort trap was taken, but we did not handle the instruction.
471 * Try to abort the user program, or panic if it was the kernel.
472 */
473asmlinkage void
474baddataabort(int code, unsigned long instr, struct pt_regs *regs)
475{
476 unsigned long addr = instruction_pointer(regs);
477 siginfo_t info;
478
479#ifdef CONFIG_DEBUG_USER
480 printk(KERN_ERR "[%d] %s: bad data abort: code %d instr 0x%08lx\n",
481 current->pid, current->comm, code, instr);
482 dump_instr(regs);
483 show_pte(current->mm, addr);
484#endif
485
486 info.si_signo = SIGILL;
487 info.si_errno = 0;
488 info.si_code = ILL_ILLOPC;
489 info.si_addr = (void *)addr;
490
491 force_sig_info(SIGILL, &info, current);
492 die_if_kernel("unknown data abort code", regs, instr);
493}
494
495volatile void __bug(const char *file, int line, void *data)
496{
497 printk(KERN_CRIT"kernel BUG at %s:%d!", file, line);
498 if (data)
499 printk(KERN_CRIT" - extra data = %p", data);
500 printk("\n");
501 *(int *)0 = 0;
502}
503
504void __readwrite_bug(const char *fn)
505{
506 printk("%s called, but not implemented", fn);
507 BUG();
508}
509
510void __pte_error(const char *file, int line, unsigned long val)
511{
512 printk("%s:%d: bad pte %08lx.\n", file, line, val);
513}
514
515void __pmd_error(const char *file, int line, unsigned long val)
516{
517 printk("%s:%d: bad pmd %08lx.\n", file, line, val);
518}
519
520void __pgd_error(const char *file, int line, unsigned long val)
521{
522 printk("%s:%d: bad pgd %08lx.\n", file, line, val);
523}
524
525asmlinkage void __div0(void)
526{
527 printk("Division by zero in kernel.\n");
528 dump_stack();
529}
530
531void abort(void)
532{
533 BUG();
534
535 /* if that doesn't kill us, halt */
536 panic("Oops failed to kill thread");
537}
538
539void __init trap_init(void)
540{
541 extern void __trap_init(unsigned long);
542 unsigned long base = vectors_base();
543
544 __trap_init(base);
545 if (base != 0)
546 printk(KERN_DEBUG "Relocating machine vectors to 0x%08lx\n",
547 base);
548}
diff --git a/arch/arm26/kernel/vmlinux-arm26-xip.lds.in b/arch/arm26/kernel/vmlinux-arm26-xip.lds.in
new file mode 100644
index 000000000000..ca61ec8218fe
--- /dev/null
+++ b/arch/arm26/kernel/vmlinux-arm26-xip.lds.in
@@ -0,0 +1,134 @@
1/* ld script to make ARM Linux kernel
2 * taken from the i386 version by Russell King
3 * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
4 * borrowed from Russell's ARM port by Ian Molton
5 */
6
7#include <asm-generic/vmlinux.lds.h>
8
9OUTPUT_ARCH(arm)
10ENTRY(stext)
11jiffies = jiffies_64;
12SECTIONS
13{
14 . = TEXTADDR;
15 .init : { /* Init code and data */
16 _stext = .;
17 __init_begin = .;
18 _sinittext = .;
19 *(.init.text)
20 _einittext = .;
21 __proc_info_begin = .;
22 *(.proc.info)
23 __proc_info_end = .;
24 __arch_info_begin = .;
25 *(.arch.info)
26 __arch_info_end = .;
27 __tagtable_begin = .;
28 *(.taglist)
29 __tagtable_end = .;
30 . = ALIGN(16);
31 __setup_start = .;
32 *(.init.setup)
33 __setup_end = .;
34 __early_begin = .;
35 *(__early_param)
36 __early_end = .;
37 __initcall_start = .;
38 *(.initcall1.init)
39 *(.initcall2.init)
40 *(.initcall3.init)
41 *(.initcall4.init)
42 *(.initcall5.init)
43 *(.initcall6.init)
44 *(.initcall7.init)
45 __initcall_end = .;
46 __con_initcall_start = .;
47 *(.con_initcall.init)
48 __con_initcall_end = .;
49 . = ALIGN(32);
50 __initramfs_start = .;
51 usr/built-in.o(.init.ramfs)
52 __initramfs_end = .;
53 . = ALIGN(32768);
54 __init_end = .;
55 }
56
57 /DISCARD/ : { /* Exit code and data */
58 *(.exit.text)
59 *(.exit.data)
60 *(.exitcall.exit)
61 }
62
63 .text : { /* Real text segment */
64 _text = .; /* Text and read-only data */
65 *(.text)
66 SCHED_TEXT
67 LOCK_TEXT /* FIXME - borrowed from arm32 - check*/
68 *(.fixup)
69 *(.gnu.warning)
70 *(.rodata)
71 *(.rodata.*)
72 *(.glue_7)
73 *(.glue_7t)
74 *(.got) /* Global offset table */
75
76 _etext = .; /* End of text section */
77 }
78
79 . = ALIGN(16);
80 __ex_table : { /* Exception table */
81 __start___ex_table = .;
82 *(__ex_table)
83 __stop___ex_table = .;
84 }
85
86 RODATA
87
88 _endtext = .;
89
90 . = DATAADDR;
91
92 _sdata = .;
93
94 .data : {
95 . = ALIGN(8192);
96 /*
97 * first, the init thread union, aligned
98 * to an 8192 byte boundary. (see arm26/kernel/init_task.c)
99	 * FIXME - should this be 32K aligned on arm26?
100 */
101 *(.init.task)
102
103 /*
104 * The cacheline aligned data
105 */
106 . = ALIGN(32);
107 *(.data.cacheline_aligned)
108
109 /*
110 * and the usual data section
111 */
112 *(.data)
113 CONSTRUCTORS
114
115 *(.init.data)
116
117 _edata = .;
118 }
119
120 .bss : {
121 __bss_start = .; /* BSS */
122 *(.bss)
123 *(COMMON)
124 _end = . ;
125 }
126 /* Stabs debugging sections. */
127 .stab 0 : { *(.stab) }
128 .stabstr 0 : { *(.stabstr) }
129 .stab.excl 0 : { *(.stab.excl) }
130 .stab.exclstr 0 : { *(.stab.exclstr) }
131 .stab.index 0 : { *(.stab.index) }
132 .stab.indexstr 0 : { *(.stab.indexstr) }
133 .comment 0 : { *(.comment) }
134}
diff --git a/arch/arm26/kernel/vmlinux-arm26.lds.in b/arch/arm26/kernel/vmlinux-arm26.lds.in
new file mode 100644
index 000000000000..d1d3418d7eb6
--- /dev/null
+++ b/arch/arm26/kernel/vmlinux-arm26.lds.in
@@ -0,0 +1,127 @@
1/* ld script to make ARM Linux kernel
2 * taken from the i386 version by Russell King
3 * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
4 * borrowed from Russell's ARM port by Ian Molton and subsequently modified.
5 */
6
7#include <asm-generic/vmlinux.lds.h>
8
9OUTPUT_ARCH(arm)
10ENTRY(stext)
11jiffies = jiffies_64;
12SECTIONS
13{
14 . = TEXTADDR;
15 .init : { /* Init code and data */
16 _stext = .;
17 __init_begin = .;
18 _sinittext = .;
19 *(.init.text)
20 _einittext = .;
21 __proc_info_begin = .;
22 *(.proc.info)
23 __proc_info_end = .;
24 __arch_info_begin = .;
25 *(.arch.info)
26 __arch_info_end = .;
27 __tagtable_begin = .;
28 *(.taglist)
29 __tagtable_end = .;
30 *(.init.data)
31 . = ALIGN(16);
32 __setup_start = .;
33 *(.init.setup)
34 __setup_end = .;
35 __early_begin = .;
36 *(__early_param)
37 __early_end = .;
38 __initcall_start = .;
39 *(.initcall1.init)
40 *(.initcall2.init)
41 *(.initcall3.init)
42 *(.initcall4.init)
43 *(.initcall5.init)
44 *(.initcall6.init)
45 *(.initcall7.init)
46 __initcall_end = .;
47 __con_initcall_start = .;
48 *(.con_initcall.init)
49 __con_initcall_end = .;
50 . = ALIGN(32);
51 __initramfs_start = .;
52 usr/built-in.o(.init.ramfs)
53 __initramfs_end = .;
54 . = ALIGN(32768);
55 __init_end = .;
56 }
57
58 /DISCARD/ : { /* Exit code and data */
59 *(.exit.text)
60 *(.exit.data)
61 *(.exitcall.exit)
62 }
63
64 .text : { /* Real text segment */
65 _text = .; /* Text and read-only data */
66 *(.text)
67 SCHED_TEXT
68 LOCK_TEXT
69 *(.fixup)
70 *(.gnu.warning)
71 *(.rodata)
72 *(.rodata.*)
73 *(.glue_7)
74 *(.glue_7t)
75 *(.got) /* Global offset table */
76
77 _etext = .; /* End of text section */
78 }
79
80 . = ALIGN(16);
81 __ex_table : { /* Exception table */
82 __start___ex_table = .;
83 *(__ex_table)
84 __stop___ex_table = .;
85 }
86
87 RODATA
88
89 . = ALIGN(8192);
90
91 .data : {
92 /*
93 * first, the init task union, aligned
94 * to an 8192 byte boundary. (see arm26/kernel/init_task.c)
95 */
96 *(.init.task)
97
98 /*
99 * The cacheline aligned data
100 */
101 . = ALIGN(32);
102 *(.data.cacheline_aligned)
103
104 /*
105 * and the usual data section
106 */
107 *(.data)
108 CONSTRUCTORS
109
110 _edata = .;
111 }
112
113 .bss : {
114 __bss_start = .; /* BSS */
115 *(.bss)
116 *(COMMON)
117 _end = . ;
118 }
119 /* Stabs debugging sections. */
120 .stab 0 : { *(.stab) }
121 .stabstr 0 : { *(.stabstr) }
122 .stab.excl 0 : { *(.stab.excl) }
123 .stab.exclstr 0 : { *(.stab.exclstr) }
124 .stab.index 0 : { *(.stab.index) }
125 .stab.indexstr 0 : { *(.stab.indexstr) }
126 .comment 0 : { *(.comment) }
127}
diff --git a/arch/arm26/kernel/vmlinux.lds.S b/arch/arm26/kernel/vmlinux.lds.S
new file mode 100644
index 000000000000..811a69048010
--- /dev/null
+++ b/arch/arm26/kernel/vmlinux.lds.S
@@ -0,0 +1,12 @@
1#include <linux/config.h>
2
3#ifdef CONFIG_XIP_KERNEL
4
5#include "vmlinux-arm26-xip.lds.in"
6
7#else
8
9#include "vmlinux-arm26.lds.in"
10
11#endif
12