path: root/arch/avr32/kernel
author     Haavard Skinnemoen <hskinnemoen@atmel.com>  2006-09-26 02:32:13 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>       2006-09-26 11:48:54 -0400
commit     5f97f7f9400de47ae837170bb274e90ad3934386 (patch)
tree       514451e6dc6b46253293a00035d375e77b1c65ed /arch/avr32/kernel
parent     53e62d3aaa60590d4a69b4e07c29f448b5151047 (diff)
[PATCH] avr32 architecture
This adds support for the Atmel AVR32 architecture as well as the AT32AP7000 CPU and the AT32STK1000 development board.

AVR32 is a new high-performance 32-bit RISC microprocessor core, designed for cost-sensitive embedded applications, with particular emphasis on low power consumption and high code density. The AVR32 architecture is not binary compatible with earlier 8-bit AVR architectures.

The AVR32 architecture, including the instruction set, is described by the AVR32 Architecture Manual, available from

http://www.atmel.com/dyn/resources/prod_documents/doc32000.pdf

The Atmel AT32AP7000 is the first CPU implementing the AVR32 architecture. It features a 7-stage pipeline, 16KB instruction and data caches and a full Memory Management Unit. It also comes with a large set of integrated peripherals, many of which are shared with the AT91 ARM-based controllers from Atmel.

Full data sheet is available from

http://www.atmel.com/dyn/resources/prod_documents/doc32003.pdf

while the CPU core implementation including caches and MMU is documented by the AVR32 AP Technical Reference, available from

http://www.atmel.com/dyn/resources/prod_documents/doc32001.pdf

Information about the AT32STK1000 development board can be found at

http://www.atmel.com/dyn/products/tools_card.asp?tool_id=3918

including a BSP CD image with an earlier version of this patch, development tools (binaries and source/patches) and a root filesystem image suitable for booting from SD card.

Alternatively, there's a preliminary "getting started" guide available at

http://avr32linux.org/twiki/bin/view/Main/GettingStarted

which provides links to the sources and patches you will need in order to set up a cross-compiling environment for avr32-linux.

This patch, as well as the other patches included with the BSP and the toolchain patches, is actively supported by Atmel Corporation.

[dmccr@us.ibm.com: Fix more pxx_page macro locations]
[bunk@stusta.de: fix `make defconfig']

Signed-off-by: Haavard Skinnemoen <hskinnemoen@atmel.com>
Signed-off-by: Adrian Bunk <bunk@stusta.de>
Signed-off-by: Dave McCracken <dmccr@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch/avr32/kernel')
-rw-r--r--  arch/avr32/kernel/Makefile          |  18
-rw-r--r--  arch/avr32/kernel/asm-offsets.c     |  25
-rw-r--r--  arch/avr32/kernel/avr32_ksyms.c     |  55
-rw-r--r--  arch/avr32/kernel/cpu.c             | 327
-rw-r--r--  arch/avr32/kernel/entry-avr32b.S    | 678
-rw-r--r--  arch/avr32/kernel/head.S            |  45
-rw-r--r--  arch/avr32/kernel/init_task.c       |  38
-rw-r--r--  arch/avr32/kernel/irq.c             |  71
-rw-r--r--  arch/avr32/kernel/kprobes.c         | 270
-rw-r--r--  arch/avr32/kernel/module.c          | 324
-rw-r--r--  arch/avr32/kernel/process.c         | 276
-rw-r--r--  arch/avr32/kernel/ptrace.c          | 371
-rw-r--r--  arch/avr32/kernel/semaphore.c       | 148
-rw-r--r--  arch/avr32/kernel/setup.c           | 335
-rw-r--r--  arch/avr32/kernel/signal.c          | 328
-rw-r--r--  arch/avr32/kernel/switch_to.S       |  35
-rw-r--r--  arch/avr32/kernel/sys_avr32.c       |  51
-rw-r--r--  arch/avr32/kernel/syscall-stubs.S   | 102
-rw-r--r--  arch/avr32/kernel/syscall_table.S   | 289
-rw-r--r--  arch/avr32/kernel/time.c            | 238
-rw-r--r--  arch/avr32/kernel/traps.c           | 425
-rw-r--r--  arch/avr32/kernel/vmlinux.lds.c     | 139
22 files changed, 4588 insertions, 0 deletions
diff --git a/arch/avr32/kernel/Makefile b/arch/avr32/kernel/Makefile
new file mode 100644
index 000000000000..90e5afff54a2
--- /dev/null
+++ b/arch/avr32/kernel/Makefile
@@ -0,0 +1,18 @@
1#
2# Makefile for the Linux/AVR32 kernel.
3#
4
5extra-y := head.o vmlinux.lds
6
7obj-$(CONFIG_SUBARCH_AVR32B) += entry-avr32b.o
8obj-y += syscall_table.o syscall-stubs.o irq.o
9obj-y += setup.o traps.o semaphore.o ptrace.o
10obj-y += signal.o sys_avr32.o process.o time.o
11obj-y += init_task.o switch_to.o cpu.o
12obj-$(CONFIG_MODULES) += module.o avr32_ksyms.o
13obj-$(CONFIG_KPROBES) += kprobes.o
14
15USE_STANDARD_AS_RULE := true
16
17%.lds: %.lds.c FORCE
18 $(call if_changed_dep,cpp_lds_S)
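
The last rule feeds vmlinux.lds.c through the C preprocessor (cpp_lds_S), so the linker script can pull constants from kernel headers instead of hard-coding them. A minimal sketch of the idea, assuming nothing about the real vmlinux.lds.c beyond its name; the sections and addresses below are purely illustrative:

/* example.lds.c -- turned into example.lds by the %.lds rule above */
#include <asm/page.h>                   /* provides PAGE_SIZE */

OUTPUT_ARCH(avr32)
SECTIONS
{
        . = 0x90000000;                 /* illustrative load address */
        .text : { *(.text) }
        . = ALIGN(PAGE_SIZE);           /* expands to the real page size */
        .data : { *(.data) }
}
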
diff --git a/arch/avr32/kernel/asm-offsets.c b/arch/avr32/kernel/asm-offsets.c
new file mode 100644
index 000000000000..97d865865667
--- /dev/null
+++ b/arch/avr32/kernel/asm-offsets.c
@@ -0,0 +1,25 @@
1/*
2 * Generate definitions needed by assembly language modules.
3 * This code generates raw asm output which is post-processed
4 * to extract and format the required data.
5 */
6
7#include <linux/thread_info.h>
8
9#define DEFINE(sym, val) \
10 asm volatile("\n->" #sym " %0 " #val : : "i" (val))
11
12#define BLANK() asm volatile("\n->" : : )
13
14#define OFFSET(sym, str, mem) \
15 DEFINE(sym, offsetof(struct str, mem));
16
17void foo(void)
18{
19 OFFSET(TI_task, thread_info, task);
20 OFFSET(TI_exec_domain, thread_info, exec_domain);
21 OFFSET(TI_flags, thread_info, flags);
22 OFFSET(TI_cpu, thread_info, cpu);
23 OFFSET(TI_preempt_count, thread_info, preempt_count);
24 OFFSET(TI_restart_block, thread_info, restart_block);
25}
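
The DEFINE()/OFFSET() macros above work by emitting "->symbol value" markers into the compiler's assembly output; the build system then extracts those markers into a generated header of plain #defines that the assembly entry code uses (entry-avr32b.S reads thread_info fields through these TI_* offsets, e.g. "ld.w r1, r0[TI_flags]"). Roughly what the generated result looks like; the numeric offsets are illustrative, the real ones depend on struct thread_info's layout:

/* asm-offsets.h (generated, sketch only) */
#define TI_task                  0      /* offsetof(struct thread_info, task) */
#define TI_flags                 8      /* offsetof(struct thread_info, flags) */
#define TI_cpu                  16      /* offsetof(struct thread_info, cpu) */
#define TI_preempt_count        20      /* offsetof(struct thread_info, preempt_count) */
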
diff --git a/arch/avr32/kernel/avr32_ksyms.c b/arch/avr32/kernel/avr32_ksyms.c
new file mode 100644
index 000000000000..04f767a272b7
--- /dev/null
+++ b/arch/avr32/kernel/avr32_ksyms.c
@@ -0,0 +1,55 @@
1/*
2 * Export AVR32-specific functions for loadable modules.
3 *
4 * Copyright (C) 2004-2006 Atmel Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#include <linux/module.h>
11
12#include <asm/checksum.h>
13#include <asm/uaccess.h>
14#include <asm/delay.h>
15
16/*
17 * GCC functions
18 */
19extern unsigned long long __avr32_lsl64(unsigned long long u, unsigned long b);
20extern unsigned long long __avr32_lsr64(unsigned long long u, unsigned long b);
21extern unsigned long long __avr32_asr64(unsigned long long u, unsigned long b);
22EXPORT_SYMBOL(__avr32_lsl64);
23EXPORT_SYMBOL(__avr32_lsr64);
24EXPORT_SYMBOL(__avr32_asr64);
25
26/*
27 * String functions
28 */
29EXPORT_SYMBOL(memset);
30EXPORT_SYMBOL(memcpy);
31
32/*
33 * Userspace access stuff.
34 */
35EXPORT_SYMBOL(copy_from_user);
36EXPORT_SYMBOL(copy_to_user);
37EXPORT_SYMBOL(__copy_user);
38EXPORT_SYMBOL(strncpy_from_user);
39EXPORT_SYMBOL(__strncpy_from_user);
40EXPORT_SYMBOL(clear_user);
41EXPORT_SYMBOL(__clear_user);
42EXPORT_SYMBOL(csum_partial);
43EXPORT_SYMBOL(csum_partial_copy_generic);
44
45/* Delay loops (lib/delay.S) */
46EXPORT_SYMBOL(__ndelay);
47EXPORT_SYMBOL(__udelay);
48EXPORT_SYMBOL(__const_udelay);
49
50/* Bit operations (lib/findbit.S) */
51EXPORT_SYMBOL(find_first_zero_bit);
52EXPORT_SYMBOL(find_next_zero_bit);
53EXPORT_SYMBOL(find_first_bit);
54EXPORT_SYMBOL(find_next_bit);
55EXPORT_SYMBOL(generic_find_next_zero_le_bit);
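
These exports are what allow loadable modules to link against the hand-written assembly helpers in arch/avr32/lib. A minimal sketch, assuming an out-of-tree module that copies data from userspace; the function and buffer here are illustrative, not part of this patch:

#include <linux/module.h>
#include <linux/errno.h>
#include <asm/uaccess.h>

static char kbuf[64];

/* Would typically be wired up as a file_operations .write handler. */
ssize_t demo_write(const char __user *ubuf, size_t len)
{
        if (len > sizeof(kbuf))
                len = sizeof(kbuf);
        /* copy_from_user() resolves against the export above in a module */
        if (copy_from_user(kbuf, ubuf, len))
                return -EFAULT;
        return len;
}

MODULE_LICENSE("GPL");
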
diff --git a/arch/avr32/kernel/cpu.c b/arch/avr32/kernel/cpu.c
new file mode 100644
index 000000000000..342452ba2049
--- /dev/null
+++ b/arch/avr32/kernel/cpu.c
@@ -0,0 +1,327 @@
1/*
2 * Copyright (C) 2005-2006 Atmel Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8#include <linux/init.h>
9#include <linux/sysdev.h>
10#include <linux/seq_file.h>
11#include <linux/cpu.h>
12#include <linux/percpu.h>
13#include <linux/param.h>
14#include <linux/errno.h>
15
16#include <asm/setup.h>
17#include <asm/sysreg.h>
18
19static DEFINE_PER_CPU(struct cpu, cpu_devices);
20
21#ifdef CONFIG_PERFORMANCE_COUNTERS
22
23/*
23 * XXX: If/when an SMP-capable implementation of AVR32 is ever
24 * made, we must make sure that the code executes on the correct CPU.
26 */
27static ssize_t show_pc0event(struct sys_device *dev, char *buf)
28{
29 unsigned long pccr;
30
31 pccr = sysreg_read(PCCR);
32 return sprintf(buf, "0x%lx\n", (pccr >> 12) & 0x3f);
33}
34static ssize_t store_pc0event(struct sys_device *dev, const char *buf,
35 size_t count)
36{
37 unsigned long val;
38 char *endp;
39
40 val = simple_strtoul(buf, &endp, 0);
41 if (endp == buf || val > 0x3f)
42 return -EINVAL;
43 val = (val << 12) | (sysreg_read(PCCR) & 0xfffc0fff);
44 sysreg_write(PCCR, val);
45 return count;
46}
47static ssize_t show_pc0count(struct sys_device *dev, char *buf)
48{
49 unsigned long pcnt0;
50
51 pcnt0 = sysreg_read(PCNT0);
52 return sprintf(buf, "%lu\n", pcnt0);
53}
54static ssize_t store_pc0count(struct sys_device *dev, const char *buf,
55 size_t count)
56{
57 unsigned long val;
58 char *endp;
59
60 val = simple_strtoul(buf, &endp, 0);
61 if (endp == buf)
62 return -EINVAL;
63 sysreg_write(PCNT0, val);
64
65 return count;
66}
67
68static ssize_t show_pc1event(struct sys_device *dev, char *buf)
69{
70 unsigned long pccr;
71
72 pccr = sysreg_read(PCCR);
73 return sprintf(buf, "0x%lx\n", (pccr >> 18) & 0x3f);
74}
75static ssize_t store_pc1event(struct sys_device *dev, const char *buf,
76 size_t count)
77{
78 unsigned long val;
79 char *endp;
80
81 val = simple_strtoul(buf, &endp, 0);
82 if (endp == buf || val > 0x3f)
83 return -EINVAL;
84 val = (val << 18) | (sysreg_read(PCCR) & 0xff03ffff);
85 sysreg_write(PCCR, val);
86 return count;
87}
88static ssize_t show_pc1count(struct sys_device *dev, char *buf)
89{
90 unsigned long pcnt1;
91
92 pcnt1 = sysreg_read(PCNT1);
93 return sprintf(buf, "%lu\n", pcnt1);
94}
95static ssize_t store_pc1count(struct sys_device *dev, const char *buf,
96 size_t count)
97{
98 unsigned long val;
99 char *endp;
100
101 val = simple_strtoul(buf, &endp, 0);
102 if (endp == buf)
103 return -EINVAL;
104 sysreg_write(PCNT1, val);
105
106 return count;
107}
108
109static ssize_t show_pccycles(struct sys_device *dev, char *buf)
110{
111 unsigned long pccnt;
112
113 pccnt = sysreg_read(PCCNT);
114 return sprintf(buf, "%lu\n", pccnt);
115}
116static ssize_t store_pccycles(struct sys_device *dev, const char *buf,
117 size_t count)
118{
119 unsigned long val;
120 char *endp;
121
122 val = simple_strtoul(buf, &endp, 0);
123 if (endp == buf)
124 return -EINVAL;
125 sysreg_write(PCCNT, val);
126
127 return count;
128}
129
130static ssize_t show_pcenable(struct sys_device *dev, char *buf)
131{
132 unsigned long pccr;
133
134 pccr = sysreg_read(PCCR);
135 return sprintf(buf, "%c\n", (pccr & 1)?'1':'0');
136}
137static ssize_t store_pcenable(struct sys_device *dev, const char *buf,
138 size_t count)
139{
140 unsigned long pccr, val;
141 char *endp;
142
143 val = simple_strtoul(buf, &endp, 0);
144 if (endp == buf)
145 return -EINVAL;
146 if (val)
147 val = 1;
148
149 pccr = sysreg_read(PCCR);
150 pccr = (pccr & ~1UL) | val;
151 sysreg_write(PCCR, pccr);
152
153 return count;
154}
155
156static SYSDEV_ATTR(pc0event, 0600, show_pc0event, store_pc0event);
157static SYSDEV_ATTR(pc0count, 0600, show_pc0count, store_pc0count);
158static SYSDEV_ATTR(pc1event, 0600, show_pc1event, store_pc1event);
159static SYSDEV_ATTR(pc1count, 0600, show_pc1count, store_pc1count);
160static SYSDEV_ATTR(pccycles, 0600, show_pccycles, store_pccycles);
161static SYSDEV_ATTR(pcenable, 0600, show_pcenable, store_pcenable);
162
163#endif /* CONFIG_PERFORMANCE_COUNTERS */
164
165static int __init topology_init(void)
166{
167 int cpu;
168
169 for_each_possible_cpu(cpu) {
170 struct cpu *c = &per_cpu(cpu_devices, cpu);
171
172 register_cpu(c, cpu);
173
174#ifdef CONFIG_PERFORMANCE_COUNTERS
175 sysdev_create_file(&c->sysdev, &attr_pc0event);
176 sysdev_create_file(&c->sysdev, &attr_pc0count);
177 sysdev_create_file(&c->sysdev, &attr_pc1event);
178 sysdev_create_file(&c->sysdev, &attr_pc1count);
179 sysdev_create_file(&c->sysdev, &attr_pccycles);
180 sysdev_create_file(&c->sysdev, &attr_pcenable);
181#endif
182 }
183
184 return 0;
185}
186
187subsys_initcall(topology_init);
188
189static const char *cpu_names[] = {
190 "Morgan",
191 "AP7000",
192};
193#define NR_CPU_NAMES ARRAY_SIZE(cpu_names)
194
195static const char *arch_names[] = {
196 "AVR32A",
197 "AVR32B",
198};
199#define NR_ARCH_NAMES ARRAY_SIZE(arch_names)
200
201static const char *mmu_types[] = {
202 "No MMU",
203 "ITLB and DTLB",
204 "Shared TLB",
205 "MPU"
206};
207
208void __init setup_processor(void)
209{
210 unsigned long config0, config1;
211 unsigned cpu_id, cpu_rev, arch_id, arch_rev, mmu_type;
212 unsigned tmp;
213
214 config0 = sysreg_read(CONFIG0); /* 0x0000013e; */
215 config1 = sysreg_read(CONFIG1); /* 0x01f689a2; */
216 cpu_id = config0 >> 24;
217 cpu_rev = (config0 >> 16) & 0xff;
218 arch_id = (config0 >> 13) & 0x07;
219 arch_rev = (config0 >> 10) & 0x07;
220 mmu_type = (config0 >> 7) & 0x03;
221
222 boot_cpu_data.arch_type = arch_id;
223 boot_cpu_data.cpu_type = cpu_id;
224 boot_cpu_data.arch_revision = arch_rev;
225 boot_cpu_data.cpu_revision = cpu_rev;
226 boot_cpu_data.tlb_config = mmu_type;
227
228 tmp = (config1 >> 13) & 0x07;
229 if (tmp) {
230 boot_cpu_data.icache.ways = 1 << ((config1 >> 10) & 0x07);
231 boot_cpu_data.icache.sets = 1 << ((config1 >> 16) & 0x0f);
232 boot_cpu_data.icache.linesz = 1 << (tmp + 1);
233 }
234 tmp = (config1 >> 3) & 0x07;
235 if (tmp) {
236 boot_cpu_data.dcache.ways = 1 << (config1 & 0x07);
237 boot_cpu_data.dcache.sets = 1 << ((config1 >> 6) & 0x0f);
238 boot_cpu_data.dcache.linesz = 1 << (tmp + 1);
239 }
240
241 if ((cpu_id >= NR_CPU_NAMES) || (arch_id >= NR_ARCH_NAMES)) {
242 printk ("Unknown CPU configuration (ID %02x, arch %02x), "
243 "continuing anyway...\n",
244 cpu_id, arch_id);
245 return;
246 }
247
248 printk ("CPU: %s [%02x] revision %d (%s revision %d)\n",
249 cpu_names[cpu_id], cpu_id, cpu_rev,
250 arch_names[arch_id], arch_rev);
251 printk ("CPU: MMU configuration: %s\n", mmu_types[mmu_type]);
252 printk ("CPU: features:");
253 if (config0 & (1 << 6))
254 printk(" fpu");
255 if (config0 & (1 << 5))
256 printk(" java");
257 if (config0 & (1 << 4))
258 printk(" perfctr");
259 if (config0 & (1 << 3))
260 printk(" ocd");
261 printk("\n");
262}
263
264#ifdef CONFIG_PROC_FS
265static int c_show(struct seq_file *m, void *v)
266{
267 unsigned int icache_size, dcache_size;
268 unsigned int cpu = smp_processor_id();
269
270 icache_size = boot_cpu_data.icache.ways *
271 boot_cpu_data.icache.sets *
272 boot_cpu_data.icache.linesz;
273 dcache_size = boot_cpu_data.dcache.ways *
274 boot_cpu_data.dcache.sets *
275 boot_cpu_data.dcache.linesz;
276
277 seq_printf(m, "processor\t: %d\n", cpu);
278
279 if (boot_cpu_data.arch_type < NR_ARCH_NAMES)
280 seq_printf(m, "cpu family\t: %s revision %d\n",
281 arch_names[boot_cpu_data.arch_type],
282 boot_cpu_data.arch_revision);
283 if (boot_cpu_data.cpu_type < NR_CPU_NAMES)
284 seq_printf(m, "cpu type\t: %s revision %d\n",
285 cpu_names[boot_cpu_data.cpu_type],
286 boot_cpu_data.cpu_revision);
287
288 seq_printf(m, "i-cache\t\t: %dK (%u ways x %u sets x %u)\n",
289 icache_size >> 10,
290 boot_cpu_data.icache.ways,
291 boot_cpu_data.icache.sets,
292 boot_cpu_data.icache.linesz);
293 seq_printf(m, "d-cache\t\t: %dK (%u ways x %u sets x %u)\n",
294 dcache_size >> 10,
295 boot_cpu_data.dcache.ways,
296 boot_cpu_data.dcache.sets,
297 boot_cpu_data.dcache.linesz);
298 seq_printf(m, "bogomips\t: %lu.%02lu\n",
299 boot_cpu_data.loops_per_jiffy / (500000/HZ),
300 (boot_cpu_data.loops_per_jiffy / (5000/HZ)) % 100);
301
302 return 0;
303}
304
305static void *c_start(struct seq_file *m, loff_t *pos)
306{
307 return *pos < 1 ? (void *)1 : NULL;
308}
309
310static void *c_next(struct seq_file *m, void *v, loff_t *pos)
311{
312 ++*pos;
313 return NULL;
314}
315
316static void c_stop(struct seq_file *m, void *v)
317{
318
319}
320
321struct seq_operations cpuinfo_op = {
322 .start = c_start,
323 .next = c_next,
324 .stop = c_stop,
325 .show = c_show
326};
327#endif /* CONFIG_PROC_FS */
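
Each SYSDEV_ATTR above becomes a file attached to the per-CPU sysdev registered in topology_init(); on kernels of this vintage those attributes normally appear under /sys/devices/system/cpu/cpu0/ (an assumption about the sysfs location, not something this patch spells out). A hedged userspace sketch of driving the counters from C:

#include <stdio.h>

int main(void)
{
        FILE *f;
        unsigned long cycles;

        /* Enable the counters: store_pcenable() sets bit 0 of PCCR */
        f = fopen("/sys/devices/system/cpu/cpu0/pcenable", "w");
        if (!f)
                return 1;
        fputs("1\n", f);
        fclose(f);

        /* Read the cycle counter: show_pccycles() prints PCCNT in decimal */
        f = fopen("/sys/devices/system/cpu/cpu0/pccycles", "r");
        if (!f)
                return 1;
        if (fscanf(f, "%lu", &cycles) == 1)
                printf("cycles: %lu\n", cycles);
        fclose(f);
        return 0;
}
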
diff --git a/arch/avr32/kernel/entry-avr32b.S b/arch/avr32/kernel/entry-avr32b.S
new file mode 100644
index 000000000000..eeb66792bc37
--- /dev/null
+++ b/arch/avr32/kernel/entry-avr32b.S
@@ -0,0 +1,678 @@
1/*
2 * Copyright (C) 2004-2006 Atmel Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9/*
10 * This file contains the low-level entry-points into the kernel, that is,
11 * exception handlers, debug trap handlers, interrupt handlers and the
12 * system call handler.
13 */
14#include <linux/errno.h>
15
16#include <asm/asm.h>
17#include <asm/hardirq.h>
18#include <asm/irq.h>
19#include <asm/ocd.h>
20#include <asm/page.h>
21#include <asm/pgtable.h>
22#include <asm/ptrace.h>
23#include <asm/sysreg.h>
24#include <asm/thread_info.h>
25#include <asm/unistd.h>
26
27#ifdef CONFIG_PREEMPT
28# define preempt_stop mask_interrupts
29#else
30# define preempt_stop
31# define fault_resume_kernel fault_restore_all
32#endif
33
34#define __MASK(x) ((1 << (x)) - 1)
35#define IRQ_MASK ((__MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT) | \
36 (__MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT))
37
38 .section .ex.text,"ax",@progbits
39 .align 2
40exception_vectors:
41 bral handle_critical
42 .align 2
43 bral handle_critical
44 .align 2
45 bral do_bus_error_write
46 .align 2
47 bral do_bus_error_read
48 .align 2
49 bral do_nmi_ll
50 .align 2
51 bral handle_address_fault
52 .align 2
53 bral handle_protection_fault
54 .align 2
55 bral handle_debug
56 .align 2
57 bral do_illegal_opcode_ll
58 .align 2
59 bral do_illegal_opcode_ll
60 .align 2
61 bral do_illegal_opcode_ll
62 .align 2
63 bral do_fpe_ll
64 .align 2
65 bral do_illegal_opcode_ll
66 .align 2
67 bral handle_address_fault
68 .align 2
69 bral handle_address_fault
70 .align 2
71 bral handle_protection_fault
72 .align 2
73 bral handle_protection_fault
74 .align 2
75 bral do_dtlb_modified
76
77 /*
78 * r0 : PGD/PT/PTE
79 * r1 : Offending address
80 * r2 : Scratch register
81 * r3 : Cause (5, 12 or 13)
82 */
83#define tlbmiss_save pushm r0-r3
84#define tlbmiss_restore popm r0-r3
85
86 .section .tlbx.ex.text,"ax",@progbits
87 .global itlb_miss
88itlb_miss:
89 tlbmiss_save
90 rjmp tlb_miss_common
91
92 .section .tlbr.ex.text,"ax",@progbits
93dtlb_miss_read:
94 tlbmiss_save
95 rjmp tlb_miss_common
96
97 .section .tlbw.ex.text,"ax",@progbits
98dtlb_miss_write:
99 tlbmiss_save
100
101 .global tlb_miss_common
102tlb_miss_common:
103 mfsr r0, SYSREG_PTBR
104 mfsr r1, SYSREG_TLBEAR
105
106 /* Is it the vmalloc space? */
107 bld r1, 31
108 brcs handle_vmalloc_miss
109
110 /* First level lookup */
111pgtbl_lookup:
112 lsr r2, r1, PGDIR_SHIFT
113 ld.w r0, r0[r2 << 2]
114 bld r0, _PAGE_BIT_PRESENT
115 brcc page_table_not_present
116
117 /* TODO: Check access rights on page table if necessary */
118
119 /* Translate to virtual address in P1. */
120 andl r0, 0xf000
121 sbr r0, 31
122
123 /* Second level lookup */
124 lsl r1, (32 - PGDIR_SHIFT)
125 lsr r1, (32 - PGDIR_SHIFT) + PAGE_SHIFT
126 add r2, r0, r1 << 2
127 ld.w r1, r2[0]
128 bld r1, _PAGE_BIT_PRESENT
129 brcc page_not_present
130
131 /* Mark the page as accessed */
132 sbr r1, _PAGE_BIT_ACCESSED
133 st.w r2[0], r1
134
135 /* Drop software flags */
136 andl r1, _PAGE_FLAGS_HARDWARE_MASK & 0xffff
137 mtsr SYSREG_TLBELO, r1
138
139 /* Figure out which entry we want to replace */
140 mfsr r0, SYSREG_TLBARLO
141 clz r2, r0
142 brcc 1f
143 mov r1, -1 /* All entries have been accessed, */
144 mtsr SYSREG_TLBARLO, r1 /* so reset TLBAR */
145 mov r2, 0 /* and start at 0 */
1461: mfsr r1, SYSREG_MMUCR
147 lsl r2, 14
148 andl r1, 0x3fff, COH
149 or r1, r2
150 mtsr SYSREG_MMUCR, r1
151
152 tlbw
153
154 tlbmiss_restore
155 rete
156
157handle_vmalloc_miss:
158 /* Simply do the lookup in init's page table */
159 mov r0, lo(swapper_pg_dir)
160 orh r0, hi(swapper_pg_dir)
161 rjmp pgtbl_lookup
162
163
164 /* --- System Call --- */
165
166 .section .scall.text,"ax",@progbits
167system_call:
168 pushm r12 /* r12_orig */
169 stmts --sp, r0-lr
170 zero_fp
171 mfsr r0, SYSREG_RAR_SUP
172 mfsr r1, SYSREG_RSR_SUP
173 stm --sp, r0-r1
174
175 /* check for syscall tracing */
176 get_thread_info r0
177 ld.w r1, r0[TI_flags]
178 bld r1, TIF_SYSCALL_TRACE
179 brcs syscall_trace_enter
180
181syscall_trace_cont:
182 cp.w r8, NR_syscalls
183 brhs syscall_badsys
184
185 lddpc lr, syscall_table_addr
186 ld.w lr, lr[r8 << 2]
187 mov r8, r5 /* 5th argument (6th is pushed by stub) */
188 icall lr
189
190 .global syscall_return
191syscall_return:
192 get_thread_info r0
193 mask_interrupts /* make sure we don't miss an interrupt
194 setting need_resched or sigpending
195 between sampling and the rets */
196
197 /* Store the return value so that the correct value is loaded below */
198 stdsp sp[REG_R12], r12
199
200 ld.w r1, r0[TI_flags]
201 andl r1, _TIF_ALLWORK_MASK, COH
202 brne syscall_exit_work
203
204syscall_exit_cont:
205 popm r8-r9
206 mtsr SYSREG_RAR_SUP, r8
207 mtsr SYSREG_RSR_SUP, r9
208 ldmts sp++, r0-lr
209 sub sp, -4 /* r12_orig */
210 rets
211
212 .align 2
213syscall_table_addr:
214 .long sys_call_table
215
216syscall_badsys:
217 mov r12, -ENOSYS
218 rjmp syscall_return
219
220 .global ret_from_fork
221ret_from_fork:
222 rcall schedule_tail
223
224 /* check for syscall tracing */
225 get_thread_info r0
226 ld.w r1, r0[TI_flags]
227 andl r1, _TIF_ALLWORK_MASK, COH
228 brne syscall_exit_work
229 rjmp syscall_exit_cont
230
231syscall_trace_enter:
232 pushm r8-r12
233 rcall syscall_trace
234 popm r8-r12
235 rjmp syscall_trace_cont
236
237syscall_exit_work:
238 bld r1, TIF_SYSCALL_TRACE
239 brcc 1f
240 unmask_interrupts
241 rcall syscall_trace
242 mask_interrupts
243 ld.w r1, r0[TI_flags]
244
2451: bld r1, TIF_NEED_RESCHED
246 brcc 2f
247 unmask_interrupts
248 rcall schedule
249 mask_interrupts
250 ld.w r1, r0[TI_flags]
251 rjmp 1b
252
2532: mov r2, _TIF_SIGPENDING | _TIF_RESTORE_SIGMASK
254 tst r1, r2
255 breq 3f
256 unmask_interrupts
257 mov r12, sp
258 mov r11, r0
259 rcall do_notify_resume
260 mask_interrupts
261 ld.w r1, r0[TI_flags]
262 rjmp 1b
263
2643: bld r1, TIF_BREAKPOINT
265 brcc syscall_exit_cont
266 mfsr r3, SYSREG_TLBEHI
267 lddsp r2, sp[REG_PC]
268 andl r3, 0xff, COH
269 lsl r3, 1
270 sbr r3, 30
271 sbr r3, 0
272 mtdr DBGREG_BWA2A, r2
273 mtdr DBGREG_BWC2A, r3
274 rjmp syscall_exit_cont
275
276
277 /* The slow path of the TLB miss handler */
278page_table_not_present:
279page_not_present:
280 tlbmiss_restore
281 sub sp, 4
282 stmts --sp, r0-lr
283 rcall save_full_context_ex
284 mfsr r12, SYSREG_ECR
285 mov r11, sp
286 rcall do_page_fault
287 rjmp ret_from_exception
288
289 /* This function expects to find offending PC in SYSREG_RAR_EX */
290save_full_context_ex:
291 mfsr r8, SYSREG_RSR_EX
292 mov r12, r8
293 andh r8, (MODE_MASK >> 16), COH
294 mfsr r11, SYSREG_RAR_EX
295 brne 2f
296
2971: pushm r11, r12 /* PC and SR */
298 unmask_exceptions
299 ret r12
300
3012: sub r10, sp, -(FRAME_SIZE_FULL - REG_LR)
302 stdsp sp[4], r10 /* replace saved SP */
303 rjmp 1b
304
305 /* Low-level exception handlers */
306handle_critical:
307 pushm r12
308 pushm r0-r12
309 rcall save_full_context_ex
310 mfsr r12, SYSREG_ECR
311 mov r11, sp
312 rcall do_critical_exception
313
314 /* We should never get here... */
315bad_return:
316 sub r12, pc, (. - 1f)
317 bral panic
318 .align 2
3191: .asciz "Return from critical exception!"
320
321 .align 1
322do_bus_error_write:
323 sub sp, 4
324 stmts --sp, r0-lr
325 rcall save_full_context_ex
326 mov r11, 1
327 rjmp 1f
328
329do_bus_error_read:
330 sub sp, 4
331 stmts --sp, r0-lr
332 rcall save_full_context_ex
333 mov r11, 0
3341: mfsr r12, SYSREG_BEAR
335 mov r10, sp
336 rcall do_bus_error
337 rjmp ret_from_exception
338
339 .align 1
340do_nmi_ll:
341 sub sp, 4
342 stmts --sp, r0-lr
343 /* FIXME: Make sure RAR_NMI and RSR_NMI are pushed instead of *_EX */
344 rcall save_full_context_ex
345 mfsr r12, SYSREG_ECR
346 mov r11, sp
347 rcall do_nmi
348 rjmp bad_return
349
350handle_address_fault:
351 sub sp, 4
352 stmts --sp, r0-lr
353 rcall save_full_context_ex
354 mfsr r12, SYSREG_ECR
355 mov r11, sp
356 rcall do_address_exception
357 rjmp ret_from_exception
358
359handle_protection_fault:
360 sub sp, 4
361 stmts --sp, r0-lr
362 rcall save_full_context_ex
363 mfsr r12, SYSREG_ECR
364 mov r11, sp
365 rcall do_page_fault
366 rjmp ret_from_exception
367
368 .align 1
369do_illegal_opcode_ll:
370 sub sp, 4
371 stmts --sp, r0-lr
372 rcall save_full_context_ex
373 mfsr r12, SYSREG_ECR
374 mov r11, sp
375 rcall do_illegal_opcode
376 rjmp ret_from_exception
377
378do_dtlb_modified:
379 pushm r0-r3
380 mfsr r1, SYSREG_TLBEAR
381 mfsr r0, SYSREG_PTBR
382 lsr r2, r1, PGDIR_SHIFT
383 ld.w r0, r0[r2 << 2]
384 lsl r1, (32 - PGDIR_SHIFT)
385 lsr r1, (32 - PGDIR_SHIFT) + PAGE_SHIFT
386
387 /* Translate to virtual address in P1 */
388 andl r0, 0xf000
389 sbr r0, 31
390 add r2, r0, r1 << 2
391 ld.w r3, r2[0]
392 sbr r3, _PAGE_BIT_DIRTY
393 mov r0, r3
394 st.w r2[0], r3
395
396 /* The page table is up-to-date. Update the TLB entry as well */
397 andl r0, lo(_PAGE_FLAGS_HARDWARE_MASK)
398 mtsr SYSREG_TLBELO, r0
399
400 /* MMUCR[DRP] is updated automatically, so let's go... */
401 tlbw
402
403 popm r0-r3
404 rete
405
406do_fpe_ll:
407 sub sp, 4
408 stmts --sp, r0-lr
409 rcall save_full_context_ex
410 unmask_interrupts
411 mov r12, 26
412 mov r11, sp
413 rcall do_fpe
414 rjmp ret_from_exception
415
416ret_from_exception:
417 mask_interrupts
418 lddsp r4, sp[REG_SR]
419 andh r4, (MODE_MASK >> 16), COH
420 brne fault_resume_kernel
421
422 get_thread_info r0
423 ld.w r1, r0[TI_flags]
424 andl r1, _TIF_WORK_MASK, COH
425 brne fault_exit_work
426
427fault_resume_user:
428 popm r8-r9
429 mask_exceptions
430 mtsr SYSREG_RAR_EX, r8
431 mtsr SYSREG_RSR_EX, r9
432 ldmts sp++, r0-lr
433 sub sp, -4
434 rete
435
436fault_resume_kernel:
437#ifdef CONFIG_PREEMPT
438 get_thread_info r0
439 ld.w r2, r0[TI_preempt_count]
440 cp.w r2, 0
441 brne 1f
442 ld.w r1, r0[TI_flags]
443 bld r1, TIF_NEED_RESCHED
444 brcc 1f
445 lddsp r4, sp[REG_SR]
446 bld r4, SYSREG_GM_OFFSET
447 brcs 1f
448 rcall preempt_schedule_irq
4491:
450#endif
451
452 popm r8-r9
453 mask_exceptions
454 mfsr r1, SYSREG_SR
455 mtsr SYSREG_RAR_EX, r8
456 mtsr SYSREG_RSR_EX, r9
457 popm lr
458 sub sp, -4 /* ignore SP */
459 popm r0-r12
460 sub sp, -4 /* ignore r12_orig */
461 rete
462
463irq_exit_work:
464 /* Switch to exception mode so that we can share the same code. */
465 mfsr r8, SYSREG_SR
466 cbr r8, SYSREG_M0_OFFSET
467 orh r8, hi(SYSREG_BIT(M1) | SYSREG_BIT(M2))
468 mtsr SYSREG_SR, r8
469 sub pc, -2
470 get_thread_info r0
471 ld.w r1, r0[TI_flags]
472
473fault_exit_work:
474 bld r1, TIF_NEED_RESCHED
475 brcc 1f
476 unmask_interrupts
477 rcall schedule
478 mask_interrupts
479 ld.w r1, r0[TI_flags]
480 rjmp fault_exit_work
481
4821: mov r2, _TIF_SIGPENDING | _TIF_RESTORE_SIGMASK
483 tst r1, r2
484 breq 2f
485 unmask_interrupts
486 mov r12, sp
487 mov r11, r0
488 rcall do_notify_resume
489 mask_interrupts
490 ld.w r1, r0[TI_flags]
491 rjmp fault_exit_work
492
4932: bld r1, TIF_BREAKPOINT
494 brcc fault_resume_user
495 mfsr r3, SYSREG_TLBEHI
496 lddsp r2, sp[REG_PC]
497 andl r3, 0xff, COH
498 lsl r3, 1
499 sbr r3, 30
500 sbr r3, 0
501 mtdr DBGREG_BWA2A, r2
502 mtdr DBGREG_BWC2A, r3
503 rjmp fault_resume_user
504
505 /* If we get a debug trap from privileged context we end up here */
506handle_debug_priv:
507 /* Fix up LR and SP in regs. r11 contains the mode we came from */
508 mfsr r8, SYSREG_SR
509 mov r9, r8
510 andh r8, hi(~MODE_MASK)
511 or r8, r11
512 mtsr SYSREG_SR, r8
513 sub pc, -2
514 stdsp sp[REG_LR], lr
515 mtsr SYSREG_SR, r9
516 sub pc, -2
517 sub r10, sp, -FRAME_SIZE_FULL
518 stdsp sp[REG_SP], r10
519 mov r12, sp
520 rcall do_debug_priv
521
522 /* Now, put everything back */
523 ssrf SR_EM_BIT
524 popm r10, r11
525 mtsr SYSREG_RAR_DBG, r10
526 mtsr SYSREG_RSR_DBG, r11
527 mfsr r8, SYSREG_SR
528 mov r9, r8
529 andh r8, hi(~MODE_MASK)
530 andh r11, hi(MODE_MASK)
531 or r8, r11
532 mtsr SYSREG_SR, r8
533 sub pc, -2
534 popm lr
535 mtsr SYSREG_SR, r9
536 sub pc, -2
537 sub sp, -4 /* skip SP */
538 popm r0-r12
539 sub sp, -4
540 retd
541
542 /*
543 * At this point, everything is masked, that is, interrupts,
544 * exceptions and debugging traps. We might get called from
545 * interrupt or exception context in some rare cases, but this
546 * will be taken care of by do_debug(), so we're not going to
547 * do a 100% correct context save here.
548 */
549handle_debug:
550 sub sp, 4 /* r12_orig */
551 stmts --sp, r0-lr
552 mfsr r10, SYSREG_RAR_DBG
553 mfsr r11, SYSREG_RSR_DBG
554 unmask_exceptions
555 pushm r10,r11
556 andh r11, (MODE_MASK >> 16), COH
557 brne handle_debug_priv
558
559 mov r12, sp
560 rcall do_debug
561
562 lddsp r10, sp[REG_SR]
563 andh r10, (MODE_MASK >> 16), COH
564 breq debug_resume_user
565
566debug_restore_all:
567 popm r10,r11
568 mask_exceptions
569 mtsr SYSREG_RSR_DBG, r11
570 mtsr SYSREG_RAR_DBG, r10
571 ldmts sp++, r0-lr
572 sub sp, -4
573 retd
574
575debug_resume_user:
576 get_thread_info r0
577 mask_interrupts
578
579 ld.w r1, r0[TI_flags]
580 andl r1, _TIF_DBGWORK_MASK, COH
581 breq debug_restore_all
582
5831: bld r1, TIF_NEED_RESCHED
584 brcc 2f
585 unmask_interrupts
586 rcall schedule
587 mask_interrupts
588 ld.w r1, r0[TI_flags]
589 rjmp 1b
590
5912: mov r2, _TIF_SIGPENDING | _TIF_RESTORE_SIGMASK
592 tst r1, r2
593 breq 3f
594 unmask_interrupts
595 mov r12, sp
596 mov r11, r0
597 rcall do_notify_resume
598 mask_interrupts
599 ld.w r1, r0[TI_flags]
600 rjmp 1b
601
6023: bld r1, TIF_SINGLE_STEP
603 brcc debug_restore_all
604 mfdr r2, DBGREG_DC
605 sbr r2, DC_SS_BIT
606 mtdr DBGREG_DC, r2
607 rjmp debug_restore_all
608
609 .set rsr_int0, SYSREG_RSR_INT0
610 .set rsr_int1, SYSREG_RSR_INT1
611 .set rsr_int2, SYSREG_RSR_INT2
612 .set rsr_int3, SYSREG_RSR_INT3
613 .set rar_int0, SYSREG_RAR_INT0
614 .set rar_int1, SYSREG_RAR_INT1
615 .set rar_int2, SYSREG_RAR_INT2
616 .set rar_int3, SYSREG_RAR_INT3
617
618 .macro IRQ_LEVEL level
619 .type irq_level\level, @function
620irq_level\level:
621 sub sp, 4 /* r12_orig */
622 stmts --sp,r0-lr
623 mfsr r8, rar_int\level
624 mfsr r9, rsr_int\level
625 pushm r8-r9
626
627 mov r11, sp
628 mov r12, \level
629
630 rcall do_IRQ
631
632 lddsp r4, sp[REG_SR]
633 andh r4, (MODE_MASK >> 16), COH
634#ifdef CONFIG_PREEMPT
635 brne 2f
636#else
637 brne 1f
638#endif
639
640 get_thread_info r0
641 ld.w r1, r0[TI_flags]
642 andl r1, _TIF_WORK_MASK, COH
643 brne irq_exit_work
644
6451: popm r8-r9
646 mtsr rar_int\level, r8
647 mtsr rsr_int\level, r9
648 ldmts sp++,r0-lr
649 sub sp, -4 /* ignore r12_orig */
650 rete
651
652#ifdef CONFIG_PREEMPT
6532:
654 get_thread_info r0
655 ld.w r2, r0[TI_preempt_count]
656 cp.w r2, 0
657 brne 1b
658 ld.w r1, r0[TI_flags]
659 bld r1, TIF_NEED_RESCHED
660 brcc 1b
661 lddsp r4, sp[REG_SR]
662 bld r4, SYSREG_GM_OFFSET
663 brcs 1b
664 rcall preempt_schedule_irq
665 rjmp 1b
666#endif
667 .endm
668
669 .section .irq.text,"ax",@progbits
670
671 .global irq_level0
672 .global irq_level1
673 .global irq_level2
674 .global irq_level3
675 IRQ_LEVEL 0
676 IRQ_LEVEL 1
677 IRQ_LEVEL 2
678 IRQ_LEVEL 3
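
The fast path in tlb_miss_common is a straightforward two-level page-table walk followed by a TLB write. A C rendering of the same steps may make the shift arithmetic easier to follow; it is an illustration only, using the same constants and flag bits the assembly uses (from <asm/page.h> and <asm/pgtable.h>):

/* Illustrative C equivalent of the tlb_miss_common fast path (not part of
 * the patch). Returns 0 to mean "take the slow path via do_page_fault()". */
static unsigned long tlb_miss_walk(unsigned long *pgd, unsigned long fault_addr)
{
        unsigned long pgd_entry, *pte_p, pte;

        /* First level: index the PGD with the top bits of the address */
        pgd_entry = pgd[fault_addr >> PGDIR_SHIFT];
        if (!(pgd_entry & (1UL << _PAGE_BIT_PRESENT)))
                return 0;

        /* "andl r0, 0xf000; sbr r0, 31": keep the frame address and map it
         * through P1 (cached, untranslated) so the table can be dereferenced */
        pte_p = (unsigned long *)((pgd_entry & 0xfffff000UL) | 0x80000000UL);

        /* Second level: index the page table with the middle bits */
        pte_p += (fault_addr << (32 - PGDIR_SHIFT))
                 >> ((32 - PGDIR_SHIFT) + PAGE_SHIFT);
        pte = *pte_p;
        if (!(pte & (1UL << _PAGE_BIT_PRESENT)))
                return 0;

        /* Mark the page accessed and hand the hardware bits to the TLB */
        pte |= 1UL << _PAGE_BIT_ACCESSED;
        *pte_p = pte;
        return pte & _PAGE_FLAGS_HARDWARE_MASK;  /* what ends up in TLBELO */
}
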
diff --git a/arch/avr32/kernel/head.S b/arch/avr32/kernel/head.S
new file mode 100644
index 000000000000..773b7ad87be9
--- /dev/null
+++ b/arch/avr32/kernel/head.S
@@ -0,0 +1,45 @@
1/*
2 * Non-board-specific low-level startup code
3 *
4 * Copyright (C) 2004-2006 Atmel Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#include <linux/linkage.h>
11
12#include <asm/page.h>
13#include <asm/thread_info.h>
14#include <asm/sysreg.h>
15
16 .section .init.text,"ax"
17 .global kernel_entry
18kernel_entry:
19 /* Initialize status register */
20 lddpc r0, init_sr
21 mtsr SYSREG_SR, r0
22
23 /* Set initial stack pointer */
24 lddpc sp, stack_addr
25 sub sp, -THREAD_SIZE
26
27#ifdef CONFIG_FRAME_POINTER
28 /* Mark last stack frame */
29 mov lr, 0
30 mov r7, 0
31#endif
32
33 /* Set up the PIO, SDRAM controller, early printk, etc. */
34 rcall board_early_init
35
36 /* Start the show */
37 lddpc pc, kernel_start_addr
38
39 .align 2
40init_sr:
41 .long 0x007f0000 /* Supervisor mode, everything masked */
42stack_addr:
43 .long init_thread_union
44kernel_start_addr:
45 .long start_kernel
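
kernel_entry only has to establish a sane status register and a stack before C code can run: SP is pointed at the top of init_thread_union (the stack grows downwards), board_early_init() sets up RAM and the early console, and control jumps to start_kernel(). The stack setup, expressed as a small C sketch:

#include <asm/thread_info.h>

extern union thread_union init_thread_union;

/* "lddpc sp, stack_addr; sub sp, -THREAD_SIZE" amounts to: */
static inline unsigned long initial_stack_pointer(void)
{
        return (unsigned long)&init_thread_union + THREAD_SIZE;
}
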
diff --git a/arch/avr32/kernel/init_task.c b/arch/avr32/kernel/init_task.c
new file mode 100644
index 000000000000..effcacf9d1a2
--- /dev/null
+++ b/arch/avr32/kernel/init_task.c
@@ -0,0 +1,38 @@
1/*
2 * Copyright (C) 2004-2006 Atmel Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8#include <linux/module.h>
9#include <linux/fs.h>
10#include <linux/sched.h>
11#include <linux/init_task.h>
12#include <linux/mqueue.h>
13
14#include <asm/pgtable.h>
15
16static struct fs_struct init_fs = INIT_FS;
17static struct files_struct init_files = INIT_FILES;
18static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
19static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
20struct mm_struct init_mm = INIT_MM(init_mm);
21
22EXPORT_SYMBOL(init_mm);
23
24/*
25 * Initial thread structure. Must be aligned on an 8192-byte boundary.
26 */
27union thread_union init_thread_union
28 __attribute__((__section__(".data.init_task"))) =
29 { INIT_THREAD_INFO(init_task) };
30
31/*
32 * Initial task structure.
33 *
34 * All other task structs will be allocated on slabs in fork.c
35 */
36struct task_struct init_task = INIT_TASK(init_task);
37
38EXPORT_SYMBOL(init_task);
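
The 8192-byte (THREAD_SIZE) alignment is what makes the usual thread_info lookup trick work: the kernel stack and its thread_info share one aligned block, so the structure can be recovered from any kernel stack pointer by masking off the low bits. A sketch of that idiom; the real implementation lives in the architecture's thread_info.h:

#include <asm/thread_info.h>

static inline struct thread_info *thread_info_from_sp(unsigned long sp)
{
        /* works because the block is THREAD_SIZE bytes and THREAD_SIZE aligned */
        return (struct thread_info *)(sp & ~(THREAD_SIZE - 1UL));
}
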
diff --git a/arch/avr32/kernel/irq.c b/arch/avr32/kernel/irq.c
new file mode 100644
index 000000000000..856f3548e664
--- /dev/null
+++ b/arch/avr32/kernel/irq.c
@@ -0,0 +1,71 @@
1/*
2 * Copyright (C) 2004-2006 Atmel Corporation
3 *
4 * Based on arch/i386/kernel/irq.c
5 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This file contains the code used by various IRQ handling routines:
12 * asking for different IRQ's should be done through these routines
13 * instead of just grabbing them. Thus setups with different IRQ numbers
14 * shouldn't result in any weird surprises, and installing new handlers
15 * should be easier.
16 *
17 * IRQ's are in fact implemented a bit like signal handlers for the kernel.
18 * Naturally it's not a 1:1 relation, but there are similarities.
19 */
20
21#include <linux/interrupt.h>
22#include <linux/irq.h>
23#include <linux/kernel_stat.h>
24#include <linux/proc_fs.h>
25#include <linux/seq_file.h>
26#include <linux/sysdev.h>
27
28/*
29 * 'what should we do if we get a hw irq event on an illegal vector'.
30 * Each architecture has to answer this itself.
31 */
32void ack_bad_irq(unsigned int irq)
33{
34 printk("unexpected IRQ %u\n", irq);
35}
36
37#ifdef CONFIG_PROC_FS
38int show_interrupts(struct seq_file *p, void *v)
39{
40 int i = *(loff_t *)v, cpu;
41 struct irqaction *action;
42 unsigned long flags;
43
44 if (i == 0) {
45 seq_puts(p, " ");
46 for_each_online_cpu(cpu)
47 seq_printf(p, "CPU%d ", cpu);
48 seq_putc(p, '\n');
49 }
50
51 if (i < NR_IRQS) {
52 spin_lock_irqsave(&irq_desc[i].lock, flags);
53 action = irq_desc[i].action;
54 if (!action)
55 goto unlock;
56
57 seq_printf(p, "%3d: ", i);
58 for_each_online_cpu(cpu)
59 seq_printf(p, "%10u ", kstat_cpu(cpu).irqs[i]);
60 seq_printf(p, " %s", action->name);
61 for (action = action->next; action; action = action->next)
62 seq_printf(p, ", %s", action->name);
63
64 seq_putc(p, '\n');
65 unlock:
66 spin_unlock_irqrestore(&irq_desc[i].lock, flags);
67 }
68
69 return 0;
70}
71#endif
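
The names and counts printed by show_interrupts() come from whatever handlers drivers register through request_irq(); each registered irqaction shows up as one name at the end of its line in /proc/interrupts. A hedged sketch of the driver side -- the IRQ number, name and handler are made up, and the handler prototype shown is the one in use around this kernel release:

#include <linux/interrupt.h>

static irqreturn_t demo_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
        /* acknowledge the device here, then: */
        return IRQ_HANDLED;
}

static int demo_probe(void)
{
        /* "demo" is the string show_interrupts() will print for this line */
        return request_irq(7, demo_interrupt, 0, "demo", NULL);
}
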
diff --git a/arch/avr32/kernel/kprobes.c b/arch/avr32/kernel/kprobes.c
new file mode 100644
index 000000000000..6caf9e8d8080
--- /dev/null
+++ b/arch/avr32/kernel/kprobes.c
@@ -0,0 +1,270 @@
1/*
2 * Kernel Probes (KProbes)
3 *
4 * Copyright (C) 2005-2006 Atmel Corporation
5 *
6 * Based on arch/ppc64/kernel/kprobes.c
7 * Copyright (C) IBM Corporation, 2002, 2004
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#include <linux/kprobes.h>
15#include <linux/ptrace.h>
16
17#include <asm/cacheflush.h>
18#include <asm/kdebug.h>
19#include <asm/ocd.h>
20
21DEFINE_PER_CPU(struct kprobe *, current_kprobe);
22static unsigned long kprobe_status;
23static struct pt_regs jprobe_saved_regs;
24
25int __kprobes arch_prepare_kprobe(struct kprobe *p)
26{
27 int ret = 0;
28
29 if ((unsigned long)p->addr & 0x01) {
30 printk("Attempt to register kprobe at an unaligned address\n");
31 ret = -EINVAL;
32 }
33
34 /* XXX: Might be a good idea to check if p->addr is a valid
35 * kernel address as well... */
36
37 if (!ret) {
38 pr_debug("copy kprobe at %p\n", p->addr);
39 memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
40 p->opcode = *p->addr;
41 }
42
43 return ret;
44}
45
46void __kprobes arch_arm_kprobe(struct kprobe *p)
47{
48 pr_debug("arming kprobe at %p\n", p->addr);
49 *p->addr = BREAKPOINT_INSTRUCTION;
50 flush_icache_range((unsigned long)p->addr,
51 (unsigned long)p->addr + sizeof(kprobe_opcode_t));
52}
53
54void __kprobes arch_disarm_kprobe(struct kprobe *p)
55{
56 pr_debug("disarming kprobe at %p\n", p->addr);
57 *p->addr = p->opcode;
58 flush_icache_range((unsigned long)p->addr,
59 (unsigned long)p->addr + sizeof(kprobe_opcode_t));
60}
61
62static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
63{
64 unsigned long dc;
65
66 pr_debug("preparing to singlestep over %p (PC=%08lx)\n",
67 p->addr, regs->pc);
68
69 BUG_ON(!(sysreg_read(SR) & SYSREG_BIT(SR_D)));
70
71 dc = __mfdr(DBGREG_DC);
72 dc |= DC_SS;
73 __mtdr(DBGREG_DC, dc);
74
75 /*
76 * We must run the instruction from its original location
77 * since it may actually reference PC.
78 *
79 * TODO: Do the instruction replacement directly in icache.
80 */
81 *p->addr = p->opcode;
82 flush_icache_range((unsigned long)p->addr,
83 (unsigned long)p->addr + sizeof(kprobe_opcode_t));
84}
85
86static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
87{
88 unsigned long dc;
89
90 pr_debug("resuming execution at PC=%08lx\n", regs->pc);
91
92 dc = __mfdr(DBGREG_DC);
93 dc &= ~DC_SS;
94 __mtdr(DBGREG_DC, dc);
95
96 *p->addr = BREAKPOINT_INSTRUCTION;
97 flush_icache_range((unsigned long)p->addr,
98 (unsigned long)p->addr + sizeof(kprobe_opcode_t));
99}
100
101static void __kprobes set_current_kprobe(struct kprobe *p)
102{
103 __get_cpu_var(current_kprobe) = p;
104}
105
106static int __kprobes kprobe_handler(struct pt_regs *regs)
107{
108 struct kprobe *p;
109 void *addr = (void *)regs->pc;
110 int ret = 0;
111
112 pr_debug("kprobe_handler: kprobe_running=%d\n",
113 kprobe_running());
114
115 /*
116 * We don't want to be preempted for the entire
117 * duration of kprobe processing
118 */
119 preempt_disable();
120
121 /* Check that we're not recursing */
122 if (kprobe_running()) {
123 p = get_kprobe(addr);
124 if (p) {
125 if (kprobe_status == KPROBE_HIT_SS) {
126 printk("FIXME: kprobe hit while single-stepping!\n");
127 goto no_kprobe;
128 }
129
130 printk("FIXME: kprobe hit while handling another kprobe\n");
131 goto no_kprobe;
132 } else {
133 p = kprobe_running();
134 if (p->break_handler && p->break_handler(p, regs))
135 goto ss_probe;
136 }
137 /* If it's not ours, it can't be a delete race (we hold the lock). */
138 goto no_kprobe;
139 }
140
141 p = get_kprobe(addr);
142 if (!p)
143 goto no_kprobe;
144
145 kprobe_status = KPROBE_HIT_ACTIVE;
146 set_current_kprobe(p);
147 if (p->pre_handler && p->pre_handler(p, regs))
148 /* handler has already set things up, so skip ss setup */
149 return 1;
150
151ss_probe:
152 prepare_singlestep(p, regs);
153 kprobe_status = KPROBE_HIT_SS;
154 return 1;
155
156no_kprobe:
157 return ret;
158}
159
160static int __kprobes post_kprobe_handler(struct pt_regs *regs)
161{
162 struct kprobe *cur = kprobe_running();
163
164 pr_debug("post_kprobe_handler, cur=%p\n", cur);
165
166 if (!cur)
167 return 0;
168
169 if (cur->post_handler) {
170 kprobe_status = KPROBE_HIT_SSDONE;
171 cur->post_handler(cur, regs, 0);
172 }
173
174 resume_execution(cur, regs);
175 reset_current_kprobe();
176 preempt_enable_no_resched();
177
178 return 1;
179}
180
181static int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
182{
183 struct kprobe *cur = kprobe_running();
184
185 pr_debug("kprobe_fault_handler: trapnr=%d\n", trapnr);
186
187 if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
188 return 1;
189
190 if (kprobe_status & KPROBE_HIT_SS) {
191 resume_execution(cur, regs);
192 preempt_enable_no_resched();
193 }
194 return 0;
195}
196
197/*
198 * Wrapper routine to for handling exceptions.
199 */
200int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
201 unsigned long val, void *data)
202{
203 struct die_args *args = (struct die_args *)data;
204 int ret = NOTIFY_DONE;
205
206 pr_debug("kprobe_exceptions_notify: val=%lu, data=%p\n",
207 val, data);
208
209 switch (val) {
210 case DIE_BREAKPOINT:
211 if (kprobe_handler(args->regs))
212 ret = NOTIFY_STOP;
213 break;
214 case DIE_SSTEP:
215 if (post_kprobe_handler(args->regs))
216 ret = NOTIFY_STOP;
217 break;
218 case DIE_FAULT:
219 if (kprobe_running()
220 && kprobe_fault_handler(args->regs, args->trapnr))
221 ret = NOTIFY_STOP;
222 break;
223 default:
224 break;
225 }
226
227 return ret;
228}
229
230int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
231{
232 struct jprobe *jp = container_of(p, struct jprobe, kp);
233
234 memcpy(&jprobe_saved_regs, regs, sizeof(struct pt_regs));
235
236 /*
237 * TODO: We should probably save some of the stack here as
238 * well, since gcc may pass arguments on the stack for certain
239 * functions (lots of arguments, large aggregates, varargs)
240 */
241
242 /* setup return addr to the jprobe handler routine */
243 regs->pc = (unsigned long)jp->entry;
244 return 1;
245}
246
247void __kprobes jprobe_return(void)
248{
249 asm volatile("breakpoint" ::: "memory");
250}
251
252int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
253{
254 /*
255 * FIXME - we should ideally be validating that we got here 'cos
256 * of the "trap" in jprobe_return() above, before restoring the
257 * saved regs...
258 */
259 memcpy(regs, &jprobe_saved_regs, sizeof(struct pt_regs));
260 return 1;
261}
262
263int __init arch_init_kprobes(void)
264{
265 printk("KPROBES: Enabling monitor mode (MM|DBE)...\n");
266 __mtdr(DBGREG_DC, DC_MM | DC_DBE);
267
268 /* TODO: Register kretprobe trampoline */
269 return 0;
270}
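
Seen from a user of this API: a probe is registered on a kernel text address, kprobe_handler() fires when the breakpoint instruction is hit, and the pre-handler runs before the original instruction is single-stepped. A hedged sketch of such a module; the probed symbol (kernel_thread, which this patch exports) and the message are illustrative:

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kprobes.h>

static int demo_pre(struct kprobe *p, struct pt_regs *regs)
{
        printk("kprobe hit at PC=%08lx\n", regs->pc);
        return 0;       /* 0: go on to single-step the original instruction */
}

static struct kprobe demo_kp = {
        .pre_handler = demo_pre,
        /* must be a 2-byte-aligned text address, see arch_prepare_kprobe() */
        .addr = (kprobe_opcode_t *)kernel_thread,
};

static int __init demo_init(void)
{
        return register_kprobe(&demo_kp);
}

static void __exit demo_exit(void)
{
        unregister_kprobe(&demo_kp);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
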
diff --git a/arch/avr32/kernel/module.c b/arch/avr32/kernel/module.c
new file mode 100644
index 000000000000..dfc32f2817b6
--- /dev/null
+++ b/arch/avr32/kernel/module.c
@@ -0,0 +1,324 @@
1/*
2 * AVR32-specific kernel module loader
3 *
4 * Copyright (C) 2005-2006 Atmel Corporation
5 *
6 * GOT initialization parts are based on the s390 version
7 * Copyright (C) 2002, 2003 IBM Deutschland Entwicklung GmbH,
8 * IBM Corporation
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 */
14
15#include <linux/moduleloader.h>
16#include <linux/module.h>
17#include <linux/kernel.h>
18#include <linux/elf.h>
19#include <linux/vmalloc.h>
20
21void *module_alloc(unsigned long size)
22{
23 if (size == 0)
24 return NULL;
25 return vmalloc(size);
26}
27
28void module_free(struct module *mod, void *module_region)
29{
30 vfree(mod->arch.syminfo);
31 mod->arch.syminfo = NULL;
32
33 vfree(module_region);
34 /* FIXME: if module_region == mod->init_region, trim exception
35 * table entries. */
36}
37
38static inline int check_rela(Elf32_Rela *rela, struct module *module,
39 char *strings, Elf32_Sym *symbols)
40{
41 struct mod_arch_syminfo *info;
42
43 info = module->arch.syminfo + ELF32_R_SYM(rela->r_info);
44 switch (ELF32_R_TYPE(rela->r_info)) {
45 case R_AVR32_GOT32:
46 case R_AVR32_GOT16:
47 case R_AVR32_GOT8:
48 case R_AVR32_GOT21S:
49 case R_AVR32_GOT18SW: /* mcall */
50 case R_AVR32_GOT16S: /* ld.w */
51 if (rela->r_addend != 0) {
52 printk(KERN_ERR
53 "GOT relocation against %s at offset %u with addend\n",
54 strings + symbols[ELF32_R_SYM(rela->r_info)].st_name,
55 rela->r_offset);
56 return -ENOEXEC;
57 }
58 if (info->got_offset == -1UL) {
59 info->got_offset = module->arch.got_size;
60 module->arch.got_size += sizeof(void *);
61 }
62 pr_debug("GOT[%3lu] %s\n", info->got_offset,
63 strings + symbols[ELF32_R_SYM(rela->r_info)].st_name);
64 break;
65 }
66
67 return 0;
68}
69
70int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
71 char *secstrings, struct module *module)
72{
73 Elf32_Shdr *symtab;
74 Elf32_Sym *symbols;
75 Elf32_Rela *rela;
76 char *strings;
77 int nrela, i, j;
78 int ret;
79
80 /* Find the symbol table */
81 symtab = NULL;
82 for (i = 0; i < hdr->e_shnum; i++)
83 switch (sechdrs[i].sh_type) {
84 case SHT_SYMTAB:
85 symtab = &sechdrs[i];
86 break;
87 }
88 if (!symtab) {
89 printk(KERN_ERR "module %s: no symbol table\n", module->name);
90 return -ENOEXEC;
91 }
92
93 /* Allocate room for one syminfo structure per symbol. */
94 module->arch.nsyms = symtab->sh_size / sizeof(Elf_Sym);
95 module->arch.syminfo = vmalloc(module->arch.nsyms
96 * sizeof(struct mod_arch_syminfo));
97 if (!module->arch.syminfo)
98 return -ENOMEM;
99
100 symbols = (void *)hdr + symtab->sh_offset;
101 strings = (void *)hdr + sechdrs[symtab->sh_link].sh_offset;
102 for (i = 0; i < module->arch.nsyms; i++) {
103 if (symbols[i].st_shndx == SHN_UNDEF &&
104 strcmp(strings + symbols[i].st_name,
105 "_GLOBAL_OFFSET_TABLE_") == 0)
106 /* "Define" it as absolute. */
107 symbols[i].st_shndx = SHN_ABS;
108 module->arch.syminfo[i].got_offset = -1UL;
109 module->arch.syminfo[i].got_initialized = 0;
110 }
111
112 /* Allocate GOT entries for symbols that need it. */
113 module->arch.got_size = 0;
114 for (i = 0; i < hdr->e_shnum; i++) {
115 if (sechdrs[i].sh_type != SHT_RELA)
116 continue;
117 nrela = sechdrs[i].sh_size / sizeof(Elf32_Rela);
118 rela = (void *)hdr + sechdrs[i].sh_offset;
119 for (j = 0; j < nrela; j++) {
120 ret = check_rela(rela + j, module,
121 strings, symbols);
122 if (ret)
123 goto out_free_syminfo;
124 }
125 }
126
127 /*
128 * Increase core size to make room for GOT and set start
129 * offset for GOT.
130 */
131 module->core_size = ALIGN(module->core_size, 4);
132 module->arch.got_offset = module->core_size;
133 module->core_size += module->arch.got_size;
134
135 return 0;
136
137out_free_syminfo:
138 vfree(module->arch.syminfo);
139 module->arch.syminfo = NULL;
140
141 return ret;
142}
143
144static inline int reloc_overflow(struct module *module, const char *reloc_name,
145 Elf32_Addr relocation)
146{
147 printk(KERN_ERR "module %s: Value %lx does not fit relocation %s\n",
148 module->name, (unsigned long)relocation, reloc_name);
149 return -ENOEXEC;
150}
151
152#define get_u16(loc) (*((uint16_t *)loc))
153#define put_u16(loc, val) (*((uint16_t *)loc) = (val))
154
155int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
156 unsigned int symindex, unsigned int relindex,
157 struct module *module)
158{
159 Elf32_Shdr *symsec = sechdrs + symindex;
160 Elf32_Shdr *relsec = sechdrs + relindex;
161 Elf32_Shdr *dstsec = sechdrs + relsec->sh_info;
162 Elf32_Rela *rel = (void *)relsec->sh_addr;
163 unsigned int i;
164 int ret = 0;
165
166 for (i = 0; i < relsec->sh_size / sizeof(Elf32_Rela); i++, rel++) {
167 struct mod_arch_syminfo *info;
168 Elf32_Sym *sym;
169 Elf32_Addr relocation;
170 uint32_t *location;
171 uint32_t value;
172
173 location = (void *)dstsec->sh_addr + rel->r_offset;
174 sym = (Elf32_Sym *)symsec->sh_addr + ELF32_R_SYM(rel->r_info);
175 relocation = sym->st_value + rel->r_addend;
176
177 info = module->arch.syminfo + ELF32_R_SYM(rel->r_info);
178
179 /* Initialize GOT entry if necessary */
180 switch (ELF32_R_TYPE(rel->r_info)) {
181 case R_AVR32_GOT32:
182 case R_AVR32_GOT16:
183 case R_AVR32_GOT8:
184 case R_AVR32_GOT21S:
185 case R_AVR32_GOT18SW:
186 case R_AVR32_GOT16S:
187 if (!info->got_initialized) {
188 Elf32_Addr *gotent;
189
190 gotent = (module->module_core
191 + module->arch.got_offset
192 + info->got_offset);
193 *gotent = relocation;
194 info->got_initialized = 1;
195 }
196
197 relocation = info->got_offset;
198 break;
199 }
200
201 switch (ELF32_R_TYPE(rel->r_info)) {
202 case R_AVR32_32:
203 case R_AVR32_32_CPENT:
204 *location = relocation;
205 break;
206 case R_AVR32_22H_PCREL:
207 relocation -= (Elf32_Addr)location;
208 if ((relocation & 0xffe00001) != 0
209 && (relocation & 0xffc00001) != 0xffc00000)
210 return reloc_overflow(module,
211 "R_AVR32_22H_PCREL",
212 relocation);
213 relocation >>= 1;
214
215 value = *location;
216 value = ((value & 0xe1ef0000)
217 | (relocation & 0xffff)
218 | ((relocation & 0x10000) << 4)
219 | ((relocation & 0x1e0000) << 8));
220 *location = value;
221 break;
222 case R_AVR32_11H_PCREL:
223 relocation -= (Elf32_Addr)location;
224 if ((relocation & 0xfffffc01) != 0
225 && (relocation & 0xfffff801) != 0xfffff800)
226 return reloc_overflow(module,
227 "R_AVR32_11H_PCREL",
228 relocation);
229 value = get_u16(location);
230 value = ((value & 0xf00c)
231 | ((relocation & 0x1fe) << 3)
232 | ((relocation & 0x600) >> 9));
233 put_u16(location, value);
234 break;
235 case R_AVR32_9H_PCREL:
236 relocation -= (Elf32_Addr)location;
237 if ((relocation & 0xffffff01) != 0
238 && (relocation & 0xfffffe01) != 0xfffffe00)
239 return reloc_overflow(module,
240 "R_AVR32_9H_PCREL",
241 relocation);
242 value = get_u16(location);
243 value = ((value & 0xf00f)
244 | ((relocation & 0x1fe) << 3));
245 put_u16(location, value);
246 break;
247 case R_AVR32_9UW_PCREL:
248 relocation -= ((Elf32_Addr)location) & 0xfffffffc;
249 if ((relocation & 0xfffffc03) != 0)
250 return reloc_overflow(module,
251 "R_AVR32_9UW_PCREL",
252 relocation);
253 value = get_u16(location);
254 value = ((value & 0xf80f)
255 | ((relocation & 0x1fc) << 2));
256 put_u16(location, value);
257 break;
258 case R_AVR32_GOTPC:
259 /*
260 * R6 = PC - (PC - GOT)
261 *
262 * At this point, relocation contains the
263 * value of PC. Just subtract the value of
264 * GOT, and we're done.
265 */
266 pr_debug("GOTPC: PC=0x%lx, got_offset=0x%lx, core=0x%p\n",
267 relocation, module->arch.got_offset,
268 module->module_core);
269 relocation -= ((unsigned long)module->module_core
270 + module->arch.got_offset);
271 *location = relocation;
272 break;
273 case R_AVR32_GOT18SW:
274 if ((relocation & 0xfffe0003) != 0
275 && (relocation & 0xfffc0003) != 0xffff0000)
276 return reloc_overflow(module, "R_AVR32_GOT18SW",
277 relocation);
278 relocation >>= 2;
279 /* fall through */
280 case R_AVR32_GOT16S:
281 if ((relocation & 0xffff8000) != 0
282 && (relocation & 0xffff0000) != 0xffff0000)
283 return reloc_overflow(module, "R_AVR32_GOT16S",
284 relocation);
285 pr_debug("GOT reloc @ 0x%lx -> %lu\n",
286 rel->r_offset, relocation);
287 value = *location;
288 value = ((value & 0xffff0000)
289 | (relocation & 0xffff));
290 *location = value;
291 break;
292
293 default:
294 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
295 module->name, ELF32_R_TYPE(rel->r_info));
296 return -ENOEXEC;
297 }
298 }
299
300 return ret;
301}
302
303int apply_relocate(Elf32_Shdr *sechdrs, const char *strtab,
304 unsigned int symindex, unsigned int relindex,
305 struct module *module)
306{
307 printk(KERN_ERR "module %s: REL relocations are not supported\n",
308 module->name);
309 return -ENOEXEC;
310}
311
312int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
313 struct module *module)
314{
315 vfree(module->arch.syminfo);
316 module->arch.syminfo = NULL;
317
318 return 0;
319}
320
321void module_arch_cleanup(struct module *module)
322{
323
324}
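
The paired masks in the overflow checks all follow the same pattern: the displacement must have its low alignment bit(s) clear and must fit the instruction's signed immediate field, so one mask rejects positive values that are too large while the other accepts in-range negative values. Spelled out in C for the R_AVR32_22H_PCREL case (a restatement of the check above, not new logic):

/* Non-zero if 'disp' (PC-relative, in bytes) can be encoded by an
 * R_AVR32_22H_PCREL field after the ">> 1" in apply_relocate_add(). */
static inline int fits_22h_pcrel(unsigned long disp)
{
        return (disp & 0xffe00001) == 0 ||              /* positive and in range */
               (disp & 0xffc00001) == 0xffc00000;       /* negative and in range */
}
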
diff --git a/arch/avr32/kernel/process.c b/arch/avr32/kernel/process.c
new file mode 100644
index 000000000000..317dc50945f2
--- /dev/null
+++ b/arch/avr32/kernel/process.c
@@ -0,0 +1,276 @@
1/*
2 * Copyright (C) 2004-2006 Atmel Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8#include <linux/sched.h>
9#include <linux/module.h>
10#include <linux/kallsyms.h>
11#include <linux/fs.h>
12#include <linux/ptrace.h>
13#include <linux/reboot.h>
14#include <linux/unistd.h>
15
16#include <asm/sysreg.h>
17#include <asm/ocd.h>
18
19void (*pm_power_off)(void) = NULL;
20EXPORT_SYMBOL(pm_power_off);
21
22/*
23 * This file handles the architecture-dependent parts of process handling..
24 */
25
26void cpu_idle(void)
27{
28 /* endless idle loop with no priority at all */
29 while (1) {
30 /* TODO: Enter sleep mode */
31 while (!need_resched())
32 cpu_relax();
33 preempt_enable_no_resched();
34 schedule();
35 preempt_disable();
36 }
37}
38
39void machine_halt(void)
40{
41}
42
43void machine_power_off(void)
44{
45}
46
47void machine_restart(char *cmd)
48{
49 __mtdr(DBGREG_DC, DC_DBE);
50 __mtdr(DBGREG_DC, DC_RES);
51 while (1) ;
52}
53
54/*
55 * PC is actually discarded when returning from a system call -- the
56 * return address must be stored in LR. This function will make sure
57 * LR points to do_exit before starting the thread.
58 *
59 * Also, when returning from fork(), r12 is 0, so we must copy the
60 * argument as well.
61 *
62 * r0 : The argument to the main thread function
63 * r1 : The address of do_exit
64 * r2 : The address of the main thread function
65 */
66asmlinkage extern void kernel_thread_helper(void);
67__asm__(" .type kernel_thread_helper, @function\n"
68 "kernel_thread_helper:\n"
69 " mov r12, r0\n"
70 " mov lr, r2\n"
71 " mov pc, r1\n"
72 " .size kernel_thread_helper, . - kernel_thread_helper");
73
74int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
75{
76 struct pt_regs regs;
77
78 memset(&regs, 0, sizeof(regs));
79
80 regs.r0 = (unsigned long)arg;
81 regs.r1 = (unsigned long)fn;
82 regs.r2 = (unsigned long)do_exit;
83 regs.lr = (unsigned long)kernel_thread_helper;
84 regs.pc = (unsigned long)kernel_thread_helper;
85 regs.sr = MODE_SUPERVISOR;
86
87 return do_fork(flags | CLONE_VM | CLONE_UNTRACED,
88 0, &regs, 0, NULL, NULL);
89}
90EXPORT_SYMBOL(kernel_thread);
91
92/*
93 * Free current thread data structures etc
94 */
95void exit_thread(void)
96{
97 /* nothing to do */
98}
99
100void flush_thread(void)
101{
102 /* nothing to do */
103}
104
105void release_thread(struct task_struct *dead_task)
106{
107 /* do nothing */
108}
109
110static const char *cpu_modes[] = {
111 "Application", "Supervisor", "Interrupt level 0", "Interrupt level 1",
112 "Interrupt level 2", "Interrupt level 3", "Exception", "NMI"
113};
114
115void show_regs(struct pt_regs *regs)
116{
117 unsigned long sp = regs->sp;
118 unsigned long lr = regs->lr;
119 unsigned long mode = (regs->sr & MODE_MASK) >> MODE_SHIFT;
120
121 if (!user_mode(regs))
122 sp = (unsigned long)regs + FRAME_SIZE_FULL;
123
124 print_symbol("PC is at %s\n", instruction_pointer(regs));
125 print_symbol("LR is at %s\n", lr);
126 printk("pc : [<%08lx>] lr : [<%08lx>] %s\n"
127 "sp : %08lx r12: %08lx r11: %08lx\n",
128 instruction_pointer(regs),
129 lr, print_tainted(), sp, regs->r12, regs->r11);
130 printk("r10: %08lx r9 : %08lx r8 : %08lx\n",
131 regs->r10, regs->r9, regs->r8);
132 printk("r7 : %08lx r6 : %08lx r5 : %08lx r4 : %08lx\n",
133 regs->r7, regs->r6, regs->r5, regs->r4);
134 printk("r3 : %08lx r2 : %08lx r1 : %08lx r0 : %08lx\n",
135 regs->r3, regs->r2, regs->r1, regs->r0);
136 printk("Flags: %c%c%c%c%c\n",
137 regs->sr & SR_Q ? 'Q' : 'q',
138 regs->sr & SR_V ? 'V' : 'v',
139 regs->sr & SR_N ? 'N' : 'n',
140 regs->sr & SR_Z ? 'Z' : 'z',
141 regs->sr & SR_C ? 'C' : 'c');
142 printk("Mode bits: %c%c%c%c%c%c%c%c%c\n",
143 regs->sr & SR_H ? 'H' : 'h',
144 regs->sr & SR_R ? 'R' : 'r',
145 regs->sr & SR_J ? 'J' : 'j',
146 regs->sr & SR_EM ? 'E' : 'e',
147 regs->sr & SR_I3M ? '3' : '.',
148 regs->sr & SR_I2M ? '2' : '.',
149 regs->sr & SR_I1M ? '1' : '.',
150 regs->sr & SR_I0M ? '0' : '.',
151 regs->sr & SR_GM ? 'G' : 'g');
152 printk("CPU Mode: %s\n", cpu_modes[mode]);
153
154 show_trace(NULL, (unsigned long *)sp, regs);
155}
156EXPORT_SYMBOL(show_regs);
157
158/* Fill in the fpu structure for a core dump. This is easy -- we don't have any */
159int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
160{
161 /* Not valid */
162 return 0;
163}
164
165asmlinkage void ret_from_fork(void);
166
167int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
168 unsigned long unused,
169 struct task_struct *p, struct pt_regs *regs)
170{
171 struct pt_regs *childregs;
172
173 childregs = ((struct pt_regs *)(THREAD_SIZE + (unsigned long)p->thread_info)) - 1;
174 *childregs = *regs;
175
176 if (user_mode(regs))
177 childregs->sp = usp;
178 else
179 childregs->sp = (unsigned long)p->thread_info + THREAD_SIZE;
180
181 childregs->r12 = 0; /* Set return value for child */
182
183 p->thread.cpu_context.sr = MODE_SUPERVISOR | SR_GM;
184 p->thread.cpu_context.ksp = (unsigned long)childregs;
185 p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
186
187 return 0;
188}
189
190/* The asm stubs in syscall-stubs.S pass a pointer to the saved pt_regs (i.e. the kernel stack pointer) as the struct pt_regs * argument */
191asmlinkage int sys_fork(struct pt_regs *regs)
192{
193 return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
194}
195
196asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
197 unsigned long parent_tidptr,
198 unsigned long child_tidptr, struct pt_regs *regs)
199{
200 if (!newsp)
201 newsp = regs->sp;
202 return do_fork(clone_flags, newsp, regs, 0,
203 (int __user *)parent_tidptr,
204 (int __user *)child_tidptr);
205}
206
207asmlinkage int sys_vfork(struct pt_regs *regs)
208{
209 return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs,
210 0, NULL, NULL);
211}
212
213asmlinkage int sys_execve(char __user *ufilename, char __user *__user *uargv,
214 char __user *__user *uenvp, struct pt_regs *regs)
215{
216 int error;
217 char *filename;
218
219 filename = getname(ufilename);
220 error = PTR_ERR(filename);
221 if (IS_ERR(filename))
222 goto out;
223
224 error = do_execve(filename, uargv, uenvp, regs);
225 if (error == 0)
226 current->ptrace &= ~PT_DTRACE;
227 putname(filename);
228
229out:
230 return error;
231}
232
233
234/*
235 * This function is supposed to answer the question "who called
236 * schedule()?"
237 */
238unsigned long get_wchan(struct task_struct *p)
239{
240 unsigned long pc;
241 unsigned long stack_page;
242
243 if (!p || p == current || p->state == TASK_RUNNING)
244 return 0;
245
246 stack_page = (unsigned long)p->thread_info;
247 BUG_ON(!stack_page);
248
249 /*
250 * The stored value of PC is either the address right after
251 * the call to __switch_to() or ret_from_fork.
252 */
253 pc = thread_saved_pc(p);
254 if (in_sched_functions(pc)) {
255#ifdef CONFIG_FRAME_POINTER
256 unsigned long fp = p->thread.cpu_context.r7;
257 BUG_ON(fp < stack_page || fp > (THREAD_SIZE + stack_page));
258 pc = *(unsigned long *)fp;
259#else
260 /*
261 * We depend on the frame size of schedule here, which
262 * is actually quite ugly. It might be possible to
263 * determine the frame size automatically at build
264 * time by doing this:
265 * - compile sched.c
266 * - disassemble the resulting sched.o
267 * - look for 'sub sp,??' shortly after '<schedule>:'
268 */
269 unsigned long sp = p->thread.cpu_context.ksp + 16;
270 BUG_ON(sp < stack_page || sp > (THREAD_SIZE + stack_page));
271 pc = *(unsigned long *)sp;
272#endif
273 }
274
275 return pc;
276}
diff --git a/arch/avr32/kernel/ptrace.c b/arch/avr32/kernel/ptrace.c
new file mode 100644
index 000000000000..3c89e59029ab
--- /dev/null
+++ b/arch/avr32/kernel/ptrace.c
@@ -0,0 +1,371 @@
1/*
2 * Copyright (C) 2004-2006 Atmel Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8#undef DEBUG
9#include <linux/kernel.h>
10#include <linux/sched.h>
11#include <linux/mm.h>
12#include <linux/smp_lock.h>
13#include <linux/ptrace.h>
14#include <linux/errno.h>
15#include <linux/user.h>
16#include <linux/security.h>
17#include <linux/unistd.h>
18#include <linux/notifier.h>
19
20#include <asm/traps.h>
21#include <asm/uaccess.h>
22#include <asm/ocd.h>
23#include <asm/mmu_context.h>
24#include <asm/kdebug.h>
25
26static struct pt_regs *get_user_regs(struct task_struct *tsk)
27{
28 return (struct pt_regs *)((unsigned long) tsk->thread_info +
29 THREAD_SIZE - sizeof(struct pt_regs));
30}
31
32static void ptrace_single_step(struct task_struct *tsk)
33{
34 pr_debug("ptrace_single_step: pid=%u, SR=0x%08lx\n",
35 tsk->pid, tsk->thread.cpu_context.sr);
36 if (!(tsk->thread.cpu_context.sr & SR_D)) {
37 /*
38 * Set a breakpoint at the current pc to force the
39 * process into debug mode. The syscall/exception
40 * exit code will set a breakpoint at the return
41 * address when this flag is set.
42 */
43 pr_debug("ptrace_single_step: Setting TIF_BREAKPOINT\n");
44 set_tsk_thread_flag(tsk, TIF_BREAKPOINT);
45 }
46
47 /* The monitor code will do the actual step for us */
48 set_tsk_thread_flag(tsk, TIF_SINGLE_STEP);
49}
50
51/*
52 * Called by kernel/ptrace.c when detaching
53 *
54 * Make sure any single step bits, etc. are not set
55 */
56void ptrace_disable(struct task_struct *child)
57{
58 clear_tsk_thread_flag(child, TIF_SINGLE_STEP);
59}
60
61/*
62 * Handle hitting a breakpoint
63 */
64static void ptrace_break(struct task_struct *tsk, struct pt_regs *regs)
65{
66 siginfo_t info;
67
68 info.si_signo = SIGTRAP;
69 info.si_errno = 0;
70 info.si_code = TRAP_BRKPT;
71 info.si_addr = (void __user *)instruction_pointer(regs);
72
73 pr_debug("ptrace_break: Sending SIGTRAP to PID %u (pc = 0x%p)\n",
74 tsk->pid, info.si_addr);
75 force_sig_info(SIGTRAP, &info, tsk);
76}
77
78/*
79 * Read the word at offset "offset" into the task's "struct user". We
80 * actually access the pt_regs struct stored on the kernel stack.
81 */
82static int ptrace_read_user(struct task_struct *tsk, unsigned long offset,
83 unsigned long __user *data)
84{
85 unsigned long *regs;
86 unsigned long value;
87
88 pr_debug("ptrace_read_user(%p, %#lx, %p)\n",
89 tsk, offset, data);
90
91 if (offset & 3 || offset >= sizeof(struct user)) {
92 printk("ptrace_read_user: invalid offset 0x%08lx\n", offset);
93 return -EIO;
94 }
95
96 regs = (unsigned long *)get_user_regs(tsk);
97
98 value = 0;
99 if (offset < sizeof(struct pt_regs))
100 value = regs[offset / sizeof(regs[0])];
101
102 return put_user(value, data);
103}
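From user space these word offsets into struct user are reached with the PTRACE_PEEKUSER request. A rough sketch of a tracer reading one such word from a stopped child follows; which offset corresponds to which register depends on the pt_regs layout and is not shown in this hunk, so the offset passed in is purely the caller's assumption.

	#include <stdio.h>
	#include <errno.h>
	#include <sys/types.h>
	#include <sys/ptrace.h>

	/* read one word from the child's struct user; "offset" must be
	 * word-aligned and below sizeof(struct user), as checked above */
	long peek_user_word(pid_t child, long offset)
	{
		long val;

		errno = 0;
		val = ptrace(PTRACE_PEEKUSER, child, (void *)offset, NULL);
		if (val == -1 && errno)
			perror("PTRACE_PEEKUSER");
		return val;
	}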
104
105/*
106 * Write the word "value" to offset "offset" into the task's "struct
107 * user". We actually access the pt_regs struct stored on the kernel
108 * stack.
109 */
110static int ptrace_write_user(struct task_struct *tsk, unsigned long offset,
111 unsigned long value)
112{
113 unsigned long *regs;
114
115 if (offset & 3 || offset >= sizeof(struct user)) {
116 printk("ptrace_write_user: invalid offset 0x%08lx\n", offset);
117 return -EIO;
118 }
119
120 if (offset >= sizeof(struct pt_regs))
121 return 0;
122
123 regs = (unsigned long *)get_user_regs(tsk);
124 regs[offset / sizeof(regs[0])] = value;
125
126 return 0;
127}
128
129static int ptrace_getregs(struct task_struct *tsk, void __user *uregs)
130{
131 struct pt_regs *regs = get_user_regs(tsk);
132
133 return copy_to_user(uregs, regs, sizeof(*regs)) ? -EFAULT : 0;
134}
135
136static int ptrace_setregs(struct task_struct *tsk, const void __user *uregs)
137{
138 struct pt_regs newregs;
139 int ret;
140
141 ret = -EFAULT;
142 if (copy_from_user(&newregs, uregs, sizeof(newregs)) == 0) {
143 struct pt_regs *regs = get_user_regs(tsk);
144
145 ret = -EINVAL;
146 if (valid_user_regs(&newregs)) {
147 *regs = newregs;
148 ret = 0;
149 }
150 }
151
152 return ret;
153}
154
155long arch_ptrace(struct task_struct *child, long request, long addr, long data)
156{
157 unsigned long tmp;
158 int ret;
159
160 pr_debug("arch_ptrace(%ld, %ld, %#lx, %#lx)\n",
161 request, child->pid, addr, data);
162
163 pr_debug("ptrace: Enabling monitor mode...\n");
164 __mtdr(DBGREG_DC, __mfdr(DBGREG_DC) | DC_MM | DC_DBE);
165
166 switch (request) {
167 /* Read the word at location addr in the child process */
168 case PTRACE_PEEKTEXT:
169 case PTRACE_PEEKDATA:
170 ret = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
171 if (ret == sizeof(tmp))
172 ret = put_user(tmp, (unsigned long __user *)data);
173 else
174 ret = -EIO;
175 break;
176
177 case PTRACE_PEEKUSR:
178 ret = ptrace_read_user(child, addr,
179 (unsigned long __user *)data);
180 break;
181
182 /* Write the word in data at location addr */
183 case PTRACE_POKETEXT:
184 case PTRACE_POKEDATA:
185 ret = access_process_vm(child, addr, &data, sizeof(data), 1);
186 if (ret == sizeof(data))
187 ret = 0;
188 else
189 ret = -EIO;
190 break;
191
192 case PTRACE_POKEUSR:
193 ret = ptrace_write_user(child, addr, data);
194 break;
195
196 /* continue and stop at next (return from) syscall */
197 case PTRACE_SYSCALL:
198 /* restart after signal */
199 case PTRACE_CONT:
200 ret = -EIO;
201 if (!valid_signal(data))
202 break;
203 if (request == PTRACE_SYSCALL)
204 set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
205 else
206 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
207 child->exit_code = data;
208 /* XXX: Are we sure no breakpoints are active here? */
209 wake_up_process(child);
210 ret = 0;
211 break;
212
213 /*
214 * Make the child exit. Best I can do is send it a
215 * SIGKILL. Perhaps it should be put in the status that it
216 * wants to exit.
217 */
218 case PTRACE_KILL:
219 ret = 0;
220 if (child->exit_state == EXIT_ZOMBIE)
221 break;
222 child->exit_code = SIGKILL;
223 wake_up_process(child);
224 break;
225
226 /*
227 * execute single instruction.
228 */
229 case PTRACE_SINGLESTEP:
230 ret = -EIO;
231 if (!valid_signal(data))
232 break;
233 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
234 ptrace_single_step(child);
235 child->exit_code = data;
236 wake_up_process(child);
237 ret = 0;
238 break;
239
240 /* Detach a process that was attached */
241 case PTRACE_DETACH:
242 ret = ptrace_detach(child, data);
243 break;
244
245 case PTRACE_GETREGS:
246 ret = ptrace_getregs(child, (void __user *)data);
247 break;
248
249 case PTRACE_SETREGS:
250 ret = ptrace_setregs(child, (const void __user *)data);
251 break;
252
253 default:
254 ret = ptrace_request(child, request, addr, data);
255 break;
256 }
257
258 pr_debug("sys_ptrace returning %d (DC = 0x%08lx)\n", ret, __mfdr(DBGREG_DC));
259 return ret;
260}
261
262asmlinkage void syscall_trace(void)
263{
264 pr_debug("syscall_trace called\n");
265 if (!test_thread_flag(TIF_SYSCALL_TRACE))
266 return;
267 if (!(current->ptrace & PT_PTRACED))
268 return;
269
270 pr_debug("syscall_trace: notifying parent\n");
271 /* The 0x80 provides a way for the tracing parent to
272 * distinguish between a syscall stop and SIGTRAP delivery */
273 ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
274 ? 0x80 : 0));
275
276 /*
277 * this isn't the same as continuing with a signal, but it
278 * will do for normal use. strace only continues with a
279 * signal if the stopping signal is not SIGTRAP. -brl
280 */
281 if (current->exit_code) {
282 pr_debug("syscall_trace: sending signal %d to PID %u\n",
283 current->exit_code, current->pid);
284 send_sig(current->exit_code, current, 1);
285 current->exit_code = 0;
286 }
287}
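The 0x80 bit is what a tracer observes when it has requested PTRACE_O_TRACESYSGOOD. As a sketch (header details and error handling omitted), the corresponding user-space check looks roughly like this:

	#include <stdio.h>
	#include <signal.h>
	#include <sys/ptrace.h>
	#include <sys/wait.h>

	/* child is already attached and stopped */
	void trace_one_syscall_stop(pid_t child)
	{
		int status;

		ptrace(PTRACE_SETOPTIONS, child, NULL,
		       (void *)PTRACE_O_TRACESYSGOOD);
		ptrace(PTRACE_SYSCALL, child, NULL, NULL);
		waitpid(child, &status, 0);

		if (WIFSTOPPED(status) && WSTOPSIG(status) == (SIGTRAP | 0x80))
			puts("stopped at a syscall, not at a genuine SIGTRAP");
	}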
288
289asmlinkage void do_debug_priv(struct pt_regs *regs)
290{
291 unsigned long dc, ds;
292 unsigned long die_val;
293
294 ds = __mfdr(DBGREG_DS);
295
296 pr_debug("do_debug_priv: pc = %08lx, ds = %08lx\n", regs->pc, ds);
297
298 if (ds & DS_SSS)
299 die_val = DIE_SSTEP;
300 else
301 die_val = DIE_BREAKPOINT;
302
303 if (notify_die(die_val, regs, 0, SIGTRAP) == NOTIFY_STOP)
304 return;
305
306 if (likely(ds & DS_SSS)) {
307 extern void itlb_miss(void);
308 extern void tlb_miss_common(void);
309 struct thread_info *ti;
310
311 dc = __mfdr(DBGREG_DC);
312 dc &= ~DC_SS;
313 __mtdr(DBGREG_DC, dc);
314
315 ti = current_thread_info();
316 ti->flags |= _TIF_BREAKPOINT;
317
318 /* The TLB miss handlers don't check thread flags */
319 if ((regs->pc >= (unsigned long)&itlb_miss)
320 && (regs->pc <= (unsigned long)&tlb_miss_common)) {
321 __mtdr(DBGREG_BWA2A, sysreg_read(RAR_EX));
322 __mtdr(DBGREG_BWC2A, 0x40000001 | (get_asid() << 1));
323 }
324
325 /*
326 * If we're running in supervisor mode, the breakpoint
327 * will take us where we want directly, no need to
328 * single step.
329 */
330 if ((regs->sr & MODE_MASK) != MODE_SUPERVISOR)
331 ti->flags |= TIF_SINGLE_STEP;
332 } else {
333 panic("Unable to handle debug trap at pc = %08lx\n",
334 regs->pc);
335 }
336}
337
338/*
339 * Handle breakpoints, single steps and other debuggy things. To keep
340 * things simple initially, we run with interrupts and exceptions
341 * disabled all the time.
342 */
343asmlinkage void do_debug(struct pt_regs *regs)
344{
345 unsigned long dc, ds;
346
347 ds = __mfdr(DBGREG_DS);
348 pr_debug("do_debug: pc = %08lx, ds = %08lx\n", regs->pc, ds);
349
350 if (test_thread_flag(TIF_BREAKPOINT)) {
351 pr_debug("TIF_BREAKPOINT set\n");
352 /* We're taking care of it */
353 clear_thread_flag(TIF_BREAKPOINT);
354 __mtdr(DBGREG_BWC2A, 0);
355 }
356
357 if (test_thread_flag(TIF_SINGLE_STEP)) {
358 pr_debug("TIF_SINGLE_STEP set, ds = 0x%08lx\n", ds);
359 if (ds & DS_SSS) {
360 dc = __mfdr(DBGREG_DC);
361 dc &= ~DC_SS;
362 __mtdr(DBGREG_DC, dc);
363
364 clear_thread_flag(TIF_SINGLE_STEP);
365 ptrace_break(current, regs);
366 }
367 } else {
368 /* regular breakpoint */
369 ptrace_break(current, regs);
370 }
371}
diff --git a/arch/avr32/kernel/semaphore.c b/arch/avr32/kernel/semaphore.c
new file mode 100644
index 000000000000..1e2705a05016
--- /dev/null
+++ b/arch/avr32/kernel/semaphore.c
@@ -0,0 +1,148 @@
1/*
2 * AVR32 semaphore implementation.
3 *
4 * Copyright (C) 2004-2006 Atmel Corporation
5 *
6 * Based on linux/arch/i386/kernel/semaphore.c
7 * Copyright (C) 1999 Linus Torvalds
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#include <linux/sched.h>
15#include <linux/errno.h>
16#include <linux/module.h>
17
18#include <asm/semaphore.h>
19#include <asm/atomic.h>
20
21/*
22 * Semaphores are implemented using a two-way counter:
23 * The "count" variable is decremented for each process
24 * that tries to acquire the semaphore, while the "sleeping"
25 * variable is a count of such acquires.
26 *
27 * Notably, the inline "up()" and "down()" functions can
28 * efficiently test if they need to do any extra work (up
29 * needs to do something only if count was negative before
30 * the increment operation).
31 *
32 * "sleeping" and the contention routine ordering is protected
33 * by the spinlock in the semaphore's waitqueue head.
34 *
35 * Note that these functions are only called when there is
36 * contention on the lock, and as such all this is the
37 * "non-critical" part of the whole semaphore business. The
38 * critical part is the inline stuff in <asm/semaphore.h>
39 * where we want to avoid any extra jumps and calls.
40 */
41
42/*
43 * Logic:
44 * - only on a boundary condition do we need to care. When we go
45 * from a negative count to a non-negative, we wake people up.
46 * - when we go from a non-negative count to a negative, we must
47 * (a) synchronize with the "sleeper" count and (b) make sure
48 * that we're on the wakeup list before we synchronize so that
49 * we cannot lose wakeup events.
50 */
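The "inline stuff in <asm/semaphore.h>" that pairs with these slow paths is not part of this hunk. As a sketch of the scheme the comment above describes, the fast path typically amounts to the following (generic atomic helpers shown rather than the AVR32-specific ones):

	static inline void down(struct semaphore *sem)
	{
		/* count went negative: there is contention, take the slow path */
		if (atomic_dec_return(&sem->count) < 0)
			__down(sem);
	}

	static inline void up(struct semaphore *sem)
	{
		/* count was negative before the increment: someone is waiting */
		if (atomic_inc_return(&sem->count) <= 0)
			__up(sem);
	}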
51
52void __up(struct semaphore *sem)
53{
54 wake_up(&sem->wait);
55}
56EXPORT_SYMBOL(__up);
57
58void __sched __down(struct semaphore *sem)
59{
60 struct task_struct *tsk = current;
61 DECLARE_WAITQUEUE(wait, tsk);
62 unsigned long flags;
63
64 tsk->state = TASK_UNINTERRUPTIBLE;
65 spin_lock_irqsave(&sem->wait.lock, flags);
66 add_wait_queue_exclusive_locked(&sem->wait, &wait);
67
68 sem->sleepers++;
69 for (;;) {
70 int sleepers = sem->sleepers;
71
72 /*
73 * Add "everybody else" into it. They aren't
74 * playing, because we own the spinlock in
75 * the wait_queue_head.
76 */
77 if (atomic_add_return(sleepers - 1, &sem->count) >= 0) {
78 sem->sleepers = 0;
79 break;
80 }
81 sem->sleepers = 1; /* us - see -1 above */
82 spin_unlock_irqrestore(&sem->wait.lock, flags);
83
84 schedule();
85
86 spin_lock_irqsave(&sem->wait.lock, flags);
87 tsk->state = TASK_UNINTERRUPTIBLE;
88 }
89 remove_wait_queue_locked(&sem->wait, &wait);
90 wake_up_locked(&sem->wait);
91 spin_unlock_irqrestore(&sem->wait.lock, flags);
92 tsk->state = TASK_RUNNING;
93}
94EXPORT_SYMBOL(__down);
95
96int __sched __down_interruptible(struct semaphore *sem)
97{
98 int retval = 0;
99 struct task_struct *tsk = current;
100 DECLARE_WAITQUEUE(wait, tsk);
101 unsigned long flags;
102
103 tsk->state = TASK_INTERRUPTIBLE;
104 spin_lock_irqsave(&sem->wait.lock, flags);
105 add_wait_queue_exclusive_locked(&sem->wait, &wait);
106
107 sem->sleepers++;
108 for (;;) {
109 int sleepers = sem->sleepers;
110
111 /*
112 * With signals pending, this turns into the trylock
113 * failure case - we won't be sleeping, and we can't
114 * get the lock as it has contention. Just correct the
115 * count and exit.
116 */
117 if (signal_pending(current)) {
118 retval = -EINTR;
119 sem->sleepers = 0;
120 atomic_add(sleepers, &sem->count);
121 break;
122 }
123
124 /*
125 * Add "everybody else" into it. They aren't
126 * playing, because we own the spinlock in
127 * the wait_queue_head.
128 */
129 if (atomic_add_return(sleepers - 1, &sem->count) >= 0) {
130 sem->sleepers = 0;
131 break;
132 }
133 sem->sleepers = 1; /* us - see -1 above */
134 spin_unlock_irqrestore(&sem->wait.lock, flags);
135
136 schedule();
137
138 spin_lock_irqsave(&sem->wait.lock, flags);
139 tsk->state = TASK_INTERRUPTIBLE;
140 }
141 remove_wait_queue_locked(&sem->wait, &wait);
142 wake_up_locked(&sem->wait);
143 spin_unlock_irqrestore(&sem->wait.lock, flags);
144
145 tsk->state = TASK_RUNNING;
146 return retval;
147}
148EXPORT_SYMBOL(__down_interruptible);
diff --git a/arch/avr32/kernel/setup.c b/arch/avr32/kernel/setup.c
new file mode 100644
index 000000000000..5d68f3c6990b
--- /dev/null
+++ b/arch/avr32/kernel/setup.c
@@ -0,0 +1,335 @@
1/*
2 * Copyright (C) 2004-2006 Atmel Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#include <linux/clk.h>
10#include <linux/init.h>
11#include <linux/sched.h>
12#include <linux/console.h>
13#include <linux/ioport.h>
14#include <linux/bootmem.h>
15#include <linux/fs.h>
16#include <linux/module.h>
17#include <linux/root_dev.h>
18#include <linux/cpu.h>
19
20#include <asm/sections.h>
21#include <asm/processor.h>
22#include <asm/pgtable.h>
23#include <asm/setup.h>
24#include <asm/sysreg.h>
25
26#include <asm/arch/board.h>
27#include <asm/arch/init.h>
28
29extern int root_mountflags;
30
31/*
32 * Bootloader-provided information about physical memory
33 */
34struct tag_mem_range *mem_phys;
35struct tag_mem_range *mem_reserved;
36struct tag_mem_range *mem_ramdisk;
37
38/*
39 * Initialize loops_per_jiffy as 5000000 (500MIPS).
40 * Better make it too large than too small...
41 */
42struct avr32_cpuinfo boot_cpu_data = {
43 .loops_per_jiffy = 5000000
44};
45EXPORT_SYMBOL(boot_cpu_data);
46
47static char command_line[COMMAND_LINE_SIZE];
48
49/*
50 * Should be more than enough, but if you have a _really_ complex
51 * setup, you might need to increase the size of this...
52 */
53static struct tag_mem_range __initdata mem_range_cache[32];
54static unsigned mem_range_next_free;
55
56/*
57 * Standard memory resources
58 */
59static struct resource mem_res[] = {
60 {
61 .name = "Kernel code",
62 .start = 0,
63 .end = 0,
64 .flags = IORESOURCE_MEM
65 },
66 {
67 .name = "Kernel data",
68 .start = 0,
69 .end = 0,
70 .flags = IORESOURCE_MEM,
71 },
72};
73
74#define kernel_code mem_res[0]
75#define kernel_data mem_res[1]
76
77/*
78 * Early framebuffer allocation. Works as follows:
79 * - If fbmem_size is zero, nothing will be allocated or reserved.
80 * - If fbmem_start is zero when setup_bootmem() is called,
81 * fbmem_size bytes will be allocated from the bootmem allocator.
82 * - If fbmem_start is nonzero, an area of size fbmem_size will be
83 * reserved at the physical address fbmem_start if necessary. If
84 * the area isn't in a memory region known to the kernel, it will
85 * be left alone.
86 *
87 * Board-specific code may use these variables to set up platform data
88 * for the framebuffer driver if fbmem_size is nonzero.
89 */
90static unsigned long __initdata fbmem_start;
91static unsigned long __initdata fbmem_size;
92
93/*
94 * "fbmem=xxx[kKmM]" allocates the specified amount of boot memory for
95 * use as framebuffer.
96 *
97 * "fbmem=xxx[kKmM]@yyy[kKmM]" defines a memory region of size xxx and
98 * starting at yyy to be reserved for use as framebuffer.
99 *
100 * The kernel won't verify that the memory region starting at yyy
101 * actually contains usable RAM.
102 */
103static int __init early_parse_fbmem(char *p)
104{
105 fbmem_size = memparse(p, &p);
106 if (*p == '@')
107		fbmem_start = memparse(p + 1, &p);
108 return 0;
109}
110early_param("fbmem", early_parse_fbmem);
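As described in the comment above, both forms are given on the kernel command line; for example (size and address purely illustrative):

	fbmem=600k              allocate 600 KiB from bootmem for the framebuffer
	fbmem=1M@0x10600000     reserve 1 MiB at physical address 0x10600000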
111
112static inline void __init resource_init(void)
113{
114 struct tag_mem_range *region;
115
116 kernel_code.start = __pa(init_mm.start_code);
117 kernel_code.end = __pa(init_mm.end_code - 1);
118 kernel_data.start = __pa(init_mm.end_code);
119 kernel_data.end = __pa(init_mm.brk - 1);
120
121 for (region = mem_phys; region; region = region->next) {
122 struct resource *res;
123 unsigned long phys_start, phys_end;
124
125 if (region->size == 0)
126 continue;
127
128 phys_start = region->addr;
129 phys_end = phys_start + region->size - 1;
130
131 res = alloc_bootmem_low(sizeof(*res));
132 res->name = "System RAM";
133 res->start = phys_start;
134 res->end = phys_end;
135 res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
136
137 request_resource (&iomem_resource, res);
138
139 if (kernel_code.start >= res->start &&
140 kernel_code.end <= res->end)
141 request_resource (res, &kernel_code);
142 if (kernel_data.start >= res->start &&
143 kernel_data.end <= res->end)
144 request_resource (res, &kernel_data);
145 }
146}
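With a single physical memory region, the nesting that resource_init() sets up would show in /proc/iomem roughly as follows (all addresses hypothetical; only the resource names and nesting come from the code above):

	10000000-10ffffff : System RAM
	  10000000-101fffff : Kernel code
	  10200000-102fffff : Kernel data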
147
148static int __init parse_tag_core(struct tag *tag)
149{
150 if (tag->hdr.size > 2) {
151 if ((tag->u.core.flags & 1) == 0)
152 root_mountflags &= ~MS_RDONLY;
153 ROOT_DEV = new_decode_dev(tag->u.core.rootdev);
154 }
155 return 0;
156}
157__tagtable(ATAG_CORE, parse_tag_core);
158
159static int __init parse_tag_mem_range(struct tag *tag,
160 struct tag_mem_range **root)
161{
162 struct tag_mem_range *cur, **pprev;
163 struct tag_mem_range *new;
164
165 /*
166 * Ignore zero-sized entries. If we're running standalone, the
167 * SDRAM code may emit such entries if something goes
168 * wrong...
169 */
170 if (tag->u.mem_range.size == 0)
171 return 0;
172
173 /*
174 * Copy the data so the bootmem init code doesn't need to care
175 * about it.
176 */
177 if (mem_range_next_free >=
178 (sizeof(mem_range_cache) / sizeof(mem_range_cache[0])))
179 panic("Physical memory map too complex!\n");
180
181 new = &mem_range_cache[mem_range_next_free++];
182 *new = tag->u.mem_range;
183
184 pprev = root;
185 cur = *root;
186 while (cur) {
187 pprev = &cur->next;
188 cur = cur->next;
189 }
190
191 *pprev = new;
192 new->next = NULL;
193
194 return 0;
195}
196
197static int __init parse_tag_mem(struct tag *tag)
198{
199 return parse_tag_mem_range(tag, &mem_phys);
200}
201__tagtable(ATAG_MEM, parse_tag_mem);
202
203static int __init parse_tag_cmdline(struct tag *tag)
204{
205 strlcpy(saved_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE);
206 return 0;
207}
208__tagtable(ATAG_CMDLINE, parse_tag_cmdline);
209
210static int __init parse_tag_rdimg(struct tag *tag)
211{
212 return parse_tag_mem_range(tag, &mem_ramdisk);
213}
214__tagtable(ATAG_RDIMG, parse_tag_rdimg);
215
216static int __init parse_tag_clock(struct tag *tag)
217{
218 /*
219 * We'll figure out the clocks by peeking at the system
220 * manager regs directly.
221 */
222 return 0;
223}
224__tagtable(ATAG_CLOCK, parse_tag_clock);
225
226static int __init parse_tag_rsvd_mem(struct tag *tag)
227{
228 return parse_tag_mem_range(tag, &mem_reserved);
229}
230__tagtable(ATAG_RSVD_MEM, parse_tag_rsvd_mem);
231
232static int __init parse_tag_ethernet(struct tag *tag)
233{
234#if 0
235 const struct platform_device *pdev;
236
237 /*
238 * We really need a bus type that supports "classes"...this
239 * will do for now (until we must handle other kinds of
240 * ethernet controllers)
241 */
242 pdev = platform_get_device("macb", tag->u.ethernet.mac_index);
243 if (pdev && pdev->dev.platform_data) {
244 struct eth_platform_data *data = pdev->dev.platform_data;
245
246 data->valid = 1;
247 data->mii_phy_addr = tag->u.ethernet.mii_phy_addr;
248 memcpy(data->hw_addr, tag->u.ethernet.hw_address,
249 sizeof(data->hw_addr));
250 }
251#endif
252 return 0;
253}
254__tagtable(ATAG_ETHERNET, parse_tag_ethernet);
255
256/*
257 * Scan the tag table for this tag, and call its parse function. The
258 * tag table is built by the linker from all the __tagtable
259 * declarations.
260 */
261static int __init parse_tag(struct tag *tag)
262{
263 extern struct tagtable __tagtable_begin, __tagtable_end;
264 struct tagtable *t;
265
266 for (t = &__tagtable_begin; t < &__tagtable_end; t++)
267 if (tag->hdr.tag == t->tag) {
268 t->parse(tag);
269 break;
270 }
271
272 return t < &__tagtable_end;
273}
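The __tagtable() macro itself lives in <asm/setup.h> and is outside this hunk; conceptually it drops an entry like the following into a linker section bounded by __tagtable_begin and __tagtable_end, which is what parse_tag() walks. The section name and exact field layout here are assumptions for illustration.

	struct tagtable {
		__u32 tag;			/* ATAG_* value to match */
		int (*parse)(struct tag *);	/* handler called by parse_tag() */
	};

	#define __tagtable(tag, fn)						\
		static struct tagtable __tagtable_##fn				\
			__attribute__((used, section(".taglist.init"))) =	\
			{ tag, fn }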
274
275/*
276 * Parse all tags in the list we got from the boot loader
277 */
278static void __init parse_tags(struct tag *t)
279{
280 for (; t->hdr.tag != ATAG_NONE; t = tag_next(t))
281 if (!parse_tag(t))
282 printk(KERN_WARNING
283 "Ignoring unrecognised tag 0x%08x\n",
284 t->hdr.tag);
285}
286
287void __init setup_arch (char **cmdline_p)
288{
289 struct clk *cpu_clk;
290
291 parse_tags(bootloader_tags);
292
293 setup_processor();
294 setup_platform();
295
296 cpu_clk = clk_get(NULL, "cpu");
297 if (IS_ERR(cpu_clk)) {
298 printk(KERN_WARNING "Warning: Unable to get CPU clock\n");
299 } else {
300 unsigned long cpu_hz = clk_get_rate(cpu_clk);
301
302 /*
303 * Well, duh, but it's probably a good idea to
304 * increment the use count.
305 */
306 clk_enable(cpu_clk);
307
308 boot_cpu_data.clk = cpu_clk;
309 boot_cpu_data.loops_per_jiffy = cpu_hz * 4;
310 printk("CPU: Running at %lu.%03lu MHz\n",
311 ((cpu_hz + 500) / 1000) / 1000,
312 ((cpu_hz + 500) / 1000) % 1000);
313 }
314
315 init_mm.start_code = (unsigned long) &_text;
316 init_mm.end_code = (unsigned long) &_etext;
317 init_mm.end_data = (unsigned long) &_edata;
318 init_mm.brk = (unsigned long) &_end;
319
320 strlcpy(command_line, saved_command_line, COMMAND_LINE_SIZE);
321 *cmdline_p = command_line;
322 parse_early_param();
323
324 setup_bootmem();
325
326 board_setup_fbmem(fbmem_start, fbmem_size);
327
328#ifdef CONFIG_VT
329 conswitchp = &dummy_con;
330#endif
331
332 paging_init();
333
334 resource_init();
335}
diff --git a/arch/avr32/kernel/signal.c b/arch/avr32/kernel/signal.c
new file mode 100644
index 000000000000..33096651c24f
--- /dev/null
+++ b/arch/avr32/kernel/signal.c
@@ -0,0 +1,328 @@
1/*
2 * Copyright (C) 2004-2006 Atmel Corporation
3 *
4 * Based on linux/arch/sh/kernel/signal.c
5 * Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
6 * Copyright (C) 1991, 1992 Linus Torvalds
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/sched.h>
14#include <linux/mm.h>
15#include <linux/errno.h>
16#include <linux/ptrace.h>
17#include <linux/unistd.h>
18#include <linux/suspend.h>
19
20#include <asm/uaccess.h>
21#include <asm/ucontext.h>
22
23#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
24
25asmlinkage int sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
26 struct pt_regs *regs)
27{
28 return do_sigaltstack(uss, uoss, regs->sp);
29}
30
31struct rt_sigframe
32{
33 struct siginfo info;
34 struct ucontext uc;
35 unsigned long retcode;
36};
37
38static int
39restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
40{
41 int err = 0;
42
43#define COPY(x) err |= __get_user(regs->x, &sc->x)
44 COPY(sr);
45 COPY(pc);
46 COPY(lr);
47 COPY(sp);
48 COPY(r12);
49 COPY(r11);
50 COPY(r10);
51 COPY(r9);
52 COPY(r8);
53 COPY(r7);
54 COPY(r6);
55 COPY(r5);
56 COPY(r4);
57 COPY(r3);
58 COPY(r2);
59 COPY(r1);
60 COPY(r0);
61#undef COPY
62
63 /*
64 * Don't allow anyone to pretend they're running in supervisor
65 * mode or something...
66 */
67 err |= !valid_user_regs(regs);
68
69 return err;
70}
71
72
73asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
74{
75 struct rt_sigframe __user *frame;
76 sigset_t set;
77
78 frame = (struct rt_sigframe __user *)regs->sp;
79 pr_debug("SIG return: frame = %p\n", frame);
80
81 if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
82 goto badframe;
83
84 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
85 goto badframe;
86
87 sigdelsetmask(&set, ~_BLOCKABLE);
88 spin_lock_irq(&current->sighand->siglock);
89 current->blocked = set;
90 recalc_sigpending();
91 spin_unlock_irq(&current->sighand->siglock);
92
93 if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
94 goto badframe;
95
96 pr_debug("Context restored: pc = %08lx, lr = %08lx, sp = %08lx\n",
97 regs->pc, regs->lr, regs->sp);
98
99 return regs->r12;
100
101badframe:
102 force_sig(SIGSEGV, current);
103 return 0;
104}
105
106static int
107setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs)
108{
109 int err = 0;
110
111#define COPY(x) err |= __put_user(regs->x, &sc->x)
112 COPY(sr);
113 COPY(pc);
114 COPY(lr);
115 COPY(sp);
116 COPY(r12);
117 COPY(r11);
118 COPY(r10);
119 COPY(r9);
120 COPY(r8);
121 COPY(r7);
122 COPY(r6);
123 COPY(r5);
124 COPY(r4);
125 COPY(r3);
126 COPY(r2);
127 COPY(r1);
128 COPY(r0);
129#undef COPY
130
131 return err;
132}
133
134static inline void __user *
135get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, int framesize)
136{
137 unsigned long sp = regs->sp;
138
139 if ((ka->sa.sa_flags & SA_ONSTACK) && !sas_ss_flags(sp))
140 sp = current->sas_ss_sp + current->sas_ss_size;
141
142 return (void __user *)((sp - framesize) & ~3);
143}
144
145static int
146setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
147 sigset_t *set, struct pt_regs *regs)
148{
149 struct rt_sigframe __user *frame;
150 int err = 0;
151
152 frame = get_sigframe(ka, regs, sizeof(*frame));
153 err = -EFAULT;
154 if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
155 goto out;
156
157 /*
158 * Set up the return code:
159 *
160 * mov r8, __NR_rt_sigreturn
161 * scall
162 *
163 * Note: This will blow up since we're using a non-executable
164 * stack. Better use SA_RESTORER.
165 */
166#if __NR_rt_sigreturn > 127
167# error __NR_rt_sigreturn must be <= 127 to fit in a short mov
168#endif
169 err = __put_user(0x3008d733 | (__NR_rt_sigreturn << 20),
170 &frame->retcode);
171
172 err |= copy_siginfo_to_user(&frame->info, info);
173
174 /* Set up the ucontext */
175 err |= __put_user(0, &frame->uc.uc_flags);
176 err |= __put_user(NULL, &frame->uc.uc_link);
177 err |= __put_user((void __user *)current->sas_ss_sp,
178 &frame->uc.uc_stack.ss_sp);
179 err |= __put_user(sas_ss_flags(regs->sp),
180 &frame->uc.uc_stack.ss_flags);
181 err |= __put_user(current->sas_ss_size,
182 &frame->uc.uc_stack.ss_size);
183 err |= setup_sigcontext(&frame->uc.uc_mcontext, regs);
184 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
185
186 if (err)
187 goto out;
188
189 regs->r12 = sig;
190 regs->r11 = (unsigned long) &frame->info;
191 regs->r10 = (unsigned long) &frame->uc;
192 regs->sp = (unsigned long) frame;
193 if (ka->sa.sa_flags & SA_RESTORER)
194 regs->lr = (unsigned long)ka->sa.sa_restorer;
195 else {
196 printk(KERN_NOTICE "[%s:%d] did not set SA_RESTORER\n",
197 current->comm, current->pid);
198 regs->lr = (unsigned long) &frame->retcode;
199 }
200
201 pr_debug("SIG deliver [%s:%d]: sig=%d sp=0x%lx pc=0x%lx->0x%p lr=0x%lx\n",
202 current->comm, current->pid, sig, regs->sp,
203 regs->pc, ka->sa.sa_handler, regs->lr);
204
205 regs->pc = (unsigned long) ka->sa.sa_handler;
206
207out:
208 return err;
209}
210
211static inline void restart_syscall(struct pt_regs *regs)
212{
213 if (regs->r12 == -ERESTART_RESTARTBLOCK)
214 regs->r8 = __NR_restart_syscall;
215 else
216 regs->r12 = regs->r12_orig;
217 regs->pc -= 2;
218}
219
220static inline void
221handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
222 sigset_t *oldset, struct pt_regs *regs, int syscall)
223{
224 int ret;
225
226 /*
227 * Set up the stack frame
228 */
229 ret = setup_rt_frame(sig, ka, info, oldset, regs);
230
231 /*
232 * Check that the resulting registers are sane
233 */
234 ret |= !valid_user_regs(regs);
235
236 /*
237 * Block the signal if we were unsuccessful.
238 */
239 if (ret != 0 || !(ka->sa.sa_flags & SA_NODEFER)) {
240 spin_lock_irq(&current->sighand->siglock);
241 sigorsets(&current->blocked, &current->blocked,
242 &ka->sa.sa_mask);
243 sigaddset(&current->blocked, sig);
244 recalc_sigpending();
245 spin_unlock_irq(&current->sighand->siglock);
246 }
247
248 if (ret == 0)
249 return;
250
251 force_sigsegv(sig, current);
252}
253
254/*
255 * Note that 'init' is a special process: it doesn't get signals it
256 * doesn't want to handle. Thus you cannot kill init even with a
257 * SIGKILL even by mistake.
258 */
259int do_signal(struct pt_regs *regs, sigset_t *oldset, int syscall)
260{
261 siginfo_t info;
262 int signr;
263 struct k_sigaction ka;
264
265 /*
266 * We want the common case to go fast, which is why we may in
267 * certain cases get here from kernel mode. Just return
268 * without doing anything if so.
269 */
270 if (!user_mode(regs))
271 return 0;
272
273 if (try_to_freeze()) {
274 signr = 0;
275 if (!signal_pending(current))
276 goto no_signal;
277 }
278
279 if (test_thread_flag(TIF_RESTORE_SIGMASK))
280 oldset = &current->saved_sigmask;
281 else if (!oldset)
282 oldset = &current->blocked;
283
284 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
285no_signal:
286 if (syscall) {
287 switch (regs->r12) {
288 case -ERESTART_RESTARTBLOCK:
289 case -ERESTARTNOHAND:
290 if (signr > 0) {
291 regs->r12 = -EINTR;
292 break;
293 }
294 /* fall through */
295 case -ERESTARTSYS:
296 if (signr > 0 && !(ka.sa.sa_flags & SA_RESTART)) {
297 regs->r12 = -EINTR;
298 break;
299 }
300 /* fall through */
301 case -ERESTARTNOINTR:
302 restart_syscall(regs);
303 }
304 }
305
306 if (signr == 0) {
307 /* No signal to deliver -- put the saved sigmask back */
308 if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
309 clear_thread_flag(TIF_RESTORE_SIGMASK);
310 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
311 }
312 return 0;
313 }
314
315 handle_signal(signr, &ka, &info, oldset, regs, syscall);
316 return 1;
317}
318
319asmlinkage void do_notify_resume(struct pt_regs *regs, struct thread_info *ti)
320{
321 int syscall = 0;
322
323 if ((sysreg_read(SR) & MODE_MASK) == MODE_SUPERVISOR)
324 syscall = 1;
325
326 if (ti->flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
327 do_signal(regs, &current->blocked, syscall);
328}
diff --git a/arch/avr32/kernel/switch_to.S b/arch/avr32/kernel/switch_to.S
new file mode 100644
index 000000000000..a48d046723c5
--- /dev/null
+++ b/arch/avr32/kernel/switch_to.S
@@ -0,0 +1,35 @@
1/*
2 * Copyright (C) 2004-2006 Atmel Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#include <asm/sysreg.h>
10
11 .text
12 .global __switch_to
13 .type __switch_to, @function
14
15 /* Switch thread context from "prev" to "next", returning "last"
16 * r12 : prev
17 * r11 : &prev->thread + 1
18 * r10 : &next->thread
19 */
20__switch_to:
21 stm --r11, r0,r1,r2,r3,r4,r5,r6,r7,sp,lr
22 mfsr r9, SYSREG_SR
23 st.w --r11, r9
24 ld.w r8, r10++
25 /*
26 * schedule() may have been called from a mode with a different
27 * set of registers. Make sure we don't lose anything here.
28 */
29 pushm r10,r12
30 mtsr SYSREG_SR, r8
31 frs /* flush the return stack */
32 sub pc, -2 /* flush the pipeline */
33 popm r10,r12
34 ldm r10++, r0,r1,r2,r3,r4,r5,r6,r7,sp,pc
35 .size __switch_to, . - __switch_to
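The C-side wrapper that loads r12/r11/r10 as listed in the comment above is defined in <asm/system.h>, which is not part of this hunk. Assuming the usual AVR32 convention of passing the first three arguments in r12, r11 and r10, a sketch of what it amounts to:

	struct task_struct;

	extern struct task_struct *__switch_to(struct task_struct *prev,
					       struct thread_struct *prev_thread_end,
					       struct thread_struct *next_thread);

	#define switch_to(prev, next, last)				\
		do {							\
			last = __switch_to(prev, &prev->thread + 1,	\
					   &next->thread);		\
		} while (0)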
diff --git a/arch/avr32/kernel/sys_avr32.c b/arch/avr32/kernel/sys_avr32.c
new file mode 100644
index 000000000000..6ec5693da448
--- /dev/null
+++ b/arch/avr32/kernel/sys_avr32.c
@@ -0,0 +1,51 @@
1/*
2 * Copyright (C) 2004-2006 Atmel Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8#include <linux/errno.h>
9#include <linux/fs.h>
10#include <linux/file.h>
11#include <linux/mm.h>
12#include <linux/unistd.h>
13
14#include <asm/mman.h>
15#include <asm/uaccess.h>
16
17asmlinkage int sys_pipe(unsigned long __user *filedes)
18{
19 int fd[2];
20 int error;
21
22 error = do_pipe(fd);
23 if (!error) {
24 if (copy_to_user(filedes, fd, sizeof(fd)))
25 error = -EFAULT;
26 }
27 return error;
28}
29
30asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
31 unsigned long prot, unsigned long flags,
32 unsigned long fd, off_t offset)
33{
34 int error = -EBADF;
35 struct file *file = NULL;
36
37 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
38 if (!(flags & MAP_ANONYMOUS)) {
39 file = fget(fd);
40 if (!file)
41 return error;
42 }
43
44 down_write(&current->mm->mmap_sem);
45 error = do_mmap_pgoff(file, addr, len, prot, flags, offset);
46 up_write(&current->mm->mmap_sem);
47
48 if (file)
49 fput(file);
50 return error;
51}
diff --git a/arch/avr32/kernel/syscall-stubs.S b/arch/avr32/kernel/syscall-stubs.S
new file mode 100644
index 000000000000..7589a9b426cb
--- /dev/null
+++ b/arch/avr32/kernel/syscall-stubs.S
@@ -0,0 +1,102 @@
1/*
2 * Copyright (C) 2005-2006 Atmel Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9/*
10 * Stubs for syscalls that require access to pt_regs or that take more
11 * than five parameters.
12 */
13
14#define ARG6 r3
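The reason for ARG6: assuming the first five C arguments travel in r12..r8, the sixth argument of a call such as sys_splice() is expected on the stack, so the stubs below push it before branching to the C handler. For reference, the six-argument prototype is:

	asmlinkage long sys_splice(int fd_in, loff_t __user *off_in,
				   int fd_out, loff_t __user *off_out,
				   size_t len, unsigned int flags);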
15
16 .text
17 .global __sys_rt_sigsuspend
18 .type __sys_rt_sigsuspend,@function
19__sys_rt_sigsuspend:
20 mov r10, sp
21 rjmp sys_rt_sigsuspend
22
23 .global __sys_sigaltstack
24 .type __sys_sigaltstack,@function
25__sys_sigaltstack:
26 mov r10, sp
27 rjmp sys_sigaltstack
28
29 .global __sys_rt_sigreturn
30 .type __sys_rt_sigreturn,@function
31__sys_rt_sigreturn:
32 mov r12, sp
33 rjmp sys_rt_sigreturn
34
35 .global __sys_fork
36 .type __sys_fork,@function
37__sys_fork:
38 mov r12, sp
39 rjmp sys_fork
40
41 .global __sys_clone
42 .type __sys_clone,@function
43__sys_clone:
44 mov r8, sp
45 rjmp sys_clone
46
47 .global __sys_vfork
48 .type __sys_vfork,@function
49__sys_vfork:
50 mov r12, sp
51 rjmp sys_vfork
52
53 .global __sys_execve
54 .type __sys_execve,@function
55__sys_execve:
56 mov r9, sp
57 rjmp sys_execve
58
59 .global __sys_mmap2
60 .type __sys_mmap2,@function
61__sys_mmap2:
62 pushm lr
63 st.w --sp, ARG6
64 rcall sys_mmap2
65 sub sp, -4
66 popm pc
67
68 .global __sys_sendto
69 .type __sys_sendto,@function
70__sys_sendto:
71 pushm lr
72 st.w --sp, ARG6
73 rcall sys_sendto
74 sub sp, -4
75 popm pc
76
77 .global __sys_recvfrom
78 .type __sys_recvfrom,@function
79__sys_recvfrom:
80 pushm lr
81 st.w --sp, ARG6
82 rcall sys_recvfrom
83 sub sp, -4
84 popm pc
85
86 .global __sys_pselect6
87 .type __sys_pselect6,@function
88__sys_pselect6:
89 pushm lr
90 st.w --sp, ARG6
91 rcall sys_pselect6
92 sub sp, -4
93 popm pc
94
95 .global __sys_splice
96 .type __sys_splice,@function
97__sys_splice:
98 pushm lr
99 st.w --sp, ARG6
100 rcall sys_splice
101 sub sp, -4
102 popm pc
diff --git a/arch/avr32/kernel/syscall_table.S b/arch/avr32/kernel/syscall_table.S
new file mode 100644
index 000000000000..63b206965d05
--- /dev/null
+++ b/arch/avr32/kernel/syscall_table.S
@@ -0,0 +1,289 @@
1/*
2 * AVR32 system call table
3 *
4 * Copyright (C) 2004-2006 Atmel Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#if !defined(CONFIG_NFSD) && !defined(CONFIG_NFSD_MODULE)
12#define sys_nfsservctl sys_ni_syscall
13#endif
14
15#if !defined(CONFIG_SYSV_IPC)
16# define sys_ipc sys_ni_syscall
17#endif
18
19 .section .rodata,"a",@progbits
20 .type sys_call_table,@object
21 .global sys_call_table
22 .align 2
23sys_call_table:
24 .long sys_restart_syscall
25 .long sys_exit
26 .long __sys_fork
27 .long sys_read
28 .long sys_write
29 .long sys_open /* 5 */
30 .long sys_close
31 .long sys_umask
32 .long sys_creat
33 .long sys_link
34 .long sys_unlink /* 10 */
35 .long __sys_execve
36 .long sys_chdir
37 .long sys_time
38 .long sys_mknod
39 .long sys_chmod /* 15 */
40 .long sys_chown
41 .long sys_lchown
42 .long sys_lseek
43 .long sys_llseek
44 .long sys_getpid /* 20 */
45 .long sys_mount
46 .long sys_umount
47 .long sys_setuid
48 .long sys_getuid
49 .long sys_stime /* 25 */
50 .long sys_ptrace
51 .long sys_alarm
52 .long sys_pause
53 .long sys_utime
54 .long sys_newstat /* 30 */
55 .long sys_newfstat
56 .long sys_newlstat
57 .long sys_access
58 .long sys_chroot
59 .long sys_sync /* 35 */
60 .long sys_fsync
61 .long sys_kill
62 .long sys_rename
63 .long sys_mkdir
64 .long sys_rmdir /* 40 */
65 .long sys_dup
66 .long sys_pipe
67 .long sys_times
68 .long __sys_clone
69 .long sys_brk /* 45 */
70 .long sys_setgid
71 .long sys_getgid
72 .long sys_getcwd
73 .long sys_geteuid
74 .long sys_getegid /* 50 */
75 .long sys_acct
76 .long sys_setfsuid
77 .long sys_setfsgid
78 .long sys_ioctl
79 .long sys_fcntl /* 55 */
80 .long sys_setpgid
81 .long sys_mremap
82 .long sys_setresuid
83 .long sys_getresuid
84 .long sys_setreuid /* 60 */
85 .long sys_setregid
86 .long sys_ustat
87 .long sys_dup2
88 .long sys_getppid
89 .long sys_getpgrp /* 65 */
90 .long sys_setsid
91 .long sys_rt_sigaction
92 .long __sys_rt_sigreturn
93 .long sys_rt_sigprocmask
94 .long sys_rt_sigpending /* 70 */
95 .long sys_rt_sigtimedwait
96 .long sys_rt_sigqueueinfo
97 .long __sys_rt_sigsuspend
98 .long sys_sethostname
99 .long sys_setrlimit /* 75 */
100 .long sys_getrlimit
101 .long sys_getrusage
102 .long sys_gettimeofday
103 .long sys_settimeofday
104 .long sys_getgroups /* 80 */
105 .long sys_setgroups
106 .long sys_select
107 .long sys_symlink
108 .long sys_fchdir
109 .long sys_readlink /* 85 */
110 .long sys_pread64
111 .long sys_pwrite64
112 .long sys_swapon
113 .long sys_reboot
114 .long __sys_mmap2 /* 90 */
115 .long sys_munmap
116 .long sys_truncate
117 .long sys_ftruncate
118 .long sys_fchmod
119 .long sys_fchown /* 95 */
120 .long sys_getpriority
121 .long sys_setpriority
122 .long sys_wait4
123 .long sys_statfs
124 .long sys_fstatfs /* 100 */
125 .long sys_vhangup
126 .long __sys_sigaltstack
127 .long sys_syslog
128 .long sys_setitimer
129 .long sys_getitimer /* 105 */
130 .long sys_swapoff
131 .long sys_sysinfo
132 .long sys_ipc
133 .long sys_sendfile
134 .long sys_setdomainname /* 110 */
135 .long sys_newuname
136 .long sys_adjtimex
137 .long sys_mprotect
138 .long __sys_vfork
139 .long sys_init_module /* 115 */
140 .long sys_delete_module
141 .long sys_quotactl
142 .long sys_getpgid
143 .long sys_bdflush
144 .long sys_sysfs /* 120 */
145 .long sys_personality
146 .long sys_ni_syscall /* reserved for afs_syscall */
147 .long sys_getdents
148 .long sys_flock
149 .long sys_msync /* 125 */
150 .long sys_readv
151 .long sys_writev
152 .long sys_getsid
153 .long sys_fdatasync
154 .long sys_sysctl /* 130 */
155 .long sys_mlock
156 .long sys_munlock
157 .long sys_mlockall
158 .long sys_munlockall
159 .long sys_sched_setparam /* 135 */
160 .long sys_sched_getparam
161 .long sys_sched_setscheduler
162 .long sys_sched_getscheduler
163 .long sys_sched_yield
164 .long sys_sched_get_priority_max /* 140 */
165 .long sys_sched_get_priority_min
166 .long sys_sched_rr_get_interval
167 .long sys_nanosleep
168 .long sys_poll
169 .long sys_nfsservctl /* 145 */
170 .long sys_setresgid
171 .long sys_getresgid
172 .long sys_prctl
173 .long sys_socket
174 .long sys_bind /* 150 */
175 .long sys_connect
176 .long sys_listen
177 .long sys_accept
178 .long sys_getsockname
179 .long sys_getpeername /* 155 */
180 .long sys_socketpair
181 .long sys_send
182 .long sys_recv
183 .long __sys_sendto
184 .long __sys_recvfrom /* 160 */
185 .long sys_shutdown
186 .long sys_setsockopt
187 .long sys_getsockopt
188 .long sys_sendmsg
189 .long sys_recvmsg /* 165 */
190 .long sys_truncate64
191 .long sys_ftruncate64
192 .long sys_stat64
193 .long sys_lstat64
194 .long sys_fstat64 /* 170 */
195 .long sys_pivot_root
196 .long sys_mincore
197 .long sys_madvise
198 .long sys_getdents64
199 .long sys_fcntl64 /* 175 */
200 .long sys_gettid
201 .long sys_readahead
202 .long sys_setxattr
203 .long sys_lsetxattr
204 .long sys_fsetxattr /* 180 */
205 .long sys_getxattr
206 .long sys_lgetxattr
207 .long sys_fgetxattr
208 .long sys_listxattr
209 .long sys_llistxattr /* 185 */
210 .long sys_flistxattr
211 .long sys_removexattr
212 .long sys_lremovexattr
213 .long sys_fremovexattr
214 .long sys_tkill /* 190 */
215 .long sys_sendfile64
216 .long sys_futex
217 .long sys_sched_setaffinity
218 .long sys_sched_getaffinity
219 .long sys_capget /* 195 */
220 .long sys_capset
221 .long sys_io_setup
222 .long sys_io_destroy
223 .long sys_io_getevents
224 .long sys_io_submit /* 200 */
225 .long sys_io_cancel
226 .long sys_fadvise64
227 .long sys_exit_group
228 .long sys_lookup_dcookie
229 .long sys_epoll_create /* 205 */
230 .long sys_epoll_ctl
231 .long sys_epoll_wait
232 .long sys_remap_file_pages
233 .long sys_set_tid_address
234 .long sys_timer_create /* 210 */
235 .long sys_timer_settime
236 .long sys_timer_gettime
237 .long sys_timer_getoverrun
238 .long sys_timer_delete
239 .long sys_clock_settime /* 215 */
240 .long sys_clock_gettime
241 .long sys_clock_getres
242 .long sys_clock_nanosleep
243 .long sys_statfs64
244 .long sys_fstatfs64 /* 220 */
245 .long sys_tgkill
246 .long sys_ni_syscall /* reserved for TUX */
247 .long sys_utimes
248 .long sys_fadvise64_64
249 .long sys_cacheflush /* 225 */
250 .long sys_ni_syscall /* sys_vserver */
251 .long sys_mq_open
252 .long sys_mq_unlink
253 .long sys_mq_timedsend
254 .long sys_mq_timedreceive /* 230 */
255 .long sys_mq_notify
256 .long sys_mq_getsetattr
257 .long sys_kexec_load
258 .long sys_waitid
259 .long sys_add_key /* 235 */
260 .long sys_request_key
261 .long sys_keyctl
262 .long sys_ioprio_set
263 .long sys_ioprio_get
264 .long sys_inotify_init /* 240 */
265 .long sys_inotify_add_watch
266 .long sys_inotify_rm_watch
267 .long sys_openat
268 .long sys_mkdirat
269 .long sys_mknodat /* 245 */
270 .long sys_fchownat
271 .long sys_futimesat
272 .long sys_fstatat64
273 .long sys_unlinkat
274 .long sys_renameat /* 250 */
275 .long sys_linkat
276 .long sys_symlinkat
277 .long sys_readlinkat
278 .long sys_fchmodat
279 .long sys_faccessat /* 255 */
280 .long __sys_pselect6
281 .long sys_ppoll
282 .long sys_unshare
283 .long sys_set_robust_list
284 .long sys_get_robust_list /* 260 */
285 .long __sys_splice
286 .long sys_sync_file_range
287 .long sys_tee
288 .long sys_vmsplice
289 .long sys_ni_syscall /* r8 is saturated at nr_syscalls */
diff --git a/arch/avr32/kernel/time.c b/arch/avr32/kernel/time.c
new file mode 100644
index 000000000000..b0e6b5855a38
--- /dev/null
+++ b/arch/avr32/kernel/time.c
@@ -0,0 +1,238 @@
1/*
2 * Copyright (C) 2004-2006 Atmel Corporation
3 *
4 * Based on MIPS implementation arch/mips/kernel/time.c
5 * Copyright 2001 MontaVista Software Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/clk.h>
13#include <linux/clocksource.h>
14#include <linux/time.h>
15#include <linux/module.h>
16#include <linux/interrupt.h>
17#include <linux/irq.h>
18#include <linux/kernel_stat.h>
19#include <linux/errno.h>
20#include <linux/init.h>
21#include <linux/profile.h>
22#include <linux/sysdev.h>
23
24#include <asm/div64.h>
25#include <asm/sysreg.h>
26#include <asm/io.h>
27#include <asm/sections.h>
28
29static cycle_t read_cycle_count(void)
30{
31 return (cycle_t)sysreg_read(COUNT);
32}
33
34static struct clocksource clocksource_avr32 = {
35 .name = "avr32",
36 .rating = 350,
37 .read = read_cycle_count,
38 .mask = CLOCKSOURCE_MASK(32),
39 .shift = 16,
40 .is_continuous = 1,
41};
42
43/*
44 * By default we provide the null RTC ops
45 */
46static unsigned long null_rtc_get_time(void)
47{
48 return mktime(2004, 1, 1, 0, 0, 0);
49}
50
51static int null_rtc_set_time(unsigned long sec)
52{
53 return 0;
54}
55
56static unsigned long (*rtc_get_time)(void) = null_rtc_get_time;
57static int (*rtc_set_time)(unsigned long) = null_rtc_set_time;
58
59/* how many counter cycles in a jiffy? */
60static unsigned long cycles_per_jiffy;
61
62/* cycle counter value at the previous timer interrupt */
63static unsigned int timerhi, timerlo;
64
65/* the count value for the next timer interrupt */
66static unsigned int expirelo;
67
68static void avr32_timer_ack(void)
69{
70 unsigned int count;
71
72 /* Ack this timer interrupt and set the next one */
73 expirelo += cycles_per_jiffy;
74 if (expirelo == 0) {
75 printk(KERN_DEBUG "expirelo == 0\n");
76 sysreg_write(COMPARE, expirelo + 1);
77 } else {
78 sysreg_write(COMPARE, expirelo);
79 }
80
81 /* Check to see if we have missed any timer interrupts */
82 count = sysreg_read(COUNT);
83 if ((count - expirelo) < 0x7fffffff) {
84 expirelo = count + cycles_per_jiffy;
85 sysreg_write(COMPARE, expirelo);
86 }
87}
88
89static unsigned int avr32_hpt_read(void)
90{
91 return sysreg_read(COUNT);
92}
93
94/*
95 * Taken from MIPS c0_hpt_timer_init().
96 *
97 * Why is it so complicated, and what is "count"? My assumption is
98 * that `count' specifies the "reference cycle", i.e. the cycle since
99 * reset that should mean "zero". The reason COUNT is written twice is
100 * probably to make sure we don't get any timer interrupts while we
101 * are messing with the counter.
102 */
103static void avr32_hpt_init(unsigned int count)
104{
105 count = sysreg_read(COUNT) - count;
106 expirelo = (count / cycles_per_jiffy + 1) * cycles_per_jiffy;
107 sysreg_write(COUNT, expirelo - cycles_per_jiffy);
108 sysreg_write(COMPARE, expirelo);
109 sysreg_write(COUNT, count);
110}
111
112/*
113 * Scheduler clock - returns current time in nanosec units.
114 */
115unsigned long long sched_clock(void)
116{
117 /* There must be better ways...? */
118 return (unsigned long long)jiffies * (1000000000 / HZ);
119}
120
121/*
122 * local_timer_interrupt() does profiling and process accounting on a
123 * per-CPU basis.
124 *
125 * In UP mode, it is invoked from the (global) timer_interrupt.
126 */
127static void local_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
128{
129 if (current->pid)
130 profile_tick(CPU_PROFILING, regs);
131 update_process_times(user_mode(regs));
132}
133
134static irqreturn_t
135timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
136{
137 unsigned int count;
138
139 /* ack timer interrupt and try to set next interrupt */
140 count = avr32_hpt_read();
141 avr32_timer_ack();
142
143 /* Update timerhi/timerlo for intra-jiffy calibration */
144 timerhi += count < timerlo; /* Wrap around */
145 timerlo = count;
146
147 /*
148 * Call the generic timer interrupt handler
149 */
150 write_seqlock(&xtime_lock);
151 do_timer(regs);
152 write_sequnlock(&xtime_lock);
153
154 /*
155 * In UP mode, we call local_timer_interrupt() to do profiling
156 * and process accounting.
157 *
158 * SMP is not supported yet.
159 */
160 local_timer_interrupt(irq, dev_id, regs);
161
162 return IRQ_HANDLED;
163}
164
165static struct irqaction timer_irqaction = {
166 .handler = timer_interrupt,
167 .flags = IRQF_DISABLED,
168 .name = "timer",
169};
170
171void __init time_init(void)
172{
173 unsigned long mult, shift, count_hz;
174 int ret;
175
176 xtime.tv_sec = rtc_get_time();
177 xtime.tv_nsec = 0;
178
179 set_normalized_timespec(&wall_to_monotonic,
180 -xtime.tv_sec, -xtime.tv_nsec);
181
182 printk("Before time_init: count=%08lx, compare=%08lx\n",
183 (unsigned long)sysreg_read(COUNT),
184 (unsigned long)sysreg_read(COMPARE));
185
186 count_hz = clk_get_rate(boot_cpu_data.clk);
187 shift = clocksource_avr32.shift;
188 mult = clocksource_hz2mult(count_hz, shift);
189 clocksource_avr32.mult = mult;
190
191 printk("Cycle counter: mult=%lu, shift=%lu\n", mult, shift);
192
193 {
194 u64 tmp;
195
196 tmp = TICK_NSEC;
197 tmp <<= shift;
198 tmp += mult / 2;
199 do_div(tmp, mult);
200
201 cycles_per_jiffy = tmp;
202 }
203
204 /* This sets up the high precision timer for the first interrupt. */
205 avr32_hpt_init(avr32_hpt_read());
206
207 printk("After time_init: count=%08lx, compare=%08lx\n",
208 (unsigned long)sysreg_read(COUNT),
209 (unsigned long)sysreg_read(COMPARE));
210
211 ret = clocksource_register(&clocksource_avr32);
212 if (ret)
213 printk(KERN_ERR
214 "timer: could not register clocksource: %d\n", ret);
215
216 ret = setup_irq(0, &timer_irqaction);
217 if (ret)
218 printk("timer: could not request IRQ 0: %d\n", ret);
219}
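The cycles_per_jiffy computation above is just TICK_NSEC converted to cycles, rounded, using the clocksource's mult/shift pair: since mult is approximately (10^9 << shift) / count_hz, the division yields roughly count_hz / HZ. As a worked example with a hypothetical 200 MHz CPU clock and HZ=100, TICK_NSEC is 10,000,000 ns and cycles_per_jiffy comes out to about 2,000,000, so the COMPARE interrupt fires once every 2 million COUNT ticks.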
220
221static struct sysdev_class timer_class = {
222 set_kset_name("timer"),
223};
224
225static struct sys_device timer_device = {
226 .id = 0,
227 .cls = &timer_class,
228};
229
230static int __init init_timer_sysfs(void)
231{
232 int err = sysdev_class_register(&timer_class);
233 if (!err)
234 err = sysdev_register(&timer_device);
235 return err;
236}
237
238device_initcall(init_timer_sysfs);
diff --git a/arch/avr32/kernel/traps.c b/arch/avr32/kernel/traps.c
new file mode 100644
index 000000000000..7e803f4d7a12
--- /dev/null
+++ b/arch/avr32/kernel/traps.c
@@ -0,0 +1,425 @@
1/*
2 * Copyright (C) 2004-2006 Atmel Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8#undef DEBUG
9#include <linux/sched.h>
10#include <linux/init.h>
11#include <linux/module.h>
12#include <linux/kallsyms.h>
13#include <linux/notifier.h>
14
15#include <asm/traps.h>
16#include <asm/sysreg.h>
17#include <asm/addrspace.h>
18#include <asm/ocd.h>
19#include <asm/mmu_context.h>
20#include <asm/uaccess.h>
21
22static void dump_mem(const char *str, unsigned long bottom, unsigned long top)
23{
24 unsigned long p;
25 int i;
26
27 printk("%s(0x%08lx to 0x%08lx)\n", str, bottom, top);
28
29 for (p = bottom & ~31; p < top; ) {
30 printk("%04lx: ", p & 0xffff);
31
32 for (i = 0; i < 8; i++, p += 4) {
33 unsigned int val;
34
35 if (p < bottom || p >= top)
36 printk(" ");
37 else {
38 if (__get_user(val, (unsigned int __user *)p)) {
39 printk("\n");
40 goto out;
41 }
42 printk("%08x ", val);
43 }
44 }
45 printk("\n");
46 }
47
48out:
49 return;
50}
51
52#ifdef CONFIG_FRAME_POINTER
53static inline void __show_trace(struct task_struct *tsk, unsigned long *sp,
54 struct pt_regs *regs)
55{
56 unsigned long __user *fp;
57 unsigned long __user *last_fp = NULL;
58
59 if (regs) {
60 fp = (unsigned long __user *)regs->r7;
61 } else if (tsk == current) {
62 register unsigned long __user *real_fp __asm__("r7");
63 fp = real_fp;
64 } else {
65 fp = (unsigned long __user *)tsk->thread.cpu_context.r7;
66 }
67
68 /*
69 * Walk the stack until (a) we get an exception, (b) the frame
70 * pointer becomes zero, or (c) the frame pointer gets stuck
71 * at the same value.
72 */
73 while (fp && fp != last_fp) {
74 unsigned long lr, new_fp = 0;
75
76 last_fp = fp;
77 if (__get_user(lr, fp))
78 break;
79 if (fp && __get_user(new_fp, fp + 1))
80 break;
81 fp = (unsigned long __user *)new_fp;
82
83 printk(" [<%08lx>] ", lr);
84 print_symbol("%s\n", lr);
85 }
86 printk("\n");
87}
88#else
89static inline void __show_trace(struct task_struct *tsk, unsigned long *sp,
90 struct pt_regs *regs)
91{
92 unsigned long addr;
93
94 while (!kstack_end(sp)) {
95 addr = *sp++;
96 if (kernel_text_address(addr)) {
97 printk(" [<%08lx>] ", addr);
98 print_symbol("%s\n", addr);
99 }
100 }
101}
102#endif
103
104void show_trace(struct task_struct *tsk, unsigned long *sp,
105 struct pt_regs *regs)
106{
107 if (regs &&
108 (((regs->sr & MODE_MASK) == MODE_EXCEPTION) ||
109 ((regs->sr & MODE_MASK) == MODE_USER)))
110 return;
111
112 printk ("Call trace:");
113#ifdef CONFIG_KALLSYMS
114 printk("\n");
115#endif
116
117 __show_trace(tsk, sp, regs);
118 printk("\n");
119}
120
121void show_stack(struct task_struct *tsk, unsigned long *sp)
122{
123 unsigned long stack;
124
125 if (!tsk)
126 tsk = current;
127 if (sp == 0) {
128 if (tsk == current) {
129 register unsigned long *real_sp __asm__("sp");
130 sp = real_sp;
131 } else {
132 sp = (unsigned long *)tsk->thread.cpu_context.ksp;
133 }
134 }
135
136 stack = (unsigned long)sp;
137 dump_mem("Stack: ", stack,
138 THREAD_SIZE + (unsigned long)tsk->thread_info);
139 show_trace(tsk, sp, NULL);
140}
141
142void dump_stack(void)
143{
144 show_stack(NULL, NULL);
145}
146EXPORT_SYMBOL(dump_stack);
147
148ATOMIC_NOTIFIER_HEAD(avr32_die_chain);
149
150int register_die_notifier(struct notifier_block *nb)
151{
152 pr_debug("register_die_notifier: %p\n", nb);
153
154 return atomic_notifier_chain_register(&avr32_die_chain, nb);
155}
156EXPORT_SYMBOL(register_die_notifier);
157
158int unregister_die_notifier(struct notifier_block *nb)
159{
160 return atomic_notifier_chain_unregister(&avr32_die_chain, nb);
161}
162EXPORT_SYMBOL(unregister_die_notifier);
163
164static DEFINE_SPINLOCK(die_lock);
165
166void __die(const char *str, struct pt_regs *regs, unsigned long err,
167 const char *file, const char *func, unsigned long line)
168{
169 struct task_struct *tsk = current;
170 static int die_counter;
171
172 console_verbose();
173 spin_lock_irq(&die_lock);
174 bust_spinlocks(1);
175
176 printk(KERN_ALERT "%s", str);
177 if (file && func)
178 printk(" in %s:%s, line %lu", file, func, line);
179 printk("[#%d]:\n", ++die_counter);
180 print_modules();
181 show_regs(regs);
182 printk("Process %s (pid: %d, stack limit = 0x%p)\n",
183 tsk->comm, tsk->pid, tsk->thread_info + 1);
184
185 if (!user_mode(regs) || in_interrupt()) {
186 dump_mem("Stack: ", regs->sp,
187 THREAD_SIZE + (unsigned long)tsk->thread_info);
188 }
189
190 bust_spinlocks(0);
191 spin_unlock_irq(&die_lock);
192 do_exit(SIGSEGV);
193}
194
195void __die_if_kernel(const char *str, struct pt_regs *regs, unsigned long err,
196 const char *file, const char *func, unsigned long line)
197{
198 if (!user_mode(regs))
199 __die(str, regs, err, file, func, line);
200}
201
202asmlinkage void do_nmi(unsigned long ecr, struct pt_regs *regs)
203{
204#ifdef CONFIG_SUBARCH_AVR32B
205 /*
206 * The exception entry always saves RSR_EX. For NMI, this is
207 * wrong; it should be RSR_NMI.
208 */
209 regs->sr = sysreg_read(RSR_NMI);
210#endif
211
212 printk("NMI taken!!!!\n");
213 die("NMI", regs, ecr);
214 BUG();
215}
216
217asmlinkage void do_critical_exception(unsigned long ecr, struct pt_regs *regs)
218{
219 printk("Unable to handle critical exception %lu at pc = %08lx!\n",
220 ecr, regs->pc);
221 die("Oops", regs, ecr);
222 BUG();
223}
224
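/*
 * Address (alignment) exceptions are not fixed up: in kernel mode we die,
 * in user mode the offending process gets SIGBUS with si_code BUS_ADRALN.
 */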
225asmlinkage void do_address_exception(unsigned long ecr, struct pt_regs *regs)
226{
227 siginfo_t info;
228
229 die_if_kernel("Oops: Address exception in kernel mode", regs, ecr);
230
231#ifdef DEBUG
232 if (ecr == ECR_ADDR_ALIGN_X)
233 pr_debug("Instruction Address Exception at pc = %08lx\n",
234 regs->pc);
235 else if (ecr == ECR_ADDR_ALIGN_R)
236 pr_debug("Data Address Exception (Read) at pc = %08lx\n",
237 regs->pc);
238 else if (ecr == ECR_ADDR_ALIGN_W)
239 pr_debug("Data Address Exception (Write) at pc = %08lx\n",
240 regs->pc);
241 else
242 BUG();
243
244 show_regs(regs);
245#endif
246
247 info.si_signo = SIGBUS;
248 info.si_errno = 0;
249 info.si_code = BUS_ADRALN;
250 info.si_addr = (void __user *)regs->pc;
251
252 force_sig_info(SIGBUS, &info, current);
253}
254
255/* This way of handling undefined instructions is stolen from ARM */
256static LIST_HEAD(undef_hook);
257static DEFINE_SPINLOCK(undef_lock);
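/*
 * Each hook supplies an (insn_mask, insn_val) pair; do_illegal_opcode()
 * offers a faulting instruction to every matching hook and treats it as
 * handled when the hook's fn() callback returns 0.
 *
 * A minimal, purely illustrative user might look like this -- the mask
 * and value below are made-up placeholders, not real opcodes:
 *
 *	static int my_fixup(struct pt_regs *regs, u32 insn)
 *	{
 *		return 0;	(0 means "handled")
 *	}
 *
 *	static struct undef_hook my_hook = {
 *		.insn_mask	= 0xffff0000,
 *		.insn_val	= 0xe1600000,
 *		.fn		= my_fixup,
 *	};
 *
 *	register_undef_hook(&my_hook);
 */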
258
259void register_undef_hook(struct undef_hook *hook)
260{
261 spin_lock_irq(&undef_lock);
262 list_add(&hook->node, &undef_hook);
263 spin_unlock_irq(&undef_lock);
264}
265
266void unregister_undef_hook(struct undef_hook *hook)
267{
268 spin_lock_irq(&undef_lock);
269 list_del(&hook->node);
270 spin_unlock_irq(&undef_lock);
271}
272
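/*
 * A "coprocessor absent" exception may simply mean the coprocessor is
 * disabled.  Try enabling it through the corresponding CPUCR bit (bits
 * 24-31 enable coprocessors 0-7) and return 0 so the faulting instruction
 * can be retried; return -1 if the bit does not stick, i.e. no such
 * coprocessor exists.
 */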
273static int do_cop_absent(u32 insn)
274{
275 int cop_nr;
276 u32 cpucr;
277 if ((insn & 0xfdf00000) == 0xf1900000)
278 /* LDC0 */
279 cop_nr = 0;
280 else
281 cop_nr = (insn >> 13) & 0x7;
282
283 /* Try enabling the coprocessor */
284 cpucr = sysreg_read(CPUCR);
285 cpucr |= (1 << (24 + cop_nr));
286 sysreg_write(CPUCR, cpucr);
287
288 cpucr = sysreg_read(CPUCR);
289 if (!(cpucr & (1 << (24 + cop_nr)))) {
290 printk("Coprocessor #%i not found!\n", cop_nr);
291 return -1;
292 }
293
294 return 0;
295}
296
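/*
 * With CONFIG_DEBUG_BUGVERBOSE, BUG() emits the 16-bit BUG opcode followed
 * by a 16-bit line number and a 32-bit pointer to the file name;
 * do_bug_verbose() below decodes and prints that record.
 */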
297#ifdef CONFIG_BUG
298#ifdef CONFIG_DEBUG_BUGVERBOSE
299static inline void do_bug_verbose(struct pt_regs *regs, u32 insn)
300{
301 char *file;
302 u16 line;
303 char c;
304
305 if (__get_user(line, (u16 __user *)(regs->pc + 2)))
306 return;
307 if (__get_user(file, (char * __user *)(regs->pc + 4))
308 || (unsigned long)file < PAGE_OFFSET
309 || __get_user(c, file))
310 file = "<bad filename>";
311
312 printk(KERN_ALERT "kernel BUG at %s:%d!\n", file, line);
313}
314#else
315static inline void do_bug_verbose(struct pt_regs *regs, u32 insn)
316{
317
318}
319#endif
320#endif
321
322asmlinkage void do_illegal_opcode(unsigned long ecr, struct pt_regs *regs)
323{
324 u32 insn;
325 struct undef_hook *hook;
326 siginfo_t info;
327 void __user *pc;
328
329 if (!user_mode(regs))
330 goto kernel_trap;
331
332 local_irq_enable();
333
334 pc = (void __user *)instruction_pointer(regs);
335 if (__get_user(insn, (u32 __user *)pc))
336 goto invalid_area;
337
338 if (ecr == ECR_COPROC_ABSENT) {
339 if (do_cop_absent(insn) == 0)
340 return;
341 }
342
343 spin_lock_irq(&undef_lock);
344 list_for_each_entry(hook, &undef_hook, node) {
345 if ((insn & hook->insn_mask) == hook->insn_val) {
346 if (hook->fn(regs, insn) == 0) {
347 spin_unlock_irq(&undef_lock);
348 return;
349 }
350 }
351 }
352 spin_unlock_irq(&undef_lock);
353
354invalid_area:
355
356#ifdef DEBUG
357 printk("Illegal instruction at pc = %08lx\n", regs->pc);
358 if (regs->pc < TASK_SIZE) {
359 unsigned long ptbr, pgd, pte, *p;
360
361 ptbr = sysreg_read(PTBR);
362 p = (unsigned long *)ptbr;
363 pgd = p[regs->pc >> 22];
364 p = (unsigned long *)((pgd & 0x1ffff000) | 0x80000000);
365 pte = p[(regs->pc >> 12) & 0x3ff];
366 printk("page table: 0x%08lx -> 0x%08lx -> 0x%08lx\n", ptbr, pgd, pte);
367 }
368#endif
369
370 info.si_signo = SIGILL;
371 info.si_errno = 0;
372 info.si_addr = (void __user *)regs->pc;
373 switch (ecr) {
374 case ECR_ILLEGAL_OPCODE:
375 case ECR_UNIMPL_INSTRUCTION:
376 info.si_code = ILL_ILLOPC;
377 break;
378 case ECR_PRIVILEGE_VIOLATION:
379 info.si_code = ILL_PRVOPC;
380 break;
381 case ECR_COPROC_ABSENT:
382 info.si_code = ILL_COPROC;
383 break;
384 default:
385 BUG();
386 }
387
388 force_sig_info(SIGILL, &info, current);
389 return;
390
391kernel_trap:
392#ifdef CONFIG_BUG
393 if (__kernel_text_address(instruction_pointer(regs))) {
394 insn = *(u16 *)instruction_pointer(regs);
395 if (insn == AVR32_BUG_OPCODE) {
396 do_bug_verbose(regs, insn);
397 die("Kernel BUG", regs, 0);
398 return;
399 }
400 }
401#endif
402
403 die("Oops: Illegal instruction in kernel code", regs, ecr);
404}
405
406asmlinkage void do_fpe(unsigned long ecr, struct pt_regs *regs)
407{
408 siginfo_t info;
409
410 printk("Floating-point exception at pc = %08lx\n", regs->pc);
411
412 /* We have no FPU... */
413 info.si_signo = SIGILL;
414 info.si_errno = 0;
415 info.si_addr = (void __user *)regs->pc;
416 info.si_code = ILL_COPROC;
417
418 force_sig_info(SIGILL, &info, current);
419}
420
421
422void __init trap_init(void)
423{
424
425}
diff --git a/arch/avr32/kernel/vmlinux.lds.c b/arch/avr32/kernel/vmlinux.lds.c
new file mode 100644
index 000000000000..cdd627c6b7dc
--- /dev/null
+++ b/arch/avr32/kernel/vmlinux.lds.c
@@ -0,0 +1,139 @@
1/*
2 * AVR32 linker script for the Linux kernel
3 *
4 * Copyright (C) 2004-2006 Atmel Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#define LOAD_OFFSET 0x00000000
11#include <asm-generic/vmlinux.lds.h>
12
13OUTPUT_FORMAT("elf32-avr32", "elf32-avr32", "elf32-avr32")
14OUTPUT_ARCH(avr32)
15ENTRY(_start)
16
17/* Big-endian: jiffies aliases the low 32 bits of jiffies_64, at offset 4 */
18jiffies = jiffies_64 + 4;
19
20SECTIONS
21{
22 . = CONFIG_ENTRY_ADDRESS;
23 .init : AT(ADDR(.init) - LOAD_OFFSET) {
24 _stext = .;
25 __init_begin = .;
26 _sinittext = .;
27 *(.text.reset)
28 *(.init.text)
29 _einittext = .;
30 . = ALIGN(4);
31 __tagtable_begin = .;
32 *(.taglist)
33 __tagtable_end = .;
34 *(.init.data)
35 . = ALIGN(16);
36 __setup_start = .;
37 *(.init.setup)
38 __setup_end = .;
39 . = ALIGN(4);
40 __initcall_start = .;
41 *(.initcall1.init)
42 *(.initcall2.init)
43 *(.initcall3.init)
44 *(.initcall4.init)
45 *(.initcall5.init)
46 *(.initcall6.init)
47 *(.initcall7.init)
48 __initcall_end = .;
49 __con_initcall_start = .;
50 *(.con_initcall.init)
51 __con_initcall_end = .;
52 __security_initcall_start = .;
53 *(.security_initcall.init)
54 __security_initcall_end = .;
55 . = ALIGN(32);
56 __initramfs_start = .;
57 *(.init.ramfs)
58 __initramfs_end = .;
59 . = ALIGN(4096);
60 __init_end = .;
61 }
62
63 . = ALIGN(8192);
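	/*
	 * The exception handlers must come first in .text: _evba marks the
	 * exception vector base, with the TLB miss handlers at fixed offsets
	 * 0x50/0x60/0x70 and the system call entry at offset 0x100.
	 */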
64 .text : AT(ADDR(.text) - LOAD_OFFSET) {
65 _evba = .;
66 _text = .;
67 *(.ex.text)
68 . = 0x50;
69 *(.tlbx.ex.text)
70 . = 0x60;
71 *(.tlbr.ex.text)
72 . = 0x70;
73 *(.tlbw.ex.text)
74 . = 0x100;
75 *(.scall.text)
76 *(.irq.text)
77 *(.text)
78 SCHED_TEXT
79 LOCK_TEXT
80 KPROBES_TEXT
81 *(.fixup)
82 *(.gnu.warning)
83 _etext = .;
84 } = 0xd703d703
85
86 . = ALIGN(4);
87 __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
88 __start___ex_table = .;
89 *(__ex_table)
90 __stop___ex_table = .;
91 }
92
93 RODATA
94
95 . = ALIGN(8192);
96
97 .data : AT(ADDR(.data) - LOAD_OFFSET) {
98 _data = .;
99 _sdata = .;
100 /*
101 * First, the init task union, aligned to an 8K boundary.
102 */
103 *(.data.init_task)
104
105 /* Then, the cacheline aligned data */
106 . = ALIGN(32);
107 *(.data.cacheline_aligned)
108
109 /* And the rest... */
110 *(.data.rel*)
111 *(.data)
112 CONSTRUCTORS
113
114 _edata = .;
115 }
116
117
118 . = ALIGN(8);
119 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
120 __bss_start = .;
121 *(.bss)
122 *(COMMON)
123 . = ALIGN(8);
124 __bss_stop = .;
125 _end = .;
126 }
127
128 /* When code in the kernel is NOT built as a module, its module-exit
129  * code and data still end up in these sections.  They can be discarded,
130  * since exit code is only ever called for loadable modules.
131  */
132 /DISCARD/ : {
133 *(.exit.text)
134 *(.exit.data)
135 *(.exitcall.exit)
136 }
137
138 DWARF_DEBUG
139}