Diffstat (limited to 'arch/openrisc/kernel')
-rw-r--r-- | arch/openrisc/kernel/Makefile         |   14
-rw-r--r-- | arch/openrisc/kernel/asm-offsets.c    |   70
-rw-r--r-- | arch/openrisc/kernel/dma.c            |  217
-rw-r--r-- | arch/openrisc/kernel/entry.S          | 1128
-rw-r--r-- | arch/openrisc/kernel/head.S           | 1607
-rw-r--r-- | arch/openrisc/kernel/idle.c           |   77
-rw-r--r-- | arch/openrisc/kernel/init_task.c      |   41
-rw-r--r-- | arch/openrisc/kernel/irq.c            |  172
-rw-r--r-- | arch/openrisc/kernel/module.c         |   72
-rw-r--r-- | arch/openrisc/kernel/or32_ksyms.c     |   46
-rw-r--r-- | arch/openrisc/kernel/process.c        |  311
-rw-r--r-- | arch/openrisc/kernel/prom.c           |  108
-rw-r--r-- | arch/openrisc/kernel/ptrace.c         |  211
-rw-r--r-- | arch/openrisc/kernel/setup.c          |  381
-rw-r--r-- | arch/openrisc/kernel/signal.c         |  389
-rw-r--r-- | arch/openrisc/kernel/sys_call_table.c |   28
-rw-r--r-- | arch/openrisc/kernel/sys_or32.c       |   57
-rw-r--r-- | arch/openrisc/kernel/time.c           |  181
-rw-r--r-- | arch/openrisc/kernel/traps.c          |  366
-rw-r--r-- | arch/openrisc/kernel/vmlinux.h        |   12
-rw-r--r-- | arch/openrisc/kernel/vmlinux.lds.S    |  115
21 files changed, 5603 insertions, 0 deletions
diff --git a/arch/openrisc/kernel/Makefile b/arch/openrisc/kernel/Makefile
new file mode 100644
index 00000000000..9a4c2706d79
--- /dev/null
+++ b/arch/openrisc/kernel/Makefile
@@ -0,0 +1,14 @@
1 | # | ||
2 | # Makefile for the linux kernel. | ||
3 | # | ||
4 | |||
5 | extra-y := head.o vmlinux.lds init_task.o | ||
6 | |||
7 | obj-y := setup.o idle.o or32_ksyms.o process.o dma.o \ | ||
8 | traps.o time.o irq.o entry.o ptrace.o signal.o sys_or32.o \ | ||
9 | sys_call_table.o | ||
10 | |||
11 | obj-$(CONFIG_MODULES) += module.o | ||
12 | obj-$(CONFIG_OF) += prom.o | ||
13 | |||
14 | clean: | ||
diff --git a/arch/openrisc/kernel/asm-offsets.c b/arch/openrisc/kernel/asm-offsets.c
new file mode 100644
index 00000000000..1a242a0d758
--- /dev/null
+++ b/arch/openrisc/kernel/asm-offsets.c
@@ -0,0 +1,70 @@
1 | /* | ||
2 | * OpenRISC asm-offsets.c | ||
3 | * | ||
4 | * Linux architectural port borrowing liberally from similar works of | ||
5 | * others. All original copyrights apply as per the original source | ||
6 | * declaration. | ||
7 | * | ||
8 | * Modifications for the OpenRISC architecture: | ||
9 | * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> | ||
10 | * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or | ||
13 | * modify it under the terms of the GNU General Public License | ||
14 | * as published by the Free Software Foundation; either version | ||
15 | * 2 of the License, or (at your option) any later version. | ||
16 | * | ||
17 | * This program is used to generate definitions needed by | ||
18 | * assembly language modules. | ||
19 | * | ||
20 | * We use the technique used in the OSF Mach kernel code: | ||
21 | * generate asm statements containing #defines, | ||
22 | * compile this file to assembler, and then extract the | ||
23 | * #defines from the assembly-language output. | ||
24 | */ | ||
25 | |||
26 | #include <linux/signal.h> | ||
27 | #include <linux/sched.h> | ||
28 | #include <linux/kernel.h> | ||
29 | #include <linux/errno.h> | ||
30 | #include <linux/string.h> | ||
31 | #include <linux/types.h> | ||
32 | #include <linux/ptrace.h> | ||
33 | #include <linux/mman.h> | ||
34 | #include <linux/mm.h> | ||
35 | #include <linux/io.h> | ||
36 | #include <linux/thread_info.h> | ||
37 | #include <asm/page.h> | ||
38 | #include <asm/pgtable.h> | ||
39 | #include <asm/processor.h> | ||
40 | |||
41 | #define DEFINE(sym, val) \ | ||
42 | asm volatile("\n->" #sym " %0 " #val : : "i" (val)) | ||
43 | |||
44 | #define BLANK() asm volatile("\n->" : : ) | ||
45 | |||
46 | int main(void) | ||
47 | { | ||
48 | /* offsets into the task_struct */ | ||
49 | DEFINE(TASK_STATE, offsetof(struct task_struct, state)); | ||
50 | DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags)); | ||
51 | DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace)); | ||
52 | DEFINE(TASK_THREAD, offsetof(struct task_struct, thread)); | ||
53 | DEFINE(TASK_MM, offsetof(struct task_struct, mm)); | ||
54 | DEFINE(TASK_ACTIVE_MM, offsetof(struct task_struct, active_mm)); | ||
55 | |||
56 | /* offsets into thread_info */ | ||
57 | DEFINE(TI_TASK, offsetof(struct thread_info, task)); | ||
58 | DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); | ||
59 | DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); | ||
60 | DEFINE(TI_KSP, offsetof(struct thread_info, ksp)); | ||
61 | |||
62 | DEFINE(PT_SIZE, sizeof(struct pt_regs)); | ||
63 | |||
64 | /* Interrupt register frame */ | ||
65 | DEFINE(STACK_FRAME_OVERHEAD, STACK_FRAME_OVERHEAD); | ||
66 | DEFINE(INT_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs)); | ||
67 | |||
68 | DEFINE(NUM_USER_SEGMENTS, TASK_SIZE >> 28); | ||
69 | return 0; | ||
70 | } | ||
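
The DEFINE() markers above only make sense together with the build step that consumes them: asm-offsets.c is compiled to assembly (never assembled), and the Kbuild scripts scrape the lines beginning with "->" out of the generated .s file to produce <asm/asm-offsets.h> for use by entry.S and head.S. A minimal standalone sketch of the same technique outside the kernel build follows; the structure and symbol names here are made up for illustration.

/* offsets_demo.c -- illustrative only. Compile with "gcc -S offsets_demo.c"
 * and grep offsets_demo.s for lines starting with "->"; the file is never
 * assembled, since the emitted text is not valid assembly. */
#include <stddef.h>

struct demo_thread_info {
	unsigned long flags;
	int preempt_count;
	void *ksp;
};

#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

int main(void)
{
	DEFINE(DEMO_TI_FLAGS, offsetof(struct demo_thread_info, flags));
	DEFINE(DEMO_TI_KSP, offsetof(struct demo_thread_info, ksp));
	return 0;
}
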
diff --git a/arch/openrisc/kernel/dma.c b/arch/openrisc/kernel/dma.c
new file mode 100644
index 00000000000..f1c8ee2895d
--- /dev/null
+++ b/arch/openrisc/kernel/dma.c
@@ -0,0 +1,217 @@
1 | /* | ||
2 | * OpenRISC Linux | ||
3 | * | ||
4 | * Linux architectural port borrowing liberally from similar works of | ||
5 | * others. All original copyrights apply as per the original source | ||
6 | * declaration. | ||
7 | * | ||
8 | * Modifications for the OpenRISC architecture: | ||
9 | * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> | ||
10 | * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or | ||
13 | * modify it under the terms of the GNU General Public License | ||
14 | * as published by the Free Software Foundation; either version | ||
15 | * 2 of the License, or (at your option) any later version. | ||
16 | * | ||
17 | * DMA mapping callbacks... | ||
18 | * As alloc_coherent is the only DMA callback being used currently, that's | ||
19 | * the only thing implemented properly. The rest need looking into... | ||
20 | */ | ||
21 | |||
22 | #include <linux/dma-mapping.h> | ||
23 | #include <linux/dma-debug.h> | ||
24 | |||
25 | #include <asm/cpuinfo.h> | ||
26 | #include <asm/spr_defs.h> | ||
27 | #include <asm/tlbflush.h> | ||
28 | |||
29 | static int page_set_nocache(pte_t *pte, unsigned long addr, | ||
30 | unsigned long next, struct mm_walk *walk) | ||
31 | { | ||
32 | unsigned long cl; | ||
33 | |||
34 | pte_val(*pte) |= _PAGE_CI; | ||
35 | |||
36 | /* | ||
37 | * Flush the page out of the TLB so that the new page flags get | ||
38 | * picked up next time there's an access | ||
39 | */ | ||
40 | flush_tlb_page(NULL, addr); | ||
41 | |||
42 | /* Flush page out of dcache */ | ||
43 | for (cl = __pa(addr); cl < __pa(next); cl += cpuinfo.dcache_block_size) | ||
44 | mtspr(SPR_DCBFR, cl); | ||
45 | |||
46 | return 0; | ||
47 | } | ||
48 | |||
49 | static int page_clear_nocache(pte_t *pte, unsigned long addr, | ||
50 | unsigned long next, struct mm_walk *walk) | ||
51 | { | ||
52 | pte_val(*pte) &= ~_PAGE_CI; | ||
53 | |||
54 | /* | ||
55 | * Flush the page out of the TLB so that the new page flags get | ||
56 | * picked up next time there's an access | ||
57 | */ | ||
58 | flush_tlb_page(NULL, addr); | ||
59 | |||
60 | return 0; | ||
61 | } | ||
62 | |||
63 | /* | ||
64 | * Alloc "coherent" memory, which for OpenRISC means simply uncached. | ||
65 | * | ||
66 | * This function effectively just calls __get_free_pages, sets the | ||
67 | * cache-inhibit bit on those pages, and makes sure that the pages are | ||
68 | * flushed out of the cache before they are used. | ||
69 | * | ||
70 | */ | ||
71 | void *or1k_dma_alloc_coherent(struct device *dev, size_t size, | ||
72 | dma_addr_t *dma_handle, gfp_t gfp) | ||
73 | { | ||
74 | unsigned long va; | ||
75 | void *page; | ||
76 | struct mm_walk walk = { | ||
77 | .pte_entry = page_set_nocache, | ||
78 | .mm = &init_mm | ||
79 | }; | ||
80 | |||
81 | page = alloc_pages_exact(size, gfp); | ||
82 | if (!page) | ||
83 | return NULL; | ||
84 | |||
85 | /* This gives us the real physical address of the first page. */ | ||
86 | *dma_handle = __pa(page); | ||
87 | |||
88 | va = (unsigned long)page; | ||
89 | |||
90 | /* | ||
91 | * We need to iterate through the pages, clearing the dcache for | ||
92 | * them and setting the cache-inhibit bit. | ||
93 | */ | ||
94 | if (walk_page_range(va, va + size, &walk)) { | ||
95 | free_pages_exact(page, size); | ||
96 | return NULL; | ||
97 | } | ||
98 | |||
99 | return (void *)va; | ||
100 | } | ||
101 | |||
102 | void or1k_dma_free_coherent(struct device *dev, size_t size, void *vaddr, | ||
103 | dma_addr_t dma_handle) | ||
104 | { | ||
105 | unsigned long va = (unsigned long)vaddr; | ||
106 | struct mm_walk walk = { | ||
107 | .pte_entry = page_clear_nocache, | ||
108 | .mm = &init_mm | ||
109 | }; | ||
110 | |||
111 | /* walk_page_range shouldn't be able to fail here */ | ||
112 | WARN_ON(walk_page_range(va, va + size, &walk)); | ||
113 | |||
114 | free_pages_exact(vaddr, size); | ||
115 | } | ||
116 | |||
117 | dma_addr_t or1k_map_page(struct device *dev, struct page *page, | ||
118 | unsigned long offset, size_t size, | ||
119 | enum dma_data_direction dir, | ||
120 | struct dma_attrs *attrs) | ||
121 | { | ||
122 | unsigned long cl; | ||
123 | dma_addr_t addr = page_to_phys(page) + offset; | ||
124 | |||
125 | switch (dir) { | ||
126 | case DMA_TO_DEVICE: | ||
127 | /* Flush the dcache for the requested range */ | ||
128 | for (cl = addr; cl < addr + size; | ||
129 | cl += cpuinfo.dcache_block_size) | ||
130 | mtspr(SPR_DCBFR, cl); | ||
131 | break; | ||
132 | case DMA_FROM_DEVICE: | ||
133 | /* Invalidate the dcache for the requested range */ | ||
134 | for (cl = addr; cl < addr + size; | ||
135 | cl += cpuinfo.dcache_block_size) | ||
136 | mtspr(SPR_DCBIR, cl); | ||
137 | break; | ||
138 | default: | ||
139 | /* | ||
140 | * NOTE: If dir == DMA_BIDIRECTIONAL then there's no need to | ||
141 | * flush nor invalidate the cache here as the area will need | ||
142 | * to be manually synced anyway. | ||
143 | */ | ||
144 | break; | ||
145 | } | ||
146 | |||
147 | return addr; | ||
148 | } | ||
149 | |||
150 | void or1k_unmap_page(struct device *dev, dma_addr_t dma_handle, | ||
151 | size_t size, enum dma_data_direction dir, | ||
152 | struct dma_attrs *attrs) | ||
153 | { | ||
154 | /* Nothing special to do here... */ | ||
155 | } | ||
156 | |||
157 | int or1k_map_sg(struct device *dev, struct scatterlist *sg, | ||
158 | int nents, enum dma_data_direction dir, | ||
159 | struct dma_attrs *attrs) | ||
160 | { | ||
161 | struct scatterlist *s; | ||
162 | int i; | ||
163 | |||
164 | for_each_sg(sg, s, nents, i) { | ||
165 | s->dma_address = or1k_map_page(dev, sg_page(s), s->offset, | ||
166 | s->length, dir, NULL); | ||
167 | } | ||
168 | |||
169 | return nents; | ||
170 | } | ||
171 | |||
172 | void or1k_unmap_sg(struct device *dev, struct scatterlist *sg, | ||
173 | int nents, enum dma_data_direction dir, | ||
174 | struct dma_attrs *attrs) | ||
175 | { | ||
176 | struct scatterlist *s; | ||
177 | int i; | ||
178 | |||
179 | for_each_sg(sg, s, nents, i) { | ||
180 | or1k_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, NULL); | ||
181 | } | ||
182 | } | ||
183 | |||
184 | void or1k_sync_single_for_cpu(struct device *dev, | ||
185 | dma_addr_t dma_handle, size_t size, | ||
186 | enum dma_data_direction dir) | ||
187 | { | ||
188 | unsigned long cl; | ||
189 | dma_addr_t addr = dma_handle; | ||
190 | |||
191 | /* Invalidate the dcache for the requested range */ | ||
192 | for (cl = addr; cl < addr + size; cl += cpuinfo.dcache_block_size) | ||
193 | mtspr(SPR_DCBIR, cl); | ||
194 | } | ||
195 | |||
196 | void or1k_sync_single_for_device(struct device *dev, | ||
197 | dma_addr_t dma_handle, size_t size, | ||
198 | enum dma_data_direction dir) | ||
199 | { | ||
200 | unsigned long cl; | ||
201 | dma_addr_t addr = dma_handle; | ||
202 | |||
203 | /* Flush the dcache for the requested range */ | ||
204 | for (cl = addr; cl < addr + size; cl += cpuinfo.dcache_block_size) | ||
205 | mtspr(SPR_DCBFR, cl); | ||
206 | } | ||
207 | |||
208 | /* Number of entries preallocated for DMA-API debugging */ | ||
209 | #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16) | ||
210 | |||
211 | static int __init dma_init(void) | ||
212 | { | ||
213 | dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); | ||
214 | |||
215 | return 0; | ||
216 | } | ||
217 | fs_initcall(dma_init); | ||
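
A driver would not call or1k_dma_alloc_coherent() directly; it reaches this code through the generic DMA API once the arch's dma-mapping glue (not shown in this file) routes dma_alloc_coherent() here. A hedged consumer-side sketch, with a hypothetical device pointer and a made-up buffer size:

/* Hypothetical driver snippet: allocate a descriptor ring shared between
 * the device and the CPU. On this port the memory comes back cache-inhibited
 * (the _PAGE_CI path above), so no explicit cache maintenance is needed. */
#include <linux/dma-mapping.h>

#define MYDRV_RING_BYTES 4096	/* made-up size */

static void *mydrv_setup_ring(struct device *dev, dma_addr_t *ring_dma)
{
	void *ring;

	ring = dma_alloc_coherent(dev, MYDRV_RING_BYTES, ring_dma, GFP_KERNEL);
	if (!ring)
		return NULL;

	/* program *ring_dma into the device, access "ring" from the CPU */
	return ring;
}
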
diff --git a/arch/openrisc/kernel/entry.S b/arch/openrisc/kernel/entry.S
new file mode 100644
index 00000000000..d5f9c35a583
--- /dev/null
+++ b/arch/openrisc/kernel/entry.S
@@ -0,0 +1,1128 @@
1 | /* | ||
2 | * OpenRISC entry.S | ||
3 | * | ||
4 | * Linux architectural port borrowing liberally from similar works of | ||
5 | * others. All original copyrights apply as per the original source | ||
6 | * declaration. | ||
7 | * | ||
8 | * Modifications for the OpenRISC architecture: | ||
9 | * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> | ||
10 | * Copyright (C) 2005 Gyorgy Jeney <nog@bsemi.com> | ||
11 | * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> | ||
12 | * | ||
13 | * This program is free software; you can redistribute it and/or | ||
14 | * modify it under the terms of the GNU General Public License | ||
15 | * as published by the Free Software Foundation; either version | ||
16 | * 2 of the License, or (at your option) any later version. | ||
17 | */ | ||
18 | |||
19 | #include <linux/linkage.h> | ||
20 | |||
21 | #include <asm/processor.h> | ||
22 | #include <asm/unistd.h> | ||
23 | #include <asm/thread_info.h> | ||
24 | #include <asm/errno.h> | ||
25 | #include <asm/spr_defs.h> | ||
26 | #include <asm/page.h> | ||
27 | #include <asm/mmu.h> | ||
28 | #include <asm/pgtable.h> | ||
29 | #include <asm/asm-offsets.h> | ||
30 | |||
31 | #define DISABLE_INTERRUPTS(t1,t2) \ | ||
32 | l.mfspr t2,r0,SPR_SR ;\ | ||
33 | l.movhi t1,hi(~(SPR_SR_IEE|SPR_SR_TEE)) ;\ | ||
34 | l.ori t1,t1,lo(~(SPR_SR_IEE|SPR_SR_TEE)) ;\ | ||
35 | l.and t2,t2,t1 ;\ | ||
36 | l.mtspr r0,t2,SPR_SR | ||
37 | |||
38 | #define ENABLE_INTERRUPTS(t1) \ | ||
39 | l.mfspr t1,r0,SPR_SR ;\ | ||
40 | l.ori t1,t1,lo(SPR_SR_IEE|SPR_SR_TEE) ;\ | ||
41 | l.mtspr r0,t1,SPR_SR | ||
42 | |||
43 | /* =========================================================[ macros ]=== */ | ||
44 | |||
45 | /* | ||
46 | * We need to disable interrupts at the beginning of RESTORE_ALL | ||
47 | * since an interrupt might come in after we've loaded the EPC return address | ||
48 | * and overwrite EPC with an address somewhere in RESTORE_ALL, | ||
49 | * which is of course wrong! | ||
50 | */ | ||
51 | |||
52 | #define RESTORE_ALL \ | ||
53 | DISABLE_INTERRUPTS(r3,r4) ;\ | ||
54 | l.lwz r3,PT_PC(r1) ;\ | ||
55 | l.mtspr r0,r3,SPR_EPCR_BASE ;\ | ||
56 | l.lwz r3,PT_SR(r1) ;\ | ||
57 | l.mtspr r0,r3,SPR_ESR_BASE ;\ | ||
58 | l.lwz r2,PT_GPR2(r1) ;\ | ||
59 | l.lwz r3,PT_GPR3(r1) ;\ | ||
60 | l.lwz r4,PT_GPR4(r1) ;\ | ||
61 | l.lwz r5,PT_GPR5(r1) ;\ | ||
62 | l.lwz r6,PT_GPR6(r1) ;\ | ||
63 | l.lwz r7,PT_GPR7(r1) ;\ | ||
64 | l.lwz r8,PT_GPR8(r1) ;\ | ||
65 | l.lwz r9,PT_GPR9(r1) ;\ | ||
66 | l.lwz r10,PT_GPR10(r1) ;\ | ||
67 | l.lwz r11,PT_GPR11(r1) ;\ | ||
68 | l.lwz r12,PT_GPR12(r1) ;\ | ||
69 | l.lwz r13,PT_GPR13(r1) ;\ | ||
70 | l.lwz r14,PT_GPR14(r1) ;\ | ||
71 | l.lwz r15,PT_GPR15(r1) ;\ | ||
72 | l.lwz r16,PT_GPR16(r1) ;\ | ||
73 | l.lwz r17,PT_GPR17(r1) ;\ | ||
74 | l.lwz r18,PT_GPR18(r1) ;\ | ||
75 | l.lwz r19,PT_GPR19(r1) ;\ | ||
76 | l.lwz r20,PT_GPR20(r1) ;\ | ||
77 | l.lwz r21,PT_GPR21(r1) ;\ | ||
78 | l.lwz r22,PT_GPR22(r1) ;\ | ||
79 | l.lwz r23,PT_GPR23(r1) ;\ | ||
80 | l.lwz r24,PT_GPR24(r1) ;\ | ||
81 | l.lwz r25,PT_GPR25(r1) ;\ | ||
82 | l.lwz r26,PT_GPR26(r1) ;\ | ||
83 | l.lwz r27,PT_GPR27(r1) ;\ | ||
84 | l.lwz r28,PT_GPR28(r1) ;\ | ||
85 | l.lwz r29,PT_GPR29(r1) ;\ | ||
86 | l.lwz r30,PT_GPR30(r1) ;\ | ||
87 | l.lwz r31,PT_GPR31(r1) ;\ | ||
88 | l.lwz r1,PT_SP(r1) ;\ | ||
89 | l.rfe | ||
90 | |||
91 | |||
92 | #define EXCEPTION_ENTRY(handler) \ | ||
93 | .global handler ;\ | ||
94 | handler: ;\ | ||
95 | /* r1, EPCR, ESR are already saved */ ;\ | ||
96 | l.sw PT_GPR2(r1),r2 ;\ | ||
97 | l.sw PT_GPR3(r1),r3 ;\ | ||
98 | l.sw PT_ORIG_GPR11(r1),r11 ;\ | ||
99 | /* r4 already saved */ ;\ | ||
100 | l.sw PT_GPR5(r1),r5 ;\ | ||
101 | l.sw PT_GPR6(r1),r6 ;\ | ||
102 | l.sw PT_GPR7(r1),r7 ;\ | ||
103 | l.sw PT_GPR8(r1),r8 ;\ | ||
104 | l.sw PT_GPR9(r1),r9 ;\ | ||
105 | /* r10 already saved */ ;\ | ||
106 | l.sw PT_GPR11(r1),r11 ;\ | ||
107 | /* r12 already saved */ ;\ | ||
108 | l.sw PT_GPR13(r1),r13 ;\ | ||
109 | l.sw PT_GPR14(r1),r14 ;\ | ||
110 | l.sw PT_GPR15(r1),r15 ;\ | ||
111 | l.sw PT_GPR16(r1),r16 ;\ | ||
112 | l.sw PT_GPR17(r1),r17 ;\ | ||
113 | l.sw PT_GPR18(r1),r18 ;\ | ||
114 | l.sw PT_GPR19(r1),r19 ;\ | ||
115 | l.sw PT_GPR20(r1),r20 ;\ | ||
116 | l.sw PT_GPR21(r1),r21 ;\ | ||
117 | l.sw PT_GPR22(r1),r22 ;\ | ||
118 | l.sw PT_GPR23(r1),r23 ;\ | ||
119 | l.sw PT_GPR24(r1),r24 ;\ | ||
120 | l.sw PT_GPR25(r1),r25 ;\ | ||
121 | l.sw PT_GPR26(r1),r26 ;\ | ||
122 | l.sw PT_GPR27(r1),r27 ;\ | ||
123 | l.sw PT_GPR28(r1),r28 ;\ | ||
124 | l.sw PT_GPR29(r1),r29 ;\ | ||
125 | /* r30 already saved */ ;\ | ||
126 | /* l.sw PT_GPR30(r1),r30*/ ;\ | ||
127 | l.sw PT_GPR31(r1),r31 ;\ | ||
128 | l.sw PT_SYSCALLNO(r1),r0 | ||
129 | |||
130 | #define UNHANDLED_EXCEPTION(handler,vector) \ | ||
131 | .global handler ;\ | ||
132 | handler: ;\ | ||
133 | /* r1, EPCR, ESR already saved */ ;\ | ||
134 | l.sw PT_GPR2(r1),r2 ;\ | ||
135 | l.sw PT_GPR3(r1),r3 ;\ | ||
136 | l.sw PT_ORIG_GPR11(r1),r11 ;\ | ||
137 | l.sw PT_GPR5(r1),r5 ;\ | ||
138 | l.sw PT_GPR6(r1),r6 ;\ | ||
139 | l.sw PT_GPR7(r1),r7 ;\ | ||
140 | l.sw PT_GPR8(r1),r8 ;\ | ||
141 | l.sw PT_GPR9(r1),r9 ;\ | ||
142 | /* r10 already saved */ ;\ | ||
143 | l.sw PT_GPR11(r1),r11 ;\ | ||
144 | /* r12 already saved */ ;\ | ||
145 | l.sw PT_GPR13(r1),r13 ;\ | ||
146 | l.sw PT_GPR14(r1),r14 ;\ | ||
147 | l.sw PT_GPR15(r1),r15 ;\ | ||
148 | l.sw PT_GPR16(r1),r16 ;\ | ||
149 | l.sw PT_GPR17(r1),r17 ;\ | ||
150 | l.sw PT_GPR18(r1),r18 ;\ | ||
151 | l.sw PT_GPR19(r1),r19 ;\ | ||
152 | l.sw PT_GPR20(r1),r20 ;\ | ||
153 | l.sw PT_GPR21(r1),r21 ;\ | ||
154 | l.sw PT_GPR22(r1),r22 ;\ | ||
155 | l.sw PT_GPR23(r1),r23 ;\ | ||
156 | l.sw PT_GPR24(r1),r24 ;\ | ||
157 | l.sw PT_GPR25(r1),r25 ;\ | ||
158 | l.sw PT_GPR26(r1),r26 ;\ | ||
159 | l.sw PT_GPR27(r1),r27 ;\ | ||
160 | l.sw PT_GPR28(r1),r28 ;\ | ||
161 | l.sw PT_GPR29(r1),r29 ;\ | ||
162 | /* r31 already saved */ ;\ | ||
163 | l.sw PT_GPR30(r1),r30 ;\ | ||
164 | /* l.sw PT_GPR31(r1),r31 */ ;\ | ||
165 | l.sw PT_SYSCALLNO(r1),r0 ;\ | ||
166 | l.addi r3,r1,0 ;\ | ||
167 | /* r4 is exception EA */ ;\ | ||
168 | l.addi r5,r0,vector ;\ | ||
169 | l.jal unhandled_exception ;\ | ||
170 | l.nop ;\ | ||
171 | l.j _ret_from_exception ;\ | ||
172 | l.nop | ||
173 | |||
174 | /* | ||
175 | * NOTE: one should never assume that SPR_EPC, SPR_ESR, SPR_EEAR | ||
176 | * contain the same values as when the exception we're handling | ||
177 | * occurred. In fact they never do. If you need them, use the | ||
178 | * values saved on the stack (for SPR_EPC, SPR_ESR) or the content | ||
179 | * of r4 (for SPR_EEAR). For details, look at EXCEPTION_HANDLE() | ||
180 | * in 'arch/openrisc/kernel/head.S' | ||
181 | */ | ||
182 | |||
183 | /* =====================================================[ exceptions] === */ | ||
184 | |||
185 | /* ---[ 0x100: RESET exception ]----------------------------------------- */ | ||
186 | |||
187 | EXCEPTION_ENTRY(_tng_kernel_start) | ||
188 | l.jal _start | ||
189 | l.andi r0,r0,0 | ||
190 | |||
191 | /* ---[ 0x200: BUS exception ]------------------------------------------- */ | ||
192 | |||
193 | EXCEPTION_ENTRY(_bus_fault_handler) | ||
194 | /* r4: EA of fault (set by EXCEPTION_HANDLE) */ | ||
195 | l.jal do_bus_fault | ||
196 | l.addi r3,r1,0 /* pt_regs */ | ||
197 | |||
198 | l.j _ret_from_exception | ||
199 | l.nop | ||
200 | |||
201 | /* ---[ 0x300: Data Page Fault exception ]------------------------------- */ | ||
202 | |||
203 | EXCEPTION_ENTRY(_data_page_fault_handler) | ||
204 | /* set up parameters for do_page_fault */ | ||
205 | l.addi r3,r1,0 // pt_regs | ||
206 | /* r4 set by EXCEPTION_HANDLE */ // effective address of fault | ||
207 | l.ori r5,r0,0x300 // exception vector | ||
208 | |||
209 | /* | ||
210 | * __PHX__: TODO | ||
211 | * | ||
212 | * all this can be written much simpler. look at | ||
213 | * DTLB miss handler in the CONFIG_GUARD_PROTECTED_CORE part | ||
214 | */ | ||
215 | #ifdef CONFIG_OPENRISC_NO_SPR_SR_DSX | ||
216 | l.lwz r6,PT_PC(r3) // address of an offending insn | ||
217 | l.lwz r6,0(r6) // instruction that caused pf | ||
218 | |||
219 | l.srli r6,r6,26 // check opcode for jump insn | ||
220 | l.sfeqi r6,0 // l.j | ||
221 | l.bf 8f | ||
222 | l.sfeqi r6,1 // l.jal | ||
223 | l.bf 8f | ||
224 | l.sfeqi r6,3 // l.bnf | ||
225 | l.bf 8f | ||
226 | l.sfeqi r6,4 // l.bf | ||
227 | l.bf 8f | ||
228 | l.sfeqi r6,0x11 // l.jr | ||
229 | l.bf 8f | ||
230 | l.sfeqi r6,0x12 // l.jalr | ||
231 | l.bf 8f | ||
232 | |||
233 | l.nop | ||
234 | |||
235 | l.j 9f | ||
236 | l.nop | ||
237 | 8: | ||
238 | |||
239 | l.lwz r6,PT_PC(r3) // address of an offending insn | ||
240 | l.addi r6,r6,4 | ||
241 | l.lwz r6,0(r6) // instruction that caused pf | ||
242 | l.srli r6,r6,26 // get opcode | ||
243 | 9: | ||
244 | |||
245 | #else | ||
246 | |||
247 | l.mfspr r6,r0,SPR_SR // SR | ||
248 | // l.lwz r6,PT_SR(r3) // ESR | ||
249 | l.andi r6,r6,SPR_SR_DSX // check for delay slot exception | ||
250 | l.sfeqi r6,0x1 // exception happened in delay slot | ||
251 | l.bnf 7f | ||
252 | l.lwz r6,PT_PC(r3) // address of an offending insn | ||
253 | |||
254 | l.addi r6,r6,4 // offending insn is in delay slot | ||
255 | 7: | ||
256 | l.lwz r6,0(r6) // instruction that caused pf | ||
257 | l.srli r6,r6,26 // check opcode for write access | ||
258 | #endif | ||
259 | |||
260 | l.sfgeui r6,0x34 // check opcode for write access | ||
261 | l.bnf 1f | ||
262 | l.sfleui r6,0x37 | ||
263 | l.bnf 1f | ||
264 | l.ori r6,r0,0x1 // write access | ||
265 | l.j 2f | ||
266 | l.nop | ||
267 | 1: l.ori r6,r0,0x0 // !write access | ||
268 | 2: | ||
269 | |||
270 | /* call fault.c handler in or32/mm/fault.c */ | ||
271 | l.jal do_page_fault | ||
272 | l.nop | ||
273 | l.j _ret_from_exception | ||
274 | l.nop | ||
275 | |||
276 | /* ---[ 0x400: Insn Page Fault exception ]------------------------------- */ | ||
277 | |||
278 | EXCEPTION_ENTRY(_insn_page_fault_handler) | ||
279 | /* set up parameters for do_page_fault */ | ||
280 | l.addi r3,r1,0 // pt_regs | ||
281 | /* r4 set by EXCEPTION_HANDLE */ // effective address of fault | ||
282 | l.ori r5,r0,0x400 // exception vector | ||
283 | l.ori r6,r0,0x0 // !write access | ||
284 | |||
285 | /* call fault.c handler in or32/mm/fault.c */ | ||
286 | l.jal do_page_fault | ||
287 | l.nop | ||
288 | l.j _ret_from_exception | ||
289 | l.nop | ||
290 | |||
291 | |||
292 | /* ---[ 0x500: Timer exception ]----------------------------------------- */ | ||
293 | |||
294 | EXCEPTION_ENTRY(_timer_handler) | ||
295 | l.jal timer_interrupt | ||
296 | l.addi r3,r1,0 /* pt_regs */ | ||
297 | |||
298 | l.j _ret_from_intr | ||
299 | l.nop | ||
300 | |||
301 | /* ---[ 0x600: Alignment exception ]------------------------------------- */ | ||
302 | |||
303 | EXCEPTION_ENTRY(_alignment_handler) | ||
304 | /* r4: EA of fault (set by EXCEPTION_HANDLE) */ | ||
305 | l.jal do_unaligned_access | ||
306 | l.addi r3,r1,0 /* pt_regs */ | ||
307 | |||
308 | l.j _ret_from_exception | ||
309 | l.nop | ||
310 | |||
311 | #if 0 | ||
312 | EXCEPTION_ENTRY(_aligment_handler) | ||
313 | // l.mfspr r2,r0,SPR_EEAR_BASE /* Load the effective address */ | ||
314 | l.addi r2,r4,0 | ||
315 | // l.mfspr r5,r0,SPR_EPCR_BASE /* Load the insn address */ | ||
316 | l.lwz r5,PT_PC(r1) | ||
317 | |||
318 | l.lwz r3,0(r5) /* Load insn */ | ||
319 | l.srli r4,r3,26 /* Shift left to get the insn opcode */ | ||
320 | |||
321 | l.sfeqi r4,0x00 /* Check if the load/store insn is in delay slot */ | ||
322 | l.bf jmp | ||
323 | l.sfeqi r4,0x01 | ||
324 | l.bf jmp | ||
325 | l.sfeqi r4,0x03 | ||
326 | l.bf jmp | ||
327 | l.sfeqi r4,0x04 | ||
328 | l.bf jmp | ||
329 | l.sfeqi r4,0x11 | ||
330 | l.bf jr | ||
331 | l.sfeqi r4,0x12 | ||
332 | l.bf jr | ||
333 | l.nop | ||
334 | l.j 1f | ||
335 | l.addi r5,r5,4 /* Increment PC to get return insn address */ | ||
336 | |||
337 | jmp: | ||
338 | l.slli r4,r3,6 /* Get the signed extended jump length */ | ||
339 | l.srai r4,r4,4 | ||
340 | |||
341 | l.lwz r3,4(r5) /* Load the real load/store insn */ | ||
342 | |||
343 | l.add r5,r5,r4 /* Calculate jump target address */ | ||
344 | |||
345 | l.j 1f | ||
346 | l.srli r4,r3,26 /* Shift left to get the insn opcode */ | ||
347 | |||
348 | jr: | ||
349 | l.slli r4,r3,9 /* Shift to get the reg nb */ | ||
350 | l.andi r4,r4,0x7c | ||
351 | |||
352 | l.lwz r3,4(r5) /* Load the real load/store insn */ | ||
353 | |||
354 | l.add r4,r4,r1 /* Load the jump register value from the stack */ | ||
355 | l.lwz r5,0(r4) | ||
356 | |||
357 | l.srli r4,r3,26 /* Shift left to get the insn opcode */ | ||
358 | |||
359 | |||
360 | 1: | ||
361 | // l.mtspr r0,r5,SPR_EPCR_BASE | ||
362 | l.sw PT_PC(r1),r5 | ||
363 | |||
364 | l.sfeqi r4,0x26 | ||
365 | l.bf lhs | ||
366 | l.sfeqi r4,0x25 | ||
367 | l.bf lhz | ||
368 | l.sfeqi r4,0x22 | ||
369 | l.bf lws | ||
370 | l.sfeqi r4,0x21 | ||
371 | l.bf lwz | ||
372 | l.sfeqi r4,0x37 | ||
373 | l.bf sh | ||
374 | l.sfeqi r4,0x35 | ||
375 | l.bf sw | ||
376 | l.nop | ||
377 | |||
378 | 1: l.j 1b /* I don't know what to do */ | ||
379 | l.nop | ||
380 | |||
381 | lhs: l.lbs r5,0(r2) | ||
382 | l.slli r5,r5,8 | ||
383 | l.lbz r6,1(r2) | ||
384 | l.or r5,r5,r6 | ||
385 | l.srli r4,r3,19 | ||
386 | l.andi r4,r4,0x7c | ||
387 | l.add r4,r4,r1 | ||
388 | l.j align_end | ||
389 | l.sw 0(r4),r5 | ||
390 | |||
391 | lhz: l.lbz r5,0(r2) | ||
392 | l.slli r5,r5,8 | ||
393 | l.lbz r6,1(r2) | ||
394 | l.or r5,r5,r6 | ||
395 | l.srli r4,r3,19 | ||
396 | l.andi r4,r4,0x7c | ||
397 | l.add r4,r4,r1 | ||
398 | l.j align_end | ||
399 | l.sw 0(r4),r5 | ||
400 | |||
401 | lws: l.lbs r5,0(r2) | ||
402 | l.slli r5,r5,24 | ||
403 | l.lbz r6,1(r2) | ||
404 | l.slli r6,r6,16 | ||
405 | l.or r5,r5,r6 | ||
406 | l.lbz r6,2(r2) | ||
407 | l.slli r6,r6,8 | ||
408 | l.or r5,r5,r6 | ||
409 | l.lbz r6,3(r2) | ||
410 | l.or r5,r5,r6 | ||
411 | l.srli r4,r3,19 | ||
412 | l.andi r4,r4,0x7c | ||
413 | l.add r4,r4,r1 | ||
414 | l.j align_end | ||
415 | l.sw 0(r4),r5 | ||
416 | |||
417 | lwz: l.lbz r5,0(r2) | ||
418 | l.slli r5,r5,24 | ||
419 | l.lbz r6,1(r2) | ||
420 | l.slli r6,r6,16 | ||
421 | l.or r5,r5,r6 | ||
422 | l.lbz r6,2(r2) | ||
423 | l.slli r6,r6,8 | ||
424 | l.or r5,r5,r6 | ||
425 | l.lbz r6,3(r2) | ||
426 | l.or r5,r5,r6 | ||
427 | l.srli r4,r3,19 | ||
428 | l.andi r4,r4,0x7c | ||
429 | l.add r4,r4,r1 | ||
430 | l.j align_end | ||
431 | l.sw 0(r4),r5 | ||
432 | |||
433 | sh: | ||
434 | l.srli r4,r3,9 | ||
435 | l.andi r4,r4,0x7c | ||
436 | l.add r4,r4,r1 | ||
437 | l.lwz r5,0(r4) | ||
438 | l.sb 1(r2),r5 | ||
439 | l.srli r5,r5,8 | ||
440 | l.j align_end | ||
441 | l.sb 0(r2),r5 | ||
442 | |||
443 | sw: | ||
444 | l.srli r4,r3,9 | ||
445 | l.andi r4,r4,0x7c | ||
446 | l.add r4,r4,r1 | ||
447 | l.lwz r5,0(r4) | ||
448 | l.sb 3(r2),r5 | ||
449 | l.srli r5,r5,8 | ||
450 | l.sb 2(r2),r5 | ||
451 | l.srli r5,r5,8 | ||
452 | l.sb 1(r2),r5 | ||
453 | l.srli r5,r5,8 | ||
454 | l.j align_end | ||
455 | l.sb 0(r2),r5 | ||
456 | |||
457 | align_end: | ||
458 | l.j _ret_from_intr | ||
459 | l.nop | ||
460 | #endif | ||
461 | |||
462 | /* ---[ 0x700: Illegal insn exception ]---------------------------------- */ | ||
463 | |||
464 | EXCEPTION_ENTRY(_illegal_instruction_handler) | ||
465 | /* r4: EA of fault (set by EXCEPTION_HANDLE) */ | ||
466 | l.jal do_illegal_instruction | ||
467 | l.addi r3,r1,0 /* pt_regs */ | ||
468 | |||
469 | l.j _ret_from_exception | ||
470 | l.nop | ||
471 | |||
472 | /* ---[ 0x800: External interrupt exception ]---------------------------- */ | ||
473 | |||
474 | EXCEPTION_ENTRY(_external_irq_handler) | ||
475 | #ifdef CONFIG_OPENRISC_ESR_EXCEPTION_BUG_CHECK | ||
476 | l.lwz r4,PT_SR(r1) // were interrupts enabled ? | ||
477 | l.andi r4,r4,SPR_SR_IEE | ||
478 | l.sfeqi r4,0 | ||
479 | l.bnf 1f // ext irq enabled, all ok. | ||
480 | l.nop | ||
481 | |||
482 | l.addi r1,r1,-0x8 | ||
483 | l.movhi r3,hi(42f) | ||
484 | l.ori r3,r3,lo(42f) | ||
485 | l.sw 0x0(r1),r3 | ||
486 | l.jal printk | ||
487 | l.sw 0x4(r1),r4 | ||
488 | l.addi r1,r1,0x8 | ||
489 | |||
490 | .section .rodata, "a" | ||
491 | 42: | ||
492 | .string "\n\rESR interrupt bug: in _external_irq_handler (ESR %x)\n\r" | ||
493 | .align 4 | ||
494 | .previous | ||
495 | |||
496 | l.ori r4,r4,SPR_SR_IEE // fix the bug | ||
497 | // l.sw PT_SR(r1),r4 | ||
498 | 1: | ||
499 | #endif | ||
500 | l.addi r3,r1,0 | ||
501 | l.movhi r8,hi(do_IRQ) | ||
502 | l.ori r8,r8,lo(do_IRQ) | ||
503 | l.jalr r8 | ||
504 | l.nop | ||
505 | l.j _ret_from_intr | ||
506 | l.nop | ||
507 | |||
508 | /* ---[ 0x900: DTLB miss exception ]------------------------------------- */ | ||
509 | |||
510 | |||
511 | /* ---[ 0xa00: ITLB miss exception ]------------------------------------- */ | ||
512 | |||
513 | |||
514 | /* ---[ 0xb00: Range exception ]----------------------------------------- */ | ||
515 | |||
516 | UNHANDLED_EXCEPTION(_vector_0xb00,0xb00) | ||
517 | |||
518 | /* ---[ 0xc00: Syscall exception ]--------------------------------------- */ | ||
519 | |||
520 | /* | ||
521 | * Syscalls are a special type of exception in that they are | ||
522 | * _explicitly_ invoked by userspace and can therefore be | ||
523 | * held to conform to the same ABI as normal functions with | ||
524 | * respect to whether registers are preserved across the call | ||
525 | * or not. | ||
526 | */ | ||
527 | |||
528 | /* Upon syscall entry we just save the callee-saved registers | ||
529 | * and not the call-clobbered ones. | ||
530 | */ | ||
531 | |||
532 | _string_syscall_return: | ||
533 | .string "syscall return %ld \n\r\0" | ||
534 | .align 4 | ||
535 | |||
536 | ENTRY(_sys_call_handler) | ||
537 | /* syscalls run with interrupts enabled */ | ||
538 | ENABLE_INTERRUPTS(r29) // enable interrupts, r29 is temp | ||
539 | |||
540 | /* r1, EPCR, ESR a already saved */ | ||
541 | l.sw PT_GPR2(r1),r2 | ||
542 | /* r3-r8 must be saved because syscall restart relies | ||
543 | * on us being able to restart the syscall args... technically | ||
544 | * they should be clobbered, otherwise | ||
545 | */ | ||
546 | l.sw PT_GPR3(r1),r3 | ||
547 | /* r4 already saved */ | ||
548 | /* r4 holds the EEAR address of the fault, load the original r4 */ | ||
549 | l.lwz r4,PT_GPR4(r1) | ||
550 | l.sw PT_GPR5(r1),r5 | ||
551 | l.sw PT_GPR6(r1),r6 | ||
552 | l.sw PT_GPR7(r1),r7 | ||
553 | l.sw PT_GPR8(r1),r8 | ||
554 | l.sw PT_GPR9(r1),r9 | ||
555 | /* r10 already saved */ | ||
556 | l.sw PT_GPR11(r1),r11 | ||
557 | l.sw PT_ORIG_GPR11(r1),r11 | ||
558 | /* r12,r13 already saved */ | ||
559 | |||
560 | /* r14-r28 (even) aren't touched by the syscall fast path below | ||
561 | * so we don't need to save them. However, the functions that return | ||
562 | * to userspace via a call to switch() DO need to save these because | ||
563 | * switch() effectively clobbers them... saving these registers for | ||
564 | * such functions is handled in their syscall wrappers (see fork, vfork, | ||
565 | * and clone, below). | ||
566 | |||
567 | /* r30 is the only register we clobber in the fast path */ | ||
568 | /* r30 already saved */ | ||
569 | /* l.sw PT_GPR30(r1),r30 */ | ||
570 | /* This is used by do_signal to determine whether to check for | ||
571 | * syscall restart or not */ | ||
572 | l.sw PT_SYSCALLNO(r1),r11 | ||
573 | |||
574 | _syscall_check_trace_enter: | ||
575 | /* If TIF_SYSCALL_TRACE is set, then we want to do syscall tracing */ | ||
576 | l.lwz r30,TI_FLAGS(r10) | ||
577 | l.andi r30,r30,_TIF_SYSCALL_TRACE | ||
578 | l.sfne r30,r0 | ||
579 | l.bf _syscall_trace_enter | ||
580 | l.nop | ||
581 | |||
582 | _syscall_check: | ||
583 | /* Ensure that the syscall number is reasonable */ | ||
584 | l.sfgeui r11,__NR_syscalls | ||
585 | l.bf _syscall_badsys | ||
586 | l.nop | ||
587 | |||
588 | _syscall_call: | ||
589 | l.movhi r29,hi(sys_call_table) | ||
590 | l.ori r29,r29,lo(sys_call_table) | ||
591 | l.slli r11,r11,2 | ||
592 | l.add r29,r29,r11 | ||
593 | l.lwz r29,0(r29) | ||
594 | |||
595 | l.jalr r29 | ||
596 | l.nop | ||
597 | |||
598 | _syscall_return: | ||
599 | /* All syscalls return here... just pay attention to ret_from_fork | ||
600 | * which does it in a round-about way. | ||
601 | */ | ||
602 | l.sw PT_GPR11(r1),r11 // save return value | ||
603 | |||
604 | #if 0 | ||
605 | _syscall_debug: | ||
606 | l.movhi r3,hi(_string_syscall_return) | ||
607 | l.ori r3,r3,lo(_string_syscall_return) | ||
608 | l.ori r27,r0,1 | ||
609 | l.sw -4(r1),r27 | ||
610 | l.sw -8(r1),r11 | ||
611 | l.addi r1,r1,-8 | ||
612 | l.movhi r27,hi(printk) | ||
613 | l.ori r27,r27,lo(printk) | ||
614 | l.jalr r27 | ||
615 | l.nop | ||
616 | l.addi r1,r1,8 | ||
617 | #endif | ||
618 | |||
619 | _syscall_check_trace_leave: | ||
620 | /* r30 is a callee-saved register so this should still hold the | ||
621 | * _TIF_SYSCALL_TRACE flag from _syscall_check_trace_enter above... | ||
622 | * _syscall_trace_leave expects syscall result to be in pt_regs->r11. | ||
623 | */ | ||
624 | l.sfne r30,r0 | ||
625 | l.bf _syscall_trace_leave | ||
626 | l.nop | ||
627 | |||
628 | /* This is where the exception-return code begins... interrupts need to be | ||
629 | * disabled the rest of the way here because we can't afford to miss any | ||
630 | * interrupts that set NEED_RESCHED or SIGNALPENDING... really true? */ | ||
631 | |||
632 | _syscall_check_work: | ||
633 | /* Here we need to disable interrupts */ | ||
634 | DISABLE_INTERRUPTS(r27,r29) | ||
635 | l.lwz r30,TI_FLAGS(r10) | ||
636 | l.andi r30,r30,_TIF_WORK_MASK | ||
637 | l.sfne r30,r0 | ||
638 | |||
639 | l.bnf _syscall_resume_userspace | ||
640 | l.nop | ||
641 | |||
642 | /* Work pending follows a different return path, so we need to | ||
643 | * make sure that all the call-saved registers get into pt_regs | ||
644 | * before branching... | ||
645 | */ | ||
646 | l.sw PT_GPR14(r1),r14 | ||
647 | l.sw PT_GPR16(r1),r16 | ||
648 | l.sw PT_GPR18(r1),r18 | ||
649 | l.sw PT_GPR20(r1),r20 | ||
650 | l.sw PT_GPR22(r1),r22 | ||
651 | l.sw PT_GPR24(r1),r24 | ||
652 | l.sw PT_GPR26(r1),r26 | ||
653 | l.sw PT_GPR28(r1),r28 | ||
654 | |||
655 | /* _work_pending needs to be called with interrupts disabled */ | ||
656 | l.j _work_pending | ||
657 | l.nop | ||
658 | |||
659 | _syscall_resume_userspace: | ||
660 | // ENABLE_INTERRUPTS(r29) | ||
661 | |||
662 | |||
663 | /* This is the hot path for returning to userspace from a syscall. If there's | ||
664 | * work to be done and the branch to _work_pending was taken above, then the | ||
665 | * return to userspace will be done via the normal exception return path... | ||
666 | * that path restores _all_ registers and will overwrite the "clobbered" | ||
667 | * registers with whatever garbage is in pt_regs -- that's OK because those | ||
668 | * registers are clobbered anyway and because the extra work is insignificant | ||
669 | * in the context of the extra work that _work_pending is doing. */ | ||
670 | |||
671 | /* Once again, syscalls are special and only guarantee to preserve the | ||
672 | * same registers as a normal function call */ | ||
673 | |||
674 | /* The assumption here is that the registers r14-r28 (even) are untouched and | ||
675 | * don't need to be restored... be sure that that's really the case! | ||
676 | */ | ||
677 | |||
678 | /* This is still too much... we should only be restoring what we actually | ||
679 | * clobbered... we should even be using 'scratch' (odd) regs above so that | ||
680 | * we don't need to restore anything, hardly... | ||
681 | */ | ||
682 | |||
683 | l.lwz r2,PT_GPR2(r1) | ||
684 | |||
685 | /* Restore args */ | ||
686 | /* r3-r8 are technically clobbered, but syscall restart needs these | ||
687 | * to be restored... | ||
688 | */ | ||
689 | l.lwz r3,PT_GPR3(r1) | ||
690 | l.lwz r4,PT_GPR4(r1) | ||
691 | l.lwz r5,PT_GPR5(r1) | ||
692 | l.lwz r6,PT_GPR6(r1) | ||
693 | l.lwz r7,PT_GPR7(r1) | ||
694 | l.lwz r8,PT_GPR8(r1) | ||
695 | |||
696 | l.lwz r9,PT_GPR9(r1) | ||
697 | l.lwz r10,PT_GPR10(r1) | ||
698 | l.lwz r11,PT_GPR11(r1) | ||
699 | |||
700 | /* r30 is the only register we clobber in the fast path */ | ||
701 | l.lwz r30,PT_GPR30(r1) | ||
702 | |||
703 | /* Here we use r13-r19 (odd) as scratch regs */ | ||
704 | l.lwz r13,PT_PC(r1) | ||
705 | l.lwz r15,PT_SR(r1) | ||
706 | l.lwz r1,PT_SP(r1) | ||
707 | /* Interrupts need to be disabled for setting EPCR and ESR | ||
708 | * so that another interrupt doesn't come in here and clobber | ||
709 | * them before we can use them for our l.rfe */ | ||
710 | DISABLE_INTERRUPTS(r17,r19) | ||
711 | l.mtspr r0,r13,SPR_EPCR_BASE | ||
712 | l.mtspr r0,r15,SPR_ESR_BASE | ||
713 | l.rfe | ||
714 | |||
715 | /* End of hot path! | ||
716 | * Keep the below tracing and error handling out of the hot path... | ||
717 | */ | ||
718 | |||
719 | _syscall_trace_enter: | ||
720 | /* Here we pass pt_regs to do_syscall_trace_enter. Make sure | ||
721 | * that function is really getting all the info it needs as | ||
722 | * pt_regs isn't a complete set of userspace regs, just the | ||
723 | * ones relevant to the syscall... | ||
724 | * | ||
725 | * Note use of delay slot for setting argument. | ||
726 | */ | ||
727 | l.jal do_syscall_trace_enter | ||
728 | l.addi r3,r1,0 | ||
729 | |||
730 | /* Restore arguments (not preserved across do_syscall_trace_enter) | ||
731 | * so that we can do the syscall for real and return to the syscall | ||
732 | * hot path. | ||
733 | */ | ||
734 | l.lwz r11,PT_SYSCALLNO(r1) | ||
735 | l.lwz r3,PT_GPR3(r1) | ||
736 | l.lwz r4,PT_GPR4(r1) | ||
737 | l.lwz r5,PT_GPR5(r1) | ||
738 | l.lwz r6,PT_GPR6(r1) | ||
739 | l.lwz r7,PT_GPR7(r1) | ||
740 | |||
741 | l.j _syscall_check | ||
742 | l.lwz r8,PT_GPR8(r1) | ||
743 | |||
744 | _syscall_trace_leave: | ||
745 | l.jal do_syscall_trace_leave | ||
746 | l.addi r3,r1,0 | ||
747 | |||
748 | l.j _syscall_check_work | ||
749 | l.nop | ||
750 | |||
751 | _syscall_badsys: | ||
752 | /* Here we effectively pretend to have executed an imaginary | ||
753 | * syscall that returns -ENOSYS and then return to the regular | ||
754 | * syscall hot path. | ||
755 | * Note that "return value" is set in the delay slot... | ||
756 | */ | ||
757 | l.j _syscall_return | ||
758 | l.addi r11,r0,-ENOSYS | ||
759 | |||
760 | /******* END SYSCALL HANDLING *******/ | ||
761 | |||
762 | /* ---[ 0xd00: Trap exception ]------------------------------------------ */ | ||
763 | |||
764 | UNHANDLED_EXCEPTION(_vector_0xd00,0xd00) | ||
765 | |||
766 | /* ---[ 0xe00: Trap exception ]------------------------------------------ */ | ||
767 | |||
768 | EXCEPTION_ENTRY(_trap_handler) | ||
769 | /* r4: EA of fault (set by EXCEPTION_HANDLE) */ | ||
770 | l.jal do_trap | ||
771 | l.addi r3,r1,0 /* pt_regs */ | ||
772 | |||
773 | l.j _ret_from_exception | ||
774 | l.nop | ||
775 | |||
776 | /* ---[ 0xf00: Reserved exception ]-------------------------------------- */ | ||
777 | |||
778 | UNHANDLED_EXCEPTION(_vector_0xf00,0xf00) | ||
779 | |||
780 | /* ---[ 0x1000: Reserved exception ]------------------------------------- */ | ||
781 | |||
782 | UNHANDLED_EXCEPTION(_vector_0x1000,0x1000) | ||
783 | |||
784 | /* ---[ 0x1100: Reserved exception ]------------------------------------- */ | ||
785 | |||
786 | UNHANDLED_EXCEPTION(_vector_0x1100,0x1100) | ||
787 | |||
788 | /* ---[ 0x1200: Reserved exception ]------------------------------------- */ | ||
789 | |||
790 | UNHANDLED_EXCEPTION(_vector_0x1200,0x1200) | ||
791 | |||
792 | /* ---[ 0x1300: Reserved exception ]------------------------------------- */ | ||
793 | |||
794 | UNHANDLED_EXCEPTION(_vector_0x1300,0x1300) | ||
795 | |||
796 | /* ---[ 0x1400: Reserved exception ]------------------------------------- */ | ||
797 | |||
798 | UNHANDLED_EXCEPTION(_vector_0x1400,0x1400) | ||
799 | |||
800 | /* ---[ 0x1500: Reserved exception ]------------------------------------- */ | ||
801 | |||
802 | UNHANDLED_EXCEPTION(_vector_0x1500,0x1500) | ||
803 | |||
804 | /* ---[ 0x1600: Reserved exception ]------------------------------------- */ | ||
805 | |||
806 | UNHANDLED_EXCEPTION(_vector_0x1600,0x1600) | ||
807 | |||
808 | /* ---[ 0x1700: Reserved exception ]------------------------------------- */ | ||
809 | |||
810 | UNHANDLED_EXCEPTION(_vector_0x1700,0x1700) | ||
811 | |||
812 | /* ---[ 0x1800: Reserved exception ]------------------------------------- */ | ||
813 | |||
814 | UNHANDLED_EXCEPTION(_vector_0x1800,0x1800) | ||
815 | |||
816 | /* ---[ 0x1900: Reserved exception ]------------------------------------- */ | ||
817 | |||
818 | UNHANDLED_EXCEPTION(_vector_0x1900,0x1900) | ||
819 | |||
820 | /* ---[ 0x1a00: Reserved exception ]------------------------------------- */ | ||
821 | |||
822 | UNHANDLED_EXCEPTION(_vector_0x1a00,0x1a00) | ||
823 | |||
824 | /* ---[ 0x1b00: Reserved exception ]------------------------------------- */ | ||
825 | |||
826 | UNHANDLED_EXCEPTION(_vector_0x1b00,0x1b00) | ||
827 | |||
828 | /* ---[ 0x1c00: Reserved exception ]------------------------------------- */ | ||
829 | |||
830 | UNHANDLED_EXCEPTION(_vector_0x1c00,0x1c00) | ||
831 | |||
832 | /* ---[ 0x1d00: Reserved exception ]------------------------------------- */ | ||
833 | |||
834 | UNHANDLED_EXCEPTION(_vector_0x1d00,0x1d00) | ||
835 | |||
836 | /* ---[ 0x1e00: Reserved exception ]------------------------------------- */ | ||
837 | |||
838 | UNHANDLED_EXCEPTION(_vector_0x1e00,0x1e00) | ||
839 | |||
840 | /* ---[ 0x1f00: Reserved exception ]------------------------------------- */ | ||
841 | |||
842 | UNHANDLED_EXCEPTION(_vector_0x1f00,0x1f00) | ||
843 | |||
844 | /* ========================================================[ return ] === */ | ||
845 | |||
846 | _work_pending: | ||
847 | /* | ||
848 | * if (current_thread_info->flags & _TIF_NEED_RESCHED) | ||
849 | * schedule(); | ||
850 | */ | ||
851 | l.lwz r5,TI_FLAGS(r10) | ||
852 | l.andi r3,r5,_TIF_NEED_RESCHED | ||
853 | l.sfnei r3,0 | ||
854 | l.bnf _work_notifysig | ||
855 | l.nop | ||
856 | l.jal schedule | ||
857 | l.nop | ||
858 | l.j _resume_userspace | ||
859 | l.nop | ||
860 | |||
861 | /* Handle pending signals and notify-resume requests. | ||
862 | * do_notify_resume must be passed the latest pushed pt_regs, not | ||
863 | * necessarily the "userspace" ones. Also, pt_regs->syscallno | ||
864 | * must be set so that the syscall restart functionality works. | ||
865 | */ | ||
866 | _work_notifysig: | ||
867 | l.jal do_notify_resume | ||
868 | l.ori r3,r1,0 /* pt_regs */ | ||
869 | |||
870 | _resume_userspace: | ||
871 | DISABLE_INTERRUPTS(r3,r4) | ||
872 | l.lwz r3,TI_FLAGS(r10) | ||
873 | l.andi r3,r3,_TIF_WORK_MASK | ||
874 | l.sfnei r3,0 | ||
875 | l.bf _work_pending | ||
876 | l.nop | ||
877 | |||
878 | _restore_all: | ||
879 | RESTORE_ALL | ||
880 | /* This returns to userspace code */ | ||
881 | |||
882 | |||
883 | ENTRY(_ret_from_intr) | ||
884 | ENTRY(_ret_from_exception) | ||
885 | l.lwz r4,PT_SR(r1) | ||
886 | l.andi r3,r4,SPR_SR_SM | ||
887 | l.sfeqi r3,0 | ||
888 | l.bnf _restore_all | ||
889 | l.nop | ||
890 | l.j _resume_userspace | ||
891 | l.nop | ||
892 | |||
893 | ENTRY(ret_from_fork) | ||
894 | l.jal schedule_tail | ||
895 | l.nop | ||
896 | |||
897 | /* _syscall_return expects r11 to contain the return value */ | ||
898 | l.lwz r11,PT_GPR11(r1) | ||
899 | |||
900 | /* The syscall fast path return expects call-saved registers | ||
901 | * r12-r28 to be untouched, so we restore them here as they | ||
902 | * will have been effectively clobbered when arriving here | ||
903 | * via the call to switch() | ||
904 | */ | ||
905 | l.lwz r12,PT_GPR12(r1) | ||
906 | l.lwz r14,PT_GPR14(r1) | ||
907 | l.lwz r16,PT_GPR16(r1) | ||
908 | l.lwz r18,PT_GPR18(r1) | ||
909 | l.lwz r20,PT_GPR20(r1) | ||
910 | l.lwz r22,PT_GPR22(r1) | ||
911 | l.lwz r24,PT_GPR24(r1) | ||
912 | l.lwz r26,PT_GPR26(r1) | ||
913 | l.lwz r28,PT_GPR28(r1) | ||
914 | |||
915 | l.j _syscall_return | ||
916 | l.nop | ||
917 | |||
918 | /* Since syscalls don't save call-clobbered registers, the args to | ||
919 | * kernel_thread_helper will need to be passed through callee-saved | ||
920 | * registers and copied to the parameter registers when the thread | ||
921 | * begins running. | ||
922 | * | ||
923 | * See arch/openrisc/kernel/process.c: | ||
924 | * The args are passed as follows: | ||
925 | * arg1 (r3) : passed in r20 | ||
926 | * arg2 (r4) : passed in r22 | ||
927 | */ | ||
928 | |||
929 | ENTRY(_kernel_thread_helper) | ||
930 | l.or r3,r20,r0 | ||
931 | l.or r4,r22,r0 | ||
932 | l.movhi r31,hi(kernel_thread_helper) | ||
933 | l.ori r31,r31,lo(kernel_thread_helper) | ||
934 | l.jr r31 | ||
935 | l.nop | ||
936 | |||
937 | |||
938 | /* ========================================================[ switch ] === */ | ||
939 | |||
940 | /* | ||
941 | * This routine switches between two different tasks. The process | ||
942 | * state of one is saved on its kernel stack. Then the state | ||
943 | * of the other is restored from its kernel stack. The memory | ||
944 | * management hardware is updated to the second process's state. | ||
945 | * Finally, we can return to the second process, via the 'return'. | ||
946 | * | ||
947 | * Note: there are two ways to get to the "going out" portion | ||
948 | * of this code; either by coming in via the entry (_switch) | ||
949 | * or via "fork" which must set up an environment equivalent | ||
950 | * to the "_switch" path. If you change this (or in particular, the | ||
951 | * SAVE_REGS macro), you'll have to change the fork code also. | ||
952 | */ | ||
953 | |||
954 | |||
955 | /* _switch MUST never lie on a page boundary, because it runs from | ||
956 | * effective addresses and being interrupted by an iTLB miss would kill it. | ||
957 | * A dTLB miss seems to never occur in the bad place since data accesses | ||
958 | * are from task structures which are always page aligned. | ||
959 | * | ||
960 | * The problem happens in RESTORE_ALL where we first set the EPCR | ||
961 | * register, then load the previous register values and only at the end call | ||
962 | * the l.rfe instruction. If we get a TLB miss in between, the EPCR register gets | ||
963 | * garbled and we end up calling l.rfe with the wrong EPCR. (same probably | ||
964 | * holds for ESR) | ||
965 | * | ||
966 | * To avoid these problems it is sufficient to align _switch to | ||
967 | * some nice round number smaller than its size... | ||
968 | */ | ||
969 | |||
970 | /* ABI rules apply here... we either enter _switch via schedule() or via | ||
971 | * an imaginary call to which we shall return at return_from_fork. Either | ||
972 | * way, we are a function call and only need to preserve the callee-saved | ||
973 | * registers when we return. As such, we don't need to save the registers | ||
974 | * on the stack that we won't be returning as they were... | ||
975 | */ | ||
976 | |||
977 | .align 0x400 | ||
978 | ENTRY(_switch) | ||
979 | /* We don't store SR as _switch only gets called in a context where | ||
980 | * the SR will be the same going in and coming out... */ | ||
981 | |||
982 | /* Set up new pt_regs struct for saving task state */ | ||
983 | l.addi r1,r1,-(INT_FRAME_SIZE) | ||
984 | |||
985 | /* No need to store r1/PT_SP as it goes into KSP below */ | ||
986 | l.sw PT_GPR2(r1),r2 | ||
987 | l.sw PT_GPR9(r1),r9 | ||
988 | /* This is wrong, r12 shouldn't be here... but GCC is broken for the time being | ||
989 | * and expects r12 to be callee-saved... */ | ||
990 | l.sw PT_GPR12(r1),r12 | ||
991 | l.sw PT_GPR14(r1),r14 | ||
992 | l.sw PT_GPR16(r1),r16 | ||
993 | l.sw PT_GPR18(r1),r18 | ||
994 | l.sw PT_GPR20(r1),r20 | ||
995 | l.sw PT_GPR22(r1),r22 | ||
996 | l.sw PT_GPR24(r1),r24 | ||
997 | l.sw PT_GPR26(r1),r26 | ||
998 | l.sw PT_GPR28(r1),r28 | ||
999 | l.sw PT_GPR30(r1),r30 | ||
1000 | |||
1001 | l.addi r11,r10,0 /* Save old 'current' to 'last' return value*/ | ||
1002 | |||
1003 | /* We use thread_info->ksp for storing the address of the above | ||
1004 | * structure so that we can get back to it later... we don't want | ||
1005 | * to lose the value of thread_info->ksp, though, so store it as | ||
1006 | * pt_regs->sp so that we can easily restore it when we are made | ||
1007 | * live again... | ||
1008 | */ | ||
1009 | |||
1010 | /* Save the old value of thread_info->ksp as pt_regs->sp */ | ||
1011 | l.lwz r29,TI_KSP(r10) | ||
1012 | l.sw PT_SP(r1),r29 | ||
1013 | |||
1014 | /* Swap kernel stack pointers */ | ||
1015 | l.sw TI_KSP(r10),r1 /* Save old stack pointer */ | ||
1016 | l.or r10,r4,r0 /* Set up new current_thread_info */ | ||
1017 | l.lwz r1,TI_KSP(r10) /* Load new stack pointer */ | ||
1018 | |||
1019 | /* Restore the old value of thread_info->ksp */ | ||
1020 | l.lwz r29,PT_SP(r1) | ||
1021 | l.sw TI_KSP(r10),r29 | ||
1022 | |||
1023 | /* ...and restore the registers, except r11 because the return value | ||
1024 | * has already been set above. | ||
1025 | */ | ||
1026 | l.lwz r2,PT_GPR2(r1) | ||
1027 | l.lwz r9,PT_GPR9(r1) | ||
1028 | /* No need to restore r10 */ | ||
1029 | /* ...and do not restore r11 */ | ||
1030 | |||
1031 | /* This is wrong, r12 shouldn't be here... but GCC is broken for the time being | ||
1032 | * and expects r12 to be callee-saved... */ | ||
1033 | l.lwz r12,PT_GPR12(r1) | ||
1034 | l.lwz r14,PT_GPR14(r1) | ||
1035 | l.lwz r16,PT_GPR16(r1) | ||
1036 | l.lwz r18,PT_GPR18(r1) | ||
1037 | l.lwz r20,PT_GPR20(r1) | ||
1038 | l.lwz r22,PT_GPR22(r1) | ||
1039 | l.lwz r24,PT_GPR24(r1) | ||
1040 | l.lwz r26,PT_GPR26(r1) | ||
1041 | l.lwz r28,PT_GPR28(r1) | ||
1042 | l.lwz r30,PT_GPR30(r1) | ||
1043 | |||
1044 | /* Unwind stack to pre-switch state */ | ||
1045 | l.addi r1,r1,(INT_FRAME_SIZE) | ||
1046 | |||
1047 | /* Return via the link-register back to where we 'came from', where that can be | ||
1048 | * either schedule() or return_from_fork()... */ | ||
1049 | l.jr r9 | ||
1050 | l.nop | ||
1051 | |||
1052 | /* ==================================================================== */ | ||
1053 | |||
1054 | /* These all use the delay slot for setting the argument register, so the | ||
1055 | * jump is always happening after the l.addi instruction. | ||
1056 | * | ||
1057 | * These are all just wrappers that don't touch the link-register r9, so the | ||
1058 | * return from the "real" syscall function will return back to the syscall | ||
1059 | * code that did the l.jal that brought us here. | ||
1060 | */ | ||
1061 | |||
1062 | /* fork requires that we save all the callee-saved registers because they | ||
1063 | * are all effectively clobbered by the call to _switch. Here we store | ||
1064 | * all the registers that aren't touched by the syscall fast path and thus | ||
1065 | * weren't saved there. | ||
1066 | */ | ||
1067 | |||
1068 | _fork_save_extra_regs_and_call: | ||
1069 | l.sw PT_GPR14(r1),r14 | ||
1070 | l.sw PT_GPR16(r1),r16 | ||
1071 | l.sw PT_GPR18(r1),r18 | ||
1072 | l.sw PT_GPR20(r1),r20 | ||
1073 | l.sw PT_GPR22(r1),r22 | ||
1074 | l.sw PT_GPR24(r1),r24 | ||
1075 | l.sw PT_GPR26(r1),r26 | ||
1076 | l.jr r29 | ||
1077 | l.sw PT_GPR28(r1),r28 | ||
1078 | |||
1079 | ENTRY(sys_clone) | ||
1080 | l.movhi r29,hi(_sys_clone) | ||
1081 | l.ori r29,r29,lo(_sys_clone) | ||
1082 | l.j _fork_save_extra_regs_and_call | ||
1083 | l.addi r7,r1,0 | ||
1084 | |||
1085 | ENTRY(sys_fork) | ||
1086 | l.movhi r29,hi(_sys_fork) | ||
1087 | l.ori r29,r29,lo(_sys_fork) | ||
1088 | l.j _fork_save_extra_regs_and_call | ||
1089 | l.addi r3,r1,0 | ||
1090 | |||
1091 | ENTRY(sys_execve) | ||
1092 | l.j _sys_execve | ||
1093 | l.addi r6,r1,0 | ||
1094 | |||
1095 | ENTRY(sys_sigaltstack) | ||
1096 | l.j _sys_sigaltstack | ||
1097 | l.addi r5,r1,0 | ||
1098 | |||
1099 | ENTRY(sys_rt_sigreturn) | ||
1100 | l.j _sys_rt_sigreturn | ||
1101 | l.addi r3,r1,0 | ||
1102 | |||
1103 | /* This is a catch-all syscall for atomic instructions for the OpenRISC 1000. | ||
1104 | * The function takes a variable number of parameters depending on which | ||
1105 | * particular flavour of atomic you want... parameter 1 is a flag identifying | ||
1106 | * the atomic in question. Currently, this function implements the | ||
1107 | * following variants: | ||
1108 | * | ||
1109 | * XCHG: | ||
1110 | * @flag: 1 | ||
1111 | * @ptr1: | ||
1112 | * @ptr2: | ||
1113 | * Atomically exchange the values in pointers 1 and 2. | ||
1114 | * | ||
1115 | */ | ||
1116 | |||
1117 | ENTRY(sys_or1k_atomic) | ||
1118 | /* FIXME: This ignores r3 and always does an XCHG */ | ||
1119 | DISABLE_INTERRUPTS(r17,r19) | ||
1120 | l.lwz r30,0(r4) | ||
1121 | l.lwz r28,0(r5) | ||
1122 | l.sw 0(r4),r28 | ||
1123 | l.sw 0(r5),r30 | ||
1124 | ENABLE_INTERRUPTS(r17) | ||
1125 | l.jr r9 | ||
1126 | l.or r11,r0,r0 | ||
1127 | |||
1128 | /* ============================================================[ EOF ]=== */ | ||
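
For the sys_or1k_atomic entry point above, the comment documents only the XCHG flavour (flag 1: swap the words behind two pointers with interrupts disabled). Purely as an illustration of how userspace might reach it, a hedged sketch follows; the syscall number macro __NR_or1k_atomic is assumed to come from the port's unistd.h and is not defined in this diff.

/* Hypothetical userspace wrapper for the or1k atomic syscall (XCHG only).
 * The syscall number macro __NR_or1k_atomic is assumed here. */
#include <unistd.h>
#include <sys/syscall.h>

static long or1k_atomic_xchg(unsigned long *a, unsigned long *b)
{
	/* flag 1 selects XCHG: the handler swaps *a and *b atomically */
	return syscall(__NR_or1k_atomic, 1, a, b);
}
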
diff --git a/arch/openrisc/kernel/head.S b/arch/openrisc/kernel/head.S
new file mode 100644
index 00000000000..c75018d2264
--- /dev/null
+++ b/arch/openrisc/kernel/head.S
@@ -0,0 +1,1607 @@
1 | /* | ||
2 | * OpenRISC head.S | ||
3 | * | ||
4 | * Linux architectural port borrowing liberally from similar works of | ||
5 | * others. All original copyrights apply as per the original source | ||
6 | * declaration. | ||
7 | * | ||
8 | * Modifications for the OpenRISC architecture: | ||
9 | * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> | ||
10 | * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or | ||
13 | * modify it under the terms of the GNU General Public License | ||
14 | * as published by the Free Software Foundation; either version | ||
15 | * 2 of the License, or (at your option) any later version. | ||
16 | */ | ||
17 | |||
18 | #include <linux/linkage.h> | ||
19 | #include <linux/threads.h> | ||
20 | #include <linux/errno.h> | ||
21 | #include <linux/init.h> | ||
22 | #include <asm/processor.h> | ||
23 | #include <asm/page.h> | ||
24 | #include <asm/mmu.h> | ||
25 | #include <asm/pgtable.h> | ||
26 | #include <asm/cache.h> | ||
27 | #include <asm/spr_defs.h> | ||
28 | #include <asm/asm-offsets.h> | ||
29 | |||
30 | #define tophys(rd,rs) \ | ||
31 | l.movhi rd,hi(-KERNELBASE) ;\ | ||
32 | l.add rd,rd,rs | ||
33 | |||
34 | #define CLEAR_GPR(gpr) \ | ||
35 | l.or gpr,r0,r0 | ||
36 | |||
37 | #define LOAD_SYMBOL_2_GPR(gpr,symbol) \ | ||
38 | l.movhi gpr,hi(symbol) ;\ | ||
39 | l.ori gpr,gpr,lo(symbol) | ||
40 | |||
41 | |||
42 | #define UART_BASE_ADD 0x90000000 | ||
43 | |||
44 | #define EXCEPTION_SR (SPR_SR_DME | SPR_SR_IME | SPR_SR_DCE | SPR_SR_ICE | SPR_SR_SM) | ||
45 | #define SYSCALL_SR (SPR_SR_DME | SPR_SR_IME | SPR_SR_DCE | SPR_SR_ICE | SPR_SR_IEE | SPR_SR_TEE | SPR_SR_SM) | ||
46 | |||
47 | /* ============================================[ tmp store locations ]=== */ | ||
48 | |||
49 | /* | ||
50 | * emergency_print temporary stores | ||
51 | */ | ||
52 | #define EMERGENCY_PRINT_STORE_GPR4 l.sw 0x20(r0),r4 | ||
53 | #define EMERGENCY_PRINT_LOAD_GPR4 l.lwz r4,0x20(r0) | ||
54 | |||
55 | #define EMERGENCY_PRINT_STORE_GPR5 l.sw 0x24(r0),r5 | ||
56 | #define EMERGENCY_PRINT_LOAD_GPR5 l.lwz r5,0x24(r0) | ||
57 | |||
58 | #define EMERGENCY_PRINT_STORE_GPR6 l.sw 0x28(r0),r6 | ||
59 | #define EMERGENCY_PRINT_LOAD_GPR6 l.lwz r6,0x28(r0) | ||
60 | |||
61 | #define EMERGENCY_PRINT_STORE_GPR7 l.sw 0x2c(r0),r7 | ||
62 | #define EMERGENCY_PRINT_LOAD_GPR7 l.lwz r7,0x2c(r0) | ||
63 | |||
64 | #define EMERGENCY_PRINT_STORE_GPR8 l.sw 0x30(r0),r8 | ||
65 | #define EMERGENCY_PRINT_LOAD_GPR8 l.lwz r8,0x30(r0) | ||
66 | |||
67 | #define EMERGENCY_PRINT_STORE_GPR9 l.sw 0x34(r0),r9 | ||
68 | #define EMERGENCY_PRINT_LOAD_GPR9 l.lwz r9,0x34(r0) | ||
69 | |||
70 | |||
71 | /* | ||
72 | * TLB miss handlers temporary stores | ||
73 | */ | ||
74 | #define EXCEPTION_STORE_GPR9 l.sw 0x10(r0),r9 | ||
75 | #define EXCEPTION_LOAD_GPR9 l.lwz r9,0x10(r0) | ||
76 | |||
77 | #define EXCEPTION_STORE_GPR2 l.sw 0x64(r0),r2 | ||
78 | #define EXCEPTION_LOAD_GPR2 l.lwz r2,0x64(r0) | ||
79 | |||
80 | #define EXCEPTION_STORE_GPR3 l.sw 0x68(r0),r3 | ||
81 | #define EXCEPTION_LOAD_GPR3 l.lwz r3,0x68(r0) | ||
82 | |||
83 | #define EXCEPTION_STORE_GPR4 l.sw 0x6c(r0),r4 | ||
84 | #define EXCEPTION_LOAD_GPR4 l.lwz r4,0x6c(r0) | ||
85 | |||
86 | #define EXCEPTION_STORE_GPR5 l.sw 0x70(r0),r5 | ||
87 | #define EXCEPTION_LOAD_GPR5 l.lwz r5,0x70(r0) | ||
88 | |||
89 | #define EXCEPTION_STORE_GPR6 l.sw 0x74(r0),r6 | ||
90 | #define EXCEPTION_LOAD_GPR6 l.lwz r6,0x74(r0) | ||
91 | |||
92 | |||
93 | /* | ||
94 | * EXCEPTION_HANDLE temporary stores | ||
95 | */ | ||
96 | |||
97 | #define EXCEPTION_T_STORE_GPR30 l.sw 0x78(r0),r30 | ||
98 | #define EXCEPTION_T_LOAD_GPR30(reg) l.lwz reg,0x78(r0) | ||
99 | |||
100 | #define EXCEPTION_T_STORE_GPR10 l.sw 0x7c(r0),r10 | ||
101 | #define EXCEPTION_T_LOAD_GPR10(reg) l.lwz reg,0x7c(r0) | ||
102 | |||
103 | #define EXCEPTION_T_STORE_SP l.sw 0x80(r0),r1 | ||
104 | #define EXCEPTION_T_LOAD_SP(reg) l.lwz reg,0x80(r0) | ||
105 | |||
106 | /* | ||
107 | * For UNHANDLED_EXCEPTION | ||
108 | */ | ||
109 | |||
110 | #define EXCEPTION_T_STORE_GPR31 l.sw 0x84(r0),r31 | ||
111 | #define EXCEPTION_T_LOAD_GPR31(reg) l.lwz reg,0x84(r0) | ||
112 | |||
113 | /* =========================================================[ macros ]=== */ | ||
114 | |||
115 | |||
116 | #define GET_CURRENT_PGD(reg,t1) \ | ||
117 | LOAD_SYMBOL_2_GPR(reg,current_pgd) ;\ | ||
118 | tophys (t1,reg) ;\ | ||
119 | l.lwz reg,0(t1) | ||
120 | |||
121 | |||
122 | /* | ||
123 | * DSCR: this is a common hook for handling exceptions. it will save | ||
124 | * the needed registers, set up stack and pointer to current | ||
125 | * then jump to the handler while enabling MMU | ||
126 | * | ||
127 | * PRMS: handler - a function to jump to. it has to save the | ||
128 | * remaining registers to kernel stack, call | ||
129 | * appropriate arch-independent exception handler | ||
130 | * and finally jump to ret_from_except | ||
131 | * | ||
132 | * PREQ: unchanged state from the time exception happened | ||
133 | * | ||
134 | * POST: SAVED the following registers' original values | ||
135 | * to the newly created exception frame pointed to by r1 | ||
136 | * | ||
137 | * r1 - ksp pointing to the new (exception) frame | ||
138 | * r4 - EEAR exception EA | ||
139 | * r10 - current pointing to current_thread_info struct | ||
140 | * r12 - syscall 0, since we didn't come from syscall | ||
141 | * r13 - temp it actually contains new SR, not needed anymore | ||
142 | * r31 - handler address of the handler we'll jump to | ||
143 | * | ||
144 | * handler has to save remaining registers to the exception | ||
145 | * ksp frame *before* tainting them! | ||
146 | * | ||
147 | * NOTE: this function is not reentrant per se. reentrancy is guaranteed | ||
148 | * by the processor disabling all exceptions/interrupts when an | ||
149 | * exception occurs. | ||
150 | * | ||
151 | * OPTM: no need to make it so wasteful to extract ksp when in user mode | ||
152 | */ | ||
153 | |||
154 | #define EXCEPTION_HANDLE(handler) \ | ||
155 | EXCEPTION_T_STORE_GPR30 ;\ | ||
156 | l.mfspr r30,r0,SPR_ESR_BASE ;\ | ||
157 | l.andi r30,r30,SPR_SR_SM ;\ | ||
158 | l.sfeqi r30,0 ;\ | ||
159 | EXCEPTION_T_STORE_GPR10 ;\ | ||
160 | l.bnf 2f /* kernel_mode */ ;\ | ||
161 | EXCEPTION_T_STORE_SP /* delay slot */ ;\ | ||
162 | 1: /* user_mode: */ ;\ | ||
163 | LOAD_SYMBOL_2_GPR(r1,current_thread_info_set) ;\ | ||
164 | tophys (r30,r1) ;\ | ||
165 | /* r10: current_thread_info */ ;\ | ||
166 | l.lwz r10,0(r30) ;\ | ||
167 | tophys (r30,r10) ;\ | ||
168 | l.lwz r1,(TI_KSP)(r30) ;\ | ||
169 | /* fall through */ ;\ | ||
170 | 2: /* kernel_mode: */ ;\ | ||
171 | /* create new stack frame, save only needed gprs */ ;\ | ||
172 | /* r1: KSP, r10: current, r4: EEAR, r31: __pa(KSP) */ ;\ | ||
173 | /* r12: temp, syscall indicator */ ;\ | ||
174 | l.addi r1,r1,-(INT_FRAME_SIZE) ;\ | ||
175 | /* r1 is KSP, r30 is __pa(KSP) */ ;\ | ||
176 | tophys (r30,r1) ;\ | ||
177 | l.sw PT_GPR12(r30),r12 ;\ | ||
178 | l.mfspr r12,r0,SPR_EPCR_BASE ;\ | ||
179 | l.sw PT_PC(r30),r12 ;\ | ||
180 | l.mfspr r12,r0,SPR_ESR_BASE ;\ | ||
181 | l.sw PT_SR(r30),r12 ;\ | ||
182 | /* save r30 */ ;\ | ||
183 | EXCEPTION_T_LOAD_GPR30(r12) ;\ | ||
184 | l.sw PT_GPR30(r30),r12 ;\ | ||
185 | /* save r10 as was prior to exception */ ;\ | ||
186 | EXCEPTION_T_LOAD_GPR10(r12) ;\ | ||
187 | l.sw PT_GPR10(r30),r12 ;\ | ||
188 | /* save PT_SP as was prior to exception */ ;\ | ||
189 | EXCEPTION_T_LOAD_SP(r12) ;\ | ||
190 | l.sw PT_SP(r30),r12 ;\ | ||
191 | /* save exception r4, set r4 = EA */ ;\ | ||
192 | l.sw PT_GPR4(r30),r4 ;\ | ||
193 | l.mfspr r4,r0,SPR_EEAR_BASE ;\ | ||
194 | /* r12 == 1 if we come from syscall */ ;\ | ||
195 | CLEAR_GPR(r12) ;\ | ||
196 | /* ----- turn on MMU ----- */ ;\ | ||
197 | l.ori r30,r0,(EXCEPTION_SR) ;\ | ||
198 | l.mtspr r0,r30,SPR_ESR_BASE ;\ | ||
199 | /* r30: EA address of handler */ ;\ | ||
200 | LOAD_SYMBOL_2_GPR(r30,handler) ;\ | ||
201 | l.mtspr r0,r30,SPR_EPCR_BASE ;\ | ||
202 | l.rfe | ||
203 | |||
204 | /* | ||
205 | * this doesn't work | ||
206 | * | ||
207 | * | ||
208 | * #ifdef CONFIG_JUMP_UPON_UNHANDLED_EXCEPTION | ||
209 | * #define UNHANDLED_EXCEPTION(handler) \ | ||
210 | * l.ori r3,r0,0x1 ;\ | ||
211 | * l.mtspr r0,r3,SPR_SR ;\ | ||
212 | * l.movhi r3,hi(0xf0000100) ;\ | ||
213 | * l.ori r3,r3,lo(0xf0000100) ;\ | ||
214 | * l.jr r3 ;\ | ||
215 | * l.nop 1 | ||
216 | * | ||
217 | * #endif | ||
218 | */ | ||
219 | |||
220 | /* DSCR: this is the same as EXCEPTION_HANDLE(), we are just | ||
221 | * a bit more careful (in case the PT_SP or current pointer is | ||
222 | * corrupted) and set them up from 'current_set' | ||
223 | * | ||
224 | */ | ||
225 | #define UNHANDLED_EXCEPTION(handler) \ | ||
226 | EXCEPTION_T_STORE_GPR31 ;\ | ||
227 | EXCEPTION_T_STORE_GPR10 ;\ | ||
228 | EXCEPTION_T_STORE_SP ;\ | ||
229 | /* temporary store r3, r9 into r1, r10 */ ;\ | ||
230 | l.addi r1,r3,0x0 ;\ | ||
231 | l.addi r10,r9,0x0 ;\ | ||
232 | /* the string referenced by r3 must be low enough */ ;\ | ||
233 | l.jal _emergency_print ;\ | ||
234 | l.ori r3,r0,lo(_string_unhandled_exception) ;\ | ||
235 | l.mfspr r3,r0,SPR_NPC ;\ | ||
236 | l.jal _emergency_print_nr ;\ | ||
237 | l.andi r3,r3,0x1f00 ;\ | ||
238 | /* the string referenced by r3 must be low enough */ ;\ | ||
239 | l.jal _emergency_print ;\ | ||
240 | l.ori r3,r0,lo(_string_epc_prefix) ;\ | ||
241 | l.jal _emergency_print_nr ;\ | ||
242 | l.mfspr r3,r0,SPR_EPCR_BASE ;\ | ||
243 | l.jal _emergency_print ;\ | ||
244 | l.ori r3,r0,lo(_string_nl) ;\ | ||
245 | /* end of printing */ ;\ | ||
246 | l.addi r3,r1,0x0 ;\ | ||
247 | l.addi r9,r10,0x0 ;\ | ||
248 | /* extract current, ksp from current_set */ ;\ | ||
249 | LOAD_SYMBOL_2_GPR(r1,_unhandled_stack_top) ;\ | ||
250 | LOAD_SYMBOL_2_GPR(r10,init_thread_union) ;\ | ||
251 | /* create new stack frame, save only needed gprs */ ;\ | ||
252 | /* r1: KSP, r10: current, r31: __pa(KSP) */ ;\ | ||
253 | /* r12: temp, syscall indicator, r13 temp */ ;\ | ||
254 | l.addi r1,r1,-(INT_FRAME_SIZE) ;\ | ||
255 | /* r1 is KSP, r31 is __pa(KSP) */ ;\ | ||
256 | tophys (r31,r1) ;\ | ||
257 | l.sw PT_GPR12(r31),r12 ;\ | ||
258 | l.mfspr r12,r0,SPR_EPCR_BASE ;\ | ||
259 | l.sw PT_PC(r31),r12 ;\ | ||
260 | l.mfspr r12,r0,SPR_ESR_BASE ;\ | ||
261 | l.sw PT_SR(r31),r12 ;\ | ||
262 | /* save r31 */ ;\ | ||
263 | EXCEPTION_T_LOAD_GPR31(r12) ;\ | ||
264 | l.sw PT_GPR31(r31),r12 ;\ | ||
265 | /* save r10 as was prior to exception */ ;\ | ||
266 | EXCEPTION_T_LOAD_GPR10(r12) ;\ | ||
267 | l.sw PT_GPR10(r31),r12 ;\ | ||
268 | /* save PT_SP as was prior to exception */ ;\ | ||
269 | EXCEPTION_T_LOAD_SP(r12) ;\ | ||
270 | l.sw PT_SP(r31),r12 ;\ | ||
271 | l.sw PT_GPR13(r31),r13 ;\ | ||
272 | /* --> */ ;\ | ||
273 | /* save exception r4, set r4 = EA */ ;\ | ||
274 | l.sw PT_GPR4(r31),r4 ;\ | ||
275 | l.mfspr r4,r0,SPR_EEAR_BASE ;\ | ||
276 | /* r12 == 1 if we come from syscall */ ;\ | ||
277 | CLEAR_GPR(r12) ;\ | ||
278 | /* ----- play a MMU trick ----- */ ;\ | ||
279 | l.ori r31,r0,(EXCEPTION_SR) ;\ | ||
280 | l.mtspr r0,r31,SPR_ESR_BASE ;\ | ||
281 | /* r31: EA address of handler */ ;\ | ||
282 | LOAD_SYMBOL_2_GPR(r31,handler) ;\ | ||
283 | l.mtspr r0,r31,SPR_EPCR_BASE ;\ | ||
284 | l.rfe | ||
285 | |||
286 | /* =====================================================[ exceptions] === */ | ||
287 | |||
288 | /* ---[ 0x100: RESET exception ]----------------------------------------- */ | ||
289 | .org 0x100 | ||
290 | /* Jump to .init code at _start which lives in the .head section | ||
291 | * and will be discarded after boot. | ||
292 | */ | ||
293 | LOAD_SYMBOL_2_GPR(r4, _start) | ||
294 | tophys (r3,r4) /* MMU disabled */ | ||
295 | l.jr r3 | ||
296 | l.nop | ||
297 | |||
298 | /* ---[ 0x200: BUS exception ]------------------------------------------- */ | ||
299 | .org 0x200 | ||
300 | _dispatch_bus_fault: | ||
301 | EXCEPTION_HANDLE(_bus_fault_handler) | ||
302 | |||
303 | /* ---[ 0x300: Data Page Fault exception ]------------------------------- */ | ||
304 | .org 0x300 | ||
305 | _dispatch_do_dpage_fault: | ||
306 | // totally disable timer interrupt | ||
307 | // l.mtspr r0,r0,SPR_TTMR | ||
308 | // DEBUG_TLB_PROBE(0x300) | ||
309 | // EXCEPTION_DEBUG_VALUE_ER_ENABLED(0x300) | ||
310 | EXCEPTION_HANDLE(_data_page_fault_handler) | ||
311 | |||
312 | /* ---[ 0x400: Insn Page Fault exception ]------------------------------- */ | ||
313 | .org 0x400 | ||
314 | _dispatch_do_ipage_fault: | ||
315 | // totally disable timer interrupt | ||
316 | // l.mtspr r0,r0,SPR_TTMR | ||
317 | // DEBUG_TLB_PROBE(0x400) | ||
318 | // EXCEPTION_DEBUG_VALUE_ER_ENABLED(0x400) | ||
319 | EXCEPTION_HANDLE(_insn_page_fault_handler) | ||
320 | |||
321 | /* ---[ 0x500: Timer exception ]----------------------------------------- */ | ||
322 | .org 0x500 | ||
323 | EXCEPTION_HANDLE(_timer_handler) | ||
324 | |||
325 | /* ---[ 0x600: Alignment exception ]------------------------------------- */ | ||
326 | .org 0x600 | ||
327 | EXCEPTION_HANDLE(_alignment_handler) | ||
328 | |||
329 | /* ---[ 0x700: Illegal insn exception ]---------------------------------- */ | ||
330 | .org 0x700 | ||
331 | EXCEPTION_HANDLE(_illegal_instruction_handler) | ||
332 | |||
333 | /* ---[ 0x800: External interrupt exception ]---------------------------- */ | ||
334 | .org 0x800 | ||
335 | EXCEPTION_HANDLE(_external_irq_handler) | ||
336 | |||
337 | /* ---[ 0x900: DTLB miss exception ]------------------------------------- */ | ||
338 | .org 0x900 | ||
339 | l.j boot_dtlb_miss_handler | ||
340 | l.nop | ||
341 | |||
342 | /* ---[ 0xa00: ITLB miss exception ]------------------------------------- */ | ||
343 | .org 0xa00 | ||
344 | l.j boot_itlb_miss_handler | ||
345 | l.nop | ||
346 | |||
347 | /* ---[ 0xb00: Range exception ]----------------------------------------- */ | ||
348 | .org 0xb00 | ||
349 | UNHANDLED_EXCEPTION(_vector_0xb00) | ||
350 | |||
351 | /* ---[ 0xc00: Syscall exception ]--------------------------------------- */ | ||
352 | .org 0xc00 | ||
353 | EXCEPTION_HANDLE(_sys_call_handler) | ||
354 | |||
355 | /* ---[ 0xd00: Trap exception ]------------------------------------------ */ | ||
356 | .org 0xd00 | ||
357 | UNHANDLED_EXCEPTION(_vector_0xd00) | ||
358 | |||
359 | /* ---[ 0xe00: Trap exception ]------------------------------------------ */ | ||
360 | .org 0xe00 | ||
361 | // UNHANDLED_EXCEPTION(_vector_0xe00) | ||
362 | EXCEPTION_HANDLE(_trap_handler) | ||
363 | |||
364 | /* ---[ 0xf00: Reserved exception ]-------------------------------------- */ | ||
365 | .org 0xf00 | ||
366 | UNHANDLED_EXCEPTION(_vector_0xf00) | ||
367 | |||
368 | /* ---[ 0x1000: Reserved exception ]------------------------------------- */ | ||
369 | .org 0x1000 | ||
370 | UNHANDLED_EXCEPTION(_vector_0x1000) | ||
371 | |||
372 | /* ---[ 0x1100: Reserved exception ]------------------------------------- */ | ||
373 | .org 0x1100 | ||
374 | UNHANDLED_EXCEPTION(_vector_0x1100) | ||
375 | |||
376 | /* ---[ 0x1200: Reserved exception ]------------------------------------- */ | ||
377 | .org 0x1200 | ||
378 | UNHANDLED_EXCEPTION(_vector_0x1200) | ||
379 | |||
380 | /* ---[ 0x1300: Reserved exception ]------------------------------------- */ | ||
381 | .org 0x1300 | ||
382 | UNHANDLED_EXCEPTION(_vector_0x1300) | ||
383 | |||
384 | /* ---[ 0x1400: Reserved exception ]------------------------------------- */ | ||
385 | .org 0x1400 | ||
386 | UNHANDLED_EXCEPTION(_vector_0x1400) | ||
387 | |||
388 | /* ---[ 0x1500: Reserved exception ]------------------------------------- */ | ||
389 | .org 0x1500 | ||
390 | UNHANDLED_EXCEPTION(_vector_0x1500) | ||
391 | |||
392 | /* ---[ 0x1600: Reserved exception ]------------------------------------- */ | ||
393 | .org 0x1600 | ||
394 | UNHANDLED_EXCEPTION(_vector_0x1600) | ||
395 | |||
396 | /* ---[ 0x1700: Reserved exception ]------------------------------------- */ | ||
397 | .org 0x1700 | ||
398 | UNHANDLED_EXCEPTION(_vector_0x1700) | ||
399 | |||
400 | /* ---[ 0x1800: Reserved exception ]------------------------------------- */ | ||
401 | .org 0x1800 | ||
402 | UNHANDLED_EXCEPTION(_vector_0x1800) | ||
403 | |||
404 | /* ---[ 0x1900: Reserved exception ]------------------------------------- */ | ||
405 | .org 0x1900 | ||
406 | UNHANDLED_EXCEPTION(_vector_0x1900) | ||
407 | |||
408 | /* ---[ 0x1a00: Reserved exception ]------------------------------------- */ | ||
409 | .org 0x1a00 | ||
410 | UNHANDLED_EXCEPTION(_vector_0x1a00) | ||
411 | |||
412 | /* ---[ 0x1b00: Reserved exception ]------------------------------------- */ | ||
413 | .org 0x1b00 | ||
414 | UNHANDLED_EXCEPTION(_vector_0x1b00) | ||
415 | |||
416 | /* ---[ 0x1c00: Reserved exception ]------------------------------------- */ | ||
417 | .org 0x1c00 | ||
418 | UNHANDLED_EXCEPTION(_vector_0x1c00) | ||
419 | |||
420 | /* ---[ 0x1d00: Reserved exception ]------------------------------------- */ | ||
421 | .org 0x1d00 | ||
422 | UNHANDLED_EXCEPTION(_vector_0x1d00) | ||
423 | |||
424 | /* ---[ 0x1e00: Reserved exception ]------------------------------------- */ | ||
425 | .org 0x1e00 | ||
426 | UNHANDLED_EXCEPTION(_vector_0x1e00) | ||
427 | |||
428 | /* ---[ 0x1f00: Reserved exception ]------------------------------------- */ | ||
429 | .org 0x1f00 | ||
430 | UNHANDLED_EXCEPTION(_vector_0x1f00) | ||
431 | |||
432 | .org 0x2000 | ||
433 | /* ===================================================[ kernel start ]=== */ | ||
434 | |||
435 | /* .text*/ | ||
436 | |||
437 | /* This early stuff belongs in HEAD, but some of the functions below definitely | ||
438 | * don't... */ | ||
439 | |||
440 | __HEAD | ||
441 | .global _start | ||
442 | _start: | ||
443 | /* | ||
444 | * ensure a deterministic start | ||
445 | */ | ||
446 | |||
447 | l.ori r3,r0,0x1 | ||
448 | l.mtspr r0,r3,SPR_SR | ||
449 | |||
450 | CLEAR_GPR(r1) | ||
451 | CLEAR_GPR(r2) | ||
452 | CLEAR_GPR(r3) | ||
453 | CLEAR_GPR(r4) | ||
454 | CLEAR_GPR(r5) | ||
455 | CLEAR_GPR(r6) | ||
456 | CLEAR_GPR(r7) | ||
457 | CLEAR_GPR(r8) | ||
458 | CLEAR_GPR(r9) | ||
459 | CLEAR_GPR(r10) | ||
460 | CLEAR_GPR(r11) | ||
461 | CLEAR_GPR(r12) | ||
462 | CLEAR_GPR(r13) | ||
463 | CLEAR_GPR(r14) | ||
464 | CLEAR_GPR(r15) | ||
465 | CLEAR_GPR(r16) | ||
466 | CLEAR_GPR(r17) | ||
467 | CLEAR_GPR(r18) | ||
468 | CLEAR_GPR(r19) | ||
469 | CLEAR_GPR(r20) | ||
470 | CLEAR_GPR(r21) | ||
471 | CLEAR_GPR(r22) | ||
472 | CLEAR_GPR(r23) | ||
473 | CLEAR_GPR(r24) | ||
474 | CLEAR_GPR(r25) | ||
475 | CLEAR_GPR(r26) | ||
476 | CLEAR_GPR(r27) | ||
477 | CLEAR_GPR(r28) | ||
478 | CLEAR_GPR(r29) | ||
479 | CLEAR_GPR(r30) | ||
480 | CLEAR_GPR(r31) | ||
481 | |||
482 | /* | ||
483 | * set up initial ksp and current | ||
484 | */ | ||
485 | LOAD_SYMBOL_2_GPR(r1,init_thread_union+0x2000) // setup kernel stack | ||
486 | LOAD_SYMBOL_2_GPR(r10,init_thread_union) // setup current | ||
487 | tophys (r31,r10) | ||
488 | l.sw TI_KSP(r31), r1 | ||
489 | |||
490 | l.ori r4,r0,0x0 | ||
491 | |||
492 | |||
493 | /* | ||
494 | * .data contains initialized data, | ||
495 | * .bss contains uninitialized data - clear it up | ||
496 | */ | ||
497 | clear_bss: | ||
498 | LOAD_SYMBOL_2_GPR(r24, __bss_start) | ||
499 | LOAD_SYMBOL_2_GPR(r26, _end) | ||
500 | tophys(r28,r24) | ||
501 | tophys(r30,r26) | ||
502 | CLEAR_GPR(r24) | ||
503 | CLEAR_GPR(r26) | ||
504 | 1: | ||
505 | l.sw (0)(r28),r0 | ||
506 | l.sfltu r28,r30 | ||
507 | l.bf 1b | ||
508 | l.addi r28,r28,4 | ||
509 | |||
510 | enable_ic: | ||
511 | l.jal _ic_enable | ||
512 | l.nop | ||
513 | |||
514 | enable_dc: | ||
515 | l.jal _dc_enable | ||
516 | l.nop | ||
517 | |||
518 | flush_tlb: | ||
519 | /* | ||
520 | * I N V A L I D A T E T L B e n t r i e s | ||
521 | */ | ||
522 | LOAD_SYMBOL_2_GPR(r5,SPR_DTLBMR_BASE(0)) | ||
523 | LOAD_SYMBOL_2_GPR(r6,SPR_ITLBMR_BASE(0)) | ||
524 | l.addi r7,r0,128 /* Maximum number of sets */ | ||
525 | 1: | ||
526 | l.mtspr r5,r0,0x0 | ||
527 | l.mtspr r6,r0,0x0 | ||
528 | |||
529 | l.addi r5,r5,1 | ||
530 | l.addi r6,r6,1 | ||
531 | l.sfeq r7,r0 | ||
532 | l.bnf 1b | ||
533 | l.addi r7,r7,-1 | ||
534 | |||
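The invalidation loop above amounts to roughly the following C sketch (mtspr() and the SPR_*TLBMR_BASE macros are the usual accessors from asm/spr_defs.h; 128 is the hard-coded maximum set count used here):

	int i;

	for (i = 0; i < 128; i++) {
		mtspr(SPR_DTLBMR_BASE(0) + i, 0);   /* invalidate data TLB set i        */
		mtspr(SPR_ITLBMR_BASE(0) + i, 0);   /* invalidate instruction TLB set i */
	}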
535 | |||
536 | /* The MMU needs to be enabled before or32_early_setup is called */ | ||
537 | |||
538 | enable_mmu: | ||
539 | /* | ||
540 | * enable dmmu & immu | ||
541 | * i.e. set SR[DME] (bit 5) and SR[IME] (bit 6) | ||
542 | */ | ||
543 | l.mfspr r30,r0,SPR_SR | ||
544 | l.movhi r28,hi(SPR_SR_DME | SPR_SR_IME) | ||
545 | l.ori r28,r28,lo(SPR_SR_DME | SPR_SR_IME) | ||
546 | l.or r30,r30,r28 | ||
547 | l.mtspr r0,r30,SPR_SR | ||
548 | l.nop | ||
549 | l.nop | ||
550 | l.nop | ||
551 | l.nop | ||
552 | l.nop | ||
553 | l.nop | ||
554 | l.nop | ||
555 | l.nop | ||
556 | l.nop | ||
557 | l.nop | ||
558 | l.nop | ||
559 | l.nop | ||
560 | l.nop | ||
561 | l.nop | ||
562 | l.nop | ||
563 | l.nop | ||
564 | |||
565 | // reset the simulation counters | ||
566 | l.nop 5 | ||
567 | |||
568 | LOAD_SYMBOL_2_GPR(r24, or32_early_setup) | ||
569 | l.jalr r24 | ||
570 | l.nop | ||
571 | |||
572 | clear_regs: | ||
573 | /* | ||
574 | * clear all GPRS to increase determinism | ||
575 | */ | ||
576 | CLEAR_GPR(r2) | ||
577 | CLEAR_GPR(r3) | ||
578 | CLEAR_GPR(r4) | ||
579 | CLEAR_GPR(r5) | ||
580 | CLEAR_GPR(r6) | ||
581 | CLEAR_GPR(r7) | ||
582 | CLEAR_GPR(r8) | ||
583 | CLEAR_GPR(r9) | ||
584 | CLEAR_GPR(r11) | ||
585 | CLEAR_GPR(r12) | ||
586 | CLEAR_GPR(r13) | ||
587 | CLEAR_GPR(r14) | ||
588 | CLEAR_GPR(r15) | ||
589 | CLEAR_GPR(r16) | ||
590 | CLEAR_GPR(r17) | ||
591 | CLEAR_GPR(r18) | ||
592 | CLEAR_GPR(r19) | ||
593 | CLEAR_GPR(r20) | ||
594 | CLEAR_GPR(r21) | ||
595 | CLEAR_GPR(r22) | ||
596 | CLEAR_GPR(r23) | ||
597 | CLEAR_GPR(r24) | ||
598 | CLEAR_GPR(r25) | ||
599 | CLEAR_GPR(r26) | ||
600 | CLEAR_GPR(r27) | ||
601 | CLEAR_GPR(r28) | ||
602 | CLEAR_GPR(r29) | ||
603 | CLEAR_GPR(r30) | ||
604 | CLEAR_GPR(r31) | ||
605 | |||
606 | jump_start_kernel: | ||
607 | /* | ||
608 | * jump to kernel entry (start_kernel) | ||
609 | */ | ||
610 | LOAD_SYMBOL_2_GPR(r30, start_kernel) | ||
611 | l.jr r30 | ||
612 | l.nop | ||
613 | |||
614 | /* ========================================[ cache ]=== */ | ||
615 | |||
616 | /* alignment here so we don't change memory offsets with the | ||
617 | * memory controller configuration | ||
618 | */ | ||
619 | .align 0x2000 | ||
620 | |||
621 | _ic_enable: | ||
622 | /* Check if IC present and skip enabling otherwise */ | ||
623 | l.mfspr r24,r0,SPR_UPR | ||
624 | l.andi r26,r24,SPR_UPR_ICP | ||
625 | l.sfeq r26,r0 | ||
626 | l.bf 9f | ||
627 | l.nop | ||
628 | |||
629 | /* Disable IC */ | ||
630 | l.mfspr r6,r0,SPR_SR | ||
631 | l.addi r5,r0,-1 | ||
632 | l.xori r5,r5,SPR_SR_ICE | ||
633 | l.and r5,r6,r5 | ||
634 | l.mtspr r0,r5,SPR_SR | ||
635 | |||
636 | /* Establish cache block size | ||
637 | If BS=0, 16; | ||
638 | If BS=1, 32; | ||
639 | r14 contains the block size | ||
640 | */ | ||
641 | l.mfspr r24,r0,SPR_ICCFGR | ||
642 | l.andi r26,r24,SPR_ICCFGR_CBS | ||
643 | l.srli r28,r26,7 | ||
644 | l.ori r30,r0,16 | ||
645 | l.sll r14,r30,r28 | ||
646 | |||
647 | /* Establish number of cache sets | ||
648 | r16 contains number of cache sets | ||
649 | r28 contains log(# of cache sets) | ||
650 | */ | ||
651 | l.andi r26,r24,SPR_ICCFGR_NCS | ||
652 | l.srli r28,r26,3 | ||
653 | l.ori r30,r0,1 | ||
654 | l.sll r16,r30,r28 | ||
655 | |||
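In C form, the ICCFGR decoding above looks roughly like this (field shifts taken from the assembly itself; mfspr() is the usual SPR accessor and is assumed available). The invalidation loop that follows then steps SPR_ICBIR through the cache in block-size increments up to this bound:

	static uint32_t ic_invalidate_bound(void)
	{
		uint32_t cfg   = mfspr(SPR_ICCFGR);
		uint32_t block = 16u << ((cfg & SPR_ICCFGR_CBS) >> 7); /* 16 or 32 bytes */
		uint32_t sets  = 1u  << ((cfg & SPR_ICCFGR_NCS) >> 3);

		return block * sets;    /* total bytes walked with SPR_ICBIR writes */
	}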
656 | /* Invalidate IC */ | ||
657 | l.addi r6,r0,0 | ||
658 | l.sll r5,r14,r28 | ||
659 | // l.mul r5,r14,r16 | ||
660 | // l.trap 1 | ||
661 | // l.addi r5,r0,IC_SIZE | ||
662 | 1: | ||
663 | l.mtspr r0,r6,SPR_ICBIR | ||
664 | l.sfne r6,r5 | ||
665 | l.bf 1b | ||
666 | l.add r6,r6,r14 | ||
667 | // l.addi r6,r6,IC_LINE | ||
668 | |||
669 | /* Enable IC */ | ||
670 | l.mfspr r6,r0,SPR_SR | ||
671 | l.ori r6,r6,SPR_SR_ICE | ||
672 | l.mtspr r0,r6,SPR_SR | ||
673 | l.nop | ||
674 | l.nop | ||
675 | l.nop | ||
676 | l.nop | ||
677 | l.nop | ||
678 | l.nop | ||
679 | l.nop | ||
680 | l.nop | ||
681 | l.nop | ||
682 | l.nop | ||
683 | 9: | ||
684 | l.jr r9 | ||
685 | l.nop | ||
686 | |||
687 | _dc_enable: | ||
688 | /* Check if DC present and skip enabling otherwise */ | ||
689 | l.mfspr r24,r0,SPR_UPR | ||
690 | l.andi r26,r24,SPR_UPR_DCP | ||
691 | l.sfeq r26,r0 | ||
692 | l.bf 9f | ||
693 | l.nop | ||
694 | |||
695 | /* Disable DC */ | ||
696 | l.mfspr r6,r0,SPR_SR | ||
697 | l.addi r5,r0,-1 | ||
698 | l.xori r5,r5,SPR_SR_DCE | ||
699 | l.and r5,r6,r5 | ||
700 | l.mtspr r0,r5,SPR_SR | ||
701 | |||
702 | /* Establish cache block size | ||
703 | If BS=0, 16; | ||
704 | If BS=1, 32; | ||
705 | r14 contains the block size | ||
706 | */ | ||
707 | l.mfspr r24,r0,SPR_DCCFGR | ||
708 | l.andi r26,r24,SPR_DCCFGR_CBS | ||
709 | l.srli r28,r26,7 | ||
710 | l.ori r30,r0,16 | ||
711 | l.sll r14,r30,r28 | ||
712 | |||
713 | /* Establish number of cache sets | ||
714 | r16 contains number of cache sets | ||
715 | r28 contains log(# of cache sets) | ||
716 | */ | ||
717 | l.andi r26,r24,SPR_DCCFGR_NCS | ||
718 | l.srli r28,r26,3 | ||
719 | l.ori r30,r0,1 | ||
720 | l.sll r16,r30,r28 | ||
721 | |||
722 | /* Invalidate DC */ | ||
723 | l.addi r6,r0,0 | ||
724 | l.sll r5,r14,r28 | ||
725 | 1: | ||
726 | l.mtspr r0,r6,SPR_DCBIR | ||
727 | l.sfne r6,r5 | ||
728 | l.bf 1b | ||
729 | l.add r6,r6,r14 | ||
730 | |||
731 | /* Enable DC */ | ||
732 | l.mfspr r6,r0,SPR_SR | ||
733 | l.ori r6,r6,SPR_SR_DCE | ||
734 | l.mtspr r0,r6,SPR_SR | ||
735 | 9: | ||
736 | l.jr r9 | ||
737 | l.nop | ||
738 | |||
739 | /* ===============================================[ page table masks ]=== */ | ||
740 | |||
741 | /* bit 4 is used in hardware as the write-back cache bit. we never use this bit | ||
742 | * explicitly, so we can reuse it as the _PAGE_FILE bit and mask it out when | ||
743 | * writing into hardware PTEs | ||
744 | */ | ||
745 | |||
746 | #define DTLB_UP_CONVERT_MASK 0x3fa | ||
747 | #define ITLB_UP_CONVERT_MASK 0x3a | ||
748 | |||
749 | /* for SMP we'd have (this is a bit subtle, CC must always be set | ||
750 | * for SMP, but since we have _PAGE_PRESENT bit always defined | ||
751 | * we can just modify the mask) | ||
752 | */ | ||
753 | #define DTLB_SMP_CONVERT_MASK 0x3fb | ||
754 | #define ITLB_SMP_CONVERT_MASK 0x3b | ||
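A hedged sketch of how these masks are used by the TLB miss handlers further down: the Linux pte value is simply ANDed with PAGE_MASK | *_UP_CONVERT_MASK before it is written to the hardware translate register (PAGE_MASK is 0xffffe000 for the 8KB pages used on this port):

	static uint32_t pte_to_dtlbtr(uint32_t pte)
	{
		return pte & (0xffffe000u | DTLB_UP_CONVERT_MASK); /* == 0xffffe3fa */
	}

	static uint32_t pte_to_itlbtr(uint32_t pte)
	{
		return pte & (0xffffe000u | ITLB_UP_CONVERT_MASK); /* == 0xffffe03a */
	}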
755 | |||
756 | /* ---[ boot dtlb miss handler ]----------------------------------------- */ | ||
757 | |||
758 | boot_dtlb_miss_handler: | ||
759 | |||
760 | /* mask for DTLB_MR register: - (0) sets V (valid) bit, | ||
761 | * - (31-12) sets bits belonging to VPN (31-12) | ||
762 | */ | ||
763 | #define DTLB_MR_MASK 0xfffff001 | ||
764 | |||
765 | /* mask for DTLB_TR register: - (2) sets CI (cache inhibit) bit, | ||
766 | * - (4) sets A (access) bit, | ||
767 | * - (5) sets D (dirty) bit, | ||
768 | * - (8) sets SRE (superuser read) bit | ||
769 | * - (9) sets SWE (superuser write) bit | ||
770 | * - (31-12) sets bits belonging to VPN (31-12) | ||
771 | */ | ||
772 | #define DTLB_TR_MASK 0xfffff332 | ||
773 | |||
774 | /* These are for masking out the VPN/PPN value from the MR/TR registers... | ||
775 | * it's not the same as the PFN */ | ||
776 | #define VPN_MASK 0xfffff000 | ||
777 | #define PPN_MASK 0xfffff000 | ||
778 | |||
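Roughly, the handler below performs the following (a C sketch only; mtspr(), the tophys_c() helper sketched earlier, and the set count read from DMMUCFGR are assumptions made for illustration):

	static void boot_dtlb_fill(uint32_t ea, uint32_t nsets)
	{
		uint32_t set    = (ea >> 13) & (nsets - 1);        /* 8KB page index     */
		uint32_t pa     = (ea <= 0xbfffffffu) ? ea : tophys_c(ea);
		uint32_t dtlbmr = (ea & VPN_MASK) | (DTLB_MR_MASK & ~VPN_MASK); /* VPN|V */
		uint32_t dtlbtr = (pa & PPN_MASK) | (DTLB_TR_MASK & ~PPN_MASK); /* PPN|flags */

		mtspr(SPR_DTLBMR_BASE(0) + set, dtlbmr);
		mtspr(SPR_DTLBTR_BASE(0) + set, dtlbtr);
	}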
779 | |||
780 | EXCEPTION_STORE_GPR6 | ||
781 | |||
782 | #if 0 | ||
783 | l.mfspr r6,r0,SPR_ESR_BASE // | ||
784 | l.andi r6,r6,SPR_SR_SM // are we in kernel mode ? | ||
785 | l.sfeqi r6,0 // r6 == 0x1 --> SM | ||
786 | l.bf exit_with_no_dtranslation // | ||
787 | l.nop | ||
788 | #endif | ||
789 | |||
790 | /* this could be optimized by moving the storing of | ||
791 | * the non-r6 registers here, and jumping to the r6 restore | ||
792 | * if not in supervisor mode | ||
793 | */ | ||
794 | |||
795 | EXCEPTION_STORE_GPR2 | ||
796 | EXCEPTION_STORE_GPR3 | ||
797 | EXCEPTION_STORE_GPR4 | ||
798 | EXCEPTION_STORE_GPR5 | ||
799 | |||
800 | l.mfspr r4,r0,SPR_EEAR_BASE // get the offending EA | ||
801 | |||
802 | immediate_translation: | ||
803 | CLEAR_GPR(r6) | ||
804 | |||
805 | l.srli r3,r4,0xd // r3 <- r4 / 8192 (sets are relative to page size (8Kb) NOT VPN size (4Kb)) | ||
806 | |||
807 | l.mfspr r6, r0, SPR_DMMUCFGR | ||
808 | l.andi r6, r6, SPR_DMMUCFGR_NTS | ||
809 | l.srli r6, r6, SPR_DMMUCFGR_NTS_OFF | ||
810 | l.ori r5, r0, 0x1 | ||
811 | l.sll r5, r5, r6 // r5 = number DMMU sets | ||
812 | l.addi r6, r5, -1 // r6 = nsets mask | ||
813 | l.and r2, r3, r6 // r2 <- r3 % NSETS_MASK | ||
814 | |||
815 | l.or r6,r6,r4 // r6 <- r4 | ||
816 | l.ori r6,r6,~(VPN_MASK) // r6 <- VPN :VPN .xfff - clear up lo(r6) to 0x**** *fff | ||
817 | l.movhi r5,hi(DTLB_MR_MASK) // r5 <- ffff:0000.x000 | ||
818 | l.ori r5,r5,lo(DTLB_MR_MASK) // r5 <- ffff:1111.x001 - apply DTLB_MR_MASK | ||
819 | l.and r5,r5,r6 // r5 <- VPN :VPN .x001 - we have DTLBMR entry | ||
820 | l.mtspr r2,r5,SPR_DTLBMR_BASE(0) // set DTLBMR | ||
821 | |||
822 | /* set up DTLB with no translation for EA <= 0xbfffffff */ | ||
823 | LOAD_SYMBOL_2_GPR(r6,0xbfffffff) | ||
824 | l.sfgeu r6,r4 // flag if r6 >= r4 (if 0xbfffffff >= EA) | ||
825 | l.bf 1f // goto out | ||
826 | l.and r3,r4,r4 // delay slot :: r3 <- r4 (if flag==1) | ||
827 | |||
828 | tophys(r3,r4) // r3 <- PA | ||
829 | 1: | ||
830 | l.ori r3,r3,~(PPN_MASK) // r3 <- PPN :PPN .xfff - clear up lo(r6) to 0x**** *fff | ||
831 | l.movhi r5,hi(DTLB_TR_MASK) // r5 <- ffff:0000.x000 | ||
832 | l.ori r5,r5,lo(DTLB_TR_MASK) // r5 <- ffff:1111.x330 - apply DTLB_MR_MASK | ||
833 | l.and r5,r5,r3 // r5 <- PPN :PPN .x330 - we have DTLBTR entry | ||
834 | l.mtspr r2,r5,SPR_DTLBTR_BASE(0) // set DTLBTR | ||
835 | |||
836 | EXCEPTION_LOAD_GPR6 | ||
837 | EXCEPTION_LOAD_GPR5 | ||
838 | EXCEPTION_LOAD_GPR4 | ||
839 | EXCEPTION_LOAD_GPR3 | ||
840 | EXCEPTION_LOAD_GPR2 | ||
841 | |||
842 | l.rfe // SR <- ESR, PC <- EPC | ||
843 | |||
844 | exit_with_no_dtranslation: | ||
845 | /* EA out of memory or not in supervisor mode */ | ||
846 | EXCEPTION_LOAD_GPR6 | ||
847 | EXCEPTION_LOAD_GPR4 | ||
848 | l.j _dispatch_bus_fault | ||
849 | |||
850 | /* ---[ boot itlb miss handler ]----------------------------------------- */ | ||
851 | |||
852 | boot_itlb_miss_handler: | ||
853 | |||
854 | /* mask for ITLB_MR register: - sets V (valid) bit, | ||
855 | * - sets bits belonging to VPN (31-12) | ||
856 | */ | ||
857 | #define ITLB_MR_MASK 0xfffff001 | ||
858 | |||
859 | /* mask for ITLB_TR register: - sets A (access) bit, | ||
860 | * - sets SXE (superuser execute) bit | ||
861 | * - sets bits belonging to VPN (31-12) | ||
862 | */ | ||
863 | #define ITLB_TR_MASK 0xfffff050 | ||
864 | |||
865 | /* | ||
866 | #define VPN_MASK 0xffffe000 | ||
867 | #define PPN_MASK 0xffffe000 | ||
868 | */ | ||
869 | |||
870 | |||
871 | |||
872 | EXCEPTION_STORE_GPR2 | ||
873 | EXCEPTION_STORE_GPR3 | ||
874 | EXCEPTION_STORE_GPR4 | ||
875 | EXCEPTION_STORE_GPR5 | ||
876 | EXCEPTION_STORE_GPR6 | ||
877 | |||
878 | #if 0 | ||
879 | l.mfspr r6,r0,SPR_ESR_BASE // | ||
880 | l.andi r6,r6,SPR_SR_SM // are we in kernel mode ? | ||
881 | l.sfeqi r6,0 // r6 == 0x1 --> SM | ||
882 | l.bf exit_with_no_itranslation | ||
883 | l.nop | ||
884 | #endif | ||
885 | |||
886 | |||
887 | l.mfspr r4,r0,SPR_EEAR_BASE // get the offending EA | ||
888 | |||
889 | earlyearly: | ||
890 | CLEAR_GPR(r6) | ||
891 | |||
892 | l.srli r3,r4,0xd // r3 <- r4 / 8192 (sets are relative to page size (8Kb) NOT VPN size (4Kb)) | ||
893 | |||
894 | l.mfspr r6, r0, SPR_IMMUCFGR | ||
895 | l.andi r6, r6, SPR_IMMUCFGR_NTS | ||
896 | l.srli r6, r6, SPR_IMMUCFGR_NTS_OFF | ||
897 | l.ori r5, r0, 0x1 | ||
898 | l.sll r5, r5, r6 // r5 = number IMMU sets from IMMUCFGR | ||
899 | l.addi r6, r5, -1 // r6 = nsets mask | ||
900 | l.and r2, r3, r6 // r2 <- r3 % NSETS_MASK | ||
901 | |||
902 | l.or r6,r6,r4 // r6 <- r4 | ||
903 | l.ori r6,r6,~(VPN_MASK) // r6 <- VPN :VPN .xfff - clear up lo(r6) to 0x**** *fff | ||
904 | l.movhi r5,hi(ITLB_MR_MASK) // r5 <- ffff:0000.x000 | ||
905 | l.ori r5,r5,lo(ITLB_MR_MASK) // r5 <- ffff:1111.x001 - apply ITLB_MR_MASK | ||
906 | l.and r5,r5,r6 // r5 <- VPN :VPN .x001 - we have ITLBMR entry | ||
907 | l.mtspr r2,r5,SPR_ITLBMR_BASE(0) // set ITLBMR | ||
908 | |||
909 | /* | ||
910 | * set up ITLB with no translation for EA <= 0x0fffffff | ||
911 | * | ||
912 | * we need this for head.S mapping (EA = PA). if we move all functions | ||
913 | * which run with mmu enabled into entry.S, we might be able to eliminate this. | ||
914 | * | ||
915 | */ | ||
916 | LOAD_SYMBOL_2_GPR(r6,0x0fffffff) | ||
917 | l.sfgeu r6,r4 // flag if r6 >= r4 (if 0x0fffffff >= EA) | ||
918 | l.bf 1f // goto out | ||
919 | l.and r3,r4,r4 // delay slot :: r3 <- r4 (if flag==1) | ||
920 | |||
921 | tophys(r3,r4) // r3 <- PA | ||
922 | 1: | ||
923 | l.ori r3,r3,~(PPN_MASK) // r3 <- PPN :PPN .xfff - clear up lo(r6) to 0x**** *fff | ||
924 | l.movhi r5,hi(ITLB_TR_MASK) // r5 <- ffff:0000.x000 | ||
925 | l.ori r5,r5,lo(ITLB_TR_MASK) // r5 <- ffff:1111.x050 - apply ITLB_MR_MASK | ||
926 | l.and r5,r5,r3 // r5 <- PPN :PPN .x050 - we have ITLBTR entry | ||
927 | l.mtspr r2,r5,SPR_ITLBTR_BASE(0) // set ITLBTR | ||
928 | |||
929 | EXCEPTION_LOAD_GPR6 | ||
930 | EXCEPTION_LOAD_GPR5 | ||
931 | EXCEPTION_LOAD_GPR4 | ||
932 | EXCEPTION_LOAD_GPR3 | ||
933 | EXCEPTION_LOAD_GPR2 | ||
934 | |||
935 | l.rfe // SR <- ESR, PC <- EPC | ||
936 | |||
937 | exit_with_no_itranslation: | ||
938 | EXCEPTION_LOAD_GPR4 | ||
939 | EXCEPTION_LOAD_GPR6 | ||
940 | l.j _dispatch_bus_fault | ||
941 | l.nop | ||
942 | |||
943 | /* ====================================================================== */ | ||
944 | /* | ||
945 | * Stuff below here shouldn't go into .head section... maybe this stuff | ||
946 | * can be moved to entry.S ??? | ||
947 | */ | ||
948 | |||
949 | /* ==============================================[ DTLB miss handler ]=== */ | ||
950 | |||
951 | /* | ||
952 | * Comments: | ||
953 | * Exception handlers are entered with MMU off so the following handler | ||
954 | * needs to use physical addressing | ||
955 | * | ||
956 | */ | ||
957 | |||
958 | .text | ||
959 | ENTRY(dtlb_miss_handler) | ||
960 | EXCEPTION_STORE_GPR2 | ||
961 | EXCEPTION_STORE_GPR3 | ||
962 | EXCEPTION_STORE_GPR4 | ||
963 | EXCEPTION_STORE_GPR5 | ||
964 | EXCEPTION_STORE_GPR6 | ||
965 | /* | ||
966 | * get EA of the miss | ||
967 | */ | ||
968 | l.mfspr r2,r0,SPR_EEAR_BASE | ||
969 | /* | ||
970 | * pmd = (pmd_t *)(current_pgd + pgd_index(daddr)); | ||
971 | */ | ||
972 | GET_CURRENT_PGD(r3,r5) // r3 is current_pgd, r5 is temp | ||
973 | l.srli r4,r2,0x18 // >> PAGE_SHIFT + (PAGE_SHIFT - 2) | ||
974 | l.slli r4,r4,0x2 // to get address << 2 | ||
975 | l.add r5,r4,r3 // r4 is pgd_index(daddr) | ||
976 | /* | ||
977 | * if (pmd_none(*pmd)) | ||
978 | * goto pmd_none: | ||
979 | */ | ||
980 | tophys (r4,r5) | ||
981 | l.lwz r3,0x0(r4) // get *pmd value | ||
982 | l.sfne r3,r0 | ||
983 | l.bnf d_pmd_none | ||
984 | l.andi r3,r3,~PAGE_MASK //0x1fff // ~PAGE_MASK | ||
985 | /* | ||
986 | * if (pmd_bad(*pmd)) | ||
987 | * pmd_clear(pmd) | ||
988 | * goto pmd_bad: | ||
989 | */ | ||
990 | // l.sfeq r3,r0 // check *pmd value | ||
991 | // l.bf d_pmd_good | ||
992 | l.addi r3,r0,0xffffe000 // PAGE_MASK | ||
993 | // l.j d_pmd_bad | ||
994 | // l.sw 0x0(r4),r0 // clear pmd | ||
995 | d_pmd_good: | ||
996 | /* | ||
997 | * pte = *pte_offset(pmd, daddr); | ||
998 | */ | ||
999 | l.lwz r4,0x0(r4) // get **pmd value | ||
1000 | l.and r4,r4,r3 // & PAGE_MASK | ||
1001 | l.srli r5,r2,0xd // >> PAGE_SHIFT, r2 == EEAR | ||
1002 | l.andi r3,r5,0x7ff // (1UL << PAGE_SHIFT - 2) - 1 | ||
1003 | l.slli r3,r3,0x2 // to get address << 2 | ||
1004 | l.add r3,r3,r4 | ||
1005 | l.lwz r2,0x0(r3) // this is pte at last | ||
1006 | /* | ||
1007 | * if (!pte_present(pte)) | ||
1008 | */ | ||
1009 | l.andi r4,r2,0x1 | ||
1010 | l.sfne r4,r0 // is pte present | ||
1011 | l.bnf d_pte_not_present | ||
1012 | l.addi r3,r0,0xffffe3fa // PAGE_MASK | DTLB_UP_CONVERT_MASK | ||
1013 | /* | ||
1014 | * fill DTLB TR register | ||
1015 | */ | ||
1016 | l.and r4,r2,r3 // apply the mask | ||
1017 | // Determine number of DMMU sets | ||
1018 | l.mfspr r6, r0, SPR_DMMUCFGR | ||
1019 | l.andi r6, r6, SPR_DMMUCFGR_NTS | ||
1020 | l.srli r6, r6, SPR_DMMUCFGR_NTS_OFF | ||
1021 | l.ori r3, r0, 0x1 | ||
1022 | l.sll r3, r3, r6 // r3 = number DMMU sets DMMUCFGR | ||
1023 | l.addi r6, r3, -1 // r6 = nsets mask | ||
1024 | l.and r5, r5, r6 // calc offset: & (NUM_TLB_ENTRIES-1) | ||
1025 | //NUM_TLB_ENTRIES | ||
1026 | l.mtspr r5,r4,SPR_DTLBTR_BASE(0) | ||
1027 | /* | ||
1028 | * fill DTLB MR register | ||
1029 | */ | ||
1030 | l.mfspr r2,r0,SPR_EEAR_BASE | ||
1031 | l.addi r3,r0,0xffffe000 // PAGE_MASK | ||
1032 | l.and r4,r2,r3 // apply PAGE_MASK to EA (__PHX__ do we really need this?) | ||
1033 | l.ori r4,r4,0x1 // set hardware valid bit: DTLB_MR entry | ||
1034 | l.mtspr r5,r4,SPR_DTLBMR_BASE(0) | ||
1035 | |||
1036 | EXCEPTION_LOAD_GPR2 | ||
1037 | EXCEPTION_LOAD_GPR3 | ||
1038 | EXCEPTION_LOAD_GPR4 | ||
1039 | EXCEPTION_LOAD_GPR5 | ||
1040 | EXCEPTION_LOAD_GPR6 | ||
1041 | l.rfe | ||
1042 | d_pmd_bad: | ||
1043 | l.nop 1 | ||
1044 | EXCEPTION_LOAD_GPR2 | ||
1045 | EXCEPTION_LOAD_GPR3 | ||
1046 | EXCEPTION_LOAD_GPR4 | ||
1047 | EXCEPTION_LOAD_GPR5 | ||
1048 | EXCEPTION_LOAD_GPR6 | ||
1049 | l.rfe | ||
1050 | d_pmd_none: | ||
1051 | d_pte_not_present: | ||
1052 | EXCEPTION_LOAD_GPR2 | ||
1053 | EXCEPTION_LOAD_GPR3 | ||
1054 | EXCEPTION_LOAD_GPR4 | ||
1055 | EXCEPTION_LOAD_GPR5 | ||
1056 | EXCEPTION_LOAD_GPR6 | ||
1057 | l.j _dispatch_do_dpage_fault | ||
1058 | l.nop | ||
1059 | |||
1060 | /* ==============================================[ ITLB miss handler ]=== */ | ||
1061 | ENTRY(itlb_miss_handler) | ||
1062 | EXCEPTION_STORE_GPR2 | ||
1063 | EXCEPTION_STORE_GPR3 | ||
1064 | EXCEPTION_STORE_GPR4 | ||
1065 | EXCEPTION_STORE_GPR5 | ||
1066 | EXCEPTION_STORE_GPR6 | ||
1067 | /* | ||
1068 | * get EA of the miss | ||
1069 | */ | ||
1070 | l.mfspr r2,r0,SPR_EEAR_BASE | ||
1071 | |||
1072 | /* | ||
1073 | * pmd = (pmd_t *)(current_pgd + pgd_index(daddr)); | ||
1074 | * | ||
1075 | */ | ||
1076 | GET_CURRENT_PGD(r3,r5) // r3 is current_pgd, r5 is temp | ||
1077 | l.srli r4,r2,0x18 // >> PAGE_SHIFT + (PAGE_SHIFT - 2) | ||
1078 | l.slli r4,r4,0x2 // to get address << 2 | ||
1079 | l.add r5,r4,r3 // r4 is pgd_index(daddr) | ||
1080 | /* | ||
1081 | * if (pmd_none(*pmd)) | ||
1082 | * goto pmd_none: | ||
1083 | */ | ||
1084 | tophys (r4,r5) | ||
1085 | l.lwz r3,0x0(r4) // get *pmd value | ||
1086 | l.sfne r3,r0 | ||
1087 | l.bnf i_pmd_none | ||
1088 | l.andi r3,r3,0x1fff // ~PAGE_MASK | ||
1089 | /* | ||
1090 | * if (pmd_bad(*pmd)) | ||
1091 | * pmd_clear(pmd) | ||
1092 | * goto pmd_bad: | ||
1093 | */ | ||
1094 | |||
1095 | // l.sfeq r3,r0 // check *pmd value | ||
1096 | // l.bf i_pmd_good | ||
1097 | l.addi r3,r0,0xffffe000 // PAGE_MASK | ||
1098 | // l.j i_pmd_bad | ||
1099 | // l.sw 0x0(r4),r0 // clear pmd | ||
1100 | |||
1101 | i_pmd_good: | ||
1102 | /* | ||
1103 | * pte = *pte_offset(pmd, iaddr); | ||
1104 | * | ||
1105 | */ | ||
1106 | l.lwz r4,0x0(r4) // get **pmd value | ||
1107 | l.and r4,r4,r3 // & PAGE_MASK | ||
1108 | l.srli r5,r2,0xd // >> PAGE_SHIFT, r2 == EEAR | ||
1109 | l.andi r3,r5,0x7ff // (1UL << PAGE_SHIFT - 2) - 1 | ||
1110 | l.slli r3,r3,0x2 // to get address << 2 | ||
1111 | l.add r3,r3,r4 | ||
1112 | l.lwz r2,0x0(r3) // this is pte at last | ||
1113 | /* | ||
1114 | * if (!pte_present(pte)) | ||
1115 | * | ||
1116 | */ | ||
1117 | l.andi r4,r2,0x1 | ||
1118 | l.sfne r4,r0 // is pte present | ||
1119 | l.bnf i_pte_not_present | ||
1120 | l.addi r3,r0,0xffffe03a // PAGE_MASK | ITLB_UP_CONVERT_MASK | ||
1121 | /* | ||
1122 | * fill ITLB TR register | ||
1123 | */ | ||
1124 | l.and r4,r2,r3 // apply the mask | ||
1125 | l.andi r3,r2,0x7c0 // _PAGE_EXEC | _PAGE_SRE | _PAGE_SWE | _PAGE_URE | _PAGE_UWE | ||
1126 | // l.andi r3,r2,0x400 // _PAGE_EXEC | ||
1127 | l.sfeq r3,r0 | ||
1128 | l.bf itlb_tr_fill //_workaround | ||
1129 | // Determine number of IMMU sets | ||
1130 | l.mfspr r6, r0, SPR_IMMUCFGR | ||
1131 | l.andi r6, r6, SPR_IMMUCFGR_NTS | ||
1132 | l.srli r6, r6, SPR_IMMUCFGR_NTS_OFF | ||
1133 | l.ori r3, r0, 0x1 | ||
1134 | l.sll r3, r3, r6 // r3 = number IMMU sets IMMUCFGR | ||
1135 | l.addi r6, r3, -1 // r6 = nsets mask | ||
1136 | l.and r5, r5, r6 // calc offset: & (NUM_TLB_ENTRIES-1) | ||
1137 | |||
1138 | /* | ||
1139 | * __PHX__ :: fixme | ||
1140 | * we should not just blindly set executable flags, | ||
1141 | * but it does help with ping. the clean way would be to find out | ||
1142 | * why the stack doesn't have execution permissions (and fix it) | ||
1143 | */ | ||
1144 | |||
1145 | itlb_tr_fill_workaround: | ||
1146 | l.ori r4,r4,0xc0 // | (SPR_ITLBTR_UXE | ITLBTR_SXE) | ||
1147 | itlb_tr_fill: | ||
1148 | l.mtspr r5,r4,SPR_ITLBTR_BASE(0) | ||
1149 | /* | ||
1150 | * fill ITLB MR register | ||
1151 | */ | ||
1152 | l.mfspr r2,r0,SPR_EEAR_BASE | ||
1153 | l.addi r3,r0,0xffffe000 // PAGE_MASK | ||
1154 | l.and r4,r2,r3 // apply PAGE_MASK to EA (__PHX__ do we really need this?) | ||
1155 | l.ori r4,r4,0x1 // set hardware valid bit: ITLB_MR entry | ||
1156 | l.mtspr r5,r4,SPR_ITLBMR_BASE(0) | ||
1157 | |||
1158 | EXCEPTION_LOAD_GPR2 | ||
1159 | EXCEPTION_LOAD_GPR3 | ||
1160 | EXCEPTION_LOAD_GPR4 | ||
1161 | EXCEPTION_LOAD_GPR5 | ||
1162 | EXCEPTION_LOAD_GPR6 | ||
1163 | l.rfe | ||
1164 | |||
1165 | i_pmd_bad: | ||
1166 | l.nop 1 | ||
1167 | EXCEPTION_LOAD_GPR2 | ||
1168 | EXCEPTION_LOAD_GPR3 | ||
1169 | EXCEPTION_LOAD_GPR4 | ||
1170 | EXCEPTION_LOAD_GPR5 | ||
1171 | EXCEPTION_LOAD_GPR6 | ||
1172 | l.rfe | ||
1173 | i_pmd_none: | ||
1174 | i_pte_not_present: | ||
1175 | EXCEPTION_LOAD_GPR2 | ||
1176 | EXCEPTION_LOAD_GPR3 | ||
1177 | EXCEPTION_LOAD_GPR4 | ||
1178 | EXCEPTION_LOAD_GPR5 | ||
1179 | EXCEPTION_LOAD_GPR6 | ||
1180 | l.j _dispatch_do_ipage_fault | ||
1181 | l.nop | ||
1182 | |||
1183 | /* ==============================================[ boot tlb handlers ]=== */ | ||
1184 | |||
1185 | |||
1186 | /* =================================================[ debugging aids ]=== */ | ||
1187 | |||
1188 | .align 64 | ||
1189 | _immu_trampoline: | ||
1190 | .space 64 | ||
1191 | _immu_trampoline_top: | ||
1192 | |||
1193 | #define TRAMP_SLOT_0 (0x0) | ||
1194 | #define TRAMP_SLOT_1 (0x4) | ||
1195 | #define TRAMP_SLOT_2 (0x8) | ||
1196 | #define TRAMP_SLOT_3 (0xc) | ||
1197 | #define TRAMP_SLOT_4 (0x10) | ||
1198 | #define TRAMP_SLOT_5 (0x14) | ||
1199 | #define TRAMP_FRAME_SIZE (0x18) | ||
1200 | |||
1201 | ENTRY(_immu_trampoline_workaround) | ||
1202 | // r2 EEA | ||
1203 | // r6 is physical EEA | ||
1204 | tophys(r6,r2) | ||
1205 | |||
1206 | LOAD_SYMBOL_2_GPR(r5,_immu_trampoline) | ||
1207 | tophys (r3,r5) // r3 is trampoline (physical) | ||
1208 | |||
1209 | LOAD_SYMBOL_2_GPR(r4,0x15000000) | ||
1210 | l.sw TRAMP_SLOT_0(r3),r4 | ||
1211 | l.sw TRAMP_SLOT_1(r3),r4 | ||
1212 | l.sw TRAMP_SLOT_4(r3),r4 | ||
1213 | l.sw TRAMP_SLOT_5(r3),r4 | ||
1214 | |||
1215 | // EPC = EEA - 0x4 | ||
1216 | l.lwz r4,0x0(r6) // load op @ EEA + 0x0 (fc address) | ||
1217 | l.sw TRAMP_SLOT_3(r3),r4 // store it to _immu_trampoline_data | ||
1218 | l.lwz r4,-0x4(r6) // load op @ EEA - 0x4 (f8 address) | ||
1219 | l.sw TRAMP_SLOT_2(r3),r4 // store it to _immu_trampoline_data | ||
1220 | |||
1221 | l.srli r5,r4,26 // check opcode for write access | ||
1222 | l.sfeqi r5,0 // l.j | ||
1223 | l.bf 0f | ||
1224 | l.sfeqi r5,0x11 // l.jr | ||
1225 | l.bf 1f | ||
1226 | l.sfeqi r5,1 // l.jal | ||
1227 | l.bf 2f | ||
1228 | l.sfeqi r5,0x12 // l.jalr | ||
1229 | l.bf 3f | ||
1230 | l.sfeqi r5,3 // l.bnf | ||
1231 | l.bf 4f | ||
1232 | l.sfeqi r5,4 // l.bf | ||
1233 | l.bf 5f | ||
1234 | 99: | ||
1235 | l.nop | ||
1236 | l.j 99b // should never happen | ||
1237 | l.nop 1 | ||
1238 | |||
1239 | // r2 is EEA | ||
1240 | // r3 is trampoline address (physical) | ||
1241 | // r4 is instruction | ||
1242 | // r6 is physical(EEA) | ||
1243 | // | ||
1244 | // r5 | ||
1245 | |||
1246 | 2: // l.jal | ||
1247 | |||
1248 | /* 19 20 aa aa l.movhi r9,0xaaaa | ||
1249 | * a9 29 bb bb l.ori r9,0xbbbb | ||
1250 | * | ||
1251 | * where 0xaaaabbbb is EEA + 0x4 shifted right 2 | ||
1252 | */ | ||
1253 | |||
1254 | l.addi r6,r2,0x4 // this is 0xaaaabbbb | ||
1255 | |||
1256 | // l.movhi r9,0xaaaa | ||
1257 | l.ori r5,r0,0x1920 // 0x1920 == l.movhi r9 | ||
1258 | l.sh (TRAMP_SLOT_0+0x0)(r3),r5 | ||
1259 | l.srli r5,r6,16 | ||
1260 | l.sh (TRAMP_SLOT_0+0x2)(r3),r5 | ||
1261 | |||
1262 | // l.ori r9,0xbbbb | ||
1263 | l.ori r5,r0,0xa929 // 0xa929 == l.ori r9 | ||
1264 | l.sh (TRAMP_SLOT_1+0x0)(r3),r5 | ||
1265 | l.andi r5,r6,0xffff | ||
1266 | l.sh (TRAMP_SLOT_1+0x2)(r3),r5 | ||
1267 | |||
1268 | /* fallthrough, need to set up new jump offset */ | ||
1269 | |||
1270 | |||
1271 | 0: // l.j | ||
1272 | l.slli r6,r4,6 // original offset shifted left 6 - 2 | ||
1273 | // l.srli r6,r6,6 // original offset shifted right 2 | ||
1274 | |||
1275 | l.slli r4,r2,4 // old jump position: EEA shifted left 4 | ||
1276 | // l.srli r4,r4,6 // old jump position: shifted right 2 | ||
1277 | |||
1278 | l.addi r5,r3,0xc // new jump position (physical) | ||
1279 | l.slli r5,r5,4 // new jump position: shifted left 4 | ||
1280 | |||
1281 | // calculate new jump offset | ||
1282 | // new_off = old_off + (old_jump - new_jump) | ||
1283 | |||
1284 | l.sub r5,r4,r5 // old_jump - new_jump | ||
1285 | l.add r5,r6,r5 // orig_off + (old_jump - new_jump) | ||
1286 | l.srli r5,r5,6 // new offset shifted right 2 | ||
1287 | |||
1288 | // r5 is new jump offset | ||
1289 | // l.j has opcode 0x0... | ||
1290 | l.sw TRAMP_SLOT_2(r3),r5 // write it back | ||
1291 | |||
1292 | l.j trampoline_out | ||
1293 | l.nop | ||
1294 | |||
1295 | /* ----------------------------- */ | ||
1296 | |||
1297 | 3: // l.jalr | ||
1298 | |||
1299 | /* 19 20 aa aa l.movhi r9,0xaaaa | ||
1300 | * a9 29 bb bb l.ori r9,0xbbbb | ||
1301 | * | ||
1302 | * where 0xaaaabbbb is EEA + 0x4 shifted right 2 | ||
1303 | */ | ||
1304 | |||
1305 | l.addi r6,r2,0x4 // this is 0xaaaabbbb | ||
1306 | |||
1307 | // l.movhi r9,0xaaaa | ||
1308 | l.ori r5,r0,0x1920 // 0x1920 == l.movhi r9 | ||
1309 | l.sh (TRAMP_SLOT_0+0x0)(r3),r5 | ||
1310 | l.srli r5,r6,16 | ||
1311 | l.sh (TRAMP_SLOT_0+0x2)(r3),r5 | ||
1312 | |||
1313 | // l.ori r9,0xbbbb | ||
1314 | l.ori r5,r0,0xa929 // 0xa929 == l.ori r9 | ||
1315 | l.sh (TRAMP_SLOT_1+0x0)(r3),r5 | ||
1316 | l.andi r5,r6,0xffff | ||
1317 | l.sh (TRAMP_SLOT_1+0x2)(r3),r5 | ||
1318 | |||
1319 | l.lhz r5,(TRAMP_SLOT_2+0x0)(r3) // load hi part of jump instruction | ||
1320 | l.andi r5,r5,0x3ff // clear out opcode part | ||
1321 | l.ori r5,r5,0x4400 // opcode changed from l.jalr -> l.jr | ||
1322 | l.sh (TRAMP_SLOT_2+0x0)(r3),r5 // write it back | ||
1323 | |||
1324 | /* fallthrough */ | ||
1325 | |||
1326 | 1: // l.jr | ||
1327 | l.j trampoline_out | ||
1328 | l.nop | ||
1329 | |||
1330 | /* ----------------------------- */ | ||
1331 | |||
1332 | 4: // l.bnf | ||
1333 | 5: // l.bf | ||
1334 | l.slli r6,r4,6 // original offset shifted left 6 - 2 | ||
1335 | // l.srli r6,r6,6 // original offset shifted right 2 | ||
1336 | |||
1337 | l.slli r4,r2,4 // old jump position: EEA shifted left 4 | ||
1338 | // l.srli r4,r4,6 // old jump position: shifted right 2 | ||
1339 | |||
1340 | l.addi r5,r3,0xc // new jump position (physical) | ||
1341 | l.slli r5,r5,4 // new jump position: shifted left 4 | ||
1342 | |||
1343 | // calculate new jump offset | ||
1344 | // new_off = old_off + (old_jump - new_jump) | ||
1345 | |||
1346 | l.add r6,r6,r4 // (orig_off + old_jump) | ||
1347 | l.sub r6,r6,r5 // (orig_off + old_jump) - new_jump | ||
1348 | l.srli r6,r6,6 // new offset shifted right 2 | ||
1349 | |||
1350 | // r6 is new jump offset | ||
1351 | l.lwz r4,(TRAMP_SLOT_2+0x0)(r3) // load jump instruction | ||
1352 | l.srli r4,r4,16 | ||
1353 | l.andi r4,r4,0xfc00 // get opcode part | ||
1354 | l.slli r4,r4,16 | ||
1355 | l.or r6,r4,r6 // l.b(n)f new offset | ||
1356 | l.sw TRAMP_SLOT_2(r3),r6 // write it back | ||
1357 | |||
1358 | /* we need to add l.j to EEA + 0x8 */ | ||
1359 | tophys (r4,r2) // may not be needed (due to shifts down) | ||
1360 | l.addi r4,r4,(0x8 - 0x8) // jump target = r2 + 0x8 (compensate for 0x8) | ||
1361 | // jump position = r5 + 0x8 (0x8 compensated) | ||
1362 | l.sub r4,r4,r5 // jump offset = target - new_position + 0x8 | ||
1363 | |||
1364 | l.slli r4,r4,4 // the amount of info in the immediate of the jump | ||
1365 | l.srli r4,r4,6 // jump instruction with offset | ||
1366 | l.sw TRAMP_SLOT_4(r3),r4 // write it to 4th slot | ||
1367 | |||
1368 | /* fallthrough */ | ||
1369 | |||
1370 | trampoline_out: | ||
1371 | // set up new EPC to point to our trampoline code | ||
1372 | LOAD_SYMBOL_2_GPR(r5,_immu_trampoline) | ||
1373 | l.mtspr r0,r5,SPR_EPCR_BASE | ||
1374 | |||
1375 | // immu_trampoline is (4x) CACHE_LINE aligned | ||
1376 | // and only 6 instructions long, | ||
1377 | // so we need to invalidate only 2 lines | ||
1378 | |||
1379 | /* Establish cache block size | ||
1380 | If BS=0, 16; | ||
1381 | If BS=1, 32; | ||
1382 | r14 contains the block size | ||
1383 | */ | ||
1384 | l.mfspr r21,r0,SPR_ICCFGR | ||
1385 | l.andi r21,r21,SPR_ICCFGR_CBS | ||
1386 | l.srli r21,r21,7 | ||
1387 | l.ori r23,r0,16 | ||
1388 | l.sll r14,r23,r21 | ||
1389 | |||
1390 | l.mtspr r0,r5,SPR_ICBIR | ||
1391 | l.add r5,r5,r14 | ||
1392 | l.mtspr r0,r5,SPR_ICBIR | ||
1393 | |||
1394 | l.jr r9 | ||
1395 | l.nop | ||
1396 | |||
1397 | |||
1398 | /* | ||
1399 | * DSCR: prints a string referenced by r3. | ||
1400 | * | ||
1401 | * PRMS: r3 - address of the first character of null | ||
1402 | * terminated string to be printed | ||
1403 | * | ||
1404 | * PREQ: UART at UART_BASE_ADD has to be initialized | ||
1405 | * | ||
1406 | * POST: caller should be aware that r3, r9 are changed | ||
1407 | */ | ||
1408 | ENTRY(_emergency_print) | ||
1409 | EMERGENCY_PRINT_STORE_GPR4 | ||
1410 | EMERGENCY_PRINT_STORE_GPR5 | ||
1411 | EMERGENCY_PRINT_STORE_GPR6 | ||
1412 | EMERGENCY_PRINT_STORE_GPR7 | ||
1413 | 2: | ||
1414 | l.lbz r7,0(r3) | ||
1415 | l.sfeq r7,r0 | ||
1416 | l.bf 9f | ||
1417 | l.nop | ||
1418 | |||
1419 | // putc: | ||
1420 | l.movhi r4,hi(UART_BASE_ADD) | ||
1421 | |||
1422 | l.addi r6,r0,0x20 | ||
1423 | 1: l.lbz r5,5(r4) | ||
1424 | l.andi r5,r5,0x20 | ||
1425 | l.sfeq r5,r6 | ||
1426 | l.bnf 1b | ||
1427 | l.nop | ||
1428 | |||
1429 | l.sb 0(r4),r7 | ||
1430 | |||
1431 | l.addi r6,r0,0x60 | ||
1432 | 1: l.lbz r5,5(r4) | ||
1433 | l.andi r5,r5,0x60 | ||
1434 | l.sfeq r5,r6 | ||
1435 | l.bnf 1b | ||
1436 | l.nop | ||
1437 | |||
1438 | /* next character */ | ||
1439 | l.j 2b | ||
1440 | l.addi r3,r3,0x1 | ||
1441 | |||
1442 | 9: | ||
1443 | EMERGENCY_PRINT_LOAD_GPR7 | ||
1444 | EMERGENCY_PRINT_LOAD_GPR6 | ||
1445 | EMERGENCY_PRINT_LOAD_GPR5 | ||
1446 | EMERGENCY_PRINT_LOAD_GPR4 | ||
1447 | l.jr r9 | ||
1448 | l.nop | ||
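The byte-output loop above corresponds to roughly this C sketch, assuming a 16550-style UART whose line status register sits at offset 5 of UART_BASE_ADD:

	#include <stdint.h>

	static void emergency_putc(char c)
	{
		volatile uint8_t *uart = (volatile uint8_t *)UART_BASE_ADD;

		while ((uart[5] & 0x20) != 0x20)   /* wait for THR empty          */
			;
		uart[0] = (uint8_t)c;              /* transmit the character      */
		while ((uart[5] & 0x60) != 0x60)   /* wait for transmitter idle   */
			;
	}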
1449 | |||
1450 | ENTRY(_emergency_print_nr) | ||
1451 | EMERGENCY_PRINT_STORE_GPR4 | ||
1452 | EMERGENCY_PRINT_STORE_GPR5 | ||
1453 | EMERGENCY_PRINT_STORE_GPR6 | ||
1454 | EMERGENCY_PRINT_STORE_GPR7 | ||
1455 | EMERGENCY_PRINT_STORE_GPR8 | ||
1456 | |||
1457 | l.addi r8,r0,32 // shift register | ||
1458 | |||
1459 | 1: /* remove leading zeros */ | ||
1460 | l.addi r8,r8,-0x4 | ||
1461 | l.srl r7,r3,r8 | ||
1462 | l.andi r7,r7,0xf | ||
1463 | |||
1464 | /* don't skip the last zero if number == 0x0 */ | ||
1465 | l.sfeqi r8,0x4 | ||
1466 | l.bf 2f | ||
1467 | l.nop | ||
1468 | |||
1469 | l.sfeq r7,r0 | ||
1470 | l.bf 1b | ||
1471 | l.nop | ||
1472 | |||
1473 | 2: | ||
1474 | l.srl r7,r3,r8 | ||
1475 | |||
1476 | l.andi r7,r7,0xf | ||
1477 | l.sflts r8,r0 | ||
1478 | l.bf 9f | ||
1479 | |||
1480 | l.sfgtui r7,0x9 | ||
1481 | l.bnf 8f | ||
1482 | l.nop | ||
1483 | l.addi r7,r7,0x27 | ||
1484 | |||
1485 | 8: | ||
1486 | l.addi r7,r7,0x30 | ||
1487 | // putc: | ||
1488 | l.movhi r4,hi(UART_BASE_ADD) | ||
1489 | |||
1490 | l.addi r6,r0,0x20 | ||
1491 | 1: l.lbz r5,5(r4) | ||
1492 | l.andi r5,r5,0x20 | ||
1493 | l.sfeq r5,r6 | ||
1494 | l.bnf 1b | ||
1495 | l.nop | ||
1496 | |||
1497 | l.sb 0(r4),r7 | ||
1498 | |||
1499 | l.addi r6,r0,0x60 | ||
1500 | 1: l.lbz r5,5(r4) | ||
1501 | l.andi r5,r5,0x60 | ||
1502 | l.sfeq r5,r6 | ||
1503 | l.bnf 1b | ||
1504 | l.nop | ||
1505 | |||
1506 | /* next character */ | ||
1507 | l.j 2b | ||
1508 | l.addi r8,r8,-0x4 | ||
1509 | |||
1510 | 9: | ||
1511 | EMERGENCY_PRINT_LOAD_GPR8 | ||
1512 | EMERGENCY_PRINT_LOAD_GPR7 | ||
1513 | EMERGENCY_PRINT_LOAD_GPR6 | ||
1514 | EMERGENCY_PRINT_LOAD_GPR5 | ||
1515 | EMERGENCY_PRINT_LOAD_GPR4 | ||
1516 | l.jr r9 | ||
1517 | l.nop | ||
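_emergency_print_nr above walks the value in 4-bit nibbles from the top, skipping leading zeros but always printing at least one digit. A hedged C equivalent, reusing the emergency_putc() sketch from earlier:

	static void emergency_print_hex(uint32_t v)
	{
		int shift = 28;

		while (shift > 0 && ((v >> shift) & 0xf) == 0)
			shift -= 4;                              /* skip leading zeros */
		for (; shift >= 0; shift -= 4) {
			uint32_t d = (v >> shift) & 0xf;
			emergency_putc(d > 9 ? 'a' + d - 10 : '0' + d); /* +0x27, +0x30 */
		}
	}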
1518 | |||
1519 | |||
1520 | /* | ||
1521 | * This should be used for debugging only. | ||
1522 | * It messes up the Linux early serial output | ||
1523 | * somehow, so use it sparingly and essentially | ||
1524 | * only if you need to debug something that goes wrong | ||
1525 | * before Linux gets the early serial going. | ||
1526 | * | ||
1527 | * Furthermore, you'll have to make sure you set the | ||
1528 | * UART_DIVISOR correctly according to the system | ||
1529 | * clock rate. | ||
1530 | * | ||
1531 | * | ||
1532 | */ | ||
1533 | |||
1534 | |||
1535 | |||
1536 | #define SYS_CLK 20000000 | ||
1537 | //#define SYS_CLK 1843200 | ||
1538 | #define OR32_CONSOLE_BAUD 115200 | ||
1539 | #define UART_DIVISOR SYS_CLK/(16*OR32_CONSOLE_BAUD) | ||
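With the values above, UART_DIVISOR works out to 20000000 / (16 * 115200) = 20000000 / 1843200, which truncates to 10 under integer division (~10.85 exact); that is the value _early_uart_init below splits across the DLM/DLL writes.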
1540 | |||
1541 | ENTRY(_early_uart_init) | ||
1542 | l.movhi r3,hi(UART_BASE_ADD) | ||
1543 | |||
1544 | l.addi r4,r0,0x7 | ||
1545 | l.sb 0x2(r3),r4 | ||
1546 | |||
1547 | l.addi r4,r0,0x0 | ||
1548 | l.sb 0x1(r3),r4 | ||
1549 | |||
1550 | l.addi r4,r0,0x3 | ||
1551 | l.sb 0x3(r3),r4 | ||
1552 | |||
1553 | l.lbz r5,3(r3) | ||
1554 | l.ori r4,r5,0x80 | ||
1555 | l.sb 0x3(r3),r4 | ||
1556 | l.addi r4,r0,((UART_DIVISOR>>8) & 0x000000ff) | ||
1557 | l.sb UART_DLM(r3),r4 | ||
1558 | l.addi r4,r0,((UART_DIVISOR) & 0x000000ff) | ||
1559 | l.sb UART_DLL(r3),r4 | ||
1560 | l.sb 0x3(r3),r5 | ||
1561 | |||
1562 | l.jr r9 | ||
1563 | l.nop | ||
1564 | |||
1565 | _string_copying_linux: | ||
1566 | .string "\n\n\n\n\n\rCopying Linux... \0" | ||
1567 | |||
1568 | _string_ok_booting: | ||
1569 | .string "Ok, booting the kernel.\n\r\0" | ||
1570 | |||
1571 | _string_unhandled_exception: | ||
1572 | .string "\n\rRunarunaround: Unhandled exception 0x\0" | ||
1573 | |||
1574 | _string_epc_prefix: | ||
1575 | .string ": EPC=0x\0" | ||
1576 | |||
1577 | _string_nl: | ||
1578 | .string "\n\r\0" | ||
1579 | |||
1580 | .global _string_esr_irq_bug | ||
1581 | _string_esr_irq_bug: | ||
1582 | .string "\n\rESR external interrupt bug, for details look into entry.S\n\r\0" | ||
1583 | |||
1584 | |||
1585 | |||
1586 | /* ========================================[ page aligned structures ]=== */ | ||
1587 | |||
1588 | /* | ||
1589 | * .data section should be page aligned | ||
1590 | * (look into arch/or32/kernel/vmlinux.lds) | ||
1591 | */ | ||
1592 | .section .data,"aw" | ||
1593 | .align 8192 | ||
1594 | .global empty_zero_page | ||
1595 | empty_zero_page: | ||
1596 | .space 8192 | ||
1597 | |||
1598 | .global swapper_pg_dir | ||
1599 | swapper_pg_dir: | ||
1600 | .space 8192 | ||
1601 | |||
1602 | .global _unhandled_stack | ||
1603 | _unhandled_stack: | ||
1604 | .space 8192 | ||
1605 | _unhandled_stack_top: | ||
1606 | |||
1607 | /* ============================================================[ EOF ]=== */ | ||
diff --git a/arch/openrisc/kernel/idle.c b/arch/openrisc/kernel/idle.c new file mode 100644 index 00000000000..d5bc5f813e8 --- /dev/null +++ b/arch/openrisc/kernel/idle.c | |||
@@ -0,0 +1,77 @@ | |||
1 | /* | ||
2 | * OpenRISC idle.c | ||
3 | * | ||
4 | * Linux architectural port borrowing liberally from similar works of | ||
5 | * others. All original copyrights apply as per the original source | ||
6 | * declaration. | ||
7 | * | ||
8 | * Modifications for the OpenRISC architecture: | ||
9 | * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> | ||
10 | * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or | ||
13 | * modify it under the terms of the GNU General Public License | ||
14 | * as published by the Free Software Foundation; either version | ||
15 | * 2 of the License, or (at your option) any later version. | ||
16 | * | ||
17 | * Idle daemon for or32. The idle daemon handles any action | ||
18 | * that needs to be taken when the system becomes idle. | ||
19 | */ | ||
20 | |||
21 | #include <linux/errno.h> | ||
22 | #include <linux/sched.h> | ||
23 | #include <linux/kernel.h> | ||
24 | #include <linux/mm.h> | ||
25 | #include <linux/smp.h> | ||
26 | #include <linux/stddef.h> | ||
27 | #include <linux/unistd.h> | ||
28 | #include <linux/ptrace.h> | ||
29 | #include <linux/slab.h> | ||
30 | #include <linux/tick.h> | ||
31 | |||
32 | #include <asm/pgtable.h> | ||
33 | #include <asm/uaccess.h> | ||
34 | #include <asm/system.h> | ||
35 | #include <asm/io.h> | ||
36 | #include <asm/processor.h> | ||
37 | #include <asm/mmu.h> | ||
38 | #include <asm/cache.h> | ||
39 | #include <asm/pgalloc.h> | ||
40 | |||
41 | void (*powersave) (void) = NULL; | ||
42 | |||
43 | static inline void pm_idle(void) | ||
44 | { | ||
45 | barrier(); | ||
46 | } | ||
47 | |||
48 | void cpu_idle(void) | ||
49 | { | ||
50 | set_thread_flag(TIF_POLLING_NRFLAG); | ||
51 | |||
52 | /* endless idle loop with no priority at all */ | ||
53 | while (1) { | ||
54 | tick_nohz_stop_sched_tick(1); | ||
55 | |||
56 | while (!need_resched()) { | ||
57 | check_pgt_cache(); | ||
58 | rmb(); | ||
59 | |||
60 | clear_thread_flag(TIF_POLLING_NRFLAG); | ||
61 | |||
62 | local_irq_disable(); | ||
63 | /* Don't trace irqs off for idle */ | ||
64 | stop_critical_timings(); | ||
65 | if (!need_resched() && powersave != NULL) | ||
66 | powersave(); | ||
67 | start_critical_timings(); | ||
68 | local_irq_enable(); | ||
69 | set_thread_flag(TIF_POLLING_NRFLAG); | ||
70 | } | ||
71 | |||
72 | tick_nohz_restart_sched_tick(); | ||
73 | preempt_enable_no_resched(); | ||
74 | schedule(); | ||
75 | preempt_disable(); | ||
76 | } | ||
77 | } | ||
diff --git a/arch/openrisc/kernel/init_task.c b/arch/openrisc/kernel/init_task.c new file mode 100644 index 00000000000..45744a38492 --- /dev/null +++ b/arch/openrisc/kernel/init_task.c | |||
@@ -0,0 +1,41 @@ | |||
1 | /* | ||
2 | * OpenRISC init_task.c | ||
3 | * | ||
4 | * Linux architectural port borrowing liberally from similar works of | ||
5 | * others. All original copyrights apply as per the original source | ||
6 | * declaration. | ||
7 | * | ||
8 | * Modifications for the OpenRISC architecture: | ||
9 | * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> | ||
10 | * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or | ||
13 | * modify it under the terms of the GNU General Public License | ||
14 | * as published by the Free Software Foundation; either version | ||
15 | * 2 of the License, or (at your option) any later version. | ||
16 | */ | ||
17 | |||
18 | #include <linux/init_task.h> | ||
19 | #include <linux/mqueue.h> | ||
20 | |||
21 | static struct signal_struct init_signals = INIT_SIGNALS(init_signals); | ||
22 | static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); | ||
23 | |||
24 | /* | ||
25 | * Initial thread structure. | ||
26 | * | ||
27 | * We need to make sure that this is THREAD_SIZE aligned due to the | ||
28 | * way process stacks are handled. This is done by having a special | ||
29 | * "init_task" linker map entry.. | ||
30 | */ | ||
31 | union thread_union init_thread_union __init_task_data = { | ||
32 | INIT_THREAD_INFO(init_task) | ||
33 | }; | ||
34 | |||
35 | /* | ||
36 | * Initial task structure. | ||
37 | * | ||
38 | * All other task structs will be allocated on slabs in fork.c | ||
39 | */ | ||
40 | struct task_struct init_task = INIT_TASK(init_task); | ||
41 | EXPORT_SYMBOL(init_task); | ||
diff --git a/arch/openrisc/kernel/irq.c b/arch/openrisc/kernel/irq.c new file mode 100644 index 00000000000..59b30233833 --- /dev/null +++ b/arch/openrisc/kernel/irq.c | |||
@@ -0,0 +1,172 @@ | |||
1 | /* | ||
2 | * OpenRISC irq.c | ||
3 | * | ||
4 | * Linux architectural port borrowing liberally from similar works of | ||
5 | * others. All original copyrights apply as per the original source | ||
6 | * declaration. | ||
7 | * | ||
8 | * Modifications for the OpenRISC architecture: | ||
9 | * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or | ||
12 | * modify it under the terms of the GNU General Public License | ||
13 | * as published by the Free Software Foundation; either version | ||
14 | * 2 of the License, or (at your option) any later version. | ||
15 | */ | ||
16 | |||
17 | #include <linux/ptrace.h> | ||
18 | #include <linux/errno.h> | ||
19 | #include <linux/interrupt.h> | ||
20 | #include <linux/init.h> | ||
21 | #include <linux/of.h> | ||
22 | #include <linux/ftrace.h> | ||
23 | #include <linux/irq.h> | ||
24 | #include <linux/seq_file.h> | ||
25 | #include <linux/kernel_stat.h> | ||
26 | |||
27 | #include <linux/irqflags.h> | ||
28 | |||
29 | /* read interrupt enabled status */ | ||
30 | unsigned long arch_local_save_flags(void) | ||
31 | { | ||
32 | return mfspr(SPR_SR) & (SPR_SR_IEE|SPR_SR_TEE); | ||
33 | } | ||
34 | EXPORT_SYMBOL(arch_local_save_flags); | ||
35 | |||
36 | /* set interrupt enabled status */ | ||
37 | void arch_local_irq_restore(unsigned long flags) | ||
38 | { | ||
39 | mtspr(SPR_SR, ((mfspr(SPR_SR) & ~(SPR_SR_IEE|SPR_SR_TEE)) | flags)); | ||
40 | } | ||
41 | EXPORT_SYMBOL(arch_local_irq_restore); | ||
42 | |||
43 | |||
44 | /* OR1K PIC implementation */ | ||
45 | |||
46 | /* We're a couple of cycles faster than the generic implementations with | ||
47 | * these 'fast' versions. | ||
48 | */ | ||
49 | |||
50 | static void or1k_pic_mask(struct irq_data *data) | ||
51 | { | ||
52 | mtspr(SPR_PICMR, mfspr(SPR_PICMR) & ~(1UL << data->irq)); | ||
53 | } | ||
54 | |||
55 | static void or1k_pic_unmask(struct irq_data *data) | ||
56 | { | ||
57 | mtspr(SPR_PICMR, mfspr(SPR_PICMR) | (1UL << data->irq)); | ||
58 | } | ||
59 | |||
60 | static void or1k_pic_ack(struct irq_data *data) | ||
61 | { | ||
62 | /* EDGE-triggered interrupts need to be ack'ed in order to clear | ||
63 | * the latch. | ||
64 | * LEVEL-triggered interrupts do not need to be ack'ed; however, | ||
65 | * ack'ing the interrupt has no ill-effect and is quicker than | ||
66 | * trying to figure out what type it is... | ||
67 | */ | ||
68 | |||
69 | /* The OpenRISC 1000 spec says to write a 1 to the bit to ack the | ||
70 | * interrupt, but the OR1200 does this backwards and requires a 0 | ||
71 | * to be written... | ||
72 | */ | ||
73 | |||
74 | #ifdef CONFIG_OR1K_1200 | ||
75 | /* There are two oddities with the OR1200 PIC implementation: | ||
76 | * i) LEVEL-triggered interrupts are latched and need to be cleared | ||
77 | * ii) the interrupt latch is cleared by writing a 0 to the bit, | ||
78 | * as opposed to a 1 as mandated by the spec | ||
79 | */ | ||
80 | |||
81 | mtspr(SPR_PICSR, mfspr(SPR_PICSR) & ~(1UL << data->irq)); | ||
82 | #else | ||
83 | WARN(1, "Interrupt handling possibly broken\n"); | ||
84 | mtspr(SPR_PICSR, (1UL << data->irq)); | ||
85 | #endif | ||
86 | } | ||
87 | |||
88 | static void or1k_pic_mask_ack(struct irq_data *data) | ||
89 | { | ||
90 | /* Comments for pic_ack apply here, too */ | ||
91 | |||
92 | #ifdef CONFIG_OR1K_1200 | ||
93 | mtspr(SPR_PICSR, mfspr(SPR_PICSR) & ~(1UL << data->irq)); | ||
94 | #else | ||
95 | WARN(1, "Interrupt handling possibly broken\n"); | ||
96 | mtspr(SPR_PICSR, (1UL << data->irq)); | ||
97 | #endif | ||
98 | } | ||
99 | |||
100 | static int or1k_pic_set_type(struct irq_data *data, unsigned int flow_type) | ||
101 | { | ||
102 | /* There's nothing to do in the PIC configuration when changing | ||
103 | * flow type. Level and edge-triggered interrupts are both | ||
104 | * supported, but it's PIC-implementation specific which type | ||
105 | * is handled. */ | ||
106 | |||
107 | return irq_setup_alt_chip(data, flow_type); | ||
108 | } | ||
109 | |||
110 | static inline int pic_get_irq(int first) | ||
111 | { | ||
112 | int irq; | ||
113 | |||
114 | irq = ffs(mfspr(SPR_PICSR) >> first); | ||
115 | |||
116 | return irq ? irq + first - 1 : NO_IRQ; | ||
117 | } | ||
118 | |||
119 | static void __init or1k_irq_init(void) | ||
120 | { | ||
121 | struct irq_chip_generic *gc; | ||
122 | struct irq_chip_type *ct; | ||
123 | |||
124 | /* Disable all interrupts until explicitly requested */ | ||
125 | mtspr(SPR_PICMR, (0UL)); | ||
126 | |||
127 | gc = irq_alloc_generic_chip("or1k-PIC", 1, 0, 0, handle_level_irq); | ||
128 | ct = gc->chip_types; | ||
129 | |||
130 | ct->chip.irq_unmask = or1k_pic_unmask; | ||
131 | ct->chip.irq_mask = or1k_pic_mask; | ||
132 | ct->chip.irq_ack = or1k_pic_ack; | ||
133 | ct->chip.irq_mask_ack = or1k_pic_mask_ack; | ||
134 | ct->chip.irq_set_type = or1k_pic_set_type; | ||
135 | |||
136 | /* The OR1K PIC can handle both level- and edge-triggered | ||
137 | * interrupts in roughly the same manner | ||
138 | */ | ||
139 | #if 0 | ||
140 | /* FIXME: chip.type??? */ | ||
141 | ct->chip.type = IRQ_TYPE_EDGE_BOTH | IRQ_TYPE_LEVEL_MASK; | ||
142 | #endif | ||
143 | |||
144 | irq_setup_generic_chip(gc, IRQ_MSK(NR_IRQS), 0, | ||
145 | IRQ_NOREQUEST, IRQ_LEVEL | IRQ_NOPROBE); | ||
146 | } | ||
147 | |||
148 | void __init init_IRQ(void) | ||
149 | { | ||
150 | or1k_irq_init(); | ||
151 | } | ||
152 | |||
153 | void __irq_entry do_IRQ(struct pt_regs *regs) | ||
154 | { | ||
155 | int irq = -1; | ||
156 | struct pt_regs *old_regs = set_irq_regs(regs); | ||
157 | |||
158 | irq_enter(); | ||
159 | |||
160 | while ((irq = pic_get_irq(irq + 1)) != NO_IRQ) | ||
161 | generic_handle_irq(irq); | ||
162 | |||
163 | irq_exit(); | ||
164 | set_irq_regs(old_regs); | ||
165 | } | ||
166 | |||
167 | unsigned int irq_create_of_mapping(struct device_node *controller, | ||
168 | const u32 *intspec, unsigned int intsize) | ||
169 | { | ||
170 | return intspec[0]; | ||
171 | } | ||
172 | EXPORT_SYMBOL_GPL(irq_create_of_mapping); | ||
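The dispatch loop in do_IRQ() above repeatedly asks pic_get_irq() for the lowest pending source at or above the last one handled. A user-space sketch of that scan, assuming a 32-bit PICSR-like word and the POSIX ffs(); NO_IRQ and the sample bitmask are stand-in values, not the kernel's.

    #include <stdio.h>
    #include <strings.h>

    #define NO_IRQ (-1)

    static int pic_get_irq_demo(unsigned int picsr, int first)
    {
            int irq = ffs(picsr >> first);        /* 1-based index of lowest set bit */

            return irq ? irq + first - 1 : NO_IRQ;
    }

    int main(void)
    {
            unsigned int picsr = (1u << 2) | (1u << 5);   /* IRQs 2 and 5 pending */
            int irq = -1;

            /* same loop shape as do_IRQ(): keep scanning above the last hit */
            while ((irq = pic_get_irq_demo(picsr, irq + 1)) != NO_IRQ)
                    printf("dispatch irq %d\n", irq);

            return 0;
    }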
diff --git a/arch/openrisc/kernel/module.c b/arch/openrisc/kernel/module.c new file mode 100644 index 00000000000..10ff50f0202 --- /dev/null +++ b/arch/openrisc/kernel/module.c | |||
@@ -0,0 +1,72 @@ | |||
1 | /* | ||
2 | * OpenRISC module.c | ||
3 | * | ||
4 | * Linux architectural port borrowing liberally from similar works of | ||
5 | * others. All original copyrights apply as per the original source | ||
6 | * declaration. | ||
7 | * | ||
8 | * Modifications for the OpenRISC architecture: | ||
9 | * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or | ||
12 | * modify it under the terms of the GNU General Public License | ||
13 | * as published by the Free Software Foundation; either version | ||
14 | * 2 of the License, or (at your option) any later version. | ||
15 | */ | ||
16 | |||
17 | #include <linux/moduleloader.h> | ||
18 | #include <linux/elf.h> | ||
19 | |||
20 | int apply_relocate_add(Elf32_Shdr *sechdrs, | ||
21 | const char *strtab, | ||
22 | unsigned int symindex, | ||
23 | unsigned int relsec, | ||
24 | struct module *me) | ||
25 | { | ||
26 | unsigned int i; | ||
27 | Elf32_Rela *rel = (void *)sechdrs[relsec].sh_addr; | ||
28 | Elf32_Sym *sym; | ||
29 | uint32_t *location; | ||
30 | uint32_t value; | ||
31 | |||
32 | pr_debug("Applying relocate section %u to %u\n", relsec, | ||
33 | sechdrs[relsec].sh_info); | ||
34 | for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { | ||
35 | /* This is where to make the change */ | ||
36 | location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr | ||
37 | + rel[i].r_offset; | ||
38 | |||
39 | /* This is the symbol it is referring to. Note that all | ||
40 | undefined symbols have been resolved. */ | ||
41 | sym = (Elf32_Sym *)sechdrs[symindex].sh_addr | ||
42 | + ELF32_R_SYM(rel[i].r_info); | ||
43 | value = sym->st_value + rel[i].r_addend; | ||
44 | |||
45 | switch (ELF32_R_TYPE(rel[i].r_info)) { | ||
46 | case R_OR32_32: | ||
47 | *location = value; | ||
48 | break; | ||
49 | case R_OR32_CONST: | ||
50 | location = (uint16_t *)location + 1; | ||
51 | *((uint16_t *)location) = (uint16_t) (value); | ||
52 | break; | ||
53 | case R_OR32_CONSTH: | ||
54 | location = (uint16_t *)location + 1; | ||
55 | *((uint16_t *)location) = (uint16_t) (value >> 16); | ||
56 | break; | ||
57 | case R_OR32_JUMPTARG: | ||
58 | value -= (uint32_t)location; | ||
59 | value >>= 2; | ||
60 | value &= 0x03ffffff; | ||
61 | value |= *location & 0xfc000000; | ||
62 | *location = value; | ||
63 | break; | ||
64 | default: | ||
65 | pr_err("module %s: Unknown relocation: %u\n", | ||
66 | me->name, ELF32_R_TYPE(rel[i].r_info)); | ||
67 | break; | ||
68 | } | ||
69 | } | ||
70 | |||
71 | return 0; | ||
72 | } | ||
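The R_OR32_JUMPTARG case above patches the low 26-bit displacement field of a branch/jump instruction with the word distance to the target while preserving the opcode bits. A self-contained sketch of that arithmetic; the opcode and addresses are made-up example values, not taken from a real module.

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t relocate_jumptarg(uint32_t insn, uint32_t location, uint32_t target)
    {
            uint32_t value = target - location;   /* pc-relative byte distance */

            value >>= 2;                          /* instructions are 4 bytes wide */
            value &= 0x03ffffff;                  /* 26-bit displacement field */
            value |= insn & 0xfc000000;           /* keep the 6-bit opcode */
            return value;
    }

    int main(void)
    {
            uint32_t insn = 0x04000000;           /* example jump with empty target */
            uint32_t patched = relocate_jumptarg(insn, 0x1000, 0x1040);

            printf("patched instruction: 0x%08x\n", patched);  /* 0x04000010 */
            return 0;
    }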
diff --git a/arch/openrisc/kernel/or32_ksyms.c b/arch/openrisc/kernel/or32_ksyms.c new file mode 100644 index 00000000000..83ccf7c0c58 --- /dev/null +++ b/arch/openrisc/kernel/or32_ksyms.c | |||
@@ -0,0 +1,46 @@ | |||
1 | /* | ||
2 | * OpenRISC or32_ksyms.c | ||
3 | * | ||
4 | * Linux architectural port borrowing liberally from similar works of | ||
5 | * others. All original copyrights apply as per the original source | ||
6 | * declaration. | ||
7 | * | ||
8 | * Modifications for the OpenRISC architecture: | ||
9 | * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> | ||
10 | * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or | ||
13 | * modify it under the terms of the GNU General Public License | ||
14 | * as published by the Free Software Foundation; either version | ||
15 | * 2 of the License, or (at your option) any later version. | ||
16 | */ | ||
17 | |||
18 | #include <linux/module.h> | ||
19 | #include <linux/elfcore.h> | ||
20 | #include <linux/sched.h> | ||
21 | #include <linux/in6.h> | ||
22 | #include <linux/interrupt.h> | ||
23 | #include <linux/vmalloc.h> | ||
24 | #include <linux/semaphore.h> | ||
25 | |||
26 | #include <asm/processor.h> | ||
27 | #include <asm/uaccess.h> | ||
28 | #include <asm/checksum.h> | ||
29 | #include <asm/io.h> | ||
30 | #include <asm/hardirq.h> | ||
31 | #include <asm/delay.h> | ||
32 | #include <asm/pgalloc.h> | ||
33 | |||
34 | #define DECLARE_EXPORT(name) extern void name(void); EXPORT_SYMBOL(name) | ||
35 | |||
36 | /* compiler generated symbols */ | ||
37 | DECLARE_EXPORT(__udivsi3); | ||
38 | DECLARE_EXPORT(__divsi3); | ||
39 | DECLARE_EXPORT(__umodsi3); | ||
40 | DECLARE_EXPORT(__modsi3); | ||
41 | DECLARE_EXPORT(__muldi3); | ||
42 | DECLARE_EXPORT(__ashrdi3); | ||
43 | DECLARE_EXPORT(__ashldi3); | ||
44 | DECLARE_EXPORT(__lshrdi3); | ||
45 | |||
46 | EXPORT_SYMBOL(__copy_tofrom_user); | ||
diff --git a/arch/openrisc/kernel/process.c b/arch/openrisc/kernel/process.c new file mode 100644 index 00000000000..e4209af879e --- /dev/null +++ b/arch/openrisc/kernel/process.c | |||
@@ -0,0 +1,311 @@ | |||
1 | /* | ||
2 | * OpenRISC process.c | ||
3 | * | ||
4 | * Linux architectural port borrowing liberally from similar works of | ||
5 | * others. All original copyrights apply as per the original source | ||
6 | * declaration. | ||
7 | * | ||
8 | * Modifications for the OpenRISC architecture: | ||
9 | * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> | ||
10 | * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or | ||
13 | * modify it under the terms of the GNU General Public License | ||
14 | * as published by the Free Software Foundation; either version | ||
15 | * 2 of the License, or (at your option) any later version. | ||
16 | * | ||
17 | * This file handles the architecture-dependent parts of process handling... | ||
18 | */ | ||
19 | |||
20 | #define __KERNEL_SYSCALLS__ | ||
21 | #include <stdarg.h> | ||
22 | |||
23 | #include <linux/errno.h> | ||
24 | #include <linux/sched.h> | ||
25 | #include <linux/kernel.h> | ||
26 | #include <linux/module.h> | ||
27 | #include <linux/mm.h> | ||
28 | #include <linux/stddef.h> | ||
29 | #include <linux/unistd.h> | ||
30 | #include <linux/ptrace.h> | ||
31 | #include <linux/slab.h> | ||
32 | #include <linux/elfcore.h> | ||
33 | #include <linux/interrupt.h> | ||
34 | #include <linux/delay.h> | ||
35 | #include <linux/init_task.h> | ||
36 | #include <linux/mqueue.h> | ||
37 | #include <linux/fs.h> | ||
38 | |||
39 | #include <asm/uaccess.h> | ||
40 | #include <asm/pgtable.h> | ||
41 | #include <asm/system.h> | ||
42 | #include <asm/io.h> | ||
43 | #include <asm/processor.h> | ||
44 | #include <asm/spr_defs.h> | ||
45 | |||
46 | #include <linux/smp.h> | ||
47 | |||
48 | /* | ||
49 | * Pointers to the current thread_info structure, one per CPU. | ||
50 | * | ||
51 | * Used at user space -> kernel transitions. | ||
52 | */ | ||
53 | struct thread_info *current_thread_info_set[NR_CPUS] = { &init_thread_info, }; | ||
54 | |||
55 | void machine_restart(void) | ||
56 | { | ||
57 | printk(KERN_INFO "*** MACHINE RESTART ***\n"); | ||
58 | __asm__("l.nop 1"); | ||
59 | } | ||
60 | |||
61 | /* | ||
62 | * Similar to machine_power_off, but doesn't shut off power. Add code | ||
63 | * here to freeze the system, e.g. for post-mortem debugging, when | ||
64 | * possible. This halt has nothing to do with the idle halt. | ||
65 | */ | ||
66 | void machine_halt(void) | ||
67 | { | ||
68 | printk(KERN_INFO "*** MACHINE HALT ***\n"); | ||
69 | __asm__("l.nop 1"); | ||
70 | } | ||
71 | |||
72 | /* If or when software power-off is implemented, add code here. */ | ||
73 | void machine_power_off(void) | ||
74 | { | ||
75 | printk(KERN_INFO "*** MACHINE POWER OFF ***\n"); | ||
76 | __asm__("l.nop 1"); | ||
77 | } | ||
78 | |||
79 | void (*pm_power_off) (void) = machine_power_off; | ||
80 | |||
81 | /* | ||
82 | * When a process does an "exec", machine state like FPU and debug | ||
83 | * registers need to be reset. This is a hook function for that. | ||
84 | * Currently we don't have any such state to reset, so this is empty. | ||
85 | */ | ||
86 | void flush_thread(void) | ||
87 | { | ||
88 | } | ||
89 | |||
90 | void show_regs(struct pt_regs *regs) | ||
91 | { | ||
92 | extern void show_registers(struct pt_regs *regs); | ||
93 | |||
94 | /* __PHX__ cleanup this mess */ | ||
95 | show_registers(regs); | ||
96 | } | ||
97 | |||
98 | unsigned long thread_saved_pc(struct task_struct *t) | ||
99 | { | ||
100 | return (unsigned long)user_regs(t->stack)->pc; | ||
101 | } | ||
102 | |||
103 | void release_thread(struct task_struct *dead_task) | ||
104 | { | ||
105 | } | ||
106 | |||
107 | /* | ||
108 | * Copy the thread-specific (arch specific) info from the current | ||
109 | * process to the new one p | ||
110 | */ | ||
111 | extern asmlinkage void ret_from_fork(void); | ||
112 | |||
113 | int | ||
114 | copy_thread(unsigned long clone_flags, unsigned long usp, | ||
115 | unsigned long unused, struct task_struct *p, struct pt_regs *regs) | ||
116 | { | ||
117 | struct pt_regs *childregs; | ||
118 | struct pt_regs *kregs; | ||
119 | unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE; | ||
120 | struct thread_info *ti; | ||
121 | unsigned long top_of_kernel_stack; | ||
122 | |||
123 | top_of_kernel_stack = sp; | ||
124 | |||
125 | p->set_child_tid = p->clear_child_tid = NULL; | ||
126 | |||
127 | /* Copy registers */ | ||
128 | /* redzone */ | ||
129 | sp -= STACK_FRAME_OVERHEAD; | ||
130 | sp -= sizeof(struct pt_regs); | ||
131 | childregs = (struct pt_regs *)sp; | ||
132 | |||
133 | /* Copy parent registers */ | ||
134 | *childregs = *regs; | ||
135 | |||
136 | if ((childregs->sr & SPR_SR_SM) == 1) { | ||
137 | /* for kernel thread, set `current_thread_info' | ||
138 | * and stackptr in new task | ||
139 | */ | ||
140 | childregs->sp = (unsigned long)task_stack_page(p) + THREAD_SIZE; | ||
141 | childregs->gpr[10] = (unsigned long)task_thread_info(p); | ||
142 | } else { | ||
143 | childregs->sp = usp; | ||
144 | } | ||
145 | |||
146 | childregs->gpr[11] = 0; /* Result from fork() */ | ||
147 | |||
148 | /* | ||
149 | * The way this works is that at some point in the future | ||
150 | * some task will call _switch to switch to the new task. | ||
151 | * That will pop off the stack frame created below and start | ||
152 | * the new task running at ret_from_fork. The new task will | ||
153 | * do some house keeping and then return from the fork or clone | ||
154 | * system call, using the stack frame created above. | ||
155 | */ | ||
156 | /* redzone */ | ||
157 | sp -= STACK_FRAME_OVERHEAD; | ||
158 | sp -= sizeof(struct pt_regs); | ||
159 | kregs = (struct pt_regs *)sp; | ||
160 | |||
161 | ti = task_thread_info(p); | ||
162 | ti->ksp = sp; | ||
163 | |||
164 | /* kregs->sp must store the location of the 'pre-switch' kernel stack | ||
165 | * pointer... for a newly forked process, this is simply the top of | ||
166 | * the kernel stack. | ||
167 | */ | ||
168 | kregs->sp = top_of_kernel_stack; | ||
169 | kregs->gpr[3] = (unsigned long)current; /* arg to schedule_tail */ | ||
170 | kregs->gpr[10] = (unsigned long)task_thread_info(p); | ||
171 | kregs->gpr[9] = (unsigned long)ret_from_fork; | ||
172 | |||
173 | return 0; | ||
174 | } | ||
175 | |||
176 | /* | ||
177 | * Set up a thread for executing a new program | ||
178 | */ | ||
179 | void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp) | ||
180 | { | ||
181 | unsigned long sr = regs->sr & ~SPR_SR_SM; | ||
182 | |||
183 | set_fs(USER_DS); | ||
184 | memset(regs->gpr, 0, sizeof(regs->gpr)); | ||
185 | |||
186 | regs->pc = pc; | ||
187 | regs->sr = sr; | ||
188 | regs->sp = sp; | ||
189 | |||
190 | /* printk("start thread, ksp = %lx\n", current_thread_info()->ksp);*/ | ||
191 | } | ||
192 | |||
193 | /* Fill in the fpu structure for a core dump. */ | ||
194 | int dump_fpu(struct pt_regs *regs, elf_fpregset_t * fpu) | ||
195 | { | ||
196 | /* TODO */ | ||
197 | return 0; | ||
198 | } | ||
199 | |||
200 | extern struct thread_info *_switch(struct thread_info *old_ti, | ||
201 | struct thread_info *new_ti); | ||
202 | |||
203 | struct task_struct *__switch_to(struct task_struct *old, | ||
204 | struct task_struct *new) | ||
205 | { | ||
206 | struct task_struct *last; | ||
207 | struct thread_info *new_ti, *old_ti; | ||
208 | unsigned long flags; | ||
209 | |||
210 | local_irq_save(flags); | ||
211 | |||
212 | /* current_thread_info_set is an array of saved thread_info pointers | ||
213 | * (one for each cpu). We need them at user->kernel transitions, | ||
214 | * while we save them at kernel->user transitions. | ||
215 | */ | ||
216 | new_ti = new->stack; | ||
217 | old_ti = old->stack; | ||
218 | |||
219 | current_thread_info_set[smp_processor_id()] = new_ti; | ||
220 | last = (_switch(old_ti, new_ti))->task; | ||
221 | |||
222 | local_irq_restore(flags); | ||
223 | |||
224 | return last; | ||
225 | } | ||
226 | |||
227 | /* | ||
228 | * Write out registers in core dump format, as defined by the | ||
229 | * struct user_regs_struct | ||
230 | */ | ||
231 | void dump_elf_thread(elf_greg_t *dest, struct pt_regs* regs) | ||
232 | { | ||
233 | dest[0] = 0; /* r0 */ | ||
234 | memcpy(dest+1, regs->gpr+1, 31*sizeof(unsigned long)); | ||
235 | dest[32] = regs->pc; | ||
236 | dest[33] = regs->sr; | ||
237 | dest[34] = 0; | ||
238 | dest[35] = 0; | ||
239 | } | ||
240 | |||
241 | extern void _kernel_thread_helper(void); | ||
242 | |||
243 | void __noreturn kernel_thread_helper(int (*fn) (void *), void *arg) | ||
244 | { | ||
245 | do_exit(fn(arg)); | ||
246 | } | ||
247 | |||
248 | /* | ||
249 | * Create a kernel thread. | ||
250 | */ | ||
251 | int kernel_thread(int (*fn) (void *), void *arg, unsigned long flags) | ||
252 | { | ||
253 | struct pt_regs regs; | ||
254 | |||
255 | memset(®s, 0, sizeof(regs)); | ||
256 | |||
257 | regs.gpr[20] = (unsigned long)fn; | ||
258 | regs.gpr[22] = (unsigned long)arg; | ||
259 | regs.sr = mfspr(SPR_SR); | ||
260 | regs.pc = (unsigned long)_kernel_thread_helper; | ||
261 | |||
262 | return do_fork(flags | CLONE_VM | CLONE_UNTRACED, | ||
263 | 0, ®s, 0, NULL, NULL); | ||
264 | } | ||
265 | |||
266 | /* | ||
267 | * sys_execve() executes a new program. | ||
268 | */ | ||
269 | asmlinkage long _sys_execve(const char __user *name, | ||
270 | const char __user * const __user *argv, | ||
271 | const char __user * const __user *envp, | ||
272 | struct pt_regs *regs) | ||
273 | { | ||
274 | int error; | ||
275 | char *filename; | ||
276 | |||
277 | filename = getname(name); | ||
278 | error = PTR_ERR(filename); | ||
279 | |||
280 | if (IS_ERR(filename)) | ||
281 | goto out; | ||
282 | |||
283 | error = do_execve(filename, argv, envp, regs); | ||
284 | putname(filename); | ||
285 | |||
286 | out: | ||
287 | return error; | ||
288 | } | ||
289 | |||
290 | unsigned long get_wchan(struct task_struct *p) | ||
291 | { | ||
292 | /* TODO */ | ||
293 | |||
294 | return 0; | ||
295 | } | ||
296 | |||
297 | int kernel_execve(const char *filename, char *const argv[], char *const envp[]) | ||
298 | { | ||
299 | register long __res asm("r11") = __NR_execve; | ||
300 | register long __a asm("r3") = (long)(filename); | ||
301 | register long __b asm("r4") = (long)(argv); | ||
302 | register long __c asm("r5") = (long)(envp); | ||
303 | __asm__ volatile ("l.sys 1" | ||
304 | : "=r" (__res), "=r"(__a), "=r"(__b), "=r"(__c) | ||
305 | : "0"(__res), "1"(__a), "2"(__b), "3"(__c) | ||
306 | : "r6", "r7", "r8", "r12", "r13", "r15", | ||
307 | "r17", "r19", "r21", "r23", "r25", "r27", | ||
308 | "r29", "r31"); | ||
309 | __asm__ volatile ("l.nop"); | ||
310 | return __res; | ||
311 | } | ||
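copy_thread() above carves two pt_regs frames out of the top of the child's kernel stack: the user register copy (childregs) and the frame that _switch pops when the child first runs (kregs), each below a STACK_FRAME_OVERHEAD redzone. A user-space sketch of that layout arithmetic; the sizes and the stack base are stand-ins, not the real kernel constants.

    #include <stdio.h>

    #define THREAD_SIZE          8192          /* illustrative */
    #define STACK_FRAME_OVERHEAD  128          /* illustrative */
    #define PT_REGS_SIZE         (36 * 4)      /* ~32 GPRs + PC + SR + pad, illustrative */

    int main(void)
    {
            unsigned long stack_base = 0x10000;            /* task_stack_page(p) */
            unsigned long sp = stack_base + THREAD_SIZE;   /* top of kernel stack */
            unsigned long top_of_kernel_stack = sp;

            sp -= STACK_FRAME_OVERHEAD;                    /* redzone */
            sp -= PT_REGS_SIZE;
            unsigned long childregs = sp;                  /* copy of parent regs */

            sp -= STACK_FRAME_OVERHEAD;                    /* redzone */
            sp -= PT_REGS_SIZE;
            unsigned long kregs = sp;                      /* frame popped by _switch */

            printf("top of stack: 0x%lx\n", top_of_kernel_stack);
            printf("childregs at: 0x%lx\n", childregs);
            printf("kregs at:     0x%lx  (this value becomes ti->ksp)\n", kregs);
            return 0;
    }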
diff --git a/arch/openrisc/kernel/prom.c b/arch/openrisc/kernel/prom.c new file mode 100644 index 00000000000..1bb58ba89af --- /dev/null +++ b/arch/openrisc/kernel/prom.c | |||
@@ -0,0 +1,108 @@ | |||
1 | /* | ||
2 | * OpenRISC prom.c | ||
3 | * | ||
4 | * Linux architectural port borrowing liberally from similar works of | ||
5 | * others. All original copyrights apply as per the original source | ||
6 | * declaration. | ||
7 | * | ||
8 | * Modifications for the OpenRISC architecture: | ||
9 | * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or | ||
12 | * modify it under the terms of the GNU General Public License | ||
13 | * as published by the Free Software Foundation; either version | ||
14 | * 2 of the License, or (at your option) any later version. | ||
15 | * | ||
16 | * Architecture specific procedures for creating, accessing and | ||
17 | * interpreting the device tree. | ||
18 | * | ||
19 | */ | ||
20 | |||
21 | #include <stdarg.h> | ||
22 | #include <linux/kernel.h> | ||
23 | #include <linux/string.h> | ||
24 | #include <linux/init.h> | ||
25 | #include <linux/threads.h> | ||
26 | #include <linux/spinlock.h> | ||
27 | #include <linux/types.h> | ||
28 | #include <linux/pci.h> | ||
29 | #include <linux/stringify.h> | ||
30 | #include <linux/delay.h> | ||
31 | #include <linux/initrd.h> | ||
32 | #include <linux/bitops.h> | ||
33 | #include <linux/module.h> | ||
34 | #include <linux/kexec.h> | ||
35 | #include <linux/debugfs.h> | ||
36 | #include <linux/irq.h> | ||
37 | #include <linux/memblock.h> | ||
38 | #include <linux/of_fdt.h> | ||
39 | |||
40 | #include <asm/prom.h> | ||
41 | #include <asm/page.h> | ||
42 | #include <asm/processor.h> | ||
43 | #include <asm/irq.h> | ||
44 | #include <linux/io.h> | ||
45 | #include <asm/system.h> | ||
46 | #include <asm/mmu.h> | ||
47 | #include <asm/pgtable.h> | ||
48 | #include <asm/sections.h> | ||
49 | #include <asm/setup.h> | ||
50 | |||
51 | extern char cmd_line[COMMAND_LINE_SIZE]; | ||
52 | |||
53 | void __init early_init_dt_add_memory_arch(u64 base, u64 size) | ||
54 | { | ||
55 | size &= PAGE_MASK; | ||
56 | memblock_add(base, size); | ||
57 | } | ||
58 | |||
59 | void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align) | ||
60 | { | ||
61 | return __va(memblock_alloc(size, align)); | ||
62 | } | ||
63 | |||
64 | void __init early_init_devtree(void *params) | ||
65 | { | ||
66 | void *alloc; | ||
67 | |||
68 | /* Setup flat device-tree pointer */ | ||
69 | initial_boot_params = params; | ||
70 | |||
71 | |||
72 | /* Retrieve various information from the /chosen node of the | ||
73 | * device-tree, including the platform type, initrd location and | ||
74 | * size, TCE reserve, and more ... | ||
75 | */ | ||
76 | of_scan_flat_dt(early_init_dt_scan_chosen, cmd_line); | ||
77 | |||
78 | /* Scan memory nodes and rebuild MEMBLOCKs */ | ||
79 | memblock_init(); | ||
80 | of_scan_flat_dt(early_init_dt_scan_root, NULL); | ||
81 | of_scan_flat_dt(early_init_dt_scan_memory, NULL); | ||
82 | |||
83 | /* Save command line for /proc/cmdline and then parse parameters */ | ||
84 | strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE); | ||
85 | |||
86 | memblock_analyze(); | ||
87 | |||
88 | /* We must copy the flattened device tree from init memory to regular | ||
89 | * memory because the device tree references the strings in it | ||
90 | * directly. | ||
91 | */ | ||
92 | |||
93 | alloc = __va(memblock_alloc(initial_boot_params->totalsize, PAGE_SIZE)); | ||
94 | |||
95 | memcpy(alloc, initial_boot_params, initial_boot_params->totalsize); | ||
96 | |||
97 | initial_boot_params = alloc; | ||
98 | } | ||
99 | |||
100 | #ifdef CONFIG_BLK_DEV_INITRD | ||
101 | void __init early_init_dt_setup_initrd_arch(unsigned long start, | ||
102 | unsigned long end) | ||
103 | { | ||
104 | initrd_start = (unsigned long)__va(start); | ||
105 | initrd_end = (unsigned long)__va(end); | ||
106 | initrd_below_start_ok = 1; | ||
107 | } | ||
108 | #endif | ||
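early_init_dt_add_memory_arch() above rounds the size reported by the device tree down to whole pages before handing it to memblock. A small sketch of that alignment; the 8 KiB page size is an assumption based on this port's configuration, and the sample size is made up.

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SIZE 8192UL                       /* assumed page size */
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    int main(void)
    {
            uint64_t size = 0x2000123;             /* odd region size from the DT */

            printf("raw size:     0x%llx\n", (unsigned long long)size);
            size &= PAGE_MASK;                     /* drop the partial trailing page */
            printf("aligned size: 0x%llx\n", (unsigned long long)size);
            return 0;
    }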
diff --git a/arch/openrisc/kernel/ptrace.c b/arch/openrisc/kernel/ptrace.c new file mode 100644 index 00000000000..656b94beab8 --- /dev/null +++ b/arch/openrisc/kernel/ptrace.c | |||
@@ -0,0 +1,211 @@ | |||
1 | /* | ||
2 | * OpenRISC ptrace.c | ||
3 | * | ||
4 | * Linux architectural port borrowing liberally from similar works of | ||
5 | * others. All original copyrights apply as per the original source | ||
6 | * declaration. | ||
7 | * | ||
8 | * Modifications for the OpenRISC architecture: | ||
9 | * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> | ||
10 | * Copyright (C) 2005 Gyorgy Jeney <nog@bsemi.com> | ||
11 | * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> | ||
12 | * | ||
13 | * This program is free software; you can redistribute it and/or | ||
14 | * modify it under the terms of the GNU General Public License | ||
15 | * as published by the Free Software Foundation; either version | ||
16 | * 2 of the License, or (at your option) any later version. | ||
17 | */ | ||
18 | |||
19 | #include <stddef.h> | ||
20 | #include <linux/kernel.h> | ||
21 | #include <linux/sched.h> | ||
22 | #include <linux/string.h> | ||
23 | |||
24 | #include <linux/mm.h> | ||
25 | #include <linux/errno.h> | ||
26 | #include <linux/ptrace.h> | ||
27 | #include <linux/audit.h> | ||
28 | #include <linux/regset.h> | ||
29 | #include <linux/tracehook.h> | ||
30 | #include <linux/elf.h> | ||
31 | |||
32 | #include <asm/thread_info.h> | ||
33 | #include <asm/segment.h> | ||
34 | #include <asm/page.h> | ||
35 | #include <asm/pgtable.h> | ||
36 | #include <asm/system.h> | ||
37 | |||
38 | /* | ||
39 | * Copy the thread state to a regset that can be interpreted by userspace. | ||
40 | * | ||
41 | * It doesn't matter what our internal pt_regs structure looks like. The | ||
42 | * important thing is that we export a consistent view of the thread state | ||
43 | * to userspace. As such, we need to make sure that the regset remains | ||
44 | * ABI compatible as defined by the struct user_regs_struct: | ||
45 | * | ||
46 | * (Each item is a 32-bit word) | ||
47 | * r0 = 0 (exported for clarity) | ||
48 | * 31 GPRS r1-r31 | ||
49 | * PC (Program counter) | ||
50 | * SR (Supervision register) | ||
51 | */ | ||
52 | static int genregs_get(struct task_struct *target, | ||
53 | const struct user_regset *regset, | ||
54 | unsigned int pos, unsigned int count, | ||
55 | void *kbuf, void __user * ubuf) | ||
56 | { | ||
57 | const struct pt_regs *regs = task_pt_regs(target); | ||
58 | int ret; | ||
59 | |||
60 | /* r0 */ | ||
61 | ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, 0, 4); | ||
62 | |||
63 | if (!ret) | ||
64 | ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, | ||
65 | regs->gpr+1, 4, 4*32); | ||
66 | if (!ret) | ||
67 | ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, | ||
68 | ®s->pc, 4*32, 4*33); | ||
69 | if (!ret) | ||
70 | ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, | ||
71 | ®s->sr, 4*33, 4*34); | ||
72 | if (!ret) | ||
73 | ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, | ||
74 | 4*34, -1); | ||
75 | |||
76 | return ret; | ||
77 | } | ||
78 | |||
79 | /* | ||
80 | * Set the thread state from a regset passed in via ptrace | ||
81 | */ | ||
82 | static int genregs_set(struct task_struct *target, | ||
83 | const struct user_regset *regset, | ||
84 | unsigned int pos, unsigned int count, | ||
85 | const void *kbuf, const void __user * ubuf) | ||
86 | { | ||
87 | struct pt_regs *regs = task_pt_regs(target); | ||
88 | int ret; | ||
89 | |||
90 | /* ignore r0 */ | ||
91 | ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, 4); | ||
92 | /* r1 - r31 */ | ||
93 | if (!ret) | ||
94 | ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, | ||
95 | regs->gpr+1, 4, 4*32); | ||
96 | /* PC */ | ||
97 | if (!ret) | ||
98 | ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, | ||
99 | ®s->pc, 4*32, 4*33); | ||
100 | /* | ||
101 | * Skip SR and padding... userspace isn't allowed to change bits in | ||
102 | * the Supervision register | ||
103 | */ | ||
104 | if (!ret) | ||
105 | ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, | ||
106 | 4*33, -1); | ||
107 | |||
108 | return ret; | ||
109 | } | ||
110 | |||
111 | /* | ||
112 | * Define the register sets available on OpenRISC under Linux | ||
113 | */ | ||
114 | enum or1k_regset { | ||
115 | REGSET_GENERAL, | ||
116 | }; | ||
117 | |||
118 | static const struct user_regset or1k_regsets[] = { | ||
119 | [REGSET_GENERAL] = { | ||
120 | .core_note_type = NT_PRSTATUS, | ||
121 | .n = ELF_NGREG, | ||
122 | .size = sizeof(long), | ||
123 | .align = sizeof(long), | ||
124 | .get = genregs_get, | ||
125 | .set = genregs_set, | ||
126 | }, | ||
127 | }; | ||
128 | |||
129 | static const struct user_regset_view user_or1k_native_view = { | ||
130 | .name = "or1k", | ||
131 | .e_machine = EM_OPENRISC, | ||
132 | .regsets = or1k_regsets, | ||
133 | .n = ARRAY_SIZE(or1k_regsets), | ||
134 | }; | ||
135 | |||
136 | const struct user_regset_view *task_user_regset_view(struct task_struct *task) | ||
137 | { | ||
138 | return &user_or1k_native_view; | ||
139 | } | ||
140 | |||
141 | /* | ||
142 | * Does not yet catch signals sent when the child dies, | ||
143 | * either in exit.c or in signal.c. | ||
144 | */ | ||
145 | |||
146 | |||
147 | /* | ||
148 | * Called by kernel/ptrace.c when detaching.. | ||
149 | * | ||
150 | * Make sure the single step bit is not set. | ||
151 | */ | ||
152 | void ptrace_disable(struct task_struct *child) | ||
153 | { | ||
154 | pr_debug("ptrace_disable(): TODO\n"); | ||
155 | |||
156 | user_disable_single_step(child); | ||
157 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | ||
158 | } | ||
159 | |||
160 | long arch_ptrace(struct task_struct *child, long request, unsigned long addr, | ||
161 | unsigned long data) | ||
162 | { | ||
163 | int ret; | ||
164 | |||
165 | switch (request) { | ||
166 | default: | ||
167 | ret = ptrace_request(child, request, addr, data); | ||
168 | break; | ||
169 | } | ||
170 | |||
171 | return ret; | ||
172 | } | ||
173 | |||
174 | /* | ||
175 | * Notification of system call entry/exit | ||
176 | * - triggered by current->work.syscall_trace | ||
177 | */ | ||
178 | asmlinkage long do_syscall_trace_enter(struct pt_regs *regs) | ||
179 | { | ||
180 | long ret = 0; | ||
181 | |||
182 | if (test_thread_flag(TIF_SYSCALL_TRACE) && | ||
183 | tracehook_report_syscall_entry(regs)) | ||
184 | /* | ||
185 | * Tracing decided this syscall should not happen. | ||
186 | * We'll return a bogus call number to get an ENOSYS | ||
187 | * error, but leave the original number in <something>. | ||
188 | */ | ||
189 | ret = -1L; | ||
190 | |||
191 | /* Are these regs right??? */ | ||
192 | if (unlikely(current->audit_context)) | ||
193 | audit_syscall_entry(audit_arch(), regs->syscallno, | ||
194 | regs->gpr[3], regs->gpr[4], | ||
195 | regs->gpr[5], regs->gpr[6]); | ||
196 | |||
197 | return ret ? : regs->syscallno; | ||
198 | } | ||
199 | |||
200 | asmlinkage void do_syscall_trace_leave(struct pt_regs *regs) | ||
201 | { | ||
202 | int step; | ||
203 | |||
204 | if (unlikely(current->audit_context)) | ||
205 | audit_syscall_exit(AUDITSC_RESULT(regs->gpr[11]), | ||
206 | regs->gpr[11]); | ||
207 | |||
208 | step = test_thread_flag(TIF_SINGLESTEP); | ||
209 | if (step || test_thread_flag(TIF_SYSCALL_TRACE)) | ||
210 | tracehook_report_syscall_exit(regs, step); | ||
211 | } | ||
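genregs_get()/genregs_set() above export the thread state at fixed word offsets: r0 at 0, r1-r31 at 4..124, PC at 128, SR at 132, then zeroed padding. The sketch below mirrors that layout in an illustrative struct (not the kernel's actual user_regs_struct definition) and prints the offsets the copyout calls rely on.

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    struct or1k_user_regs {                /* illustrative mirror of the exported view */
            uint32_t gpr[32];              /* gpr[0] is always 0 */
            uint32_t pc;
            uint32_t sr;
            uint32_t pad[2];               /* trailing words zeroed by copyout_zero */
    };

    int main(void)
    {
            printf("r1 at offset      %zu\n", offsetof(struct or1k_user_regs, gpr[1]));
            printf("pc at offset      %zu\n", offsetof(struct or1k_user_regs, pc));
            printf("sr at offset      %zu\n", offsetof(struct or1k_user_regs, sr));
            printf("padding at offset %zu\n", offsetof(struct or1k_user_regs, pad));
            return 0;
    }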
diff --git a/arch/openrisc/kernel/setup.c b/arch/openrisc/kernel/setup.c new file mode 100644 index 00000000000..1422f747f52 --- /dev/null +++ b/arch/openrisc/kernel/setup.c | |||
@@ -0,0 +1,381 @@ | |||
1 | /* | ||
2 | * OpenRISC setup.c | ||
3 | * | ||
4 | * Linux architectural port borrowing liberally from similar works of | ||
5 | * others. All original copyrights apply as per the original source | ||
6 | * declaration. | ||
7 | * | ||
8 | * Modifications for the OpenRISC architecture: | ||
9 | * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> | ||
10 | * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or | ||
13 | * modify it under the terms of the GNU General Public License | ||
14 | * as published by the Free Software Foundation; either version | ||
15 | * 2 of the License, or (at your option) any later version. | ||
16 | * | ||
17 | * This file handles the architecture-dependent parts of initialization | ||
18 | */ | ||
19 | |||
20 | #include <linux/errno.h> | ||
21 | #include <linux/sched.h> | ||
22 | #include <linux/kernel.h> | ||
23 | #include <linux/mm.h> | ||
24 | #include <linux/stddef.h> | ||
25 | #include <linux/unistd.h> | ||
26 | #include <linux/ptrace.h> | ||
27 | #include <linux/slab.h> | ||
28 | #include <linux/tty.h> | ||
29 | #include <linux/ioport.h> | ||
30 | #include <linux/delay.h> | ||
31 | #include <linux/console.h> | ||
32 | #include <linux/init.h> | ||
33 | #include <linux/bootmem.h> | ||
34 | #include <linux/seq_file.h> | ||
35 | #include <linux/serial.h> | ||
36 | #include <linux/initrd.h> | ||
37 | #include <linux/of_fdt.h> | ||
38 | #include <linux/of.h> | ||
39 | #include <linux/memblock.h> | ||
40 | #include <linux/device.h> | ||
41 | #include <linux/of_platform.h> | ||
42 | |||
43 | #include <asm/segment.h> | ||
44 | #include <asm/system.h> | ||
45 | #include <asm/pgtable.h> | ||
46 | #include <asm/types.h> | ||
47 | #include <asm/setup.h> | ||
48 | #include <asm/io.h> | ||
49 | #include <asm/cpuinfo.h> | ||
50 | #include <asm/delay.h> | ||
51 | |||
52 | #include "vmlinux.h" | ||
53 | |||
54 | char __initdata cmd_line[COMMAND_LINE_SIZE] = CONFIG_CMDLINE; | ||
55 | |||
56 | static unsigned long __init setup_memory(void) | ||
57 | { | ||
58 | unsigned long bootmap_size; | ||
59 | unsigned long ram_start_pfn; | ||
60 | unsigned long free_ram_start_pfn; | ||
61 | unsigned long ram_end_pfn; | ||
62 | phys_addr_t memory_start, memory_end; | ||
63 | struct memblock_region *region; | ||
64 | |||
65 | memory_end = memory_start = 0; | ||
66 | |||
67 | /* Find the main memory region where the kernel is */ | ||
68 | for_each_memblock(memory, region) { | ||
69 | memory_start = region->base; | ||
70 | memory_end = region->base + region->size; | ||
71 | printk(KERN_INFO "%s: Memory: 0x%x-0x%x\n", __func__, | ||
72 | memory_start, memory_end); | ||
73 | } | ||
74 | |||
75 | if (!memory_end) { | ||
76 | panic("No memory!"); | ||
77 | } | ||
78 | |||
79 | ram_start_pfn = PFN_UP(memory_start); | ||
80 | /* free_ram_start_pfn is first page after kernel */ | ||
81 | free_ram_start_pfn = PFN_UP(__pa(&_end)); | ||
82 | ram_end_pfn = PFN_DOWN(memblock_end_of_DRAM()); | ||
83 | |||
84 | max_pfn = ram_end_pfn; | ||
85 | |||
86 | /* | ||
87 | * initialize the boot-time allocator (with low memory only). | ||
88 | * | ||
89 | * This makes the memory from the end of the kernel to the end of | ||
90 | * RAM usable. | ||
91 | * init_bootmem sets the global values min_low_pfn, max_low_pfn. | ||
92 | */ | ||
93 | bootmap_size = init_bootmem(free_ram_start_pfn, | ||
94 | ram_end_pfn - ram_start_pfn); | ||
95 | free_bootmem(PFN_PHYS(free_ram_start_pfn), | ||
96 | (ram_end_pfn - free_ram_start_pfn) << PAGE_SHIFT); | ||
97 | reserve_bootmem(PFN_PHYS(free_ram_start_pfn), bootmap_size, | ||
98 | BOOTMEM_DEFAULT); | ||
99 | |||
100 | for_each_memblock(reserved, region) { | ||
101 | printk(KERN_INFO "Reserved - 0x%08x-0x%08x\n", | ||
102 | (u32) region->base, (u32) region->size); | ||
103 | reserve_bootmem(region->base, region->size, BOOTMEM_DEFAULT); | ||
104 | } | ||
105 | |||
106 | return ram_end_pfn; | ||
107 | } | ||
108 | |||
109 | struct cpuinfo cpuinfo; | ||
110 | |||
111 | static void print_cpuinfo(void) | ||
112 | { | ||
113 | unsigned long upr = mfspr(SPR_UPR); | ||
114 | unsigned long vr = mfspr(SPR_VR); | ||
115 | unsigned int version; | ||
116 | unsigned int revision; | ||
117 | |||
118 | version = (vr & SPR_VR_VER) >> 24; | ||
119 | revision = (vr & SPR_VR_REV); | ||
120 | |||
121 | printk(KERN_INFO "CPU: OpenRISC-%x (revision %d) @%d MHz\n", | ||
122 | version, revision, cpuinfo.clock_frequency / 1000000); | ||
123 | |||
124 | if (!(upr & SPR_UPR_UP)) { | ||
125 | printk(KERN_INFO | ||
126 | "-- no UPR register... unable to detect configuration\n"); | ||
127 | return; | ||
128 | } | ||
129 | |||
130 | if (upr & SPR_UPR_DCP) | ||
131 | printk(KERN_INFO | ||
132 | "-- dcache: %4d bytes total, %2d bytes/line, %d way(s)\n", | ||
133 | cpuinfo.dcache_size, cpuinfo.dcache_block_size, 1); | ||
134 | else | ||
135 | printk(KERN_INFO "-- dcache disabled\n"); | ||
136 | if (upr & SPR_UPR_ICP) | ||
137 | printk(KERN_INFO | ||
138 | "-- icache: %4d bytes total, %2d bytes/line, %d way(s)\n", | ||
139 | cpuinfo.icache_size, cpuinfo.icache_block_size, 1); | ||
140 | else | ||
141 | printk(KERN_INFO "-- icache disabled\n"); | ||
142 | |||
143 | if (upr & SPR_UPR_DMP) | ||
144 | printk(KERN_INFO "-- dmmu: %4d entries, %lu way(s)\n", | ||
145 | 1 << ((mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTS) >> 2), | ||
146 | 1 + (mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTW)); | ||
147 | if (upr & SPR_UPR_IMP) | ||
148 | printk(KERN_INFO "-- immu: %4d entries, %lu way(s)\n", | ||
149 | 1 << ((mfspr(SPR_IMMUCFGR) & SPR_IMMUCFGR_NTS) >> 2), | ||
150 | 1 + (mfspr(SPR_IMMUCFGR) & SPR_IMMUCFGR_NTW)); | ||
151 | |||
152 | printk(KERN_INFO "-- additional features:\n"); | ||
153 | if (upr & SPR_UPR_DUP) | ||
154 | printk(KERN_INFO "-- debug unit\n"); | ||
155 | if (upr & SPR_UPR_PCUP) | ||
156 | printk(KERN_INFO "-- performance counters\n"); | ||
157 | if (upr & SPR_UPR_PMP) | ||
158 | printk(KERN_INFO "-- power management\n"); | ||
159 | if (upr & SPR_UPR_PICP) | ||
160 | printk(KERN_INFO "-- PIC\n"); | ||
161 | if (upr & SPR_UPR_TTP) | ||
162 | printk(KERN_INFO "-- timer\n"); | ||
163 | if (upr & SPR_UPR_CUP) | ||
164 | printk(KERN_INFO "-- custom unit(s)\n"); | ||
165 | } | ||
166 | |||
167 | void __init setup_cpuinfo(void) | ||
168 | { | ||
169 | struct device_node *cpu; | ||
170 | unsigned long iccfgr, dccfgr; | ||
171 | unsigned long cache_set_size, cache_ways; | ||
172 | |||
173 | cpu = of_find_compatible_node(NULL, NULL, "opencores,or1200-rtlsvn481"); | ||
174 | if (!cpu) | ||
175 | panic("No compatible CPU found in device tree...\n"); | ||
176 | |||
177 | iccfgr = mfspr(SPR_ICCFGR); | ||
178 | cache_ways = 1 << (iccfgr & SPR_ICCFGR_NCW); | ||
179 | cache_set_size = 1 << ((iccfgr & SPR_ICCFGR_NCS) >> 3); | ||
180 | cpuinfo.icache_block_size = 16 << ((iccfgr & SPR_ICCFGR_CBS) >> 7); | ||
181 | cpuinfo.icache_size = | ||
182 | cache_set_size * cache_ways * cpuinfo.icache_block_size; | ||
183 | |||
184 | dccfgr = mfspr(SPR_DCCFGR); | ||
185 | cache_ways = 1 << (dccfgr & SPR_DCCFGR_NCW); | ||
186 | cache_set_size = 1 << ((dccfgr & SPR_DCCFGR_NCS) >> 3); | ||
187 | cpuinfo.dcache_block_size = 16 << ((dccfgr & SPR_DCCFGR_CBS) >> 7); | ||
188 | cpuinfo.dcache_size = | ||
189 | cache_set_size * cache_ways * cpuinfo.dcache_block_size; | ||
190 | |||
191 | if (of_property_read_u32(cpu, "clock-frequency", | ||
192 | &cpuinfo.clock_frequency)) { | ||
193 | printk(KERN_WARNING | ||
194 | "Device tree missing CPU 'clock-frequency' parameter." | ||
195 | "Assuming frequency 25MHZ" | ||
196 | "This is probably not what you want."); | ||
197 | } | ||
198 | |||
199 | of_node_put(cpu); | ||
200 | |||
201 | print_cpuinfo(); | ||
202 | } | ||
203 | |||
204 | /** | ||
205 | * or32_early_setup | ||
206 | * | ||
207 | * Handles the pointer to the device tree that this kernel is to use | ||
208 | * for establishing the available platform devices. | ||
209 | * | ||
210 | * For now, this is limited to using the built-in device tree. In the future, | ||
211 | * it is intended that this function will take a pointer to the device tree | ||
212 | * that is potentially built-in, but potentially also passed in by the | ||
213 | * bootloader, or discovered by some equally clever means... | ||
214 | */ | ||
215 | |||
216 | void __init or32_early_setup(void) | ||
217 | { | ||
218 | |||
219 | early_init_devtree(__dtb_start); | ||
220 | |||
221 | printk(KERN_INFO "Compiled-in FDT at 0x%p\n", __dtb_start); | ||
222 | } | ||
223 | |||
224 | static int __init openrisc_device_probe(void) | ||
225 | { | ||
226 | of_platform_populate(NULL, NULL, NULL, NULL); | ||
227 | |||
228 | return 0; | ||
229 | } | ||
230 | |||
231 | device_initcall(openrisc_device_probe); | ||
232 | |||
233 | static inline unsigned long extract_value_bits(unsigned long reg, | ||
234 | short bit_nr, short width) | ||
235 | { | ||
236 | return (reg >> bit_nr) & ((1 << width) - 1); | ||
237 | } | ||
238 | |||
239 | static inline unsigned long extract_value(unsigned long reg, unsigned long mask) | ||
240 | { | ||
241 | while (!(mask & 0x1)) { | ||
242 | reg = reg >> 1; | ||
243 | mask = mask >> 1; | ||
244 | } | ||
245 | return mask & reg; | ||
246 | } | ||
247 | |||
248 | void __init detect_unit_config(unsigned long upr, unsigned long mask, | ||
249 | char *text, void (*func) (void)) | ||
250 | { | ||
251 | if (text != NULL) | ||
252 | printk("%s", text); | ||
253 | |||
254 | if (upr & mask) { | ||
255 | if (func != NULL) | ||
256 | func(); | ||
257 | else | ||
258 | printk("present\n"); | ||
259 | } else | ||
260 | printk("not present\n"); | ||
261 | } | ||
262 | |||
263 | /* | ||
264 | * calibrate_delay | ||
265 | * | ||
266 | * Lightweight calibrate_delay implementation that calculates loops_per_jiffy | ||
267 | * from the clock frequency passed in via the device tree | ||
268 | * | ||
269 | */ | ||
270 | |||
271 | void __cpuinit calibrate_delay(void) | ||
272 | { | ||
273 | const int *val; | ||
274 | struct device_node *cpu = NULL; | ||
275 | cpu = of_find_compatible_node(NULL, NULL, "opencores,or1200-rtlsvn481"); | ||
276 | val = of_get_property(cpu, "clock-frequency", NULL); | ||
277 | if (!val) | ||
278 | panic("no cpu 'clock-frequency' parameter in device tree"); | ||
279 | loops_per_jiffy = *val / HZ; | ||
280 | pr_cont("%lu.%02lu BogoMIPS (lpj=%lu)\n", | ||
281 | loops_per_jiffy / (500000 / HZ), | ||
282 | (loops_per_jiffy / (5000 / HZ)) % 100, loops_per_jiffy); | ||
283 | } | ||
284 | |||
285 | void __init setup_arch(char **cmdline_p) | ||
286 | { | ||
287 | unsigned long max_low_pfn; | ||
288 | |||
289 | unflatten_device_tree(); | ||
290 | |||
291 | setup_cpuinfo(); | ||
292 | |||
293 | /* process 1's initial memory region is the kernel code/data */ | ||
294 | init_mm.start_code = (unsigned long)&_stext; | ||
295 | init_mm.end_code = (unsigned long)&_etext; | ||
296 | init_mm.end_data = (unsigned long)&_edata; | ||
297 | init_mm.brk = (unsigned long)&_end; | ||
298 | |||
299 | #ifdef CONFIG_BLK_DEV_INITRD | ||
300 | initrd_start = (unsigned long)&__initrd_start; | ||
301 | initrd_end = (unsigned long)&__initrd_end; | ||
302 | if (initrd_start == initrd_end) { | ||
303 | initrd_start = 0; | ||
304 | initrd_end = 0; | ||
305 | } | ||
306 | initrd_below_start_ok = 1; | ||
307 | #endif | ||
308 | |||
309 | /* setup bootmem allocator */ | ||
310 | max_low_pfn = setup_memory(); | ||
311 | |||
312 | /* paging_init() sets up the MMU and marks all pages as reserved */ | ||
313 | paging_init(); | ||
314 | |||
315 | #if defined(CONFIG_VT) && defined(CONFIG_DUMMY_CONSOLE) | ||
316 | if (!conswitchp) | ||
317 | conswitchp = &dummy_con; | ||
318 | #endif | ||
319 | |||
320 | *cmdline_p = cmd_line; | ||
321 | |||
322 | printk(KERN_INFO "OpenRISC Linux -- http://openrisc.net\n"); | ||
323 | } | ||
324 | |||
325 | static int show_cpuinfo(struct seq_file *m, void *v) | ||
326 | { | ||
327 | unsigned long vr; | ||
328 | int version, revision; | ||
329 | |||
330 | vr = mfspr(SPR_VR); | ||
331 | version = (vr & SPR_VR_VER) >> 24; | ||
332 | revision = vr & SPR_VR_REV; | ||
333 | |||
334 | return seq_printf(m, | ||
335 | "cpu\t\t: OpenRISC-%x\n" | ||
336 | "revision\t: %d\n" | ||
337 | "frequency\t: %ld\n" | ||
338 | "dcache size\t: %d bytes\n" | ||
339 | "dcache block size\t: %d bytes\n" | ||
340 | "icache size\t: %d bytes\n" | ||
341 | "icache block size\t: %d bytes\n" | ||
342 | "immu\t\t: %d entries, %lu ways\n" | ||
343 | "dmmu\t\t: %d entries, %lu ways\n" | ||
344 | "bogomips\t: %lu.%02lu\n", | ||
345 | version, | ||
346 | revision, | ||
347 | loops_per_jiffy * HZ, | ||
348 | cpuinfo.dcache_size, | ||
349 | cpuinfo.dcache_block_size, | ||
350 | cpuinfo.icache_size, | ||
351 | cpuinfo.icache_block_size, | ||
352 | 1 << ((mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTS) >> 2), | ||
353 | 1 + (mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTW), | ||
354 | 1 << ((mfspr(SPR_IMMUCFGR) & SPR_IMMUCFGR_NTS) >> 2), | ||
355 | 1 + (mfspr(SPR_IMMUCFGR) & SPR_IMMUCFGR_NTW), | ||
356 | (loops_per_jiffy * HZ) / 500000, | ||
357 | ((loops_per_jiffy * HZ) / 5000) % 100); | ||
358 | } | ||
359 | |||
360 | static void *c_start(struct seq_file *m, loff_t * pos) | ||
361 | { | ||
362 | /* We only have one CPU... */ | ||
363 | return *pos < 1 ? (void *)1 : NULL; | ||
364 | } | ||
365 | |||
366 | static void *c_next(struct seq_file *m, void *v, loff_t * pos) | ||
367 | { | ||
368 | ++*pos; | ||
369 | return NULL; | ||
370 | } | ||
371 | |||
372 | static void c_stop(struct seq_file *m, void *v) | ||
373 | { | ||
374 | } | ||
375 | |||
376 | const struct seq_operations cpuinfo_op = { | ||
377 | .start = c_start, | ||
378 | .next = c_next, | ||
379 | .stop = c_stop, | ||
380 | .show = show_cpuinfo, | ||
381 | }; | ||
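setup_cpuinfo() above derives each cache size as sets x ways x block size from the ICCFGR/DCCFGR configuration registers. A user-space sketch of that decoding; the field masks and the sample register value are assumptions matching the shifts used in the code, not values read from hardware.

    #include <stdio.h>

    #define SPR_ICCFGR_NCW 0x00000007   /* number of cache ways (log2), assumed mask */
    #define SPR_ICCFGR_NCS 0x00000078   /* number of cache sets (log2), assumed mask */
    #define SPR_ICCFGR_CBS 0x00000080   /* cache block size: 0 = 16 B, 1 = 32 B */

    int main(void)
    {
            unsigned long iccfgr = 0x00000040;   /* sample: 1 way, 256 sets, 16-byte lines */

            unsigned long ways  = 1 << (iccfgr & SPR_ICCFGR_NCW);
            unsigned long sets  = 1 << ((iccfgr & SPR_ICCFGR_NCS) >> 3);
            unsigned long block = 16 << ((iccfgr & SPR_ICCFGR_CBS) >> 7);

            printf("icache: %lu sets x %lu way(s) x %lu bytes = %lu bytes\n",
                   sets, ways, block, sets * ways * block);
            return 0;
    }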
diff --git a/arch/openrisc/kernel/signal.c b/arch/openrisc/kernel/signal.c new file mode 100644 index 00000000000..95207ab0c99 --- /dev/null +++ b/arch/openrisc/kernel/signal.c | |||
@@ -0,0 +1,389 @@ | |||
1 | /* | ||
2 | * OpenRISC signal.c | ||
3 | * | ||
4 | * Linux architectural port borrowing liberally from similar works of | ||
5 | * others. All original copyrights apply as per the original source | ||
6 | * declaration. | ||
7 | * | ||
8 | * Modifications for the OpenRISC architecture: | ||
9 | * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> | ||
10 | * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or | ||
13 | * modify it under the terms of the GNU General Public License | ||
14 | * as published by the Free Software Foundation; either version | ||
15 | * 2 of the License, or (at your option) any later version. | ||
16 | */ | ||
17 | |||
18 | #include <linux/sched.h> | ||
19 | #include <linux/mm.h> | ||
20 | #include <linux/smp.h> | ||
21 | #include <linux/kernel.h> | ||
22 | #include <linux/signal.h> | ||
23 | #include <linux/errno.h> | ||
24 | #include <linux/wait.h> | ||
25 | #include <linux/ptrace.h> | ||
26 | #include <linux/unistd.h> | ||
27 | #include <linux/stddef.h> | ||
28 | #include <linux/tracehook.h> | ||
29 | |||
30 | #include <asm/processor.h> | ||
31 | #include <asm/ucontext.h> | ||
32 | #include <asm/uaccess.h> | ||
33 | |||
34 | #define DEBUG_SIG 0 | ||
35 | |||
36 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) | ||
37 | |||
38 | asmlinkage long | ||
39 | _sys_sigaltstack(const stack_t *uss, stack_t *uoss, struct pt_regs *regs) | ||
40 | { | ||
41 | return do_sigaltstack(uss, uoss, regs->sp); | ||
42 | } | ||
43 | |||
44 | struct rt_sigframe { | ||
45 | struct siginfo *pinfo; | ||
46 | void *puc; | ||
47 | struct siginfo info; | ||
48 | struct ucontext uc; | ||
49 | unsigned char retcode[16]; /* trampoline code */ | ||
50 | }; | ||
51 | |||
52 | static int restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc) | ||
53 | { | ||
54 | unsigned int err = 0; | ||
55 | |||
56 | /* Always make any pending restarted system call return -EINTR */ | ||
57 | current_thread_info()->restart_block.fn = do_no_restart_syscall; | ||
58 | |||
59 | /* | ||
60 | * Restore the regs from &sc->regs. | ||
61 | * (sc is already checked for VERIFY_READ since the sigframe was | ||
62 | * checked in sys_sigreturn previously) | ||
63 | */ | ||
64 | if (__copy_from_user(regs, sc->regs.gpr, 32 * sizeof(unsigned long))) | ||
65 | goto badframe; | ||
66 | if (__copy_from_user(®s->pc, &sc->regs.pc, sizeof(unsigned long))) | ||
67 | goto badframe; | ||
68 | if (__copy_from_user(®s->sr, &sc->regs.sr, sizeof(unsigned long))) | ||
69 | goto badframe; | ||
70 | |||
71 | /* make sure the SM-bit is cleared so user-mode cannot fool us */ | ||
72 | regs->sr &= ~SPR_SR_SM; | ||
73 | |||
74 | /* TODO: the other ports use regs->orig_XX to disable syscall checks | ||
75 | * after this completes, but we don't use that mechanism. maybe we can | ||
76 | * use it now ? | ||
77 | */ | ||
78 | |||
79 | return err; | ||
80 | |||
81 | badframe: | ||
82 | return 1; | ||
83 | } | ||
84 | |||
85 | asmlinkage long _sys_rt_sigreturn(struct pt_regs *regs) | ||
86 | { | ||
87 | struct rt_sigframe *frame = (struct rt_sigframe __user *)regs->sp; | ||
88 | sigset_t set; | ||
89 | stack_t st; | ||
90 | |||
91 | /* | ||
92 | * Since we stacked the signal on a dword boundary, | ||
93 | * then frame should be dword aligned here. If it's | ||
94 | * not, then the user is trying to mess with us. | ||
95 | */ | ||
96 | if (((long)frame) & 3) | ||
97 | goto badframe; | ||
98 | |||
99 | if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) | ||
100 | goto badframe; | ||
101 | if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) | ||
102 | goto badframe; | ||
103 | |||
104 | sigdelsetmask(&set, ~_BLOCKABLE); | ||
105 | spin_lock_irq(¤t->sighand->siglock); | ||
106 | current->blocked = set; | ||
107 | recalc_sigpending(); | ||
108 | spin_unlock_irq(¤t->sighand->siglock); | ||
109 | |||
110 | if (restore_sigcontext(regs, &frame->uc.uc_mcontext)) | ||
111 | goto badframe; | ||
112 | |||
113 | if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st))) | ||
114 | goto badframe; | ||
115 | /* It is more difficult to avoid calling this function than to | ||
116 | call it and ignore errors. */ | ||
117 | do_sigaltstack(&st, NULL, regs->sp); | ||
118 | |||
119 | return regs->gpr[11]; | ||
120 | |||
121 | badframe: | ||
122 | force_sig(SIGSEGV, current); | ||
123 | return 0; | ||
124 | } | ||
125 | |||
126 | /* | ||
127 | * Set up a signal frame. | ||
128 | */ | ||
129 | |||
130 | static int setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs, | ||
131 | unsigned long mask) | ||
132 | { | ||
133 | int err = 0; | ||
134 | |||
135 | /* copy the regs */ | ||
136 | |||
137 | err |= __copy_to_user(sc->regs.gpr, regs, 32 * sizeof(unsigned long)); | ||
138 | err |= __copy_to_user(&sc->regs.pc, ®s->pc, sizeof(unsigned long)); | ||
139 | err |= __copy_to_user(&sc->regs.sr, ®s->sr, sizeof(unsigned long)); | ||
140 | |||
141 | /* then some other stuff */ | ||
142 | |||
143 | err |= __put_user(mask, &sc->oldmask); | ||
144 | |||
145 | return err; | ||
146 | } | ||
147 | |||
148 | static inline unsigned long align_sigframe(unsigned long sp) | ||
149 | { | ||
150 | return sp & ~3UL; | ||
151 | } | ||
152 | |||
153 | /* | ||
154 | * Work out where the signal frame should go. It's either on the user stack | ||
155 | * or the alternate stack. | ||
156 | */ | ||
157 | |||
158 | static inline void __user *get_sigframe(struct k_sigaction *ka, | ||
159 | struct pt_regs *regs, size_t frame_size) | ||
160 | { | ||
161 | unsigned long sp = regs->sp; | ||
162 | int onsigstack = on_sig_stack(sp); | ||
163 | |||
164 | /* redzone */ | ||
165 | sp -= STACK_FRAME_OVERHEAD; | ||
166 | |||
167 | /* This is the X/Open sanctioned signal stack switching. */ | ||
168 | if ((ka->sa.sa_flags & SA_ONSTACK) && !onsigstack) { | ||
169 | if (current->sas_ss_size) | ||
170 | sp = current->sas_ss_sp + current->sas_ss_size; | ||
171 | } | ||
172 | |||
173 | sp = align_sigframe(sp - frame_size); | ||
174 | |||
175 | /* | ||
176 | * If we are on the alternate signal stack and would overflow it, don't. | ||
177 | * Return an always-bogus address instead so we will die with SIGSEGV. | ||
178 | */ | ||
179 | if (onsigstack && !likely(on_sig_stack(sp))) | ||
180 | return (void __user *)-1L; | ||
181 | |||
182 | return (void __user *)sp; | ||
183 | } | ||
184 | |||
185 | /* grab and setup a signal frame. | ||
186 | * | ||
187 | * basically we stack a lot of state info, and arrange for the | ||
188 | * user-mode program to return to the kernel using either a | ||
189 | * trampoline which performs the syscall sigreturn, or a provided | ||
190 | * user-mode trampoline. | ||
191 | */ | ||
192 | static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | ||
193 | sigset_t *set, struct pt_regs *regs) | ||
194 | { | ||
195 | struct rt_sigframe *frame; | ||
196 | unsigned long return_ip; | ||
197 | int err = 0; | ||
198 | |||
199 | frame = get_sigframe(ka, regs, sizeof(*frame)); | ||
200 | |||
201 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) | ||
202 | goto give_sigsegv; | ||
203 | |||
204 | err |= __put_user(&frame->info, &frame->pinfo); | ||
205 | err |= __put_user(&frame->uc, &frame->puc); | ||
206 | |||
207 | if (ka->sa.sa_flags & SA_SIGINFO) | ||
208 | err |= copy_siginfo_to_user(&frame->info, info); | ||
209 | if (err) | ||
210 | goto give_sigsegv; | ||
211 | |||
212 | /* Clear all the bits of the ucontext we don't use. */ | ||
213 | err |= __clear_user(&frame->uc, offsetof(struct ucontext, uc_mcontext)); | ||
214 | err |= __put_user(0, &frame->uc.uc_flags); | ||
215 | err |= __put_user(NULL, &frame->uc.uc_link); | ||
216 | err |= __put_user((void *)current->sas_ss_sp, | ||
217 | &frame->uc.uc_stack.ss_sp); | ||
218 | err |= __put_user(sas_ss_flags(regs->sp), &frame->uc.uc_stack.ss_flags); | ||
219 | err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); | ||
220 | err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0]); | ||
221 | |||
222 | err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); | ||
223 | |||
224 | if (err) | ||
225 | goto give_sigsegv; | ||
226 | |||
227 | /* trampoline - the desired return ip is the retcode itself */ | ||
228 | return_ip = (unsigned long)&frame->retcode; | ||
229 | /* This is l.ori r11,r0,__NR_rt_sigreturn; l.sys 1; l.nop */ | ||
230 | err |= __put_user(0xa960, (short *)(frame->retcode + 0)); | ||
231 | err |= __put_user(__NR_rt_sigreturn, (short *)(frame->retcode + 2)); | ||
232 | err |= __put_user(0x20000001, (unsigned long *)(frame->retcode + 4)); | ||
233 | err |= __put_user(0x15000000, (unsigned long *)(frame->retcode + 8)); | ||
234 | |||
235 | if (err) | ||
236 | goto give_sigsegv; | ||
237 | |||
238 | /* TODO what is the current->exec_domain stuff and invmap ? */ | ||
239 | |||
240 | /* Set up registers for signal handler */ | ||
241 | regs->pc = (unsigned long)ka->sa.sa_handler; /* what we enter NOW */ | ||
242 | regs->gpr[9] = (unsigned long)return_ip; /* what we enter LATER */ | ||
243 | regs->gpr[3] = (unsigned long)sig; /* arg 1: signo */ | ||
244 | regs->gpr[4] = (unsigned long)&frame->info; /* arg 2: (siginfo_t*) */ | ||
245 | regs->gpr[5] = (unsigned long)&frame->uc; /* arg 3: ucontext */ | ||
246 | |||
247 | /* actually move the usp to reflect the stacked frame */ | ||
248 | regs->sp = (unsigned long)frame; | ||
249 | |||
250 | return; | ||
251 | |||
252 | give_sigsegv: | ||
253 | if (sig == SIGSEGV) | ||
254 | ka->sa.sa_handler = SIG_DFL; | ||
255 | force_sig(SIGSEGV, current); | ||
256 | } | ||
257 | |||
258 | static inline void | ||
259 | handle_signal(unsigned long sig, | ||
260 | siginfo_t *info, struct k_sigaction *ka, | ||
261 | sigset_t *oldset, struct pt_regs *regs) | ||
262 | { | ||
263 | setup_rt_frame(sig, ka, info, oldset, regs); | ||
264 | |||
265 | if (ka->sa.sa_flags & SA_ONESHOT) | ||
266 | ka->sa.sa_handler = SIG_DFL; | ||
267 | |||
268 | spin_lock_irq(¤t->sighand->siglock); | ||
269 | sigorsets(¤t->blocked, ¤t->blocked, &ka->sa.sa_mask); | ||
270 | if (!(ka->sa.sa_flags & SA_NODEFER)) | ||
271 | sigaddset(¤t->blocked, sig); | ||
272 | recalc_sigpending(); | ||
273 | |||
274 | spin_unlock_irq(¤t->sighand->siglock); | ||
275 | } | ||
276 | |||
277 | /* | ||
278 | * Note that 'init' is a special process: it doesn't get signals it doesn't | ||
279 | * want to handle. Thus you cannot kill init, not even with SIGKILL | ||
280 | * or by mistake. | ||
281 | * | ||
282 | * Also note that the regs structure given here as an argument, is the latest | ||
283 | * pushed pt_regs. It may or may not be the same as the first pushed registers | ||
284 | * when the initial usermode->kernelmode transition took place. Therefore | ||
285 | * we can use user_mode(regs) to see if we came directly from kernel or user | ||
286 | * mode below. | ||
287 | */ | ||
288 | |||
289 | void do_signal(struct pt_regs *regs) | ||
290 | { | ||
291 | siginfo_t info; | ||
292 | int signr; | ||
293 | struct k_sigaction ka; | ||
294 | |||
295 | /* | ||
296 | * We want the common case to go fast, which | ||
297 | * is why we may in certain cases get here from | ||
298 | * kernel mode. Just return without doing anything | ||
299 | * if so. | ||
300 | */ | ||
301 | if (!user_mode(regs)) | ||
302 | return; | ||
303 | |||
304 | signr = get_signal_to_deliver(&info, &ka, regs, NULL); | ||
305 | |||
306 | /* If we are coming out of a syscall then we need | ||
307 | * to check if the syscall was interrupted and wants to be | ||
308 | * restarted after handling the signal. If so, the original | ||
309 | * syscall number is put back into r11 and the PC rewound to | ||
310 | * point at the l.sys instruction that resulted in the | ||
311 | * original syscall. Syscall results other than the four | ||
312 | * below mean that the syscall executed to completion and no | ||
313 | * restart is necessary. | ||
314 | */ | ||
315 | if (regs->syscallno) { | ||
316 | int restart = 0; | ||
317 | |||
318 | switch (regs->gpr[11]) { | ||
319 | case -ERESTART_RESTARTBLOCK: | ||
320 | case -ERESTARTNOHAND: | ||
321 | /* Restart if there is no signal handler */ | ||
322 | restart = (signr <= 0); | ||
323 | break; | ||
324 | case -ERESTARTSYS: | ||
325 | /* Restart if there is no signal handler or the | ||
326 | * SA_RESTART flag is set */ | ||
327 | restart = (signr <= 0 || (ka.sa.sa_flags & SA_RESTART)); | ||
328 | break; | ||
329 | case -ERESTARTNOINTR: | ||
330 | /* Always restart */ | ||
331 | restart = 1; | ||
332 | break; | ||
333 | } | ||
334 | |||
335 | if (restart) { | ||
336 | if (regs->gpr[11] == -ERESTART_RESTARTBLOCK) | ||
337 | regs->gpr[11] = __NR_restart_syscall; | ||
338 | else | ||
339 | regs->gpr[11] = regs->orig_gpr11; | ||
340 | regs->pc -= 4; | ||
341 | } else { | ||
342 | regs->gpr[11] = -EINTR; | ||
343 | } | ||
344 | } | ||
345 | |||
346 | if (signr <= 0) { | ||
347 | /* no signal to deliver so we just put the saved sigmask | ||
348 | * back */ | ||
349 | if (test_thread_flag(TIF_RESTORE_SIGMASK)) { | ||
350 | clear_thread_flag(TIF_RESTORE_SIGMASK); | ||
351 | sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); | ||
352 | } | ||
353 | |||
354 | } else { /* signr > 0 */ | ||
355 | sigset_t *oldset; | ||
356 | |||
357 | if (current_thread_info()->flags & _TIF_RESTORE_SIGMASK) | ||
358 | oldset = ¤t->saved_sigmask; | ||
359 | else | ||
360 | oldset = ¤t->blocked; | ||
361 | |||
362 | /* Whee! Actually deliver the signal. */ | ||
363 | handle_signal(signr, &info, &ka, oldset, regs); | ||
364 | /* a signal was successfully delivered; the saved | ||
365 | * sigmask will have been stored in the signal frame, | ||
366 | * and will be restored by sigreturn, so we can simply | ||
367 | * clear the TIF_RESTORE_SIGMASK flag */ | ||
368 | if (test_thread_flag(TIF_RESTORE_SIGMASK)) | ||
369 | clear_thread_flag(TIF_RESTORE_SIGMASK); | ||
370 | |||
371 | tracehook_signal_handler(signr, &info, &ka, regs, | ||
372 | test_thread_flag(TIF_SINGLESTEP)); | ||
373 | } | ||
374 | |||
375 | return; | ||
376 | } | ||
377 | |||
378 | asmlinkage void do_notify_resume(struct pt_regs *regs) | ||
379 | { | ||
380 | if (current_thread_info()->flags & _TIF_SIGPENDING) | ||
381 | do_signal(regs); | ||
382 | |||
383 | if (current_thread_info()->flags & _TIF_NOTIFY_RESUME) { | ||
384 | clear_thread_flag(TIF_NOTIFY_RESUME); | ||
385 | tracehook_notify_resume(regs); | ||
386 | if (current->replacement_session_keyring) | ||
387 | key_replace_session_keyring(); | ||
388 | } | ||
389 | } | ||
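
The -ERESTARTSYS / -ERESTARTNOHAND handling in do_signal() above is what gives userspace the familiar SA_RESTART semantics: an interrupted system call is either rewound and re-executed or fails with -EINTR. A rough userspace sketch of the observable difference (plain POSIX C, a hypothetical demo, nothing OpenRISC-specific):

/* With SA_RESTART the blocking read() resumes after the SIGALRM handler
 * returns; without it the read() fails with EINTR. */
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void on_alarm(int sig) { (void)sig; }

int main(int argc, char **argv)
{
	struct sigaction sa;
	char buf[16];
	ssize_t n;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = on_alarm;
	sa.sa_flags = (argc > 1) ? SA_RESTART : 0;	/* any argument => restartable */
	sigaction(SIGALRM, &sa, NULL);

	alarm(1);					/* interrupt the read below */
	n = read(STDIN_FILENO, buf, sizeof(buf));
	if (n < 0 && errno == EINTR)
		printf("read interrupted (no SA_RESTART)\n");
	else
		printf("read returned %zd (restarted or completed)\n", n);
	return 0;
}

Run it with no arguments and type nothing: the read fails with EINTR after one second. Run it with any argument and it keeps blocking, because the kernel rewinds the PC to the l.sys instruction as in the restart path above.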
diff --git a/arch/openrisc/kernel/sys_call_table.c b/arch/openrisc/kernel/sys_call_table.c new file mode 100644 index 00000000000..e1f8ce8c72a --- /dev/null +++ b/arch/openrisc/kernel/sys_call_table.c | |||
@@ -0,0 +1,28 @@ | |||
1 | /* | ||
2 | * OpenRISC sys_call_table.c | ||
3 | * | ||
4 | * Linux architectural port borrowing liberally from similar works of | ||
5 | * others. All original copyrights apply as per the original source | ||
6 | * declaration. | ||
7 | * | ||
8 | * Modifications for the OpenRISC architecture: | ||
9 | * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or | ||
12 | * modify it under the terms of the GNU General Public License | ||
13 | * as published by the Free Software Foundation; either version | ||
14 | * 2 of the License, or (at your option) any later version. | ||
15 | */ | ||
16 | |||
17 | #include <linux/syscalls.h> | ||
18 | #include <linux/signal.h> | ||
19 | #include <linux/unistd.h> | ||
20 | |||
21 | #include <asm/syscalls.h> | ||
22 | |||
23 | #undef __SYSCALL | ||
24 | #define __SYSCALL(nr, call) [nr] = (call), | ||
25 | |||
26 | void *sys_call_table[__NR_syscalls] = { | ||
27 | #include <asm/unistd.h> | ||
28 | }; | ||
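
The table above is generated with an x-macro: <asm/unistd.h> expands __SYSCALL(nr, call) once per system call, and the designated initializer turns that list into an array indexed by syscall number. A self-contained sketch of the same pattern (the stub handlers and the demo list below are invented for illustration):

#include <stdio.h>

typedef long (*syscall_fn)(void);

static long sys_read_stub(void)  { return 3; }	/* stand-ins for real handlers */
static long sys_write_stub(void) { return 4; }

/* stand-in for the list that <asm/unistd.h> normally provides */
#define DEMO_SYSCALL_LIST \
	__SYSCALL(0, sys_read_stub) \
	__SYSCALL(1, sys_write_stub)

#undef __SYSCALL
#define __SYSCALL(nr, call) [nr] = (call),

static syscall_fn demo_call_table[2] = {
	DEMO_SYSCALL_LIST	/* expands to [0] = ..., [1] = ..., */
};

int main(void)
{
	/* dispatch "syscall number 1" through the generated table */
	printf("table[1]() returned %ld\n", demo_call_table[1]());
	return 0;
}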
diff --git a/arch/openrisc/kernel/sys_or32.c b/arch/openrisc/kernel/sys_or32.c new file mode 100644 index 00000000000..57060084c0c --- /dev/null +++ b/arch/openrisc/kernel/sys_or32.c | |||
@@ -0,0 +1,57 @@ | |||
1 | /* | ||
2 | * OpenRISC sys_or32.c | ||
3 | * | ||
4 | * Linux architectural port borrowing liberally from similar works of | ||
5 | * others. All original copyrights apply as per the original source | ||
6 | * declaration. | ||
7 | * | ||
8 | * Modifications for the OpenRISC architecture: | ||
9 | * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> | ||
10 | * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or | ||
13 | * modify it under the terms of the GNU General Public License | ||
14 | * as published by the Free Software Foundation; either version | ||
15 | * 2 of the License, or (at your option) any later version. | ||
16 | * | ||
17 | * This file contains various random system calls that | ||
18 | * have a non-standard calling sequence on some platforms. | ||
19 | * Since we don't have to do any backwards compatibility, our | ||
20 | * versions are done in the most "normal" way possible. | ||
21 | */ | ||
22 | |||
23 | #include <linux/errno.h> | ||
24 | #include <linux/syscalls.h> | ||
25 | #include <linux/mm.h> | ||
26 | |||
27 | #include <asm/syscalls.h> | ||
28 | |||
29 | /* These are secondary entry points as the primary entry points are defined in | ||
30 | * entry.S where we add the 'regs' parameter value | ||
31 | */ | ||
32 | |||
33 | asmlinkage long _sys_clone(unsigned long clone_flags, unsigned long newsp, | ||
34 | int __user *parent_tid, int __user *child_tid, | ||
35 | struct pt_regs *regs) | ||
36 | { | ||
37 | long ret; | ||
38 | |||
39 | /* FIXME: Is alignment necessary? */ | ||
40 | /* newsp = ALIGN(newsp, 4); */ | ||
41 | |||
42 | if (!newsp) | ||
43 | newsp = regs->sp; | ||
44 | |||
45 | ret = do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid); | ||
46 | |||
47 | return ret; | ||
48 | } | ||
49 | |||
50 | asmlinkage int _sys_fork(struct pt_regs *regs) | ||
51 | { | ||
52 | #ifdef CONFIG_MMU | ||
53 | return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL); | ||
54 | #else | ||
55 | return -EINVAL; | ||
56 | #endif | ||
57 | } | ||
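
_sys_clone() takes the new stack pointer straight from userspace and falls back to the parent's sp when none is supplied, which is how fork-style clones behave. For context, a hedged userspace sketch using the glibc clone(2) wrapper, where the caller hands over the top of a freshly allocated child stack (a hypothetical demo, not OpenRISC-specific):

#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>

#define STACK_SIZE (64 * 1024)

static int child_fn(void *arg)
{
	printf("child running on its own stack, arg=%s\n", (char *)arg);
	return 0;
}

int main(void)
{
	char *stack = malloc(STACK_SIZE);
	pid_t pid;

	if (!stack)
		return 1;

	/* the stack grows downwards, so pass the kernel its high end */
	pid = clone(child_fn, stack + STACK_SIZE, SIGCHLD, "hello");
	if (pid == -1) {
		perror("clone");
		return 1;
	}
	waitpid(pid, NULL, 0);
	free(stack);
	return 0;
}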
diff --git a/arch/openrisc/kernel/time.c b/arch/openrisc/kernel/time.c new file mode 100644 index 00000000000..bd946ef1623 --- /dev/null +++ b/arch/openrisc/kernel/time.c | |||
@@ -0,0 +1,181 @@ | |||
1 | /* | ||
2 | * OpenRISC time.c | ||
3 | * | ||
4 | * Linux architectural port borrowing liberally from similar works of | ||
5 | * others. All original copyrights apply as per the original source | ||
6 | * declaration. | ||
7 | * | ||
8 | * Modifications for the OpenRISC architecture: | ||
9 | * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or | ||
12 | * modify it under the terms of the GNU General Public License | ||
13 | * as published by the Free Software Foundation; either version | ||
14 | * 2 of the License, or (at your option) any later version. | ||
15 | */ | ||
16 | |||
17 | #include <linux/kernel.h> | ||
18 | #include <linux/time.h> | ||
19 | #include <linux/timex.h> | ||
20 | #include <linux/interrupt.h> | ||
21 | #include <linux/ftrace.h> | ||
22 | |||
23 | #include <linux/clocksource.h> | ||
24 | #include <linux/clockchips.h> | ||
25 | #include <linux/irq.h> | ||
26 | #include <linux/io.h> | ||
27 | |||
28 | #include <asm/cpuinfo.h> | ||
29 | |||
30 | static int openrisc_timer_set_next_event(unsigned long delta, | ||
31 | struct clock_event_device *dev) | ||
32 | { | ||
33 | u32 c; | ||
34 | |||
35 | /* Read the 32-bit counter value, add delta, and keep only the low 28 bits. | ||
36 | * We're guaranteed delta won't be bigger than 28 bits because the | ||
37 | * generic timekeeping code ensures that for us. | ||
38 | */ | ||
39 | c = mfspr(SPR_TTCR); | ||
40 | c += delta; | ||
41 | c &= SPR_TTMR_TP; | ||
42 | |||
43 | /* Set counter and enable interrupt. | ||
44 | * Keep timer in continuous mode always. | ||
45 | */ | ||
46 | mtspr(SPR_TTMR, SPR_TTMR_CR | SPR_TTMR_IE | c); | ||
47 | |||
48 | return 0; | ||
49 | } | ||
50 | |||
51 | static void openrisc_timer_set_mode(enum clock_event_mode mode, | ||
52 | struct clock_event_device *evt) | ||
53 | { | ||
54 | switch (mode) { | ||
55 | case CLOCK_EVT_MODE_PERIODIC: | ||
56 | pr_debug("%s: periodic\n", __func__); | ||
57 | BUG(); | ||
58 | break; | ||
59 | case CLOCK_EVT_MODE_ONESHOT: | ||
60 | pr_debug("%s: oneshot\n", __func__); | ||
61 | break; | ||
62 | case CLOCK_EVT_MODE_UNUSED: | ||
63 | pr_debug("%s: unused\n", __func__); | ||
64 | break; | ||
65 | case CLOCK_EVT_MODE_SHUTDOWN: | ||
66 | pr_debug("%s: shutdown\n", __func__); | ||
67 | break; | ||
68 | case CLOCK_EVT_MODE_RESUME: | ||
69 | pr_debug("%s: resume\n", __func__); | ||
70 | break; | ||
71 | } | ||
72 | } | ||
73 | |||
74 | /* This is the clock event device based on the OR1K tick timer. | ||
75 | * As the timer is being used as a continuous clock-source (required for HR | ||
76 | * timers) we cannot enable the PERIODIC feature. The tick timer can run using | ||
77 | * one-shot events, so no problem. | ||
78 | */ | ||
79 | |||
80 | static struct clock_event_device clockevent_openrisc_timer = { | ||
81 | .name = "openrisc_timer_clockevent", | ||
82 | .features = CLOCK_EVT_FEAT_ONESHOT, | ||
83 | .rating = 300, | ||
84 | .set_next_event = openrisc_timer_set_next_event, | ||
85 | .set_mode = openrisc_timer_set_mode, | ||
86 | }; | ||
87 | |||
88 | static inline void timer_ack(void) | ||
89 | { | ||
90 | /* Clear the IP bit and disable further interrupts */ | ||
91 | /* This can be done very simply... we just need to keep the timer | ||
92 | running, so just maintain the CR bits while clearing the rest | ||
93 | of the register | ||
94 | */ | ||
95 | mtspr(SPR_TTMR, SPR_TTMR_CR); | ||
96 | } | ||
97 | |||
98 | /* | ||
99 | * The timer interrupt is mostly handled in generic code nowadays... this | ||
100 | * function just acknowledges the interrupt and fires the event handler that | ||
101 | * has been set on the clockevent device by the generic time management code. | ||
102 | * | ||
103 | * This function needs to be called by the timer exception handler and that's | ||
104 | * all the exception handler needs to do. | ||
105 | */ | ||
106 | |||
107 | irqreturn_t __irq_entry timer_interrupt(struct pt_regs *regs) | ||
108 | { | ||
109 | struct pt_regs *old_regs = set_irq_regs(regs); | ||
110 | struct clock_event_device *evt = &clockevent_openrisc_timer; | ||
111 | |||
112 | timer_ack(); | ||
113 | |||
114 | /* | ||
115 | * update_process_times() expects us to have called irq_enter(). | ||
116 | */ | ||
117 | irq_enter(); | ||
118 | evt->event_handler(evt); | ||
119 | irq_exit(); | ||
120 | |||
121 | set_irq_regs(old_regs); | ||
122 | |||
123 | return IRQ_HANDLED; | ||
124 | } | ||
125 | |||
126 | static __init void openrisc_clockevent_init(void) | ||
127 | { | ||
128 | clockevents_calc_mult_shift(&clockevent_openrisc_timer, | ||
129 | cpuinfo.clock_frequency, 4); | ||
130 | |||
131 | /* We only have 28 bits */ | ||
132 | clockevent_openrisc_timer.max_delta_ns = | ||
133 | clockevent_delta2ns((u32) 0x0fffffff, &clockevent_openrisc_timer); | ||
134 | clockevent_openrisc_timer.min_delta_ns = | ||
135 | clockevent_delta2ns(1, &clockevent_openrisc_timer); | ||
136 | clockevent_openrisc_timer.cpumask = cpumask_of(0); | ||
137 | clockevents_register_device(&clockevent_openrisc_timer); | ||
138 | } | ||
139 | |||
140 | /** | ||
141 | * Clocksource: Based on OpenRISC timer/counter | ||
142 | * | ||
143 | * This sets up the OpenRISC Tick Timer as a clock source. The tick timer | ||
144 | * is 32 bits wide and runs at the CPU clock frequency. | ||
145 | */ | ||
146 | |||
147 | static cycle_t openrisc_timer_read(struct clocksource *cs) | ||
148 | { | ||
149 | return (cycle_t) mfspr(SPR_TTCR); | ||
150 | } | ||
151 | |||
152 | static struct clocksource openrisc_timer = { | ||
153 | .name = "openrisc_timer", | ||
154 | .rating = 200, | ||
155 | .read = openrisc_timer_read, | ||
156 | .mask = CLOCKSOURCE_MASK(32), | ||
157 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | ||
158 | }; | ||
159 | |||
160 | static int __init openrisc_timer_init(void) | ||
161 | { | ||
162 | if (clocksource_register_hz(&openrisc_timer, cpuinfo.clock_frequency)) | ||
163 | panic("failed to register clocksource"); | ||
164 | |||
165 | /* Enable the incrementer: 'continuous' mode with interrupt disabled */ | ||
166 | mtspr(SPR_TTMR, SPR_TTMR_CR); | ||
167 | |||
168 | return 0; | ||
169 | } | ||
170 | |||
171 | void __init time_init(void) | ||
172 | { | ||
173 | u32 upr; | ||
174 | |||
175 | upr = mfspr(SPR_UPR); | ||
176 | if (!(upr & SPR_UPR_TTP)) | ||
177 | panic("Linux not supported on devices without tick timer"); | ||
178 | |||
179 | openrisc_timer_init(); | ||
180 | openrisc_clockevent_init(); | ||
181 | } | ||
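
openrisc_clockevent_init() leaves the ns<->cycles conversion to clockevents_calc_mult_shift() and clockevent_delta2ns(); roughly, mult = (freq << shift) / NSEC_PER_SEC, a delta is programmed as cycles = (ns * mult) >> shift, and the 28-bit TTMR period bounds max_delta_ns. A simplified model of that arithmetic, assuming a 50 MHz tick timer and a hand-picked shift (both illustrative, not values taken from this port):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	uint64_t freq = 50000000;	/* assumed timer clock: 50 MHz */
	unsigned int shift = 24;	/* assumed shift value */
	uint64_t mult = (freq << shift) / NSEC_PER_SEC;

	uint64_t delta_ns = 10 * 1000 * 1000;		/* ask for a 10 ms event */
	uint64_t cycles = (delta_ns * mult) >> shift;	/* what set_next_event() gets */

	/* inverse direction, roughly what clockevent_delta2ns() computes */
	uint64_t max_cycles = 0x0fffffff;		/* 28-bit TTMR period */
	uint64_t max_ns = (max_cycles << shift) / mult;

	printf("10 ms -> %llu timer cycles\n", (unsigned long long)cycles);
	printf("max programmable delta ~ %llu ns\n", (unsigned long long)max_ns);
	return 0;
}

At 50 MHz this prints roughly 500000 cycles and a ceiling of about 5.4 seconds, which is the 0x0fffffff-cycle limit the "We only have 28 bits" comment in openrisc_clockevent_init() refers to.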
diff --git a/arch/openrisc/kernel/traps.c b/arch/openrisc/kernel/traps.c new file mode 100644 index 00000000000..a4ec44a052b --- /dev/null +++ b/arch/openrisc/kernel/traps.c | |||
@@ -0,0 +1,366 @@ | |||
1 | /* | ||
2 | * OpenRISC traps.c | ||
3 | * | ||
4 | * Linux architectural port borrowing liberally from similar works of | ||
5 | * others. All original copyrights apply as per the original source | ||
6 | * declaration. | ||
7 | * | ||
8 | * Modifications for the OpenRISC architecture: | ||
9 | * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> | ||
10 | * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or | ||
13 | * modify it under the terms of the GNU General Public License | ||
14 | * as published by the Free Software Foundation; either version | ||
15 | * 2 of the License, or (at your option) any later version. | ||
16 | * | ||
17 | * Here we handle the break vectors not used by the system call | ||
18 | * mechanism, as well as some general stack/register dumping | ||
19 | * things. | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | #include <linux/init.h> | ||
24 | #include <linux/sched.h> | ||
25 | #include <linux/kernel.h> | ||
26 | #include <linux/module.h> | ||
27 | #include <linux/kmod.h> | ||
28 | #include <linux/string.h> | ||
29 | #include <linux/errno.h> | ||
30 | #include <linux/ptrace.h> | ||
31 | #include <linux/timer.h> | ||
32 | #include <linux/mm.h> | ||
33 | #include <linux/kallsyms.h> | ||
34 | #include <asm/uaccess.h> | ||
35 | |||
36 | #include <asm/system.h> | ||
37 | #include <asm/segment.h> | ||
38 | #include <asm/io.h> | ||
39 | #include <asm/pgtable.h> | ||
40 | |||
41 | extern char _etext, _stext; | ||
42 | |||
43 | int kstack_depth_to_print = 0x180; | ||
44 | |||
45 | static inline int valid_stack_ptr(struct thread_info *tinfo, void *p) | ||
46 | { | ||
47 | return p > (void *)tinfo && p < (void *)tinfo + THREAD_SIZE - 3; | ||
48 | } | ||
49 | |||
50 | void show_trace(struct task_struct *task, unsigned long *stack) | ||
51 | { | ||
52 | struct thread_info *context; | ||
53 | unsigned long addr; | ||
54 | |||
55 | context = (struct thread_info *) | ||
56 | ((unsigned long)stack & (~(THREAD_SIZE - 1))); | ||
57 | |||
58 | while (valid_stack_ptr(context, stack)) { | ||
59 | addr = *stack++; | ||
60 | if (__kernel_text_address(addr)) { | ||
61 | printk(" [<%08lx>]", addr); | ||
62 | print_symbol(" %s", addr); | ||
63 | printk("\n"); | ||
64 | } | ||
65 | } | ||
66 | printk(" =======================\n"); | ||
67 | } | ||
68 | |||
69 | /* displays a short stack trace */ | ||
70 | void show_stack(struct task_struct *task, unsigned long *esp) | ||
71 | { | ||
72 | unsigned long addr, *stack; | ||
73 | int i; | ||
74 | |||
75 | if (esp == NULL) | ||
76 | esp = (unsigned long *)&esp; | ||
77 | |||
78 | stack = esp; | ||
79 | |||
80 | printk("Stack dump [0x%08lx]:\n", (unsigned long)esp); | ||
81 | for (i = 0; i < kstack_depth_to_print; i++) { | ||
82 | if (kstack_end(stack)) | ||
83 | break; | ||
84 | if (__get_user(addr, stack)) { | ||
85 | /* This message matches the "failing address" pattern | ||
86 | (tagged s390) that ksymoops recognizes, so lines | ||
87 | containing it will not be filtered out by ksymoops. */ | ||
88 | printk("Failing address 0x%lx\n", (unsigned long)stack); | ||
89 | break; | ||
90 | } | ||
91 | stack++; | ||
92 | |||
93 | printk("sp + %02d: 0x%08lx\n", i * 4, addr); | ||
94 | } | ||
95 | printk("\n"); | ||
96 | |||
97 | show_trace(task, esp); | ||
98 | |||
99 | return; | ||
100 | } | ||
101 | |||
102 | void show_trace_task(struct task_struct *tsk) | ||
103 | { | ||
104 | /* | ||
105 | * TODO: SysRq-T trace dump... | ||
106 | */ | ||
107 | } | ||
108 | |||
109 | /* | ||
110 | * The architecture-independent backtrace generator | ||
111 | */ | ||
112 | void dump_stack(void) | ||
113 | { | ||
114 | unsigned long stack; | ||
115 | |||
116 | show_stack(current, &stack); | ||
117 | } | ||
118 | |||
119 | void show_registers(struct pt_regs *regs) | ||
120 | { | ||
121 | int i; | ||
122 | int in_kernel = 1; | ||
123 | unsigned long esp; | ||
124 | |||
125 | esp = (unsigned long)(®s->sp); | ||
126 | if (user_mode(regs)) | ||
127 | in_kernel = 0; | ||
128 | |||
129 | printk("CPU #: %d\n" | ||
130 | " PC: %08lx SR: %08lx SP: %08lx\n", | ||
131 | smp_processor_id(), regs->pc, regs->sr, regs->sp); | ||
132 | printk("GPR00: %08lx GPR01: %08lx GPR02: %08lx GPR03: %08lx\n", | ||
133 | 0L, regs->gpr[1], regs->gpr[2], regs->gpr[3]); | ||
134 | printk("GPR04: %08lx GPR05: %08lx GPR06: %08lx GPR07: %08lx\n", | ||
135 | regs->gpr[4], regs->gpr[5], regs->gpr[6], regs->gpr[7]); | ||
136 | printk("GPR08: %08lx GPR09: %08lx GPR10: %08lx GPR11: %08lx\n", | ||
137 | regs->gpr[8], regs->gpr[9], regs->gpr[10], regs->gpr[11]); | ||
138 | printk("GPR12: %08lx GPR13: %08lx GPR14: %08lx GPR15: %08lx\n", | ||
139 | regs->gpr[12], regs->gpr[13], regs->gpr[14], regs->gpr[15]); | ||
140 | printk("GPR16: %08lx GPR17: %08lx GPR18: %08lx GPR19: %08lx\n", | ||
141 | regs->gpr[16], regs->gpr[17], regs->gpr[18], regs->gpr[19]); | ||
142 | printk("GPR20: %08lx GPR21: %08lx GPR22: %08lx GPR23: %08lx\n", | ||
143 | regs->gpr[20], regs->gpr[21], regs->gpr[22], regs->gpr[23]); | ||
144 | printk("GPR24: %08lx GPR25: %08lx GPR26: %08lx GPR27: %08lx\n", | ||
145 | regs->gpr[24], regs->gpr[25], regs->gpr[26], regs->gpr[27]); | ||
146 | printk("GPR28: %08lx GPR29: %08lx GPR30: %08lx GPR31: %08lx\n", | ||
147 | regs->gpr[28], regs->gpr[29], regs->gpr[30], regs->gpr[31]); | ||
148 | printk(" RES: %08lx oGPR11: %08lx syscallno: %08lx\n", | ||
149 | regs->gpr[11], regs->orig_gpr11, regs->syscallno); | ||
150 | |||
151 | printk("Process %s (pid: %d, stackpage=%08lx)\n", | ||
152 | current->comm, current->pid, (unsigned long)current); | ||
153 | /* | ||
154 | * When in-kernel, we also print out the stack and code at the | ||
155 | * time of the fault.. | ||
156 | */ | ||
157 | if (in_kernel) { | ||
158 | |||
159 | printk("\nStack: "); | ||
160 | show_stack(NULL, (unsigned long *)esp); | ||
161 | |||
162 | printk("\nCode: "); | ||
163 | if (regs->pc < PAGE_OFFSET) | ||
164 | goto bad; | ||
165 | |||
166 | for (i = -24; i < 24; i++) { | ||
167 | unsigned char c; | ||
168 | if (__get_user(c, &((unsigned char *)regs->pc)[i])) { | ||
169 | bad: | ||
170 | printk(" Bad PC value."); | ||
171 | break; | ||
172 | } | ||
173 | |||
174 | if (i == 0) | ||
175 | printk("(%02x) ", c); | ||
176 | else | ||
177 | printk("%02x ", c); | ||
178 | } | ||
179 | } | ||
180 | printk("\n"); | ||
181 | } | ||
182 | |||
183 | void nommu_dump_state(struct pt_regs *regs, | ||
184 | unsigned long ea, unsigned long vector) | ||
185 | { | ||
186 | int i; | ||
187 | unsigned long addr, stack = regs->sp; | ||
188 | |||
189 | printk("\n\r[nommu_dump_state] :: ea %lx, vector %lx\n\r", ea, vector); | ||
190 | |||
191 | printk("CPU #: %d\n" | ||
192 | " PC: %08lx SR: %08lx SP: %08lx\n", | ||
193 | 0, regs->pc, regs->sr, regs->sp); | ||
194 | printk("GPR00: %08lx GPR01: %08lx GPR02: %08lx GPR03: %08lx\n", | ||
195 | 0L, regs->gpr[1], regs->gpr[2], regs->gpr[3]); | ||
196 | printk("GPR04: %08lx GPR05: %08lx GPR06: %08lx GPR07: %08lx\n", | ||
197 | regs->gpr[4], regs->gpr[5], regs->gpr[6], regs->gpr[7]); | ||
198 | printk("GPR08: %08lx GPR09: %08lx GPR10: %08lx GPR11: %08lx\n", | ||
199 | regs->gpr[8], regs->gpr[9], regs->gpr[10], regs->gpr[11]); | ||
200 | printk("GPR12: %08lx GPR13: %08lx GPR14: %08lx GPR15: %08lx\n", | ||
201 | regs->gpr[12], regs->gpr[13], regs->gpr[14], regs->gpr[15]); | ||
202 | printk("GPR16: %08lx GPR17: %08lx GPR18: %08lx GPR19: %08lx\n", | ||
203 | regs->gpr[16], regs->gpr[17], regs->gpr[18], regs->gpr[19]); | ||
204 | printk("GPR20: %08lx GPR21: %08lx GPR22: %08lx GPR23: %08lx\n", | ||
205 | regs->gpr[20], regs->gpr[21], regs->gpr[22], regs->gpr[23]); | ||
206 | printk("GPR24: %08lx GPR25: %08lx GPR26: %08lx GPR27: %08lx\n", | ||
207 | regs->gpr[24], regs->gpr[25], regs->gpr[26], regs->gpr[27]); | ||
208 | printk("GPR28: %08lx GPR29: %08lx GPR30: %08lx GPR31: %08lx\n", | ||
209 | regs->gpr[28], regs->gpr[29], regs->gpr[30], regs->gpr[31]); | ||
210 | printk(" RES: %08lx oGPR11: %08lx syscallno: %08lx\n", | ||
211 | regs->gpr[11], regs->orig_gpr11, regs->syscallno); | ||
212 | |||
213 | printk("Process %s (pid: %d, stackpage=%08lx)\n", | ||
214 | ((struct task_struct *)(__pa(current)))->comm, | ||
215 | ((struct task_struct *)(__pa(current)))->pid, | ||
216 | (unsigned long)current); | ||
217 | |||
218 | printk("\nStack: "); | ||
219 | printk("Stack dump [0x%08lx]:\n", (unsigned long)stack); | ||
220 | for (i = 0; i < kstack_depth_to_print; i++) { | ||
221 | if (((long)stack & (THREAD_SIZE - 1)) == 0) | ||
222 | break; | ||
223 | stack += 4; | ||
224 | |||
225 | printk("%lx :: sp + %02d: 0x%08lx\n", stack, i * 4, | ||
226 | *((unsigned long *)(__pa(stack)))); | ||
227 | } | ||
228 | printk("\n"); | ||
229 | |||
230 | printk("Call Trace: "); | ||
231 | i = 1; | ||
232 | while (((long)stack & (THREAD_SIZE - 1)) != 0) { | ||
233 | addr = *((unsigned long *)__pa(stack)); | ||
234 | stack += 4; | ||
235 | |||
236 | if (kernel_text_address(addr)) { | ||
237 | if (i && ((i % 6) == 0)) | ||
238 | printk("\n "); | ||
239 | printk(" [<%08lx>]", addr); | ||
240 | i++; | ||
241 | } | ||
242 | } | ||
243 | printk("\n"); | ||
244 | |||
245 | printk("\nCode: "); | ||
246 | |||
247 | for (i = -24; i < 24; i++) { | ||
248 | unsigned char c; | ||
249 | c = ((unsigned char *)(__pa(regs->pc)))[i]; | ||
250 | |||
251 | if (i == 0) | ||
252 | printk("(%02x) ", c); | ||
253 | else | ||
254 | printk("%02x ", c); | ||
255 | } | ||
256 | printk("\n"); | ||
257 | } | ||
258 | |||
259 | /* This is normally the 'Oops' routine */ | ||
260 | void die(const char *str, struct pt_regs *regs, long err) | ||
261 | { | ||
262 | |||
263 | console_verbose(); | ||
264 | printk("\n%s#: %04lx\n", str, err & 0xffff); | ||
265 | show_registers(regs); | ||
266 | #ifdef CONFIG_JUMP_UPON_UNHANDLED_EXCEPTION | ||
267 | printk("\n\nUNHANDLED_EXCEPTION: entering infinite loop\n"); | ||
268 | |||
269 | /* shut down interrupts */ | ||
270 | local_irq_disable(); | ||
271 | |||
272 | __asm__ __volatile__("l.nop 1"); | ||
273 | do {} while (1); | ||
274 | #endif | ||
275 | do_exit(SIGSEGV); | ||
276 | } | ||
277 | |||
278 | /* This is normally the 'Oops' routine */ | ||
279 | void die_if_kernel(const char *str, struct pt_regs *regs, long err) | ||
280 | { | ||
281 | if (user_mode(regs)) | ||
282 | return; | ||
283 | |||
284 | die(str, regs, err); | ||
285 | } | ||
286 | |||
287 | void unhandled_exception(struct pt_regs *regs, int ea, int vector) | ||
288 | { | ||
289 | printk("Unable to handle exception at EA = 0x%x, vector 0x%x\n", | ||
290 | ea, vector); | ||
291 | die("Oops", regs, 9); | ||
292 | } | ||
293 | |||
294 | void __init trap_init(void) | ||
295 | { | ||
296 | /* Nothing needs to be done */ | ||
297 | } | ||
298 | |||
299 | asmlinkage void do_trap(struct pt_regs *regs, unsigned long address) | ||
300 | { | ||
301 | siginfo_t info; | ||
302 | memset(&info, 0, sizeof(info)); | ||
303 | info.si_signo = SIGTRAP; | ||
304 | info.si_code = TRAP_TRACE; | ||
305 | info.si_addr = (void *)address; | ||
306 | force_sig_info(SIGTRAP, &info, current); | ||
307 | |||
308 | regs->pc += 4; | ||
309 | } | ||
310 | |||
311 | asmlinkage void do_unaligned_access(struct pt_regs *regs, unsigned long address) | ||
312 | { | ||
313 | siginfo_t info; | ||
314 | |||
315 | if (user_mode(regs)) { | ||
316 | /* Send a SIGBUS (BUS_ADRALN) for the alignment fault */ | ||
317 | info.si_signo = SIGBUS; | ||
318 | info.si_errno = 0; | ||
319 | info.si_code = BUS_ADRALN; | ||
320 | info.si_addr = (void *)address; | ||
321 | force_sig_info(SIGBUS, &info, current); | ||
322 | } else { | ||
323 | printk("KERNEL: Unaligned Access 0x%.8lx\n", address); | ||
324 | show_registers(regs); | ||
325 | die("Die:", regs, address); | ||
326 | } | ||
327 | |||
328 | } | ||
329 | |||
330 | asmlinkage void do_bus_fault(struct pt_regs *regs, unsigned long address) | ||
331 | { | ||
332 | siginfo_t info; | ||
333 | |||
334 | if (user_mode(regs)) { | ||
335 | /* Send a SIGBUS */ | ||
336 | info.si_signo = SIGBUS; | ||
337 | info.si_errno = 0; | ||
338 | info.si_code = BUS_ADRERR; | ||
339 | info.si_addr = (void *)address; | ||
340 | force_sig_info(SIGBUS, &info, current); | ||
341 | } else { /* Kernel mode */ | ||
342 | printk("KERNEL: Bus error (SIGBUS) 0x%.8lx\n", address); | ||
343 | show_registers(regs); | ||
344 | die("Die:", regs, address); | ||
345 | } | ||
346 | } | ||
347 | |||
348 | asmlinkage void do_illegal_instruction(struct pt_regs *regs, | ||
349 | unsigned long address) | ||
350 | { | ||
351 | siginfo_t info; | ||
352 | |||
353 | if (user_mode(regs)) { | ||
354 | /* Send a SIGILL */ | ||
355 | info.si_signo = SIGILL; | ||
356 | info.si_errno = 0; | ||
357 | info.si_code = ILL_ILLOPC; | ||
358 | info.si_addr = (void *)address; | ||
359 | force_sig_info(SIGILL, &info, current); | ||
360 | } else { /* Kernel mode */ | ||
361 | printk("KERNEL: Illegal instruction (SIGILL) 0x%.8lx\n", | ||
362 | address); | ||
363 | show_registers(regs); | ||
364 | die("Die:", regs, address); | ||
365 | } | ||
366 | } | ||
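
show_trace() above has no frame pointers to follow: it scans every word on the kernel stack and reports the ones that happen to fall inside kernel text, accepting occasional false positives. A toy userspace model of that heuristic (the address range and the fake stack contents are invented for illustration):

#include <stdio.h>

#define TEXT_START 0xc0000000UL		/* assumed _stext */
#define TEXT_END   0xc0400000UL		/* assumed _etext */

static int looks_like_text(unsigned long addr)
{
	return addr >= TEXT_START && addr < TEXT_END;
}

int main(void)
{
	/* fake stack contents: data values mixed with return addresses */
	unsigned long stack[] = { 0x12UL, 0xc0001234UL, 0xdeadbeefUL, 0xc03ffff0UL };
	unsigned int i;

	for (i = 0; i < sizeof(stack) / sizeof(stack[0]); i++)
		if (looks_like_text(stack[i]))
			printf(" [<%08lx>]\n", stack[i]);
	return 0;
}

The real code additionally bounds the walk with valid_stack_ptr() so it never strays outside the current THREAD_SIZE-aligned stack area.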
diff --git a/arch/openrisc/kernel/vmlinux.h b/arch/openrisc/kernel/vmlinux.h new file mode 100644 index 00000000000..ee842a2d3f3 --- /dev/null +++ b/arch/openrisc/kernel/vmlinux.h | |||
@@ -0,0 +1,12 @@ | |||
1 | #ifndef __OPENRISC_VMLINUX_H_ | ||
2 | #define __OPENRISC_VMLINUX_H_ | ||
3 | |||
4 | extern char _stext, _etext, _edata, _end; | ||
5 | #ifdef CONFIG_BLK_DEV_INITRD | ||
6 | extern char __initrd_start, __initrd_end; | ||
7 | extern char __initramfs_start; | ||
8 | #endif | ||
9 | |||
10 | extern u32 __dtb_start[]; | ||
11 | |||
12 | #endif | ||
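
_stext, _etext, _edata and _end are not defined in any C file; they are placed by the linker script and only declared here so C code can take their addresses. Ordinary userspace binaries get analogous linker-provided symbols (etext, edata, end; see end(3)), which the small hedged snippet below prints just to show how such symbols are consumed from C:

#include <stdio.h>

extern char etext, edata, end;	/* provided by the linker, see end(3) */

int main(void)
{
	printf("etext: %p\n", (void *)&etext);
	printf("edata: %p\n", (void *)&edata);
	printf("end  : %p\n", (void *)&end);
	return 0;
}

Only the addresses of these symbols are meaningful; the char type is just a convenient way to let the compiler form a pointer to the boundary.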
diff --git a/arch/openrisc/kernel/vmlinux.lds.S b/arch/openrisc/kernel/vmlinux.lds.S new file mode 100644 index 00000000000..2d69a853b74 --- /dev/null +++ b/arch/openrisc/kernel/vmlinux.lds.S | |||
@@ -0,0 +1,115 @@ | |||
1 | /* | ||
2 | * OpenRISC vmlinux.lds.S | ||
3 | * | ||
4 | * Linux architectural port borrowing liberally from similar works of | ||
5 | * others. All original copyrights apply as per the original source | ||
6 | * declaration. | ||
7 | * | ||
8 | * Modifications for the OpenRISC architecture: | ||
9 | * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> | ||
10 | * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or | ||
13 | * modify it under the terms of the GNU General Public License | ||
14 | * as published by the Free Software Foundation; either version | ||
15 | * 2 of the License, or (at your option) any later version. | ||
16 | * | ||
17 | * ld script for OpenRISC architecture | ||
18 | */ | ||
19 | |||
20 | /* TODO | ||
21 | * - clean up __offset & stuff | ||
22 | * - change all 8192 alignments to PAGE !!! | ||
23 | * - recheck if all alignments are really needed | ||
24 | */ | ||
25 | |||
26 | # define LOAD_OFFSET PAGE_OFFSET | ||
27 | # define LOAD_BASE PAGE_OFFSET | ||
28 | |||
29 | #include <asm/page.h> | ||
30 | #include <asm/cache.h> | ||
31 | #include <asm-generic/vmlinux.lds.h> | ||
32 | |||
33 | OUTPUT_FORMAT("elf32-or32", "elf32-or32", "elf32-or32") | ||
34 | jiffies = jiffies_64 + 4; | ||
35 | |||
36 | SECTIONS | ||
37 | { | ||
38 | /* Read-only sections, merged into text segment: */ | ||
39 | . = LOAD_BASE ; | ||
40 | |||
41 | /* _s_kernel_ro must be page aligned */ | ||
42 | . = ALIGN(PAGE_SIZE); | ||
43 | _s_kernel_ro = .; | ||
44 | |||
45 | .text : AT(ADDR(.text) - LOAD_OFFSET) | ||
46 | { | ||
47 | _stext = .; | ||
48 | TEXT_TEXT | ||
49 | SCHED_TEXT | ||
50 | LOCK_TEXT | ||
51 | KPROBES_TEXT | ||
52 | IRQENTRY_TEXT | ||
53 | *(.fixup) | ||
54 | *(.text.__*) | ||
55 | _etext = .; | ||
56 | } | ||
57 | /* TODO: Check if fixup and text.__* are really necessary | ||
58 | * fixup is definitely necessary | ||
59 | */ | ||
60 | |||
61 | _sdata = .; | ||
62 | |||
63 | /* Page alignment required for RO_DATA_SECTION */ | ||
64 | RO_DATA_SECTION(PAGE_SIZE) | ||
65 | _e_kernel_ro = .; | ||
66 | |||
67 | /* Whatever comes after _e_kernel_ro had better be page-aligned, too */ | ||
68 | |||
69 | /* 32 here is cacheline size... recheck this */ | ||
70 | RW_DATA_SECTION(32, PAGE_SIZE, PAGE_SIZE) | ||
71 | |||
72 | _edata = .; | ||
73 | |||
74 | EXCEPTION_TABLE(4) | ||
75 | NOTES | ||
76 | |||
77 | /* Init code and data */ | ||
78 | . = ALIGN(PAGE_SIZE); | ||
79 | __init_begin = .; | ||
80 | |||
81 | HEAD_TEXT_SECTION | ||
82 | |||
83 | /* Page aligned */ | ||
84 | INIT_TEXT_SECTION(PAGE_SIZE) | ||
85 | |||
86 | /* Align __setup_start on 16 byte boundary */ | ||
87 | INIT_DATA_SECTION(16) | ||
88 | |||
89 | PERCPU_SECTION(L1_CACHE_BYTES) | ||
90 | |||
91 | __init_end = .; | ||
92 | |||
93 | . = ALIGN(PAGE_SIZE); | ||
94 | .initrd : AT(ADDR(.initrd) - LOAD_OFFSET) | ||
95 | { | ||
96 | __initrd_start = .; | ||
97 | *(.initrd) | ||
98 | __initrd_end = .; | ||
99 | FILL (0); | ||
100 | . = ALIGN (PAGE_SIZE); | ||
101 | } | ||
102 | |||
103 | __vmlinux_end = .; /* last address of the physical file */ | ||
104 | |||
105 | BSS_SECTION(0, 0, 0x20) | ||
106 | |||
107 | _end = .; | ||
108 | |||
109 | /* Throw in the debugging sections */ | ||
110 | STABS_DEBUG | ||
111 | DWARF_DEBUG | ||
112 | |||
113 | /* Sections to be discarded -- must be last */ | ||
114 | DISCARDS | ||
115 | } | ||
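
Every loadable section above is placed with AT(ADDR(section) - LOAD_OFFSET), i.e. its load (physical) address is its link-time virtual address minus PAGE_OFFSET. A minimal sketch of that arithmetic, assuming PAGE_OFFSET is 0xc0000000 (illustrative only; the real value comes from <asm/page.h>):

#include <stdio.h>

#define PAGE_OFFSET 0xc0000000UL	/* assumed kernel virtual base */

int main(void)
{
	unsigned long vma = 0xc0012345UL;	/* where the code will run */
	unsigned long lma = vma - PAGE_OFFSET;	/* where it is loaded in RAM */

	printf("virtual 0x%08lx -> load address 0x%08lx\n", vma, lma);
	return 0;
}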