author     Paul Mundt <lethal@linux-sh.org>   2008-04-24 23:58:40 -0400
committer  Paul Mundt <lethal@linux-sh.org>   2008-05-08 06:51:37 -0400
commit     ccd805874198c248498b5f269656ec14397eeede (patch)
tree       14a99723690c207906c4f7e84428457c4251b45d /arch
parent     9141d30a480850d989fc245909b98670a7b66ec1 (diff)
sh64: Fixup the nommu build.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'arch')
-rw-r--r--  arch/sh/kernel/cpu/sh5/entry.S  | 28
-rw-r--r--  arch/sh/mm/Makefile_64          |  7
-rw-r--r--  arch/sh/mm/cache-sh5.c          |  2
3 files changed, 34 insertions, 3 deletions
diff --git a/arch/sh/kernel/cpu/sh5/entry.S b/arch/sh/kernel/cpu/sh5/entry.S
index ba8750176d91..05372ed6c568 100644
--- a/arch/sh/kernel/cpu/sh5/entry.S
+++ b/arch/sh/kernel/cpu/sh5/entry.S
@@ -143,12 +143,22 @@ resvec_save_area:
 trap_jtable:
 	.long	do_exception_error		/* 0x000 */
 	.long	do_exception_error		/* 0x020 */
+#ifdef CONFIG_MMU
 	.long	tlb_miss_load			/* 0x040 */
 	.long	tlb_miss_store			/* 0x060 */
+#else
+	.long	do_exception_error
+	.long	do_exception_error
+#endif
 	! ARTIFICIAL pseudo-EXPEVT setting
 	.long	do_debug_interrupt		/* 0x080 */
+#ifdef CONFIG_MMU
 	.long	tlb_miss_load			/* 0x0A0 */
 	.long	tlb_miss_store			/* 0x0C0 */
+#else
+	.long	do_exception_error
+	.long	do_exception_error
+#endif
 	.long	do_address_error_load		/* 0x0E0 */
 	.long	do_address_error_store		/* 0x100 */
 #ifdef CONFIG_SH_FPU
@@ -185,10 +195,18 @@ trap_jtable:
 	.endr
 	.long	do_IRQ			/* 0xA00 */
 	.long	do_IRQ			/* 0xA20 */
+#ifdef CONFIG_MMU
 	.long	itlb_miss_or_IRQ	/* 0xA40 */
+#else
+	.long	do_IRQ
+#endif
 	.long	do_IRQ			/* 0xA60 */
 	.long	do_IRQ			/* 0xA80 */
+#ifdef CONFIG_MMU
 	.long	itlb_miss_or_IRQ	/* 0xAA0 */
+#else
+	.long	do_IRQ
+#endif
 	.long	do_exception_error	/* 0xAC0 */
 	.long	do_address_error_exec	/* 0xAE0 */
 	.rept 8
@@ -274,6 +292,7 @@ not_a_tlb_miss:
 	 * Instead of '.space 1024-TEXT_SIZE' place the RESVEC
 	 * block making sure the final alignment is correct.
 	 */
+#ifdef CONFIG_MMU
 tlb_miss:
 	synco	/* TAKum03020 (but probably a good idea anyway.) */
 	putcon	SP, KCR1
@@ -377,6 +396,9 @@ fixup_to_invoke_general_handler:
 	getcon	KCR1, SP
 	pta	handle_exception, tr0
 	blink	tr0, ZERO
+#else /* CONFIG_MMU */
+	.balign 256
+#endif
 
 /* NB TAKE GREAT CARE HERE TO ENSURE THAT THE INTERRUPT CODE
    DOES END UP AT VBR+0x600 */
@@ -1103,6 +1125,7 @@ restore_all:
  * fpu_error_or_IRQ? is a helper to deflect to the right cause.
  *
  */
+#ifdef CONFIG_MMU
 tlb_miss_load:
 	or	SP, ZERO, r2
 	or	ZERO, ZERO, r3		/* Read */
@@ -1132,6 +1155,7 @@ call_do_page_fault:
 	movi	do_page_fault, r6
 	ptabs	r6, tr0
 	blink	tr0, ZERO
+#endif /* CONFIG_MMU */
 
 fpu_error_or_IRQA:
 	pta	its_IRQ, tr0
@@ -1481,6 +1505,7 @@ poke_real_address_q:
 	ptabs	LINK, tr0
 	blink	tr0, r63
 
+#ifdef CONFIG_MMU
 /*
  * --- User Access Handling Section
  */
@@ -1604,6 +1629,7 @@ ___clear_user_exit:
 	ptabs	LINK, tr0
 	blink	tr0, ZERO
 
+#endif /* CONFIG_MMU */
 
 /*
  * int __strncpy_from_user(unsigned long __dest, unsigned long __src,
@@ -2014,9 +2040,11 @@ sa_default_restorer:
 	.global	asm_uaccess_start	/* Just a marker */
 asm_uaccess_start:
 
+#ifdef CONFIG_MMU
 	.long	___copy_user1, ___copy_user_exit
 	.long	___copy_user2, ___copy_user_exit
 	.long	___clear_user1, ___clear_user_exit
+#endif
 	.long	___strncpy_from_user1, ___strncpy_from_user_exit
 	.long	___strnlen_user1, ___strnlen_user_exit
 	.long	___get_user_asm_b1, ___get_user_asm_b_exit
diff --git a/arch/sh/mm/Makefile_64 b/arch/sh/mm/Makefile_64
index cbd6aa33c5ac..0d92a8a3ac9a 100644
--- a/arch/sh/mm/Makefile_64
+++ b/arch/sh/mm/Makefile_64
@@ -2,10 +2,11 @@
 # Makefile for the Linux SuperH-specific parts of the memory manager.
 #
 
-obj-y			:= init.o extable_64.o consistent.o
+obj-y			:= init.o consistent.o
 
-mmu-y			:= tlb-nommu.o pg-nommu.o
-mmu-$(CONFIG_MMU)	:= fault_64.o ioremap_64.o tlbflush_64.o tlb-sh5.o
+mmu-y			:= tlb-nommu.o pg-nommu.o extable_32.o
+mmu-$(CONFIG_MMU)	:= fault_64.o ioremap_64.o tlbflush_64.o tlb-sh5.o \
+			   extable_64.o
 
 ifndef CONFIG_CACHE_OFF
 obj-y			+= cache-sh5.o
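Aside (not part of the patch): the Makefile hunk above relies on the common kbuild idiom where mmu-y holds the nommu default object list and mmu-$(CONFIG_MMU) overwrites it only when CONFIG_MMU=y, because the variable reference then expands to mmu-y; the real Makefile appends $(mmu-y) into obj-y elsewhere. A minimal standalone sketch of that selection, with hypothetical object names, runnable with plain GNU make:

	# Sketch of the mmu-y / mmu-$(CONFIG_MMU) override (assumed names).
	CONFIG_MMU := y                 # comment out to simulate a nommu build

	mmu-y             := tlb-nommu.o pg-nommu.o
	# With CONFIG_MMU=y this assigns to "mmu-y" and replaces the default;
	# otherwise it assigns to the unused variable "mmu-".
	mmu-$(CONFIG_MMU) := fault_64.o tlb-sh5.o

	obj-y := init.o consistent.o
	obj-y += $(mmu-y)               # aggregation step, as kbuild does elsewhere

	all:
		@echo "objects: $(obj-y)"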
diff --git a/arch/sh/mm/cache-sh5.c b/arch/sh/mm/cache-sh5.c
index 3877321fcede..9e277ec7d536 100644
--- a/arch/sh/mm/cache-sh5.c
+++ b/arch/sh/mm/cache-sh5.c
@@ -714,6 +714,7 @@ void flush_cache_sigtramp(unsigned long vaddr)
 	sh64_icache_inv_current_user_range(vaddr, end);
 }
 
+#ifdef CONFIG_MMU
 /*
  * These *MUST* lie in an area of virtual address space that's otherwise
  * unused.
@@ -830,3 +831,4 @@ void clear_user_page(void *to, unsigned long address, struct page *page)
 	else
 		sh64_clear_user_page_coloured(to, address);
 }
+#endif