Diffstat (limited to 'arch/xtensa')
-rw-r--r--  arch/xtensa/Kconfig                         |  21
-rw-r--r--  arch/xtensa/Makefile                        |  25
-rw-r--r--  arch/xtensa/boot/boot-elf/bootstrap.S       |   3
-rw-r--r--  arch/xtensa/boot/boot-redboot/bootstrap.S   |  37
-rw-r--r--  arch/xtensa/configs/iss_defconfig           |   6
-rw-r--r--  arch/xtensa/kernel/align.S                  |  42
-rw-r--r--  arch/xtensa/kernel/coprocessor.S            |   2
-rw-r--r--  arch/xtensa/kernel/entry.S                  |  28
-rw-r--r--  arch/xtensa/kernel/head.S                   |  53
-rw-r--r--  arch/xtensa/kernel/pci-dma.c                |  44
-rw-r--r--  arch/xtensa/kernel/process.c                |  37
-rw-r--r--  arch/xtensa/kernel/ptrace.c                 |   2
-rw-r--r--  arch/xtensa/kernel/setup.c                  |  41
-rw-r--r--  arch/xtensa/kernel/signal.c                 |  16
-rw-r--r--  arch/xtensa/kernel/syscalls.c               |   4
-rw-r--r--  arch/xtensa/kernel/traps.c                  |  56
-rw-r--r--  arch/xtensa/kernel/vectors.S                |  12
-rw-r--r--  arch/xtensa/kernel/vmlinux.lds.S            |  27
-rw-r--r--  arch/xtensa/lib/checksum.S                  |   3
-rw-r--r--  arch/xtensa/lib/memcopy.S                   |   2
-rw-r--r--  arch/xtensa/lib/memset.S                    |   2
-rw-r--r--  arch/xtensa/lib/strncpy_user.S              |   2
-rw-r--r--  arch/xtensa/lib/strnlen_user.S              |   2
-rw-r--r--  arch/xtensa/lib/usercopy.S                  |   2
-rw-r--r--  arch/xtensa/mm/fault.c                      |  10
-rw-r--r--  arch/xtensa/mm/init.c                       |   6
-rw-r--r--  arch/xtensa/mm/misc.S                       | 265
-rw-r--r--  arch/xtensa/mm/tlb.c                        | 445
-rw-r--r--  arch/xtensa/platform-iss/console.c          |   8
-rw-r--r--  arch/xtensa/platform-iss/network.c          |   2
30 files changed, 290 insertions(+), 915 deletions(-)
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index 9eccfbd1b536..2e74cb0b7807 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -48,25 +48,10 @@ menu "Processor type and features"
48 48
49choice 49choice
50 prompt "Xtensa Processor Configuration" 50 prompt "Xtensa Processor Configuration"
51 default XTENSA_CPU_LINUX_BE 51 default XTENSA_VARIANT_FSF
52 52
53config XTENSA_CPU_LINUX_BE 53config XTENSA_VARIANT_FSF
54 bool "linux_be" 54 bool "fsf"
55 ---help---
56 The linux_be processor configuration is the baseline Xtensa
57 configurations included in this kernel and also used by
58 binutils, gcc, and gdb. It contains no TIE, no coprocessors,
59 and the following configuration options:
60
61 Code Density Option 2 Misc Special Registers
62 NSA/NSAU Instructions 128-bit Data Bus Width
63 Processor ID 8K, 2-way I and D Caches
64 Zero-Overhead Loops 2 Inst Address Break Registers
65 Big Endian 2 Data Address Break Registers
66 64 General-Purpose Registers JTAG Interface and Trace Port
67 17 Interrupts MMU w/ TLBs and Autorefill
68 3 Interrupt Levels 8 Autorefill Ways (I/D TLBs)
69 3 Timers Unaligned Exceptions
70endchoice 55endchoice
71 56
72config MMU 57config MMU
diff --git a/arch/xtensa/Makefile b/arch/xtensa/Makefile
index 3a3a4c66ef87..95f836db38fa 100644
--- a/arch/xtensa/Makefile
+++ b/arch/xtensa/Makefile
@@ -11,13 +11,13 @@
11# this architecture 11# this architecture
12 12
13# Core configuration. 13# Core configuration.
14# (Use CPU=<xtensa_config> to use another default compiler.) 14# (Use VAR=<xtensa_config> to use another default compiler.)
15 15
16cpu-$(CONFIG_XTENSA_CPU_LINUX_BE) := linux_be 16variant-$(CONFIG_XTENSA_VARIANT_FSF) := fsf
17cpu-$(CONFIG_XTENSA_CPU_LINUX_CUSTOM) := linux_custom 17variant-$(CONFIG_XTENSA_VARIANT_LINUX_CUSTOM) := custom
18 18
19CPU = $(cpu-y) 19VARIANT = $(variant-y)
20export CPU 20export VARIANT
21 21
22# Platform configuration 22# Platform configuration
23 23
@@ -27,8 +27,6 @@ platform-$(CONFIG_XTENSA_PLATFORM_ISS) := iss
27PLATFORM = $(platform-y) 27PLATFORM = $(platform-y)
28export PLATFORM 28export PLATFORM
29 29
30CPPFLAGS += $(if $(KBUILD_SRC),-I$(srctree)/include/asm-xtensa/)
31CPPFLAGS += -Iinclude/asm
32CFLAGS += -pipe -mlongcalls 30CFLAGS += -pipe -mlongcalls
33 31
34KBUILD_DEFCONFIG := iss_defconfig 32KBUILD_DEFCONFIG := iss_defconfig
@@ -41,12 +39,12 @@ core-$(CONFIG_EMBEDDED_RAMDISK) += arch/xtensa/boot/ramdisk/
41 39
42# Test for cross compiling 40# Test for cross compiling
43 41
44ifneq ($(CPU),) 42ifneq ($(VARIANT),)
45 COMPILE_ARCH = $(shell uname -m) 43 COMPILE_ARCH = $(shell uname -m)
46 44
47 ifneq ($(COMPILE_ARCH), xtensa) 45 ifneq ($(COMPILE_ARCH), xtensa)
48 ifndef CROSS_COMPILE 46 ifndef CROSS_COMPILE
49 CROSS_COMPILE = xtensa_$(CPU)- 47 CROSS_COMPILE = xtensa_$(VARIANT)-
50 endif 48 endif
51 endif 49 endif
52endif 50endif
@@ -68,14 +66,13 @@ archinc := include/asm-xtensa
68 66
69archprepare: $(archinc)/.platform 67archprepare: $(archinc)/.platform
70 68
71# Update machine cpu and platform symlinks if something which affects 69# Update processor variant and platform symlinks if something which affects
72# them changed. 70# them changed.
73 71
74$(archinc)/.platform: $(wildcard include/config/arch/*.h) include/config/auto.conf 72$(archinc)/.platform: $(wildcard include/config/arch/*.h) include/config/auto.conf
75 @echo ' SYMLINK $(archinc)/xtensa/config -> $(archinc)/xtensa/config-$(CPU)' 73 @echo ' SYMLINK $(archinc)/variant -> $(archinc)/variant-$(VARIANT)'
76 $(Q)mkdir -p $(archinc) 74 $(Q)mkdir -p $(archinc)
77 $(Q)mkdir -p $(archinc)/xtensa 75 $(Q)ln -fsn $(srctree)/$(archinc)/variant-$(VARIANT) $(archinc)/variant
78 $(Q)ln -fsn $(srctree)/$(archinc)/xtensa/config-$(CPU) $(archinc)/xtensa/config
79 @echo ' SYMLINK $(archinc)/platform -> $(archinc)/platform-$(PLATFORM)' 76 @echo ' SYMLINK $(archinc)/platform -> $(archinc)/platform-$(PLATFORM)'
80 $(Q)ln -fsn $(srctree)/$(archinc)/platform-$(PLATFORM) $(archinc)/platform 77 $(Q)ln -fsn $(srctree)/$(archinc)/platform-$(PLATFORM) $(archinc)/platform
81 @touch $@ 78 @touch $@
@@ -89,7 +86,7 @@ zImage zImage.initrd: vmlinux
89 $(Q)$(MAKE) $(build)=$(boot) $@ 86 $(Q)$(MAKE) $(build)=$(boot) $@
90 87
91CLEAN_FILES += arch/xtensa/vmlinux.lds \ 88CLEAN_FILES += arch/xtensa/vmlinux.lds \
92 $(archinc)/platform $(archinc)/xtensa/config \ 89 $(archinc)/platform $(archinc)/variant \
93 $(archinc)/.platform 90 $(archinc)/.platform
94 91
95define archhelp 92define archhelp
diff --git a/arch/xtensa/boot/boot-elf/bootstrap.S b/arch/xtensa/boot/boot-elf/bootstrap.S
index f857fc760aa8..464298bc348b 100644
--- a/arch/xtensa/boot/boot-elf/bootstrap.S
+++ b/arch/xtensa/boot/boot-elf/bootstrap.S
@@ -1,7 +1,4 @@
1 1
2#include <xtensa/config/specreg.h>
3#include <xtensa/config/core.h>
4
5#include <asm/bootparam.h> 2#include <asm/bootparam.h>
6 3
7 4
diff --git a/arch/xtensa/boot/boot-redboot/bootstrap.S b/arch/xtensa/boot/boot-redboot/bootstrap.S
index ee636b0da81c..84848123e2a8 100644
--- a/arch/xtensa/boot/boot-redboot/bootstrap.S
+++ b/arch/xtensa/boot/boot-redboot/bootstrap.S
@@ -1,9 +1,7 @@
1 1#include <asm/variant/core.h>
2#define _ASMLANGUAGE 2#include <asm/regs.h>
3#include <xtensa/config/specreg.h> 3#include <asm/asmmacro.h>
4#include <xtensa/config/core.h> 4#include <asm/cacheasm.h>
5#include <xtensa/cacheasm.h>
6
7 /* 5 /*
8 * RB-Data: RedBoot data/bss 6 * RB-Data: RedBoot data/bss
9 * P: Boot-Parameters 7 * P: Boot-Parameters
@@ -77,8 +75,14 @@ _start:
77 /* Note: The assembler cannot relax "addi a0, a0, ..." to an 75 /* Note: The assembler cannot relax "addi a0, a0, ..." to an
78 l32r, so we load to a4 first. */ 76 l32r, so we load to a4 first. */
79 77
80 addi a4, a0, __start - __start_a0 78 # addi a4, a0, __start - __start_a0
81 mov a0, a4 79 # mov a0, a4
80
81 movi a4, __start
82 movi a5, __start_a0
83 add a4, a0, a4
84 sub a0, a4, a5
85
82 movi a4, __start 86 movi a4, __start
83 movi a5, __reloc_end 87 movi a5, __reloc_end
84 88
@@ -106,9 +110,13 @@ _start:
106 /* We have to flush and invalidate the caches here before we jump. */ 110 /* We have to flush and invalidate the caches here before we jump. */
107 111
108#if XCHAL_DCACHE_IS_WRITEBACK 112#if XCHAL_DCACHE_IS_WRITEBACK
109 dcache_writeback_all a5, a6 113
114 ___flush_dcache_all a5 a6
115
110#endif 116#endif
111 icache_invalidate_all a5, a6 117
118 ___invalidate_icache_all a5 a6
119 isync
112 120
113 movi a11, _reloc 121 movi a11, _reloc
114 jx a11 122 jx a11
@@ -209,9 +217,14 @@ _reloc:
209 /* jump to the kernel */ 217 /* jump to the kernel */
2102: 2182:
211#if XCHAL_DCACHE_IS_WRITEBACK 219#if XCHAL_DCACHE_IS_WRITEBACK
212 dcache_writeback_all a5, a6 220
221 ___flush_dcache_all a5 a6
222
213#endif 223#endif
214 icache_invalidate_all a5, a6 224
225 ___invalidate_icache_all a5 a6
226
227 isync
215 228
216 movi a5, __start 229 movi a5, __start
217 movi a3, boot_initrd_start 230 movi a3, boot_initrd_start
diff --git a/arch/xtensa/configs/iss_defconfig b/arch/xtensa/configs/iss_defconfig
index 802621dd4867..f19854035e61 100644
--- a/arch/xtensa/configs/iss_defconfig
+++ b/arch/xtensa/configs/iss_defconfig
@@ -53,11 +53,7 @@ CONFIG_CC_ALIGN_JUMPS=0
53# 53#
54# Processor type and features 54# Processor type and features
55# 55#
56CONFIG_XTENSA_ARCH_LINUX_BE=y 56CONFIG_XTENSA_VARIANT_FSF=y
57# CONFIG_XTENSA_ARCH_LINUX_LE is not set
58# CONFIG_XTENSA_ARCH_LINUX_TEST is not set
59# CONFIG_XTENSA_ARCH_S5 is not set
60# CONFIG_XTENSA_CUSTOM is not set
61CONFIG_MMU=y 57CONFIG_MMU=y
62# CONFIG_XTENSA_UNALIGNED_USER is not set 58# CONFIG_XTENSA_UNALIGNED_USER is not set
63# CONFIG_PREEMPT is not set 59# CONFIG_PREEMPT is not set
diff --git a/arch/xtensa/kernel/align.S b/arch/xtensa/kernel/align.S
index a4956578a24d..33d6e9d2e83c 100644
--- a/arch/xtensa/kernel/align.S
+++ b/arch/xtensa/kernel/align.S
@@ -16,14 +16,9 @@
16 */ 16 */
17 17
18#include <linux/linkage.h> 18#include <linux/linkage.h>
19#include <asm/ptrace.h>
20#include <asm/ptrace.h>
21#include <asm/current.h> 19#include <asm/current.h>
22#include <asm/asm-offsets.h> 20#include <asm/asm-offsets.h>
23#include <asm/pgtable.h>
24#include <asm/processor.h> 21#include <asm/processor.h>
25#include <asm/page.h>
26#include <asm/thread_info.h>
27 22
28#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION 23#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
29 24
@@ -216,7 +211,7 @@ ENTRY(fast_unaligned)
216 211
217 extui a5, a4, INSN_OP0, 4 # get insn.op0 nibble 212 extui a5, a4, INSN_OP0, 4 # get insn.op0 nibble
218 213
219#if XCHAL_HAVE_NARROW 214#if XCHAL_HAVE_DENSITY
220 _beqi a5, OP0_L32I_N, .Lload # L32I.N, jump 215 _beqi a5, OP0_L32I_N, .Lload # L32I.N, jump
221 addi a6, a5, -OP0_S32I_N 216 addi a6, a5, -OP0_S32I_N
222 _beqz a6, .Lstore # S32I.N, do a store 217 _beqz a6, .Lstore # S32I.N, do a store
@@ -251,7 +246,7 @@ ENTRY(fast_unaligned)
251#endif 246#endif
252 __src_b a3, a5, a6 # a3 has the data word 247 __src_b a3, a5, a6 # a3 has the data word
253 248
254#if XCHAL_HAVE_NARROW 249#if XCHAL_HAVE_DENSITY
255 addi a7, a7, 2 # increment PC (assume 16-bit insn) 250 addi a7, a7, 2 # increment PC (assume 16-bit insn)
256 251
257 extui a5, a4, INSN_OP0, 4 252 extui a5, a4, INSN_OP0, 4
@@ -279,14 +274,14 @@ ENTRY(fast_unaligned)
279 274
2801: 2751:
281 276
282#if XCHAL_HAVE_LOOP 277#if XCHAL_HAVE_LOOPS
283 rsr a3, LEND # check if we reached LEND 278 rsr a5, LEND # check if we reached LEND
284 bne a7, a3, 1f 279 bne a7, a5, 1f
285 rsr a3, LCOUNT # and LCOUNT != 0 280 rsr a5, LCOUNT # and LCOUNT != 0
286 beqz a3, 1f 281 beqz a5, 1f
287 addi a3, a3, -1 # decrement LCOUNT and set 282 addi a5, a5, -1 # decrement LCOUNT and set
288 rsr a7, LBEG # set PC to LBEGIN 283 rsr a7, LBEG # set PC to LBEGIN
289 wsr a3, LCOUNT 284 wsr a5, LCOUNT
290#endif 285#endif
291 286
2921: wsr a7, EPC_1 # skip load instruction 2871: wsr a7, EPC_1 # skip load instruction
@@ -336,7 +331,7 @@ ENTRY(fast_unaligned)
336 331
337 movi a6, 0 # mask: ffffffff:00000000 332 movi a6, 0 # mask: ffffffff:00000000
338 333
339#if XCHAL_HAVE_NARROW 334#if XCHAL_HAVE_DENSITY
340 addi a7, a7, 2 # incr. PC,assume 16-bit instruction 335 addi a7, a7, 2 # incr. PC,assume 16-bit instruction
341 336
342 extui a5, a4, INSN_OP0, 4 # extract OP0 337 extui a5, a4, INSN_OP0, 4 # extract OP0
@@ -359,14 +354,14 @@ ENTRY(fast_unaligned)
359 /* Get memory address */ 354 /* Get memory address */
360 355
3611: 3561:
362#if XCHAL_HAVE_LOOP 357#if XCHAL_HAVE_LOOPS
363 rsr a3, LEND # check if we reached LEND 358 rsr a4, LEND # check if we reached LEND
364 bne a7, a3, 1f 359 bne a7, a4, 1f
365 rsr a3, LCOUNT # and LCOUNT != 0 360 rsr a4, LCOUNT # and LCOUNT != 0
366 beqz a3, 1f 361 beqz a4, 1f
367 addi a3, a3, -1 # decrement LCOUNT and set 362 addi a4, a4, -1 # decrement LCOUNT and set
368 rsr a7, LBEG # set PC to LBEGIN 363 rsr a7, LBEG # set PC to LBEGIN
369 wsr a3, LCOUNT 364 wsr a4, LCOUNT
370#endif 365#endif
371 366
3721: wsr a7, EPC_1 # skip store instruction 3671: wsr a7, EPC_1 # skip store instruction
@@ -416,6 +411,7 @@ ENTRY(fast_unaligned)
416 411
417 /* Restore working register */ 412 /* Restore working register */
418 413
414 l32i a8, a2, PT_AREG8
419 l32i a7, a2, PT_AREG7 415 l32i a7, a2, PT_AREG7
420 l32i a6, a2, PT_AREG6 416 l32i a6, a2, PT_AREG6
421 l32i a5, a2, PT_AREG5 417 l32i a5, a2, PT_AREG5
@@ -446,7 +442,7 @@ ENTRY(fast_unaligned)
446 mov a1, a2 442 mov a1, a2
447 443
448 rsr a0, PS 444 rsr a0, PS
449 bbsi.l a2, PS_UM_SHIFT, 1f # jump if user mode 445 bbsi.l a2, PS_UM_BIT, 1f # jump if user mode
450 446
451 movi a0, _kernel_exception 447 movi a0, _kernel_exception
452 jx a0 448 jx a0
diff --git a/arch/xtensa/kernel/coprocessor.S b/arch/xtensa/kernel/coprocessor.S
index cf5a93fb6a2e..01bcb9fcfcbd 100644
--- a/arch/xtensa/kernel/coprocessor.S
+++ b/arch/xtensa/kernel/coprocessor.S
@@ -90,7 +90,6 @@ ENTRY(enable_coprocessor)
90 rsync 90 rsync
91 retw 91 retw
92 92
93#endif
94 93
95ENTRY(save_coprocessor_extra) 94ENTRY(save_coprocessor_extra)
96 entry sp, 16 95 entry sp, 16
@@ -197,4 +196,5 @@ _xtensa_reginfo_tables:
197 XCHAL_CP7_SA_CONTENTS_LIBDB 196 XCHAL_CP7_SA_CONTENTS_LIBDB
198 .word 0xFC000000 /* invalid register number,marks end of table*/ 197 .word 0xFC000000 /* invalid register number,marks end of table*/
199_xtensa_reginfo_table_end: 198_xtensa_reginfo_table_end:
199#endif
200 200
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
index 89e409e9e0de..c0b56b17927f 100644
--- a/arch/xtensa/kernel/entry.S
+++ b/arch/xtensa/kernel/entry.S
@@ -24,7 +24,7 @@
24#include <asm/pgtable.h> 24#include <asm/pgtable.h>
25#include <asm/page.h> 25#include <asm/page.h>
26#include <asm/signal.h> 26#include <asm/signal.h>
27#include <xtensa/coreasm.h> 27#include <asm/tlbflush.h>
28 28
29/* Unimplemented features. */ 29/* Unimplemented features. */
30 30
@@ -364,7 +364,7 @@ common_exception:
364 movi a2, 1 364 movi a2, 1
365 extui a3, a3, 0, 1 # a3 = PS.INTLEVEL[0] 365 extui a3, a3, 0, 1 # a3 = PS.INTLEVEL[0]
366 moveqz a3, a2, a0 # a3 = 1 iff interrupt exception 366 moveqz a3, a2, a0 # a3 = 1 iff interrupt exception
367 movi a2, PS_WOE_MASK 367 movi a2, 1 << PS_WOE_BIT
368 or a3, a3, a2 368 or a3, a3, a2
369 rsr a0, EXCCAUSE 369 rsr a0, EXCCAUSE
370 xsr a3, PS 370 xsr a3, PS
@@ -399,7 +399,7 @@ common_exception_return:
399 /* Jump if we are returning from kernel exceptions. */ 399 /* Jump if we are returning from kernel exceptions. */
400 400
4011: l32i a3, a1, PT_PS 4011: l32i a3, a1, PT_PS
402 _bbsi.l a3, PS_UM_SHIFT, 2f 402 _bbsi.l a3, PS_UM_BIT, 2f
403 j kernel_exception_exit 403 j kernel_exception_exit
404 404
405 /* Specific to a user exception exit: 405 /* Specific to a user exception exit:
@@ -422,7 +422,7 @@ common_exception_return:
422 * (Hint: There is only one user exception frame on stack) 422 * (Hint: There is only one user exception frame on stack)
423 */ 423 */
424 424
425 movi a3, PS_WOE_MASK 425 movi a3, 1 << PS_WOE_BIT
426 426
427 _bbsi.l a4, TIF_NEED_RESCHED, 3f 427 _bbsi.l a4, TIF_NEED_RESCHED, 3f
428 _bbci.l a4, TIF_SIGPENDING, 4f 428 _bbci.l a4, TIF_SIGPENDING, 4f
@@ -694,7 +694,7 @@ common_exception_exit:
694ENTRY(debug_exception) 694ENTRY(debug_exception)
695 695
696 rsr a0, EPS + XCHAL_DEBUGLEVEL 696 rsr a0, EPS + XCHAL_DEBUGLEVEL
697 bbsi.l a0, PS_EXCM_SHIFT, 1f # exception mode 697 bbsi.l a0, PS_EXCM_BIT, 1f # exception mode
698 698
699 /* Set EPC_1 and EXCCAUSE */ 699 /* Set EPC_1 and EXCCAUSE */
700 700
@@ -707,7 +707,7 @@ ENTRY(debug_exception)
707 707
708 /* Restore PS to the value before the debug exc but with PS.EXCM set.*/ 708 /* Restore PS to the value before the debug exc but with PS.EXCM set.*/
709 709
710 movi a2, 1 << PS_EXCM_SHIFT 710 movi a2, 1 << PS_EXCM_BIT
711 or a2, a0, a2 711 or a2, a0, a2
712 movi a0, debug_exception # restore a3, debug jump vector 712 movi a0, debug_exception # restore a3, debug jump vector
713 wsr a2, PS 713 wsr a2, PS
@@ -715,7 +715,7 @@ ENTRY(debug_exception)
715 715
716 /* Switch to kernel/user stack, restore jump vector, and save a0 */ 716 /* Switch to kernel/user stack, restore jump vector, and save a0 */
717 717
718 bbsi.l a2, PS_UM_SHIFT, 2f # jump if user mode 718 bbsi.l a2, PS_UM_BIT, 2f # jump if user mode
719 719
720 addi a2, a1, -16-PT_SIZE # assume kernel stack 720 addi a2, a1, -16-PT_SIZE # assume kernel stack
721 s32i a0, a2, PT_AREG0 721 s32i a0, a2, PT_AREG0
@@ -778,7 +778,7 @@ ENTRY(unrecoverable_exception)
778 wsr a1, WINDOWBASE 778 wsr a1, WINDOWBASE
779 rsync 779 rsync
780 780
781 movi a1, PS_WOE_MASK | 1 781 movi a1, (1 << PS_WOE_BIT) | 1
782 wsr a1, PS 782 wsr a1, PS
783 rsync 783 rsync
784 784
@@ -1491,7 +1491,7 @@ ENTRY(_spill_registers)
1491 */ 1491 */
1492 1492
1493 rsr a0, PS 1493 rsr a0, PS
1494 _bbci.l a0, PS_UM_SHIFT, 1f 1494 _bbci.l a0, PS_UM_BIT, 1f
1495 1495
1496 /* User space: Setup a dummy frame and kill application. 1496 /* User space: Setup a dummy frame and kill application.
1497 * Note: We assume EXC_TABLE_KSTK contains a valid stack pointer. 1497 * Note: We assume EXC_TABLE_KSTK contains a valid stack pointer.
@@ -1510,7 +1510,7 @@ ENTRY(_spill_registers)
1510 l32i a1, a3, EXC_TABLE_KSTK 1510 l32i a1, a3, EXC_TABLE_KSTK
1511 wsr a3, EXCSAVE_1 1511 wsr a3, EXCSAVE_1
1512 1512
1513 movi a4, PS_WOE_MASK | 1 1513 movi a4, (1 << PS_WOE_BIT) | 1
1514 wsr a4, PS 1514 wsr a4, PS
1515 rsync 1515 rsync
1516 1516
@@ -1612,7 +1612,7 @@ ENTRY(fast_second_level_miss)
1612 rsr a1, PTEVADDR 1612 rsr a1, PTEVADDR
1613 srli a1, a1, PAGE_SHIFT 1613 srli a1, a1, PAGE_SHIFT
1614 slli a1, a1, PAGE_SHIFT # ptevaddr & PAGE_MASK 1614 slli a1, a1, PAGE_SHIFT # ptevaddr & PAGE_MASK
1615 addi a1, a1, DTLB_WAY_PGTABLE # ... + way_number 1615 addi a1, a1, DTLB_WAY_PGD # ... + way_number
1616 1616
1617 wdtlb a0, a1 1617 wdtlb a0, a1
1618 dsync 1618 dsync
@@ -1654,7 +1654,7 @@ ENTRY(fast_second_level_miss)
1654 mov a1, a2 1654 mov a1, a2
1655 1655
1656 rsr a2, PS 1656 rsr a2, PS
1657 bbsi.l a2, PS_UM_SHIFT, 1f 1657 bbsi.l a2, PS_UM_BIT, 1f
1658 j _kernel_exception 1658 j _kernel_exception
16591: j _user_exception 16591: j _user_exception
1660 1660
@@ -1753,7 +1753,7 @@ ENTRY(fast_store_prohibited)
1753 mov a1, a2 1753 mov a1, a2
1754 1754
1755 rsr a2, PS 1755 rsr a2, PS
1756 bbsi.l a2, PS_UM_SHIFT, 1f 1756 bbsi.l a2, PS_UM_BIT, 1f
1757 j _kernel_exception 1757 j _kernel_exception
17581: j _user_exception 17581: j _user_exception
1759 1759
@@ -1924,7 +1924,7 @@ ENTRY(_switch_to)
1924 1924
1925 /* Disable ints while we manipulate the stack pointer; spill regs. */ 1925 /* Disable ints while we manipulate the stack pointer; spill regs. */
1926 1926
1927 movi a5, PS_EXCM_MASK | LOCKLEVEL 1927 movi a5, (1 << PS_EXCM_BIT) | LOCKLEVEL
1928 xsr a5, PS 1928 xsr a5, PS
1929 rsr a3, EXCSAVE_1 1929 rsr a3, EXCSAVE_1
1930 rsync 1930 rsync
diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
index c07cb2522993..ea89910efa44 100644
--- a/arch/xtensa/kernel/head.S
+++ b/arch/xtensa/kernel/head.S
@@ -15,9 +15,9 @@
15 * Kevin Chea 15 * Kevin Chea
16 */ 16 */
17 17
18#include <xtensa/cacheasm.h>
19#include <asm/processor.h> 18#include <asm/processor.h>
20#include <asm/page.h> 19#include <asm/page.h>
20#include <asm/cacheasm.h>
21 21
22/* 22/*
23 * This module contains the entry code for kernel images. It performs the 23 * This module contains the entry code for kernel images. It performs the
@@ -32,13 +32,6 @@
32 * 32 *
33 */ 33 */
34 34
35 .macro iterate from, to , cmd
36 .ifeq ((\to - \from) & ~0xfff)
37 \cmd \from
38 iterate "(\from+1)", \to, \cmd
39 .endif
40 .endm
41
42/* 35/*
43 * _start 36 * _start
44 * 37 *
@@ -64,7 +57,7 @@ _startup:
64 57
65 /* Disable interrupts and exceptions. */ 58 /* Disable interrupts and exceptions. */
66 59
67 movi a0, XCHAL_PS_EXCM_MASK 60 movi a0, LOCKLEVEL
68 wsr a0, PS 61 wsr a0, PS
69 62
70 /* Preserve the pointer to the boot parameter list in EXCSAVE_1 */ 63 /* Preserve the pointer to the boot parameter list in EXCSAVE_1 */
@@ -91,11 +84,11 @@ _startup:
91 movi a1, 15 84 movi a1, 15
92 wsr a0, ICOUNTLEVEL 85 wsr a0, ICOUNTLEVEL
93 86
94 .macro reset_dbreak num 87 .set _index, 0
95 wsr a0, DBREAKC + \num 88 .rept XCHAL_NUM_DBREAK - 1
96 .endm 89 wsr a0, DBREAKC + _index
97 90 .set _index, _index + 1
98 iterate 0, XCHAL_NUM_IBREAK-1, reset_dbreak 91 .endr
99#endif 92#endif
100 93
101 /* Clear CCOUNT (not really necessary, but nice) */ 94 /* Clear CCOUNT (not really necessary, but nice) */
@@ -110,10 +103,11 @@ _startup:
110 103
111 /* Disable all timers. */ 104 /* Disable all timers. */
112 105
113 .macro reset_timer num 106 .set _index, 0
114 wsr a0, CCOMPARE_0 + \num 107 .rept XCHAL_NUM_TIMERS - 1
115 .endm 108 wsr a0, CCOMPARE + _index
116 iterate 0, XCHAL_NUM_TIMERS-1, reset_timer 109 .set _index, _index + 1
110 .endr
117 111
118 /* Interrupt initialization. */ 112 /* Interrupt initialization. */
119 113
@@ -139,12 +133,21 @@ _startup:
139 rsync 133 rsync
140 134
141 /* Initialize the caches. 135 /* Initialize the caches.
142 * Does not include flushing writeback d-cache. 136 * a2, a3 are just working registers (clobbered).
143 * a6, a7 are just working registers (clobbered).
144 */ 137 */
145 138
146 icache_reset a2, a3 139#if XCHAL_DCACHE_LINE_LOCKABLE
147 dcache_reset a2, a3 140 ___unlock_dcache_all a2 a3
141#endif
142
143#if XCHAL_ICACHE_LINE_LOCKABLE
144 ___unlock_icache_all a2 a3
145#endif
146
147 ___invalidate_dcache_all a2 a3
148 ___invalidate_icache_all a2 a3
149
150 isync
148 151
149 /* Unpack data sections 152 /* Unpack data sections
150 * 153 *
@@ -181,9 +184,9 @@ _startup:
181 movi a2, _bss_start # start of BSS 184 movi a2, _bss_start # start of BSS
182 movi a3, _bss_end # end of BSS 185 movi a3, _bss_end # end of BSS
183 186
1841: addi a2, a2, 4 187 __loopt a2, a3, a4, 2
185 s32i a0, a2, 0 188 s32i a0, a2, 0
186 blt a2, a3, 1b 189 __endla a2, a4, 4
187 190
188#if XCHAL_DCACHE_IS_WRITEBACK 191#if XCHAL_DCACHE_IS_WRITEBACK
189 192
@@ -191,7 +194,7 @@ _startup:
191 * instructions/data are available. 194 * instructions/data are available.
192 */ 195 */
193 196
194 dcache_writeback_all a2, a3 197 ___flush_dcache_all a2 a3
195#endif 198#endif
196 199
197 /* Setup stack and enable window exceptions (keep irqs disabled) */ 200 /* Setup stack and enable window exceptions (keep irqs disabled) */
diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c
index 6648fa9d9192..ca76f071666e 100644
--- a/arch/xtensa/kernel/pci-dma.c
+++ b/arch/xtensa/kernel/pci-dma.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * arch/xtensa/kernel/pci-dma.c 2 * arch/xtensa/pci-dma.c
3 * 3 *
4 * DMA coherent memory allocation. 4 * DMA coherent memory allocation.
5 * 5 *
@@ -29,28 +29,48 @@
29 */ 29 */
30 30
31void * 31void *
32dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp) 32dma_alloc_coherent(struct device *dev,size_t size,dma_addr_t *handle,gfp_t flag)
33{ 33{
34 void *ret; 34 unsigned long ret;
35 unsigned long uncached = 0;
35 36
36 /* ignore region speicifiers */ 37 /* ignore region speicifiers */
37 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
38 38
39 if (dev == NULL || (*dev->dma_mask < 0xffffffff)) 39 flag &= ~(__GFP_DMA | __GFP_HIGHMEM);
40 gfp |= GFP_DMA;
41 ret = (void *)__get_free_pages(gfp, get_order(size));
42 40
43 if (ret != NULL) { 41 if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
44 memset(ret, 0, size); 42 flag |= GFP_DMA;
45 *handle = virt_to_bus(ret); 43 ret = (unsigned long)__get_free_pages(flag, get_order(size));
44
45 if (ret == 0)
46 return NULL;
47
48 /* We currently don't support coherent memory outside KSEG */
49
50 if (ret < XCHAL_KSEG_CACHED_VADDR
51 || ret >= XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE)
52 BUG();
53
54
55 if (ret != 0) {
56 memset((void*) ret, 0, size);
57 uncached = ret+XCHAL_KSEG_BYPASS_VADDR-XCHAL_KSEG_CACHED_VADDR;
58 *handle = virt_to_bus((void*)ret);
59 __flush_invalidate_dcache_range(ret, size);
46 } 60 }
47 return (void*) BYPASS_ADDR((unsigned long)ret); 61
62 return (void*)uncached;
48} 63}
49 64
50void dma_free_coherent(struct device *hwdev, size_t size, 65void dma_free_coherent(struct device *hwdev, size_t size,
51 void *vaddr, dma_addr_t dma_handle) 66 void *vaddr, dma_addr_t dma_handle)
52{ 67{
53 free_pages(CACHED_ADDR((unsigned long)vaddr), get_order(size)); 68 long addr=(long)vaddr+XCHAL_KSEG_CACHED_VADDR-XCHAL_KSEG_BYPASS_VADDR;
69
70 if (addr < 0 || addr >= XCHAL_KSEG_SIZE)
71 BUG();
72
73 free_pages(addr, get_order(size));
54} 74}
55 75
56 76
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
index a7c4178c2a8c..3785f3481d71 100644
--- a/arch/xtensa/kernel/process.c
+++ b/arch/xtensa/kernel/process.c
@@ -1,4 +1,3 @@
1// TODO verify coprocessor handling
2/* 1/*
3 * arch/xtensa/kernel/process.c 2 * arch/xtensa/kernel/process.c
4 * 3 *
@@ -43,7 +42,7 @@
43#include <asm/irq.h> 42#include <asm/irq.h>
44#include <asm/atomic.h> 43#include <asm/atomic.h>
45#include <asm/asm-offsets.h> 44#include <asm/asm-offsets.h>
46#include <asm/coprocessor.h> 45#include <asm/regs.h>
47 46
48extern void ret_from_fork(void); 47extern void ret_from_fork(void);
49 48
@@ -67,25 +66,6 @@ void (*pm_power_off)(void) = NULL;
67EXPORT_SYMBOL(pm_power_off); 66EXPORT_SYMBOL(pm_power_off);
68 67
69 68
70#if XCHAL_CP_NUM > 0
71
72/*
73 * Coprocessor ownership.
74 */
75
76coprocessor_info_t coprocessor_info[] = {
77 { 0, XTENSA_CPE_CP0_OFFSET },
78 { 0, XTENSA_CPE_CP1_OFFSET },
79 { 0, XTENSA_CPE_CP2_OFFSET },
80 { 0, XTENSA_CPE_CP3_OFFSET },
81 { 0, XTENSA_CPE_CP4_OFFSET },
82 { 0, XTENSA_CPE_CP5_OFFSET },
83 { 0, XTENSA_CPE_CP6_OFFSET },
84 { 0, XTENSA_CPE_CP7_OFFSET },
85};
86
87#endif
88
89/* 69/*
90 * Powermanagement idle function, if any is provided by the platform. 70 * Powermanagement idle function, if any is provided by the platform.
91 */ 71 */
@@ -110,12 +90,10 @@ void cpu_idle(void)
110 90
111void exit_thread(void) 91void exit_thread(void)
112{ 92{
113 release_coprocessors(current); /* Empty macro if no CPs are defined */
114} 93}
115 94
116void flush_thread(void) 95void flush_thread(void)
117{ 96{
118 release_coprocessors(current); /* Empty macro if no CPs are defined */
119} 97}
120 98
121/* 99/*
@@ -275,7 +253,7 @@ void do_copy_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs,
275 */ 253 */
276 254
277 elfregs->pc = regs->pc; 255 elfregs->pc = regs->pc;
278 elfregs->ps = (regs->ps & ~XCHAL_PS_EXCM_MASK); 256 elfregs->ps = (regs->ps & ~(1 << PS_EXCM_BIT));
279 elfregs->exccause = regs->exccause; 257 elfregs->exccause = regs->exccause;
280 elfregs->excvaddr = regs->excvaddr; 258 elfregs->excvaddr = regs->excvaddr;
281 elfregs->windowbase = regs->windowbase; 259 elfregs->windowbase = regs->windowbase;
@@ -325,7 +303,7 @@ void do_restore_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs,
325 */ 303 */
326 304
327 regs->pc = elfregs->pc; 305 regs->pc = elfregs->pc;
328 regs->ps = (elfregs->ps | XCHAL_PS_EXCM_MASK); 306 regs->ps = (elfregs->ps | (1 << PS_EXCM_BIT));
329 regs->exccause = elfregs->exccause; 307 regs->exccause = elfregs->exccause;
330 regs->excvaddr = elfregs->excvaddr; 308 regs->excvaddr = elfregs->excvaddr;
331 regs->windowbase = elfregs->windowbase; 309 regs->windowbase = elfregs->windowbase;
@@ -459,16 +437,7 @@ int do_restore_fpregs (elf_fpregset_t *fpregs, struct pt_regs *regs,
459int 437int
460dump_task_fpu(struct pt_regs *regs, struct task_struct *task, elf_fpregset_t *r) 438dump_task_fpu(struct pt_regs *regs, struct task_struct *task, elf_fpregset_t *r)
461{ 439{
462/* see asm/coprocessor.h for this magic number 16 */
463#if XTENSA_CP_EXTRA_SIZE > 16
464 do_save_fpregs (r, regs, task);
465
466 /* For now, bit 16 means some extra state may be present: */
467// FIXME!! need to track to return more accurate mask
468 return 0x10000 | XCHAL_CP_MASK;
469#else
470 return 0; /* no coprocessors active on this processor */ 440 return 0; /* no coprocessors active on this processor */
471#endif
472} 441}
473 442
474/* 443/*
diff --git a/arch/xtensa/kernel/ptrace.c b/arch/xtensa/kernel/ptrace.c
index 9aea23cc0dc5..604c3c3c6759 100644
--- a/arch/xtensa/kernel/ptrace.c
+++ b/arch/xtensa/kernel/ptrace.c
@@ -96,7 +96,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
96 /* Note: PS.EXCM is not set while user task is running; 96 /* Note: PS.EXCM is not set while user task is running;
97 * its being set in regs is for exception handling 97 * its being set in regs is for exception handling
98 * convenience. */ 98 * convenience. */
99 tmp = (regs->ps & ~XCHAL_PS_EXCM_MASK); 99 tmp = (regs->ps & ~(1 << PS_EXCM_BIT));
100 break; 100 break;
101 case REG_WB: 101 case REG_WB:
102 tmp = regs->windowbase; 102 tmp = regs->windowbase;
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index c99ab72b41b6..b6374c09de20 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -42,8 +42,6 @@
42#include <asm/page.h> 42#include <asm/page.h>
43#include <asm/setup.h> 43#include <asm/setup.h>
44 44
45#include <xtensa/config/system.h>
46
47#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE) 45#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
48struct screen_info screen_info = { 0, 24, 0, 0, 0, 80, 0, 0, 0, 24, 1, 16}; 46struct screen_info screen_info = { 0, 24, 0, 0, 0, 80, 0, 0, 0, 24, 1, 16};
49#endif 47#endif
@@ -336,7 +334,7 @@ c_show(struct seq_file *f, void *slot)
336 /* high-level stuff */ 334 /* high-level stuff */
337 seq_printf(f,"processor\t: 0\n" 335 seq_printf(f,"processor\t: 0\n"
338 "vendor_id\t: Tensilica\n" 336 "vendor_id\t: Tensilica\n"
339 "model\t\t: Xtensa " XCHAL_HW_RELEASE_NAME "\n" 337 "model\t\t: Xtensa " XCHAL_HW_VERSION_NAME "\n"
340 "core ID\t\t: " XCHAL_CORE_ID "\n" 338 "core ID\t\t: " XCHAL_CORE_ID "\n"
341 "build ID\t: 0x%x\n" 339 "build ID\t: 0x%x\n"
342 "byte order\t: %s\n" 340 "byte order\t: %s\n"
@@ -420,25 +418,6 @@ c_show(struct seq_file *f, void *slot)
420 XCHAL_NUM_TIMERS, 418 XCHAL_NUM_TIMERS,
421 XCHAL_DEBUGLEVEL); 419 XCHAL_DEBUGLEVEL);
422 420
423 /* Coprocessors */
424#if XCHAL_HAVE_CP
425 seq_printf(f, "coprocessors\t: %d\n", XCHAL_CP_NUM);
426#else
427 seq_printf(f, "coprocessors\t: none\n");
428#endif
429
430 /* {I,D}{RAM,ROM} and XLMI */
431 seq_printf(f,"inst ROMs\t: %d\n"
432 "inst RAMs\t: %d\n"
433 "data ROMs\t: %d\n"
434 "data RAMs\t: %d\n"
435 "XLMI ports\t: %d\n",
436 XCHAL_NUM_IROM,
437 XCHAL_NUM_IRAM,
438 XCHAL_NUM_DROM,
439 XCHAL_NUM_DRAM,
440 XCHAL_NUM_XLMI);
441
442 /* Cache */ 421 /* Cache */
443 seq_printf(f,"icache line size: %d\n" 422 seq_printf(f,"icache line size: %d\n"
444 "icache ways\t: %d\n" 423 "icache ways\t: %d\n"
@@ -466,24 +445,6 @@ c_show(struct seq_file *f, void *slot)
466 XCHAL_DCACHE_WAYS, 445 XCHAL_DCACHE_WAYS,
467 XCHAL_DCACHE_SIZE); 446 XCHAL_DCACHE_SIZE);
468 447
469 /* MMU */
470 seq_printf(f,"ASID bits\t: %d\n"
471 "ASID invalid\t: %d\n"
472 "ASID kernel\t: %d\n"
473 "rings\t\t: %d\n"
474 "itlb ways\t: %d\n"
475 "itlb AR ways\t: %d\n"
476 "dtlb ways\t: %d\n"
477 "dtlb AR ways\t: %d\n",
478 XCHAL_MMU_ASID_BITS,
479 XCHAL_MMU_ASID_INVALID,
480 XCHAL_MMU_ASID_KERNEL,
481 XCHAL_MMU_RINGS,
482 XCHAL_ITLB_WAYS,
483 XCHAL_ITLB_ARF_WAYS,
484 XCHAL_DTLB_WAYS,
485 XCHAL_DTLB_ARF_WAYS);
486
487 return 0; 448 return 0;
488} 449}
489 450
diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c
index c494f0826fc5..6af7f4145990 100644
--- a/arch/xtensa/kernel/signal.c
+++ b/arch/xtensa/kernel/signal.c
@@ -12,8 +12,8 @@
12 * 12 *
13 */ 13 */
14 14
15#include <xtensa/config/core.h> 15#include <asm/variant/core.h>
16#include <xtensa/hal.h> 16#include <asm/coprocessor.h>
17#include <linux/sched.h> 17#include <linux/sched.h>
18#include <linux/mm.h> 18#include <linux/mm.h>
19#include <linux/smp.h> 19#include <linux/smp.h>
@@ -216,8 +216,8 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc)
216 * handler, or the user mode value doesn't matter (e.g. PS.OWB). 216 * handler, or the user mode value doesn't matter (e.g. PS.OWB).
217 */ 217 */
218 err |= __get_user(ps, &sc->sc_ps); 218 err |= __get_user(ps, &sc->sc_ps);
219 regs->ps = (regs->ps & ~XCHAL_PS_CALLINC_MASK) 219 regs->ps = (regs->ps & ~PS_CALLINC_MASK)
220 | (ps & XCHAL_PS_CALLINC_MASK); 220 | (ps & PS_CALLINC_MASK);
221 221
222 /* Additional corruption checks */ 222 /* Additional corruption checks */
223 223
@@ -280,7 +280,7 @@ flush_my_cpstate(struct task_struct *tsk)
280static int 280static int
281save_cpextra (struct _cpstate *buf) 281save_cpextra (struct _cpstate *buf)
282{ 282{
283#if (XCHAL_EXTRA_SA_SIZE == 0) && (XCHAL_CP_NUM == 0) 283#if XCHAL_CP_NUM == 0
284 return 0; 284 return 0;
285#else 285#else
286 286
@@ -497,8 +497,10 @@ gen_return_code(unsigned char *codemem, unsigned int use_rt_sigreturn)
497 497
498 /* Flush generated code out of the data cache */ 498 /* Flush generated code out of the data cache */
499 499
500 if (err == 0) 500 if (err == 0) {
501 __flush_invalidate_cache_range((unsigned long)codemem, 6UL); 501 __invalidate_icache_range((unsigned long)codemem, 6UL);
502 __flush_invalidate_dcache_range((unsigned long)codemem, 6UL);
503 }
502 504
503 return err; 505 return err;
504} 506}
diff --git a/arch/xtensa/kernel/syscalls.c b/arch/xtensa/kernel/syscalls.c
index f49cb239e603..f9a5a752ca69 100644
--- a/arch/xtensa/kernel/syscalls.c
+++ b/arch/xtensa/kernel/syscalls.c
@@ -175,8 +175,8 @@ void system_call (struct pt_regs *regs)
175 * interrupts in the first place: 175 * interrupts in the first place:
176 */ 176 */
177 local_save_flags (ps); 177 local_save_flags (ps);
178 local_irq_restore((ps & ~XCHAL_PS_INTLEVEL_MASK) | 178 local_irq_restore((ps & ~PS_INTLEVEL_MASK) |
179 (regs->ps & XCHAL_PS_INTLEVEL_MASK) ); 179 (regs->ps & PS_INTLEVEL_MASK) );
180 180
181 if (syscallnr > __NR_Linux_syscalls) { 181 if (syscallnr > __NR_Linux_syscalls) {
182 regs->areg[2] = -ENOSYS; 182 regs->areg[2] = -ENOSYS;
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
index ce077d6bf3a0..693ab268485e 100644
--- a/arch/xtensa/kernel/traps.c
+++ b/arch/xtensa/kernel/traps.c
@@ -75,7 +75,7 @@ extern void system_call (struct pt_regs*);
75#define USER 0x02 75#define USER 0x02
76 76
77#define COPROCESSOR(x) \ 77#define COPROCESSOR(x) \
78{ XCHAL_EXCCAUSE_COPROCESSOR ## x ## _DISABLED, USER, fast_coprocessor } 78{ EXCCAUSE_COPROCESSOR ## x ## _DISABLED, USER, fast_coprocessor }
79 79
80typedef struct { 80typedef struct {
81 int cause; 81 int cause;
@@ -85,38 +85,38 @@ typedef struct {
85 85
86dispatch_init_table_t __init dispatch_init_table[] = { 86dispatch_init_table_t __init dispatch_init_table[] = {
87 87
88{ XCHAL_EXCCAUSE_ILLEGAL_INSTRUCTION, 0, do_illegal_instruction}, 88{ EXCCAUSE_ILLEGAL_INSTRUCTION, 0, do_illegal_instruction},
89{ XCHAL_EXCCAUSE_SYSTEM_CALL, KRNL, fast_syscall_kernel }, 89{ EXCCAUSE_SYSTEM_CALL, KRNL, fast_syscall_kernel },
90{ XCHAL_EXCCAUSE_SYSTEM_CALL, USER, fast_syscall_user }, 90{ EXCCAUSE_SYSTEM_CALL, USER, fast_syscall_user },
91{ XCHAL_EXCCAUSE_SYSTEM_CALL, 0, system_call }, 91{ EXCCAUSE_SYSTEM_CALL, 0, system_call },
92/* XCHAL_EXCCAUSE_INSTRUCTION_FETCH unhandled */ 92/* EXCCAUSE_INSTRUCTION_FETCH unhandled */
93/* XCHAL_EXCCAUSE_LOAD_STORE_ERROR unhandled*/ 93/* EXCCAUSE_LOAD_STORE_ERROR unhandled*/
94{ XCHAL_EXCCAUSE_LEVEL1_INTERRUPT, 0, do_interrupt }, 94{ EXCCAUSE_LEVEL1_INTERRUPT, 0, do_interrupt },
95{ XCHAL_EXCCAUSE_ALLOCA, USER|KRNL, fast_alloca }, 95{ EXCCAUSE_ALLOCA, USER|KRNL, fast_alloca },
96/* XCHAL_EXCCAUSE_INTEGER_DIVIDE_BY_ZERO unhandled */ 96/* EXCCAUSE_INTEGER_DIVIDE_BY_ZERO unhandled */
97/* XCHAL_EXCCAUSE_PRIVILEGED unhandled */ 97/* EXCCAUSE_PRIVILEGED unhandled */
98#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION 98#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
99#ifdef CONFIG_UNALIGNED_USER 99#ifdef CONFIG_UNALIGNED_USER
100{ XCHAL_EXCCAUSE_UNALIGNED, USER, fast_unaligned }, 100{ EXCCAUSE_UNALIGNED, USER, fast_unaligned },
101#else 101#else
102{ XCHAL_EXCCAUSE_UNALIGNED, 0, do_unaligned_user }, 102{ EXCCAUSE_UNALIGNED, 0, do_unaligned_user },
103#endif 103#endif
104{ XCHAL_EXCCAUSE_UNALIGNED, KRNL, fast_unaligned }, 104{ EXCCAUSE_UNALIGNED, KRNL, fast_unaligned },
105#endif 105#endif
106{ XCHAL_EXCCAUSE_ITLB_MISS, 0, do_page_fault }, 106{ EXCCAUSE_ITLB_MISS, 0, do_page_fault },
107{ XCHAL_EXCCAUSE_ITLB_MISS, USER|KRNL, fast_second_level_miss}, 107{ EXCCAUSE_ITLB_MISS, USER|KRNL, fast_second_level_miss},
108{ XCHAL_EXCCAUSE_ITLB_MULTIHIT, 0, do_multihit }, 108{ EXCCAUSE_ITLB_MULTIHIT, 0, do_multihit },
109{ XCHAL_EXCCAUSE_ITLB_PRIVILEGE, 0, do_page_fault }, 109{ EXCCAUSE_ITLB_PRIVILEGE, 0, do_page_fault },
110/* XCHAL_EXCCAUSE_SIZE_RESTRICTION unhandled */ 110/* EXCCAUSE_SIZE_RESTRICTION unhandled */
111{ XCHAL_EXCCAUSE_FETCH_CACHE_ATTRIBUTE, 0, do_page_fault }, 111{ EXCCAUSE_FETCH_CACHE_ATTRIBUTE, 0, do_page_fault },
112{ XCHAL_EXCCAUSE_DTLB_MISS, USER|KRNL, fast_second_level_miss}, 112{ EXCCAUSE_DTLB_MISS, USER|KRNL, fast_second_level_miss},
113{ XCHAL_EXCCAUSE_DTLB_MISS, 0, do_page_fault }, 113{ EXCCAUSE_DTLB_MISS, 0, do_page_fault },
114{ XCHAL_EXCCAUSE_DTLB_MULTIHIT, 0, do_multihit }, 114{ EXCCAUSE_DTLB_MULTIHIT, 0, do_multihit },
115{ XCHAL_EXCCAUSE_DTLB_PRIVILEGE, 0, do_page_fault }, 115{ EXCCAUSE_DTLB_PRIVILEGE, 0, do_page_fault },
116/* XCHAL_EXCCAUSE_DTLB_SIZE_RESTRICTION unhandled */ 116/* EXCCAUSE_DTLB_SIZE_RESTRICTION unhandled */
117{ XCHAL_EXCCAUSE_STORE_CACHE_ATTRIBUTE, USER|KRNL, fast_store_prohibited }, 117{ EXCCAUSE_STORE_CACHE_ATTRIBUTE, USER|KRNL, fast_store_prohibited },
118{ XCHAL_EXCCAUSE_STORE_CACHE_ATTRIBUTE, 0, do_page_fault }, 118{ EXCCAUSE_STORE_CACHE_ATTRIBUTE, 0, do_page_fault },
119{ XCHAL_EXCCAUSE_LOAD_CACHE_ATTRIBUTE, 0, do_page_fault }, 119{ EXCCAUSE_LOAD_CACHE_ATTRIBUTE, 0, do_page_fault },
120/* XCCHAL_EXCCAUSE_FLOATING_POINT unhandled */ 120/* XCCHAL_EXCCAUSE_FLOATING_POINT unhandled */
121#if (XCHAL_CP_MASK & 1) 121#if (XCHAL_CP_MASK & 1)
122COPROCESSOR(0), 122COPROCESSOR(0),
diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S
index 0e74397bfa2b..eb2d7bb69ee0 100644
--- a/arch/xtensa/kernel/vectors.S
+++ b/arch/xtensa/kernel/vectors.S
@@ -53,6 +53,8 @@
53#include <asm/thread_info.h> 53#include <asm/thread_info.h>
54#include <asm/processor.h> 54#include <asm/processor.h>
55 55
56#define WINDOW_VECTORS_SIZE 0x180
57
56 58
57/* 59/*
58 * User exception vector. (Exceptions with PS.UM == 1, PS.EXCM == 0) 60 * User exception vector. (Exceptions with PS.UM == 1, PS.EXCM == 0)
@@ -210,7 +212,7 @@ ENTRY(_DoubleExceptionVector)
210 /* Check for kernel double exception (usually fatal). */ 212 /* Check for kernel double exception (usually fatal). */
211 213
212 rsr a3, PS 214 rsr a3, PS
213 _bbci.l a3, PS_UM_SHIFT, .Lksp 215 _bbci.l a3, PS_UM_BIT, .Lksp
214 216
215 /* Check if we are currently handling a window exception. */ 217 /* Check if we are currently handling a window exception. */
216 /* Note: We don't need to indicate that we enter a critical section. */ 218 /* Note: We don't need to indicate that we enter a critical section. */
@@ -219,7 +221,7 @@ ENTRY(_DoubleExceptionVector)
219 221
220 movi a3, XCHAL_WINDOW_VECTORS_VADDR 222 movi a3, XCHAL_WINDOW_VECTORS_VADDR
221 _bltu a0, a3, .Lfixup 223 _bltu a0, a3, .Lfixup
222 addi a3, a3, XSHAL_WINDOW_VECTORS_SIZE 224 addi a3, a3, WINDOW_VECTORS_SIZE
223 _bgeu a0, a3, .Lfixup 225 _bgeu a0, a3, .Lfixup
224 226
225 /* Window overflow/underflow exception. Get stack pointer. */ 227 /* Window overflow/underflow exception. Get stack pointer. */
@@ -245,7 +247,7 @@ ENTRY(_DoubleExceptionVector)
245 247
246 wsr a2, DEPC # save stack pointer temporarily 248 wsr a2, DEPC # save stack pointer temporarily
247 rsr a0, PS 249 rsr a0, PS
248 extui a0, a0, XCHAL_PS_OWB_SHIFT, XCHAL_PS_OWB_BITS 250 extui a0, a0, PS_OWB_SHIFT, 4
249 wsr a0, WINDOWBASE 251 wsr a0, WINDOWBASE
250 rsync 252 rsync
251 253
@@ -312,8 +314,8 @@ ENTRY(_DoubleExceptionVector)
312.Lksp: /* a0: a0, a1: a1, a2: a2, a3: trashed, depc: depc, excsave: a3 */ 314.Lksp: /* a0: a0, a1: a1, a2: a2, a3: trashed, depc: depc, excsave: a3 */
313 315
314 rsr a3, EXCCAUSE 316 rsr a3, EXCCAUSE
315 beqi a3, XCHAL_EXCCAUSE_ITLB_MISS, 1f 317 beqi a3, EXCCAUSE_ITLB_MISS, 1f
316 addi a3, a3, -XCHAL_EXCCAUSE_DTLB_MISS 318 addi a3, a3, -EXCCAUSE_DTLB_MISS
317 bnez a3, .Lunrecoverable 319 bnez a3, .Lunrecoverable
3181: movi a3, fast_second_level_miss_double_kernel 3201: movi a3, fast_second_level_miss_double_kernel
319 jx a3 321 jx a3
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
index e01131fec69e..a36c104c3a52 100644
--- a/arch/xtensa/kernel/vmlinux.lds.S
+++ b/arch/xtensa/kernel/vmlinux.lds.S
@@ -16,20 +16,17 @@
16 16
17#include <asm-generic/vmlinux.lds.h> 17#include <asm-generic/vmlinux.lds.h>
18 18
19#define _NOCLANGUAGE 19#include <asm/variant/core.h>
20#undef __ASSEMBLER__
21#include <xtensa/config/core.h>
22#include <xtensa/config/system.h>
23OUTPUT_ARCH(xtensa) 20OUTPUT_ARCH(xtensa)
24ENTRY(_start) 21ENTRY(_start)
25 22
26#if XCHAL_MEMORY_ORDER == XTHAL_BIGENDIAN 23#ifdef __XTENSA_EB__
27jiffies = jiffies_64 + 4; 24jiffies = jiffies_64 + 4;
28#else 25#else
29jiffies = jiffies_64; 26jiffies = jiffies_64;
30#endif 27#endif
31 28
32#define KERNELOFFSET 0x1000 29#define KERNELOFFSET 0xd0001000
33 30
34/* Note: In the following macros, it would be nice to specify only the 31/* Note: In the following macros, it would be nice to specify only the
35 vector name and section kind and construct "sym" and "section" using 32 vector name and section kind and construct "sym" and "section" using
@@ -76,7 +73,7 @@ jiffies = jiffies_64;
76 73
77SECTIONS 74SECTIONS
78{ 75{
79 . = XCHAL_KSEG_CACHED_VADDR + KERNELOFFSET; 76 . = KERNELOFFSET;
80 /* .text section */ 77 /* .text section */
81 78
82 _text = .; 79 _text = .;
@@ -160,7 +157,7 @@ SECTIONS
160 157
161 /* Initialization code and data: */ 158 /* Initialization code and data: */
162 159
163 . = ALIGN(1<<XCHAL_MMU_MIN_PTE_PAGE_SIZE); 160 . = ALIGN(1 << 12);
164 __init_begin = .; 161 __init_begin = .;
165 .init.text : { 162 .init.text : {
166 _sinittext = .; 163 _sinittext = .;
@@ -224,32 +221,32 @@ SECTIONS
224 .dummy) 221 .dummy)
225 SECTION_VECTOR (_DebugInterruptVector_literal, 222 SECTION_VECTOR (_DebugInterruptVector_literal,
226 .DebugInterruptVector.literal, 223 .DebugInterruptVector.literal,
227 XCHAL_INTLEVEL_VECTOR_VADDR(XCHAL_DEBUGLEVEL) - 4, 224 XCHAL_DEBUG_VECTOR_VADDR - 4,
228 SIZEOF(.WindowVectors.text), 225 SIZEOF(.WindowVectors.text),
229 .WindowVectors.text) 226 .WindowVectors.text)
230 SECTION_VECTOR (_DebugInterruptVector_text, 227 SECTION_VECTOR (_DebugInterruptVector_text,
231 .DebugInterruptVector.text, 228 .DebugInterruptVector.text,
232 XCHAL_INTLEVEL_VECTOR_VADDR(XCHAL_DEBUGLEVEL), 229 XCHAL_DEBUG_VECTOR_VADDR,
233 4, 230 4,
234 .DebugInterruptVector.literal) 231 .DebugInterruptVector.literal)
235 SECTION_VECTOR (_KernelExceptionVector_literal, 232 SECTION_VECTOR (_KernelExceptionVector_literal,
236 .KernelExceptionVector.literal, 233 .KernelExceptionVector.literal,
237 XCHAL_KERNELEXC_VECTOR_VADDR - 4, 234 XCHAL_KERNEL_VECTOR_VADDR - 4,
238 SIZEOF(.DebugInterruptVector.text), 235 SIZEOF(.DebugInterruptVector.text),
239 .DebugInterruptVector.text) 236 .DebugInterruptVector.text)
240 SECTION_VECTOR (_KernelExceptionVector_text, 237 SECTION_VECTOR (_KernelExceptionVector_text,
241 .KernelExceptionVector.text, 238 .KernelExceptionVector.text,
242 XCHAL_KERNELEXC_VECTOR_VADDR, 239 XCHAL_KERNEL_VECTOR_VADDR,
243 4, 240 4,
244 .KernelExceptionVector.literal) 241 .KernelExceptionVector.literal)
245 SECTION_VECTOR (_UserExceptionVector_literal, 242 SECTION_VECTOR (_UserExceptionVector_literal,
246 .UserExceptionVector.literal, 243 .UserExceptionVector.literal,
247 XCHAL_USEREXC_VECTOR_VADDR - 4, 244 XCHAL_USER_VECTOR_VADDR - 4,
248 SIZEOF(.KernelExceptionVector.text), 245 SIZEOF(.KernelExceptionVector.text),
249 .KernelExceptionVector.text) 246 .KernelExceptionVector.text)
250 SECTION_VECTOR (_UserExceptionVector_text, 247 SECTION_VECTOR (_UserExceptionVector_text,
251 .UserExceptionVector.text, 248 .UserExceptionVector.text,
252 XCHAL_USEREXC_VECTOR_VADDR, 249 XCHAL_USER_VECTOR_VADDR,
253 4, 250 4,
254 .UserExceptionVector.literal) 251 .UserExceptionVector.literal)
255 SECTION_VECTOR (_DoubleExceptionVector_literal, 252 SECTION_VECTOR (_DoubleExceptionVector_literal,
@@ -264,7 +261,7 @@ SECTIONS
264 .DoubleExceptionVector.literal) 261 .DoubleExceptionVector.literal)
265 262
266 . = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3; 263 . = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3;
267 . = ALIGN(1<<XCHAL_MMU_MIN_PTE_PAGE_SIZE); 264 . = ALIGN(1 << 12);
268 265
269 __init_end = .; 266 __init_end = .;
270 267
diff --git a/arch/xtensa/lib/checksum.S b/arch/xtensa/lib/checksum.S
index e2d64dfd530c..9d9cd990afa6 100644
--- a/arch/xtensa/lib/checksum.S
+++ b/arch/xtensa/lib/checksum.S
@@ -16,8 +16,7 @@
16 16
17#include <asm/errno.h> 17#include <asm/errno.h>
18#include <linux/linkage.h> 18#include <linux/linkage.h>
19#define _ASMLANGUAGE 19#include <asm/variant/core.h>
20#include <xtensa/config/core.h>
21 20
22/* 21/*
23 * computes a partial checksum, e.g. for TCP/UDP fragments 22 * computes a partial checksum, e.g. for TCP/UDP fragments
diff --git a/arch/xtensa/lib/memcopy.S b/arch/xtensa/lib/memcopy.S
index e8f6d7eb7222..ddda8f4bc862 100644
--- a/arch/xtensa/lib/memcopy.S
+++ b/arch/xtensa/lib/memcopy.S
@@ -9,7 +9,7 @@
9 * Copyright (C) 2002 - 2005 Tensilica Inc. 9 * Copyright (C) 2002 - 2005 Tensilica Inc.
10 */ 10 */
11 11
12#include <xtensa/coreasm.h> 12#include <asm/variant/core.h>
13 13
14 .macro src_b r, w0, w1 14 .macro src_b r, w0, w1
15#ifdef __XTENSA_EB__ 15#ifdef __XTENSA_EB__
diff --git a/arch/xtensa/lib/memset.S b/arch/xtensa/lib/memset.S
index 4de25134bc62..56a17495b2db 100644
--- a/arch/xtensa/lib/memset.S
+++ b/arch/xtensa/lib/memset.S
@@ -11,7 +11,7 @@
11 * Copyright (C) 2002 Tensilica Inc. 11 * Copyright (C) 2002 Tensilica Inc.
12 */ 12 */
13 13
14#include <xtensa/coreasm.h> 14#include <asm/variant/core.h>
15 15
16/* 16/*
17 * void *memset(void *dst, int c, size_t length) 17 * void *memset(void *dst, int c, size_t length)
diff --git a/arch/xtensa/lib/strncpy_user.S b/arch/xtensa/lib/strncpy_user.S
index 71d55df43893..a834057bda6b 100644
--- a/arch/xtensa/lib/strncpy_user.S
+++ b/arch/xtensa/lib/strncpy_user.S
@@ -11,7 +11,7 @@
11 * Copyright (C) 2002 Tensilica Inc. 11 * Copyright (C) 2002 Tensilica Inc.
12 */ 12 */
13 13
14#include <xtensa/coreasm.h> 14#include <asm/variant/core.h>
15#include <linux/errno.h> 15#include <linux/errno.h>
16 16
17/* Load or store instructions that may cause exceptions use the EX macro. */ 17/* Load or store instructions that may cause exceptions use the EX macro. */
diff --git a/arch/xtensa/lib/strnlen_user.S b/arch/xtensa/lib/strnlen_user.S
index cdff4d670f3b..5e9c1e709b2e 100644
--- a/arch/xtensa/lib/strnlen_user.S
+++ b/arch/xtensa/lib/strnlen_user.S
@@ -11,7 +11,7 @@
11 * Copyright (C) 2002 Tensilica Inc. 11 * Copyright (C) 2002 Tensilica Inc.
12 */ 12 */
13 13
14#include <xtensa/coreasm.h> 14#include <asm/variant/core.h>
15 15
16/* Load or store instructions that may cause exceptions use the EX macro. */ 16/* Load or store instructions that may cause exceptions use the EX macro. */
17 17
diff --git a/arch/xtensa/lib/usercopy.S b/arch/xtensa/lib/usercopy.S
index 4641ef510f0e..a8ab1d4fe0ae 100644
--- a/arch/xtensa/lib/usercopy.S
+++ b/arch/xtensa/lib/usercopy.S
@@ -53,7 +53,7 @@
53 * a11/ original length 53 * a11/ original length
54 */ 54 */
55 55
56#include <xtensa/coreasm.h> 56#include <asm/variant/core.h>
57 57
58#ifdef __XTENSA_EB__ 58#ifdef __XTENSA_EB__
59#define ALIGN(R, W0, W1) src R, W0, W1 59#define ALIGN(R, W0, W1) src R, W0, W1
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index dd0dbec2e57e..3dc6f2f07bbe 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -21,7 +21,7 @@
21#include <asm/system.h> 21#include <asm/system.h>
22#include <asm/pgalloc.h> 22#include <asm/pgalloc.h>
23 23
24unsigned long asid_cache = ASID_FIRST_VERSION; 24unsigned long asid_cache = ASID_USER_FIRST;
25void bad_page_fault(struct pt_regs*, unsigned long, int); 25void bad_page_fault(struct pt_regs*, unsigned long, int);
26 26
27/* 27/*
@@ -58,10 +58,10 @@ void do_page_fault(struct pt_regs *regs)
58 return; 58 return;
59 } 59 }
60 60
61 is_write = (exccause == XCHAL_EXCCAUSE_STORE_CACHE_ATTRIBUTE) ? 1 : 0; 61 is_write = (exccause == EXCCAUSE_STORE_CACHE_ATTRIBUTE) ? 1 : 0;
62 is_exec = (exccause == XCHAL_EXCCAUSE_ITLB_PRIVILEGE || 62 is_exec = (exccause == EXCCAUSE_ITLB_PRIVILEGE ||
63 exccause == XCHAL_EXCCAUSE_ITLB_MISS || 63 exccause == EXCCAUSE_ITLB_MISS ||
64 exccause == XCHAL_EXCCAUSE_FETCH_CACHE_ATTRIBUTE) ? 1 : 0; 64 exccause == EXCCAUSE_FETCH_CACHE_ATTRIBUTE) ? 1 : 0;
65 65
66#if 0 66#if 0
67 printk("[%s:%d:%08x:%d:%08x:%s%s]\n", current->comm, current->pid, 67 printk("[%s:%d:%08x:%d:%08x:%s%s]\n", current->comm, current->pid,
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
index 660ef058c149..e1ec2d1e8189 100644
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -141,8 +141,8 @@ void __init bootmem_init(void)
141 if (min_low_pfn > max_pfn) 141 if (min_low_pfn > max_pfn)
142 panic("No memory found!\n"); 142 panic("No memory found!\n");
143 143
144 max_low_pfn = max_pfn < MAX_LOW_MEMORY >> PAGE_SHIFT ? 144 max_low_pfn = max_pfn < MAX_MEM_PFN >> PAGE_SHIFT ?
145 max_pfn : MAX_LOW_MEMORY >> PAGE_SHIFT; 145 max_pfn : MAX_MEM_PFN >> PAGE_SHIFT;
146 146
147 /* Find an area to use for the bootmem bitmap. */ 147 /* Find an area to use for the bootmem bitmap. */
148 148
@@ -215,7 +215,7 @@ void __init init_mmu (void)
215 215
216 /* Set rasid register to a known value. */ 216 /* Set rasid register to a known value. */
217 217
218 set_rasid_register (ASID_ALL_RESERVED); 218 set_rasid_register (ASID_USER_FIRST);
219 219
220 /* Set PTEVADDR special register to the start of the page 220 /* Set PTEVADDR special register to the start of the page
221 * table, which is in kernel mappable space (ie. not 221 * table, which is in kernel mappable space (ie. not
diff --git a/arch/xtensa/mm/misc.S b/arch/xtensa/mm/misc.S
index 327c0f17187c..ae085332c607 100644
--- a/arch/xtensa/mm/misc.S
+++ b/arch/xtensa/mm/misc.S
@@ -19,9 +19,8 @@
19#include <linux/linkage.h> 19#include <linux/linkage.h>
20#include <asm/page.h> 20#include <asm/page.h>
21#include <asm/pgtable.h> 21#include <asm/pgtable.h>
22 22#include <asm/asmmacro.h>
23#include <xtensa/cacheasm.h> 23#include <asm/cacheasm.h>
24#include <xtensa/cacheattrasm.h>
25 24
26/* clear_page (page) */ 25/* clear_page (page) */
27 26
@@ -74,104 +73,66 @@ ENTRY(copy_page)
74 73
75 retw 74 retw
76 75
77
78/* 76/*
79 * void __flush_invalidate_cache_all(void) 77 * void __invalidate_icache_page(ulong start)
80 */ 78 */
81 79
82ENTRY(__flush_invalidate_cache_all) 80ENTRY(__invalidate_icache_page)
83 entry sp, 16 81 entry sp, 16
84 dcache_writeback_inv_all a2, a3
85 icache_invalidate_all a2, a3
86 retw
87 82
88/* 83 ___invalidate_icache_page a2 a3
89 * void __invalidate_icache_all(void) 84 isync
90 */
91 85
92ENTRY(__invalidate_icache_all)
93 entry sp, 16
94 icache_invalidate_all a2, a3
95 retw 86 retw
96 87
97/* 88/*
98 * void __flush_invalidate_dcache_all(void) 89 * void __invalidate_dcache_page(ulong start)
99 */ 90 */
100 91
101ENTRY(__flush_invalidate_dcache_all) 92ENTRY(__invalidate_dcache_page)
102 entry sp, 16 93 entry sp, 16
103 dcache_writeback_inv_all a2, a3
104 retw
105
106 94
107/* 95 ___invalidate_dcache_page a2 a3
108 * void __flush_invalidate_cache_range(ulong start, ulong size) 96 dsync
109 */
110 97
111ENTRY(__flush_invalidate_cache_range)
112 entry sp, 16
113 mov a4, a2
114 mov a5, a3
115 dcache_writeback_inv_region a4, a5, a6
116 icache_invalidate_region a2, a3, a4
117 retw 98 retw
118 99
119/* 100/*
120 * void __invalidate_icache_page(ulong start) 101 * void __flush_invalidate_dcache_page(ulong start)
121 */ 102 */
122 103
123ENTRY(__invalidate_icache_page) 104ENTRY(__flush_invalidate_dcache_page)
124 entry sp, 16 105 entry sp, 16
125 movi a3, PAGE_SIZE
126 icache_invalidate_region a2, a3, a4
127 retw
128 106
129/* 107 ___flush_invalidate_dcache_page a2 a3
130 * void __invalidate_dcache_page(ulong start)
131 */
132 108
133ENTRY(__invalidate_dcache_page) 109 dsync
134 entry sp, 16
135 movi a3, PAGE_SIZE
136 dcache_invalidate_region a2, a3, a4
137 retw 110 retw
138 111
139/* 112/*
140 * void __invalidate_icache_range(ulong start, ulong size) 113 * void __flush_dcache_page(ulong start)
141 */ 114 */
142 115
143ENTRY(__invalidate_icache_range) 116ENTRY(__flush_dcache_page)
144 entry sp, 16 117 entry sp, 16
145 icache_invalidate_region a2, a3, a4
146 retw
147 118
148/* 119 ___flush_dcache_page a2 a3
149 * void __invalidate_dcache_range(ulong start, ulong size)
150 */
151 120
152ENTRY(__invalidate_dcache_range) 121 dsync
153 entry sp, 16
154 dcache_invalidate_region a2, a3, a4
155 retw 122 retw
156 123
157/*
158 * void __flush_dcache_page(ulong start)
159 */
160 124
161ENTRY(__flush_dcache_page)
162 entry sp, 16
163 movi a3, PAGE_SIZE
164 dcache_writeback_region a2, a3, a4
165 retw
166 125
167/* 126/*
168 * void __flush_invalidate_dcache_page(ulong start) 127 * void __invalidate_icache_range(ulong start, ulong size)
169 */ 128 */
170 129
171ENTRY(__flush_invalidate_dcache_page) 130ENTRY(__invalidate_icache_range)
172 entry sp, 16 131 entry sp, 16
173 movi a3, PAGE_SIZE 132
174 dcache_writeback_inv_region a2, a3, a4 133 ___invalidate_icache_range a2 a3 a4
134 isync
135
175 retw 136 retw
176 137
177/* 138/*
@@ -180,195 +141,69 @@ ENTRY(__flush_invalidate_dcache_page)
180 141
181ENTRY(__flush_invalidate_dcache_range) 142ENTRY(__flush_invalidate_dcache_range)
182 entry sp, 16 143 entry sp, 16
183 dcache_writeback_inv_region a2, a3, a4
184 retw
185 144
186/* 145 ___flush_invalidate_dcache_range a2 a3 a4
187 * void __invalidate_dcache_all(void) 146 dsync
188 */
189 147
190ENTRY(__invalidate_dcache_all)
191 entry sp, 16
192 dcache_invalidate_all a2, a3
193 retw 148 retw
194 149
195/* 150/*
196 * void __flush_invalidate_dcache_page_phys(ulong start) 151 * void _flush_dcache_range(ulong start, ulong size)
197 */ 152 */
198 153
199ENTRY(__flush_invalidate_dcache_page_phys) 154ENTRY(__flush_dcache_range)
200 entry sp, 16 155 entry sp, 16
201 156
202 movi a3, XCHAL_DCACHE_SIZE 157 ___flush_dcache_range a2 a3 a4
203 movi a4, PAGE_MASK | 1
204 addi a2, a2, 1
205
2061: addi a3, a3, -XCHAL_DCACHE_LINESIZE
207
208 ldct a6, a3
209 dsync 158 dsync
210 and a6, a6, a4
211 beq a6, a2, 2f
212 bgeui a3, 2, 1b
213 retw
214 159
2152: diwbi a3, 0
216 bgeui a3, 2, 1b
217 retw 160 retw
218 161
219ENTRY(check_dcache_low0) 162/*
220 entry sp, 16 163 * void _invalidate_dcache_range(ulong start, ulong size)
221 164 */
222 movi a3, XCHAL_DCACHE_SIZE / 4
223 movi a4, PAGE_MASK | 1
224 addi a2, a2, 1
225
2261: addi a3, a3, -XCHAL_DCACHE_LINESIZE
227
228 ldct a6, a3
229 dsync
230 and a6, a6, a4
231 beq a6, a2, 2f
232 bgeui a3, 2, 1b
233 retw
234
2352: j 2b
236
237ENTRY(check_dcache_high0)
238 entry sp, 16
239
240 movi a5, XCHAL_DCACHE_SIZE / 4
241 movi a3, XCHAL_DCACHE_SIZE / 2
242 movi a4, PAGE_MASK | 1
243 addi a2, a2, 1
244
2451: addi a3, a3, -XCHAL_DCACHE_LINESIZE
246 addi a5, a5, -XCHAL_DCACHE_LINESIZE
247
248 ldct a6, a3
249 dsync
250 and a6, a6, a4
251 beq a6, a2, 2f
252 bgeui a5, 2, 1b
253 retw
254
2552: j 2b
256 165
257ENTRY(check_dcache_low1) 166ENTRY(__invalidate_dcache_range)
258 entry sp, 16 167 entry sp, 16
259 168
260 movi a5, XCHAL_DCACHE_SIZE / 4 169 ___invalidate_dcache_range a2 a3 a4
261 movi a3, XCHAL_DCACHE_SIZE * 3 / 4
262 movi a4, PAGE_MASK | 1
263 addi a2, a2, 1
264 170
2651: addi a3, a3, -XCHAL_DCACHE_LINESIZE
266 addi a5, a5, -XCHAL_DCACHE_LINESIZE
267 171
268 ldct a6, a3
269 dsync
270 and a6, a6, a4
271 beq a6, a2, 2f
272 bgeui a5, 2, 1b
273 retw 172 retw
274 173
2752: j 2b 174/*
175 * void _invalidate_icache_all(void)
176 */
276 177
277ENTRY(check_dcache_high1) 178ENTRY(__invalidate_icache_all)
278 entry sp, 16 179 entry sp, 16
279 180
280 movi a5, XCHAL_DCACHE_SIZE / 4 181 ___invalidate_icache_all a2 a3
281 movi a3, XCHAL_DCACHE_SIZE 182 isync
282 movi a4, PAGE_MASK | 1
283 addi a2, a2, 1
284
2851: addi a3, a3, -XCHAL_DCACHE_LINESIZE
286 addi a5, a5, -XCHAL_DCACHE_LINESIZE
287 183
288 ldct a6, a3
289 dsync
290 and a6, a6, a4
291 beq a6, a2, 2f
292 bgeui a5, 2, 1b
293 retw 184 retw
294 185
2952: j 2b
296
297
298/* 186/*
299 * void __invalidate_icache_page_phys(ulong start) 187 * void _flush_invalidate_dcache_all(void)
300 */ 188 */
301 189
302ENTRY(__invalidate_icache_page_phys) 190ENTRY(__flush_invalidate_dcache_all)
303 entry sp, 16 191 entry sp, 16
304 192
305 movi a3, XCHAL_ICACHE_SIZE 193 ___flush_invalidate_dcache_all a2 a3
306 movi a4, PAGE_MASK | 1 194 dsync
307 addi a2, a2, 1
308
3091: addi a3, a3, -XCHAL_ICACHE_LINESIZE
310
311 lict a6, a3
312 isync
313 and a6, a6, a4
314 beq a6, a2, 2f
315 bgeui a3, 2, 1b
316 retw
317 195
3182: iii a3, 0
319 bgeui a3, 2, 1b
320 retw 196 retw
321 197
198/*
199 * void _invalidate_dcache_all(void)
200 */
322 201
323#if 0 202ENTRY(__invalidate_dcache_all)
324
325 movi a3, XCHAL_DCACHE_WAYS - 1
326 movi a4, PAGE_SIZE
327
3281: mov a5, a2
329 add a6, a2, a4
330
3312: diwbi a5, 0
332 diwbi a5, XCHAL_DCACHE_LINESIZE
333 diwbi a5, XCHAL_DCACHE_LINESIZE * 2
334 diwbi a5, XCHAL_DCACHE_LINESIZE * 3
335
336 addi a5, a5, XCHAL_DCACHE_LINESIZE * 4
337 blt a5, a6, 2b
338
339 addi a3, a3, -1
340 addi a2, a2, XCHAL_DCACHE_SIZE / XCHAL_DCACHE_WAYS
341 bgez a3, 1b
342
343 retw
344
345ENTRY(__invalidate_icache_page_index)
346 entry sp, 16 203 entry sp, 16
347 204
348 movi a3, XCHAL_ICACHE_WAYS - 1 205 ___invalidate_dcache_all a2 a3
349 movi a4, PAGE_SIZE 206 dsync
350
3511: mov a5, a2
352 add a6, a2, a4
353
3542: iii a5, 0
355 iii a5, XCHAL_ICACHE_LINESIZE
356 iii a5, XCHAL_ICACHE_LINESIZE * 2
357 iii a5, XCHAL_ICACHE_LINESIZE * 3
358
359 addi a5, a5, XCHAL_ICACHE_LINESIZE * 4
360 blt a5, a6, 2b
361
362 addi a3, a3, -1
363 addi a2, a2, XCHAL_ICACHE_SIZE / XCHAL_ICACHE_WAYS
364 bgez a3, 2b
365 207
366 retw 208 retw
367 209
368#endif
369
370
371
372
373
374
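(Editor's note on the misc.S hunks above: the open-coded per-page and physical-tag loops are replaced by thin entry points that expand a single range or whole-cache macro — ___invalidate_icache_range, ___flush_invalidate_dcache_all, and so on — followed by a dsync or isync barrier. A minimal C-level sketch of what such a range writeback-invalidate amounts to, assuming the standard Xtensa "hit" cache-line ops and a power-of-two line size; the function name is illustrative, not one of the kernel macros.)

static inline void sketch_flush_invalidate_dcache_range(unsigned long start,
							 unsigned long size)
{
	unsigned long end = start + size;
	unsigned long line = start & ~(XCHAL_DCACHE_LINESIZE - 1);

	/* Write back and invalidate every line overlapping the range. */
	for (; line < end; line += XCHAL_DCACHE_LINESIZE)
		__asm__ __volatile__("dhwbi %0, 0" : : "a" (line) : "memory");

	/* Wait for the cache operations to complete. */
	__asm__ __volatile__("dsync" : : : "memory");
}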
diff --git a/arch/xtensa/mm/tlb.c b/arch/xtensa/mm/tlb.c
index 0fefb8666874..239461d8ea88 100644
--- a/arch/xtensa/mm/tlb.c
+++ b/arch/xtensa/mm/tlb.c
@@ -24,12 +24,12 @@
24 24
25static inline void __flush_itlb_all (void) 25static inline void __flush_itlb_all (void)
26{ 26{
27 int way, index; 27 int w, i;
28 28
29 for (way = 0; way < XCHAL_ITLB_ARF_WAYS; way++) { 29 for (w = 0; w < ITLB_ARF_WAYS; w++) {
30 for (index = 0; index < ITLB_ENTRIES_PER_ARF_WAY; index++) { 30 for (i = 0; i < (1 << XCHAL_ITLB_ARF_ENTRIES_LOG2); i++) {
31 int entry = way + (index << PAGE_SHIFT); 31 int e = w + (i << PAGE_SHIFT);
32 invalidate_itlb_entry_no_isync (entry); 32 invalidate_itlb_entry_no_isync(e);
33 } 33 }
34 } 34 }
35 asm volatile ("isync\n"); 35 asm volatile ("isync\n");
@@ -37,12 +37,12 @@ static inline void __flush_itlb_all (void)
37 37
38static inline void __flush_dtlb_all (void) 38static inline void __flush_dtlb_all (void)
39{ 39{
40 int way, index; 40 int w, i;
41 41
42 for (way = 0; way < XCHAL_DTLB_ARF_WAYS; way++) { 42 for (w = 0; w < DTLB_ARF_WAYS; w++) {
43 for (index = 0; index < DTLB_ENTRIES_PER_ARF_WAY; index++) { 43 for (i = 0; i < (1 << XCHAL_DTLB_ARF_ENTRIES_LOG2); i++) {
44 int entry = way + (index << PAGE_SHIFT); 44 int e = w + (i << PAGE_SHIFT);
45 invalidate_dtlb_entry_no_isync (entry); 45 invalidate_dtlb_entry_no_isync(e);
46 } 46 }
47 } 47 }
48 asm volatile ("isync\n"); 48 asm volatile ("isync\n");
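(Editor's note: in the rewritten __flush_itlb_all()/__flush_dtlb_all() loops, the value passed to the invalidate helpers encodes the auto-refill way in the low bits and the entry index above PAGE_SHIFT. A minimal sketch of that encoding; the helper name is assumed for illustration only.)

/* entry = way + (index << PAGE_SHIFT); each full flush visits
 * ARF_WAYS << XCHAL_xTLB_ARF_ENTRIES_LOG2 entries per TLB. */
static inline unsigned int arf_tlb_entry(unsigned int way, unsigned int index)
{
	return way + (index << PAGE_SHIFT);
}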
@@ -63,21 +63,25 @@ void flush_tlb_all (void)
63 63
64void flush_tlb_mm(struct mm_struct *mm) 64void flush_tlb_mm(struct mm_struct *mm)
65{ 65{
66#if 0
67 printk("[tlbmm<%lx>]\n", (unsigned long)mm->context);
68#endif
69
70 if (mm == current->active_mm) { 66 if (mm == current->active_mm) {
71 int flags; 67 int flags;
72 local_save_flags(flags); 68 local_save_flags(flags);
73 get_new_mmu_context(mm, asid_cache); 69 __get_new_mmu_context(mm);
74 set_rasid_register(ASID_INSERT(mm->context)); 70 __load_mmu_context(mm);
75 local_irq_restore(flags); 71 local_irq_restore(flags);
76 } 72 }
77 else 73 else
78 mm->context = 0; 74 mm->context = 0;
79} 75}
80 76
77#define _ITLB_ENTRIES (ITLB_ARF_WAYS << XCHAL_ITLB_ARF_ENTRIES_LOG2)
78#define _DTLB_ENTRIES (DTLB_ARF_WAYS << XCHAL_DTLB_ARF_ENTRIES_LOG2)
79#if _ITLB_ENTRIES > _DTLB_ENTRIES
80# define _TLB_ENTRIES _ITLB_ENTRIES
81#else
82# define _TLB_ENTRIES _DTLB_ENTRIES
83#endif
84
81void flush_tlb_range (struct vm_area_struct *vma, 85void flush_tlb_range (struct vm_area_struct *vma,
82 unsigned long start, unsigned long end) 86 unsigned long start, unsigned long end)
83{ 87{
@@ -93,7 +97,7 @@ void flush_tlb_range (struct vm_area_struct *vma,
93#endif 97#endif
94 local_save_flags(flags); 98 local_save_flags(flags);
95 99
96 if (end-start + (PAGE_SIZE-1) <= SMALLEST_NTLB_ENTRIES << PAGE_SHIFT) { 100 if (end-start + (PAGE_SIZE-1) <= _TLB_ENTRIES << PAGE_SHIFT) {
97 int oldpid = get_rasid_register(); 101 int oldpid = get_rasid_register();
98 set_rasid_register (ASID_INSERT(mm->context)); 102 set_rasid_register (ASID_INSERT(mm->context));
99 start &= PAGE_MASK; 103 start &= PAGE_MASK;
@@ -111,9 +115,7 @@ void flush_tlb_range (struct vm_area_struct *vma,
111 115
112 set_rasid_register(oldpid); 116 set_rasid_register(oldpid);
113 } else { 117 } else {
114 get_new_mmu_context(mm, asid_cache); 118 flush_tlb_mm(mm);
115 if (mm == current->active_mm)
116 set_rasid_register(ASID_INSERT(mm->context));
117 } 119 }
118 local_irq_restore(flags); 120 local_irq_restore(flags);
119} 121}
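(Editor's note: the new _TLB_ENTRIES macro selects the larger of the two auto-refill entry counts, and flush_tlb_range() above uses it as the cutover between per-page invalidation and simply dropping the whole context via flush_tlb_mm(). A minimal worked example, with assumed configuration values rather than anything taken from this patch.)

/* Assumed example values: 4 auto-refill ways, ENTRIES_LOG2 == 2 for
 * both TLBs, 4 KiB pages.
 *   _ITLB_ENTRIES == _DTLB_ENTRIES == 4 << 2 == 16
 *   threshold == _TLB_ENTRIES << PAGE_SHIFT == 16 << 12 == 64 KiB
 * Ranges up to 64 KiB are invalidated page by page under the range's
 * ASID; anything larger falls back to flush_tlb_mm(). */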
@@ -123,10 +125,6 @@ void flush_tlb_page (struct vm_area_struct *vma, unsigned long page)
123 struct mm_struct* mm = vma->vm_mm; 125 struct mm_struct* mm = vma->vm_mm;
124 unsigned long flags; 126 unsigned long flags;
125 int oldpid; 127 int oldpid;
126#if 0
127 printk("[tlbpage<%02lx,%08lx>]\n",
128 (unsigned long)mm->context, page);
129#endif
130 128
131 if(mm->context == NO_CONTEXT) 129 if(mm->context == NO_CONTEXT)
132 return; 130 return;
@@ -142,404 +140,5 @@ void flush_tlb_page (struct vm_area_struct *vma, unsigned long page)
142 set_rasid_register(oldpid); 140 set_rasid_register(oldpid);
143 141
144 local_irq_restore(flags); 142 local_irq_restore(flags);
145
146#if 0
147 flush_tlb_all();
148 return;
149#endif
150}
151
152
153#ifdef DEBUG_TLB
154
155#define USE_ITLB 0
156#define USE_DTLB 1
157
158struct way_config_t {
159 int indicies;
160 int indicies_log2;
161 int pgsz_log2;
162 int arf;
163};
164
165static struct way_config_t itlb[XCHAL_ITLB_WAYS] =
166{
167 { XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, ENTRIES),
168 XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, ENTRIES_LOG2),
169 XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, PAGESZ_LOG2_MIN),
170 XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, ARF)
171 },
172 { XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, ENTRIES),
173 XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, ENTRIES_LOG2),
174 XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, PAGESZ_LOG2_MIN),
175 XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, ARF)
176 },
177 { XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, ENTRIES),
178 XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, ENTRIES_LOG2),
179 XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, PAGESZ_LOG2_MIN),
180 XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, ARF)
181 },
182 { XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, ENTRIES),
183 XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, ENTRIES_LOG2),
184 XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, PAGESZ_LOG2_MIN),
185 XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, ARF)
186 },
187 { XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, ENTRIES),
188 XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, ENTRIES_LOG2),
189 XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, PAGESZ_LOG2_MIN),
190 XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, ARF)
191 },
192 { XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, ENTRIES),
193 XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, ENTRIES_LOG2),
194 XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, PAGESZ_LOG2_MIN),
195 XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, ARF)
196 },
197 { XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, ENTRIES),
198 XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, ENTRIES_LOG2),
199 XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, PAGESZ_LOG2_MIN),
200 XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, ARF)
201 }
202};
203
204static struct way_config_t dtlb[XCHAL_DTLB_WAYS] =
205{
206 { XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, ENTRIES),
207 XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, ENTRIES_LOG2),
208 XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, PAGESZ_LOG2_MIN),
209 XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, ARF)
210 },
211 { XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, ENTRIES),
212 XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, ENTRIES_LOG2),
213 XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, PAGESZ_LOG2_MIN),
214 XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, ARF)
215 },
216 { XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, ENTRIES),
217 XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, ENTRIES_LOG2),
218 XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, PAGESZ_LOG2_MIN),
219 XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, ARF)
220 },
221 { XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, ENTRIES),
222 XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, ENTRIES_LOG2),
223 XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, PAGESZ_LOG2_MIN),
224 XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, ARF)
225 },
226 { XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, ENTRIES),
227 XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, ENTRIES_LOG2),
228 XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, PAGESZ_LOG2_MIN),
229 XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, ARF)
230 },
231 { XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, ENTRIES),
232 XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, ENTRIES_LOG2),
233 XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, PAGESZ_LOG2_MIN),
234 XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, ARF)
235 },
236 { XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, ENTRIES),
237 XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, ENTRIES_LOG2),
238 XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, PAGESZ_LOG2_MIN),
239 XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, ARF)
240 },
241 { XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, ENTRIES),
242 XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, ENTRIES_LOG2),
243 XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, PAGESZ_LOG2_MIN),
244 XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, ARF)
245 },
246 { XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, ENTRIES),
247 XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, ENTRIES_LOG2),
248 XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, PAGESZ_LOG2_MIN),
249 XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, ARF)
250 },
251 { XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, ENTRIES),
252 XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, ENTRIES_LOG2),
253 XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, PAGESZ_LOG2_MIN),
254 XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, ARF)
255 }
256};
257
258/* Total number of entries: */
259#define ITLB_TOTAL_ENTRIES \
260 XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, ENTRIES) + \
261 XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, ENTRIES) + \
262 XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, ENTRIES) + \
263 XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, ENTRIES) + \
264 XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, ENTRIES) + \
265 XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, ENTRIES) + \
266 XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, ENTRIES)
267#define DTLB_TOTAL_ENTRIES \
268 XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, ENTRIES) + \
269 XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, ENTRIES) + \
270 XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, ENTRIES) + \
271 XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, ENTRIES) + \
272 XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, ENTRIES) + \
273 XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, ENTRIES) + \
274 XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, ENTRIES) + \
275 XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, ENTRIES) + \
276 XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, ENTRIES) + \
277 XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, ENTRIES)
278
279
280typedef struct {
281 unsigned va;
282 unsigned pa;
283 unsigned char asid;
284 unsigned char ca;
285 unsigned char way;
286 unsigned char index;
287 unsigned char pgsz_log2; /* 0 .. 32 */
288 unsigned char type; /* 0=ITLB 1=DTLB */
289} tlb_dump_entry_t;
290
291/* Return -1 if a precedes b, +1 if a follows b, 0 if same: */
292int cmp_tlb_dump_info( tlb_dump_entry_t *a, tlb_dump_entry_t *b )
293{
294 if (a->asid < b->asid) return -1;
295 if (a->asid > b->asid) return 1;
296 if (a->va < b->va) return -1;
297 if (a->va > b->va) return 1;
298 if (a->pa < b->pa) return -1;
299 if (a->pa > b->pa) return 1;
300 if (a->ca < b->ca) return -1;
301 if (a->ca > b->ca) return 1;
302 if (a->way < b->way) return -1;
303 if (a->way > b->way) return 1;
304 if (a->index < b->index) return -1;
305 if (a->index > b->index) return 1;
306 return 0;
307}
308
309void sort_tlb_dump_info( tlb_dump_entry_t *t, int n )
310{
311 int i, j;
312 /* Simple O(n*n) sort: */
313 for (i = 0; i < n-1; i++)
314 for (j = i+1; j < n; j++)
315 if (cmp_tlb_dump_info(t+i, t+j) > 0) {
316 tlb_dump_entry_t tmp = t[i];
317 t[i] = t[j];
318 t[j] = tmp;
319 }
320}
321
322
323static tlb_dump_entry_t itlb_dump_info[ITLB_TOTAL_ENTRIES];
324static tlb_dump_entry_t dtlb_dump_info[DTLB_TOTAL_ENTRIES];
325
326
327static inline char *way_type (int type)
328{
329 return type ? "autorefill" : "non-autorefill";
330}
331
332void print_entry (struct way_config_t *way_info,
333 unsigned int way,
334 unsigned int index,
335 unsigned int virtual,
336 unsigned int translation)
337{
338 char valid_chr;
339 unsigned int va, pa, asid, ca;
340
341 va = virtual &
342 ~((1 << (way_info->pgsz_log2 + way_info->indicies_log2)) - 1);
343 asid = virtual & ((1 << XCHAL_MMU_ASID_BITS) - 1);
344 pa = translation & ~((1 << way_info->pgsz_log2) - 1);
345 ca = translation & ((1 << XCHAL_MMU_CA_BITS) - 1);
346 valid_chr = asid ? 'V' : 'I';
347
348 /* Compute and incorporate the effect of the index bits on the
349 * va. It's more useful for kernel debugging, since we always
350 * want to know the effective va anyway. */
351
352 va += index << way_info->pgsz_log2;
353
354 printk ("\t[%d,%d] (%c) vpn 0x%.8x ppn 0x%.8x asid 0x%.2x am 0x%x\n",
355 way, index, valid_chr, va, pa, asid, ca);
356}
357
358void print_itlb_entry (struct way_config_t *way_info, int way, int index)
359{
360 print_entry (way_info, way, index,
361 read_itlb_virtual (way + (index << way_info->pgsz_log2)),
362 read_itlb_translation (way + (index << way_info->pgsz_log2)));
363}
364
365void print_dtlb_entry (struct way_config_t *way_info, int way, int index)
366{
367 print_entry (way_info, way, index,
368 read_dtlb_virtual (way + (index << way_info->pgsz_log2)),
369 read_dtlb_translation (way + (index << way_info->pgsz_log2)));
370}
371
372void dump_itlb (void)
373{
374 int way, index;
375
376 printk ("\nITLB: ways = %d\n", XCHAL_ITLB_WAYS);
377
378 for (way = 0; way < XCHAL_ITLB_WAYS; way++) {
379 printk ("\nWay: %d, Entries: %d, MinPageSize: %d, Type: %s\n",
380 way, itlb[way].indicies,
381 itlb[way].pgsz_log2, way_type(itlb[way].arf));
382 for (index = 0; index < itlb[way].indicies; index++) {
383 print_itlb_entry(&itlb[way], way, index);
384 }
385 }
386}
387
388void dump_dtlb (void)
389{
390 int way, index;
391
392 printk ("\nDTLB: ways = %d\n", XCHAL_DTLB_WAYS);
393
394 for (way = 0; way < XCHAL_DTLB_WAYS; way++) {
395 printk ("\nWay: %d, Entries: %d, MinPageSize: %d, Type: %s\n",
396 way, dtlb[way].indicies,
397 dtlb[way].pgsz_log2, way_type(dtlb[way].arf));
398 for (index = 0; index < dtlb[way].indicies; index++) {
399 print_dtlb_entry(&dtlb[way], way, index);
400 }
401 }
402}
403
404void dump_tlb (tlb_dump_entry_t *tinfo, struct way_config_t *config,
405 int entries, int ways, int type, int show_invalid)
406{
407 tlb_dump_entry_t *e = tinfo;
408 int way, i;
409
410 /* Gather all info: */
411 for (way = 0; way < ways; way++) {
412 struct way_config_t *cfg = config + way;
413 for (i = 0; i < cfg->indicies; i++) {
414 unsigned wayindex = way + (i << cfg->pgsz_log2);
415 unsigned vv = (type ? read_dtlb_virtual (wayindex)
416 : read_itlb_virtual (wayindex));
417 unsigned pp = (type ? read_dtlb_translation (wayindex)
418 : read_itlb_translation (wayindex));
419
420 /* Compute and incorporate the effect of the index bits on the
421 * va. It's more useful for kernel debugging, since we always
422 * want to know the effective va anyway. */
423
424 e->va = (vv & ~((1 << (cfg->pgsz_log2 + cfg->indicies_log2)) - 1));
425 e->va += (i << cfg->pgsz_log2);
426 e->pa = (pp & ~((1 << cfg->pgsz_log2) - 1));
427 e->asid = (vv & ((1 << XCHAL_MMU_ASID_BITS) - 1));
428 e->ca = (pp & ((1 << XCHAL_MMU_CA_BITS) - 1));
429 e->way = way;
430 e->index = i;
431 e->pgsz_log2 = cfg->pgsz_log2;
432 e->type = type;
433 e++;
434 }
435 }
436#if 1
437 /* Sort by ASID and VADDR: */
438 sort_tlb_dump_info (tinfo, entries);
439#endif
440
441 /* Display all sorted info: */
442 printk ("\n%cTLB dump:\n", (type ? 'D' : 'I'));
443 for (e = tinfo, i = 0; i < entries; i++, e++) {
444#if 0
445 if (e->asid == 0 && !show_invalid)
446 continue;
447#endif
448 printk ("%c way=%d i=%d ASID=%02X V=%08X -> P=%08X CA=%X (%d %cB)\n",
449 (e->type ? 'D' : 'I'), e->way, e->index,
450 e->asid, e->va, e->pa, e->ca,
451 (1 << (e->pgsz_log2 % 10)),
452 " kMG"[e->pgsz_log2 / 10]
453 );
454 }
455}
456
457void dump_tlbs2 (int showinv)
458{
459 dump_tlb (itlb_dump_info, itlb, ITLB_TOTAL_ENTRIES, XCHAL_ITLB_WAYS, 0, showinv);
460 dump_tlb (dtlb_dump_info, dtlb, DTLB_TOTAL_ENTRIES, XCHAL_DTLB_WAYS, 1, showinv);
461}
462
463void dump_all_tlbs (void)
464{
465 dump_tlbs2 (1);
466}
467
468void dump_valid_tlbs (void)
469{
470 dump_tlbs2 (0);
471} 143}
472 144
473
474void dump_tlbs (void)
475{
476 dump_itlb();
477 dump_dtlb();
478}
479
480void dump_cache_tag(int dcache, int idx)
481{
482 int w, i, s, e;
483 unsigned long tag, index;
484 unsigned long num_lines, num_ways, cache_size, line_size;
485
486 num_ways = dcache ? XCHAL_DCACHE_WAYS : XCHAL_ICACHE_WAYS;
487 cache_size = dcache ? XCHAL_DCACHE_SIZE : XCHAL_ICACHE_SIZE;
488 line_size = dcache ? XCHAL_DCACHE_LINESIZE : XCHAL_ICACHE_LINESIZE;
489
490 num_lines = cache_size / num_ways;
491
492 s = 0; e = num_lines;
493
494 if (idx >= 0)
495 e = (s = idx * line_size) + 1;
496
497 for (i = s; i < e; i+= line_size) {
498 printk("\nline %#08x:", i);
499 for (w = 0; w < num_ways; w++) {
500 index = w * num_lines + i;
501 if (dcache)
502 __asm__ __volatile__("ldct %0, %1\n\t"
503 : "=a"(tag) : "a"(index));
504 else
505 __asm__ __volatile__("lict %0, %1\n\t"
506 : "=a"(tag) : "a"(index));
507
508 printk(" %#010lx", tag);
509 }
510 }
511 printk ("\n");
512}
513
514void dump_icache(int index)
515{
516 unsigned long data, addr;
517 int w, i;
518
519 const unsigned long num_ways = XCHAL_ICACHE_WAYS;
520 const unsigned long cache_size = XCHAL_ICACHE_SIZE;
521 const unsigned long line_size = XCHAL_ICACHE_LINESIZE;
522 const unsigned long num_lines = cache_size / num_ways / line_size;
523
524 for (w = 0; w < num_ways; w++) {
525 printk ("\nWay %d", w);
526
527 for (i = 0; i < line_size; i+= 4) {
528 addr = w * num_lines + index * line_size + i;
529 __asm__ __volatile__("licw %0, %1\n\t"
530 : "=a"(data) : "a"(addr));
531 printk(" %#010lx", data);
532 }
533 }
534 printk ("\n");
535}
536
537void dump_cache_tags(void)
538{
539 printk("Instruction cache\n");
540 dump_cache_tag(0, -1);
541 printk("Data cache\n");
542 dump_cache_tag(1, -1);
543}
544
545#endif
diff --git a/arch/xtensa/platform-iss/console.c b/arch/xtensa/platform-iss/console.c
index 5c947cae7520..2f4f20ffe666 100644
--- a/arch/xtensa/platform-iss/console.c
+++ b/arch/xtensa/platform-iss/console.c
@@ -25,11 +25,15 @@
25#include <asm/uaccess.h> 25#include <asm/uaccess.h>
26#include <asm/irq.h> 26#include <asm/irq.h>
27 27
28#include <xtensa/simcall.h> 28#include <asm/platform/simcall.h>
29 29
30#include <linux/tty.h> 30#include <linux/tty.h>
31#include <linux/tty_flip.h> 31#include <linux/tty_flip.h>
32 32
33#ifdef SERIAL_INLINE
34#define _INLINE_ inline
35#endif
36
33#define SERIAL_MAX_NUM_LINES 1 37#define SERIAL_MAX_NUM_LINES 1
34#define SERIAL_TIMER_VALUE (20 * HZ) 38#define SERIAL_TIMER_VALUE (20 * HZ)
35 39
@@ -191,7 +195,7 @@ static int rs_read_proc(char *page, char **start, off_t off, int count,
191} 195}
192 196
193 197
194static const struct tty_operations serial_ops = { 198static struct tty_operations serial_ops = {
195 .open = rs_open, 199 .open = rs_open,
196 .close = rs_close, 200 .close = rs_close,
197 .write = rs_write, 201 .write = rs_write,
diff --git a/arch/xtensa/platform-iss/network.c b/arch/xtensa/platform-iss/network.c
index 15d64414bd60..8ebfc8761229 100644
--- a/arch/xtensa/platform-iss/network.c
+++ b/arch/xtensa/platform-iss/network.c
@@ -34,7 +34,7 @@
34#include <linux/timer.h> 34#include <linux/timer.h>
35#include <linux/platform_device.h> 35#include <linux/platform_device.h>
36 36
37#include <xtensa/simcall.h> 37#include <asm/platform/simcall.h>
38 38
39#define DRIVER_NAME "iss-netdev" 39#define DRIVER_NAME "iss-netdev"
40#define ETH_MAX_PACKET 1500 40#define ETH_MAX_PACKET 1500