diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2014-01-25 13:49:30 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-01-25 13:49:30 -0500 |
commit | 9b83d851a2bdd021e2135999e5bce3eb8fef94e6 (patch) | |
tree | b8703e813b1d8d66fb262cd757f54f3c05966d50 /arch/xtensa/include | |
parent | 2d08cd0ef89a24f5eb6c6801c48cd06bca230d6d (diff) | |
parent | 9ed82c6866e2ab671935a75ea454047e8bddb177 (diff) |
Merge tag 'xtensa-next-20140123' of git://github.com/czankel/xtensa-linux
Pull Xtensa patches from Chris Zankel:
"The major changes are adding support for SMP for Xtensa, fixing and
cleaning up the ISS (simulator) network driver, and better support for
device trees"
* tag 'xtensa-next-20140123' of git://github.com/czankel/xtensa-linux: (40 commits)
xtensa: implement ndelay
xtensa: clean up udelay
xtensa: enable HAVE_PERF_EVENTS
xtensa: remap io area defined in device tree
xtensa: support default device tree buses
xtensa: initialize device tree clock sources
xtensa: xtfpga: fix definitions of platform devices
xtensa: standardize devicetree cpu compatible strings
xtensa: avoid duplicate of IO range definitions
xtensa: fix ATOMCTL register documentation
xtensa: Enable irqs after cpu is set online
xtensa: ISS: raise network polling rate to 10 times/sec
xtensa: remove unused XTENSA_ISS_NETWORK Kconfig parameter
xtensa: ISS: avoid simple_strtoul usage
xtensa: Switch to sched_clock_register()
xtensa: implement CPU hotplug
xtensa: add SMP support
xtensa: add MX irqchip
xtensa: clear timer IRQ unconditionally in its handler
xtensa: clean up do_interrupt/do_IRQ
...
Diffstat (limited to 'arch/xtensa/include')
23 files changed, 503 insertions, 152 deletions
diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild index 228d6aee3a16..5851db291583 100644 --- a/arch/xtensa/include/asm/Kbuild +++ b/arch/xtensa/include/asm/Kbuild | |||
@@ -8,7 +8,6 @@ generic-y += emergency-restart.h | |||
8 | generic-y += errno.h | 8 | generic-y += errno.h |
9 | generic-y += exec.h | 9 | generic-y += exec.h |
10 | generic-y += fcntl.h | 10 | generic-y += fcntl.h |
11 | generic-y += futex.h | ||
12 | generic-y += hardirq.h | 11 | generic-y += hardirq.h |
13 | generic-y += ioctl.h | 12 | generic-y += ioctl.h |
14 | generic-y += irq_regs.h | 13 | generic-y += irq_regs.h |
diff --git a/arch/xtensa/include/asm/barrier.h b/arch/xtensa/include/asm/barrier.h index e1ee6b51dfc5..0a24b04d6b21 100644 --- a/arch/xtensa/include/asm/barrier.h +++ b/arch/xtensa/include/asm/barrier.h | |||
@@ -13,10 +13,6 @@ | |||
13 | #define rmb() barrier() | 13 | #define rmb() barrier() |
14 | #define wmb() mb() | 14 | #define wmb() mb() |
15 | 15 | ||
16 | #ifdef CONFIG_SMP | ||
17 | #error smp_* not defined | ||
18 | #endif | ||
19 | |||
20 | #include <asm-generic/barrier.h> | 16 | #include <asm-generic/barrier.h> |
21 | 17 | ||
22 | #endif /* _XTENSA_SYSTEM_H */ | 18 | #endif /* _XTENSA_SYSTEM_H */ |
diff --git a/arch/xtensa/include/asm/bitops.h b/arch/xtensa/include/asm/bitops.h index 84afe58d5d37..7b6873ae84c2 100644 --- a/arch/xtensa/include/asm/bitops.h +++ b/arch/xtensa/include/asm/bitops.h | |||
@@ -22,12 +22,8 @@ | |||
22 | #include <asm/processor.h> | 22 | #include <asm/processor.h> |
23 | #include <asm/byteorder.h> | 23 | #include <asm/byteorder.h> |
24 | 24 | ||
25 | #ifdef CONFIG_SMP | 25 | #define smp_mb__before_clear_bit() smp_mb() |
26 | # error SMP not supported on this architecture | 26 | #define smp_mb__after_clear_bit() smp_mb() |
27 | #endif | ||
28 | |||
29 | #define smp_mb__before_clear_bit() barrier() | ||
30 | #define smp_mb__after_clear_bit() barrier() | ||
31 | 27 | ||
32 | #include <asm-generic/bitops/non-atomic.h> | 28 | #include <asm-generic/bitops/non-atomic.h> |
33 | 29 | ||
diff --git a/arch/xtensa/include/asm/cacheflush.h b/arch/xtensa/include/asm/cacheflush.h index 127cd48883c4..555a98a18453 100644 --- a/arch/xtensa/include/asm/cacheflush.h +++ b/arch/xtensa/include/asm/cacheflush.h | |||
@@ -1,18 +1,14 @@ | |||
1 | /* | 1 | /* |
2 | * include/asm-xtensa/cacheflush.h | ||
3 | * | ||
4 | * This file is subject to the terms and conditions of the GNU General Public | 2 | * This file is subject to the terms and conditions of the GNU General Public |
5 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
6 | * for more details. | 4 | * for more details. |
7 | * | 5 | * |
8 | * (C) 2001 - 2007 Tensilica Inc. | 6 | * (C) 2001 - 2013 Tensilica Inc. |
9 | */ | 7 | */ |
10 | 8 | ||
11 | #ifndef _XTENSA_CACHEFLUSH_H | 9 | #ifndef _XTENSA_CACHEFLUSH_H |
12 | #define _XTENSA_CACHEFLUSH_H | 10 | #define _XTENSA_CACHEFLUSH_H |
13 | 11 | ||
14 | #ifdef __KERNEL__ | ||
15 | |||
16 | #include <linux/mm.h> | 12 | #include <linux/mm.h> |
17 | #include <asm/processor.h> | 13 | #include <asm/processor.h> |
18 | #include <asm/page.h> | 14 | #include <asm/page.h> |
@@ -51,7 +47,6 @@ extern void __invalidate_icache_page(unsigned long); | |||
51 | extern void __invalidate_icache_range(unsigned long, unsigned long); | 47 | extern void __invalidate_icache_range(unsigned long, unsigned long); |
52 | extern void __invalidate_dcache_range(unsigned long, unsigned long); | 48 | extern void __invalidate_dcache_range(unsigned long, unsigned long); |
53 | 49 | ||
54 | |||
55 | #if XCHAL_DCACHE_IS_WRITEBACK | 50 | #if XCHAL_DCACHE_IS_WRITEBACK |
56 | extern void __flush_invalidate_dcache_all(void); | 51 | extern void __flush_invalidate_dcache_all(void); |
57 | extern void __flush_dcache_page(unsigned long); | 52 | extern void __flush_dcache_page(unsigned long); |
@@ -87,9 +82,22 @@ static inline void __invalidate_icache_page_alias(unsigned long virt, | |||
87 | * (see also Documentation/cachetlb.txt) | 82 | * (see also Documentation/cachetlb.txt) |
88 | */ | 83 | */ |
89 | 84 | ||
90 | #if (DCACHE_WAY_SIZE > PAGE_SIZE) | 85 | #if (DCACHE_WAY_SIZE > PAGE_SIZE) || defined(CONFIG_SMP) |
86 | |||
87 | #ifdef CONFIG_SMP | ||
88 | void flush_cache_all(void); | ||
89 | void flush_cache_range(struct vm_area_struct*, ulong, ulong); | ||
90 | void flush_icache_range(unsigned long start, unsigned long end); | ||
91 | void flush_cache_page(struct vm_area_struct*, | ||
92 | unsigned long, unsigned long); | ||
93 | #else | ||
94 | #define flush_cache_all local_flush_cache_all | ||
95 | #define flush_cache_range local_flush_cache_range | ||
96 | #define flush_icache_range local_flush_icache_range | ||
97 | #define flush_cache_page local_flush_cache_page | ||
98 | #endif | ||
91 | 99 | ||
92 | #define flush_cache_all() \ | 100 | #define local_flush_cache_all() \ |
93 | do { \ | 101 | do { \ |
94 | __flush_invalidate_dcache_all(); \ | 102 | __flush_invalidate_dcache_all(); \ |
95 | __invalidate_icache_all(); \ | 103 | __invalidate_icache_all(); \ |
@@ -103,9 +111,11 @@ static inline void __invalidate_icache_page_alias(unsigned long virt, | |||
103 | 111 | ||
104 | #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 | 112 | #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 |
105 | extern void flush_dcache_page(struct page*); | 113 | extern void flush_dcache_page(struct page*); |
106 | extern void flush_cache_range(struct vm_area_struct*, ulong, ulong); | 114 | |
107 | extern void flush_cache_page(struct vm_area_struct*, | 115 | void local_flush_cache_range(struct vm_area_struct *vma, |
108 | unsigned long, unsigned long); | 116 | unsigned long start, unsigned long end); |
117 | void local_flush_cache_page(struct vm_area_struct *vma, | ||
118 | unsigned long address, unsigned long pfn); | ||
109 | 119 | ||
110 | #else | 120 | #else |
111 | 121 | ||
@@ -119,13 +129,14 @@ extern void flush_cache_page(struct vm_area_struct*, | |||
119 | #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 | 129 | #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 |
120 | #define flush_dcache_page(page) do { } while (0) | 130 | #define flush_dcache_page(page) do { } while (0) |
121 | 131 | ||
122 | #define flush_cache_page(vma,addr,pfn) do { } while (0) | 132 | #define flush_icache_range local_flush_icache_range |
123 | #define flush_cache_range(vma,start,end) do { } while (0) | 133 | #define flush_cache_page(vma, addr, pfn) do { } while (0) |
134 | #define flush_cache_range(vma, start, end) do { } while (0) | ||
124 | 135 | ||
125 | #endif | 136 | #endif |
126 | 137 | ||
127 | /* Ensure consistency between data and instruction cache. */ | 138 | /* Ensure consistency between data and instruction cache. */ |
128 | #define flush_icache_range(start,end) \ | 139 | #define local_flush_icache_range(start, end) \ |
129 | do { \ | 140 | do { \ |
130 | __flush_dcache_range(start, (end) - (start)); \ | 141 | __flush_dcache_range(start, (end) - (start)); \ |
131 | __invalidate_icache_range(start,(end) - (start)); \ | 142 | __invalidate_icache_range(start,(end) - (start)); \ |
@@ -253,5 +264,4 @@ static inline void flush_invalidate_dcache_unaligned(u32 addr, u32 size) | |||
253 | } | 264 | } |
254 | } | 265 | } |
255 | 266 | ||
256 | #endif /* __KERNEL__ */ | ||
257 | #endif /* _XTENSA_CACHEFLUSH_H */ | 267 | #endif /* _XTENSA_CACHEFLUSH_H */ |
diff --git a/arch/xtensa/include/asm/delay.h b/arch/xtensa/include/asm/delay.h index 3899610c1dff..24304b39a5c7 100644 --- a/arch/xtensa/include/asm/delay.h +++ b/arch/xtensa/include/asm/delay.h | |||
@@ -19,23 +19,57 @@ extern unsigned long loops_per_jiffy; | |||
19 | 19 | ||
20 | static inline void __delay(unsigned long loops) | 20 | static inline void __delay(unsigned long loops) |
21 | { | 21 | { |
22 | /* 2 cycles per loop. */ | 22 | if (__builtin_constant_p(loops) && loops < 2) |
23 | __asm__ __volatile__ ("1: addi %0, %0, -2; bgeui %0, 2, 1b" | 23 | __asm__ __volatile__ ("nop"); |
24 | : "=r" (loops) : "0" (loops)); | 24 | else if (loops >= 2) |
25 | /* 2 cycles per loop. */ | ||
26 | __asm__ __volatile__ ("1: addi %0, %0, -2; bgeui %0, 2, 1b" | ||
27 | : "+r" (loops)); | ||
25 | } | 28 | } |
26 | 29 | ||
27 | /* For SMP/NUMA systems, change boot_cpu_data to something like | 30 | /* Undefined function to get compile-time error */ |
28 | * local_cpu_data->... where local_cpu_data points to the current | 31 | void __bad_udelay(void); |
29 | * cpu. */ | 32 | void __bad_ndelay(void); |
30 | 33 | ||
31 | static __inline__ void udelay (unsigned long usecs) | 34 | #define __MAX_UDELAY 30000 |
35 | #define __MAX_NDELAY 30000 | ||
36 | |||
37 | static inline void __udelay(unsigned long usecs) | ||
32 | { | 38 | { |
33 | unsigned long start = get_ccount(); | 39 | unsigned long start = get_ccount(); |
34 | unsigned long cycles = usecs * (loops_per_jiffy / (1000000UL / HZ)); | 40 | unsigned long cycles = (usecs * (ccount_freq >> 15)) >> 5; |
35 | 41 | ||
36 | /* Note: all variables are unsigned (can wrap around)! */ | 42 | /* Note: all variables are unsigned (can wrap around)! */ |
37 | while (((unsigned long)get_ccount()) - start < cycles) | 43 | while (((unsigned long)get_ccount()) - start < cycles) |
38 | ; | 44 | cpu_relax(); |
45 | } | ||
46 | |||
47 | static inline void udelay(unsigned long usec) | ||
48 | { | ||
49 | if (__builtin_constant_p(usec) && usec >= __MAX_UDELAY) | ||
50 | __bad_udelay(); | ||
51 | else | ||
52 | __udelay(usec); | ||
53 | } | ||
54 | |||
55 | static inline void __ndelay(unsigned long nsec) | ||
56 | { | ||
57 | /* | ||
58 | * Inner shift makes sure multiplication doesn't overflow | ||
59 | * for legitimate nsec values | ||
60 | */ | ||
61 | unsigned long cycles = (nsec * (ccount_freq >> 15)) >> 15; | ||
62 | __delay(cycles); | ||
63 | } | ||
64 | |||
65 | #define ndelay(n) ndelay(n) | ||
66 | |||
67 | static inline void ndelay(unsigned long nsec) | ||
68 | { | ||
69 | if (__builtin_constant_p(nsec) && nsec >= __MAX_NDELAY) | ||
70 | __bad_ndelay(); | ||
71 | else | ||
72 | __ndelay(nsec); | ||
39 | } | 73 | } |
40 | 74 | ||
41 | #endif | 75 | #endif |
diff --git a/arch/xtensa/include/asm/ftrace.h b/arch/xtensa/include/asm/ftrace.h index 73cc3f482304..736b9d214d80 100644 --- a/arch/xtensa/include/asm/ftrace.h +++ b/arch/xtensa/include/asm/ftrace.h | |||
@@ -18,7 +18,7 @@ | |||
18 | __asm__ __volatile__ ( \ | 18 | __asm__ __volatile__ ( \ |
19 | "mov %0, a0\n" \ | 19 | "mov %0, a0\n" \ |
20 | "mov %1, a1\n" \ | 20 | "mov %1, a1\n" \ |
21 | : "=r"(a0), "=r"(a1) : : ); \ | 21 | : "=r"(a0), "=r"(a1)); \ |
22 | MAKE_PC_FROM_RA(a0, a1); }) | 22 | MAKE_PC_FROM_RA(a0, a1); }) |
23 | #ifdef CONFIG_FRAME_POINTER | 23 | #ifdef CONFIG_FRAME_POINTER |
24 | extern unsigned long return_address(unsigned level); | 24 | extern unsigned long return_address(unsigned level); |
diff --git a/arch/xtensa/include/asm/futex.h b/arch/xtensa/include/asm/futex.h new file mode 100644 index 000000000000..b39531babec0 --- /dev/null +++ b/arch/xtensa/include/asm/futex.h | |||
@@ -0,0 +1,147 @@ | |||
1 | /* | ||
2 | * Atomic futex routines | ||
3 | * | ||
4 | * Based on the PowerPC implementation | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * Copyright (C) 2013 TangoTec Ltd. | ||
11 | * | ||
12 | * Baruch Siach <baruch@tkos.co.il> | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_XTENSA_FUTEX_H | ||
16 | #define _ASM_XTENSA_FUTEX_H | ||
17 | |||
18 | #ifdef __KERNEL__ | ||
19 | |||
20 | #include <linux/futex.h> | ||
21 | #include <linux/uaccess.h> | ||
22 | #include <linux/errno.h> | ||
23 | |||
24 | #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \ | ||
25 | __asm__ __volatile( \ | ||
26 | "1: l32i %0, %2, 0\n" \ | ||
27 | insn "\n" \ | ||
28 | " wsr %0, scompare1\n" \ | ||
29 | "2: s32c1i %1, %2, 0\n" \ | ||
30 | " bne %1, %0, 1b\n" \ | ||
31 | " movi %1, 0\n" \ | ||
32 | "3:\n" \ | ||
33 | " .section .fixup,\"ax\"\n" \ | ||
34 | " .align 4\n" \ | ||
35 | "4: .long 3b\n" \ | ||
36 | "5: l32r %0, 4b\n" \ | ||
37 | " movi %1, %3\n" \ | ||
38 | " jx %0\n" \ | ||
39 | " .previous\n" \ | ||
40 | " .section __ex_table,\"a\"\n" \ | ||
41 | " .long 1b,5b,2b,5b\n" \ | ||
42 | " .previous\n" \ | ||
43 | : "=&r" (oldval), "=&r" (ret) \ | ||
44 | : "r" (uaddr), "I" (-EFAULT), "r" (oparg) \ | ||
45 | : "memory") | ||
46 | |||
47 | static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) | ||
48 | { | ||
49 | int op = (encoded_op >> 28) & 7; | ||
50 | int cmp = (encoded_op >> 24) & 15; | ||
51 | int oparg = (encoded_op << 8) >> 20; | ||
52 | int cmparg = (encoded_op << 20) >> 20; | ||
53 | int oldval = 0, ret; | ||
54 | if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) | ||
55 | oparg = 1 << oparg; | ||
56 | |||
57 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) | ||
58 | return -EFAULT; | ||
59 | |||
60 | #if !XCHAL_HAVE_S32C1I | ||
61 | return -ENOSYS; | ||
62 | #endif | ||
63 | |||
64 | pagefault_disable(); | ||
65 | |||
66 | switch (op) { | ||
67 | case FUTEX_OP_SET: | ||
68 | __futex_atomic_op("mov %1, %4", ret, oldval, uaddr, oparg); | ||
69 | break; | ||
70 | case FUTEX_OP_ADD: | ||
71 | __futex_atomic_op("add %1, %0, %4", ret, oldval, uaddr, | ||
72 | oparg); | ||
73 | break; | ||
74 | case FUTEX_OP_OR: | ||
75 | __futex_atomic_op("or %1, %0, %4", ret, oldval, uaddr, | ||
76 | oparg); | ||
77 | break; | ||
78 | case FUTEX_OP_ANDN: | ||
79 | __futex_atomic_op("and %1, %0, %4", ret, oldval, uaddr, | ||
80 | ~oparg); | ||
81 | break; | ||
82 | case FUTEX_OP_XOR: | ||
83 | __futex_atomic_op("xor %1, %0, %4", ret, oldval, uaddr, | ||
84 | oparg); | ||
85 | break; | ||
86 | default: | ||
87 | ret = -ENOSYS; | ||
88 | } | ||
89 | |||
90 | pagefault_enable(); | ||
91 | |||
92 | if (ret) | ||
93 | return ret; | ||
94 | |||
95 | switch (cmp) { | ||
96 | case FUTEX_OP_CMP_EQ: return (oldval == cmparg); | ||
97 | case FUTEX_OP_CMP_NE: return (oldval != cmparg); | ||
98 | case FUTEX_OP_CMP_LT: return (oldval < cmparg); | ||
99 | case FUTEX_OP_CMP_GE: return (oldval >= cmparg); | ||
100 | case FUTEX_OP_CMP_LE: return (oldval <= cmparg); | ||
101 | case FUTEX_OP_CMP_GT: return (oldval > cmparg); | ||
102 | } | ||
103 | |||
104 | return -ENOSYS; | ||
105 | } | ||
106 | |||
107 | static inline int | ||
108 | futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, | ||
109 | u32 oldval, u32 newval) | ||
110 | { | ||
111 | int ret = 0; | ||
112 | u32 prev; | ||
113 | |||
114 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) | ||
115 | return -EFAULT; | ||
116 | |||
117 | #if !XCHAL_HAVE_S32C1I | ||
118 | return -ENOSYS; | ||
119 | #endif | ||
120 | |||
121 | __asm__ __volatile__ ( | ||
122 | " # futex_atomic_cmpxchg_inatomic\n" | ||
123 | "1: l32i %1, %3, 0\n" | ||
124 | " mov %0, %5\n" | ||
125 | " wsr %1, scompare1\n" | ||
126 | "2: s32c1i %0, %3, 0\n" | ||
127 | "3:\n" | ||
128 | " .section .fixup,\"ax\"\n" | ||
129 | " .align 4\n" | ||
130 | "4: .long 3b\n" | ||
131 | "5: l32r %1, 4b\n" | ||
132 | " movi %0, %6\n" | ||
133 | " jx %1\n" | ||
134 | " .previous\n" | ||
135 | " .section __ex_table,\"a\"\n" | ||
136 | " .long 1b,5b,2b,5b\n" | ||
137 | " .previous\n" | ||
138 | : "+r" (ret), "=&r" (prev), "+m" (*uaddr) | ||
139 | : "r" (uaddr), "r" (oldval), "r" (newval), "I" (-EFAULT) | ||
140 | : "memory"); | ||
141 | |||
142 | *uval = prev; | ||
143 | return ret; | ||
144 | } | ||
145 | |||
146 | #endif /* __KERNEL__ */ | ||
147 | #endif /* _ASM_XTENSA_FUTEX_H */ | ||
diff --git a/arch/xtensa/include/asm/initialize_mmu.h b/arch/xtensa/include/asm/initialize_mmu.h index 722553f17db3..600781edc8a3 100644 --- a/arch/xtensa/include/asm/initialize_mmu.h +++ b/arch/xtensa/include/asm/initialize_mmu.h | |||
@@ -26,6 +26,9 @@ | |||
26 | #include <asm/pgtable.h> | 26 | #include <asm/pgtable.h> |
27 | #include <asm/vectors.h> | 27 | #include <asm/vectors.h> |
28 | 28 | ||
29 | #define CA_BYPASS (_PAGE_CA_BYPASS | _PAGE_HW_WRITE | _PAGE_HW_EXEC) | ||
30 | #define CA_WRITEBACK (_PAGE_CA_WB | _PAGE_HW_WRITE | _PAGE_HW_EXEC) | ||
31 | |||
29 | #ifdef __ASSEMBLY__ | 32 | #ifdef __ASSEMBLY__ |
30 | 33 | ||
31 | #define XTENSA_HWVERSION_RC_2009_0 230000 | 34 | #define XTENSA_HWVERSION_RC_2009_0 230000 |
@@ -80,8 +83,6 @@ | |||
80 | /* Step 2: map 0x40000000..0x47FFFFFF to paddr containing this code | 83 | /* Step 2: map 0x40000000..0x47FFFFFF to paddr containing this code |
81 | * and jump to the new mapping. | 84 | * and jump to the new mapping. |
82 | */ | 85 | */ |
83 | #define CA_BYPASS (_PAGE_CA_BYPASS | _PAGE_HW_WRITE | _PAGE_HW_EXEC) | ||
84 | #define CA_WRITEBACK (_PAGE_CA_WB | _PAGE_HW_WRITE | _PAGE_HW_EXEC) | ||
85 | 86 | ||
86 | srli a3, a0, 27 | 87 | srli a3, a0, 27 |
87 | slli a3, a3, 27 | 88 | slli a3, a3, 27 |
@@ -123,13 +124,13 @@ | |||
123 | wdtlb a4, a5 | 124 | wdtlb a4, a5 |
124 | witlb a4, a5 | 125 | witlb a4, a5 |
125 | 126 | ||
126 | movi a5, 0xe0000006 | 127 | movi a5, XCHAL_KIO_CACHED_VADDR + 6 |
127 | movi a4, 0xf0000000 + CA_WRITEBACK | 128 | movi a4, XCHAL_KIO_DEFAULT_PADDR + CA_WRITEBACK |
128 | wdtlb a4, a5 | 129 | wdtlb a4, a5 |
129 | witlb a4, a5 | 130 | witlb a4, a5 |
130 | 131 | ||
131 | movi a5, 0xf0000006 | 132 | movi a5, XCHAL_KIO_BYPASS_VADDR + 6 |
132 | movi a4, 0xf0000000 + CA_BYPASS | 133 | movi a4, XCHAL_KIO_DEFAULT_PADDR + CA_BYPASS |
133 | wdtlb a4, a5 | 134 | wdtlb a4, a5 |
134 | witlb a4, a5 | 135 | witlb a4, a5 |
135 | 136 | ||
diff --git a/arch/xtensa/include/asm/io.h b/arch/xtensa/include/asm/io.h index 700c2e6f2d25..2a042d430c25 100644 --- a/arch/xtensa/include/asm/io.h +++ b/arch/xtensa/include/asm/io.h | |||
@@ -14,20 +14,26 @@ | |||
14 | #ifdef __KERNEL__ | 14 | #ifdef __KERNEL__ |
15 | #include <asm/byteorder.h> | 15 | #include <asm/byteorder.h> |
16 | #include <asm/page.h> | 16 | #include <asm/page.h> |
17 | #include <asm/vectors.h> | ||
17 | #include <linux/bug.h> | 18 | #include <linux/bug.h> |
18 | #include <linux/kernel.h> | 19 | #include <linux/kernel.h> |
19 | 20 | ||
20 | #include <linux/types.h> | 21 | #include <linux/types.h> |
21 | 22 | ||
22 | #define XCHAL_KIO_CACHED_VADDR 0xe0000000 | ||
23 | #define XCHAL_KIO_BYPASS_VADDR 0xf0000000 | ||
24 | #define XCHAL_KIO_PADDR 0xf0000000 | ||
25 | #define XCHAL_KIO_SIZE 0x10000000 | ||
26 | |||
27 | #define IOADDR(x) (XCHAL_KIO_BYPASS_VADDR + (x)) | 23 | #define IOADDR(x) (XCHAL_KIO_BYPASS_VADDR + (x)) |
28 | #define IO_SPACE_LIMIT ~0 | 24 | #define IO_SPACE_LIMIT ~0 |
29 | 25 | ||
30 | #ifdef CONFIG_MMU | 26 | #ifdef CONFIG_MMU |
27 | |||
28 | #if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && CONFIG_OF | ||
29 | extern unsigned long xtensa_kio_paddr; | ||
30 | |||
31 | static inline unsigned long xtensa_get_kio_paddr(void) | ||
32 | { | ||
33 | return xtensa_kio_paddr; | ||
34 | } | ||
35 | #endif | ||
36 | |||
31 | /* | 37 | /* |
32 | * Return the virtual address for the specified bus memory. | 38 | * Return the virtual address for the specified bus memory. |
33 | * Note that we currently don't support any address outside the KIO segment. | 39 | * Note that we currently don't support any address outside the KIO segment. |
diff --git a/arch/xtensa/include/asm/irq.h b/arch/xtensa/include/asm/irq.h index 4c0ccc9c4f4c..f71f88ea7646 100644 --- a/arch/xtensa/include/asm/irq.h +++ b/arch/xtensa/include/asm/irq.h | |||
@@ -43,5 +43,14 @@ static __inline__ int irq_canonicalize(int irq) | |||
43 | } | 43 | } |
44 | 44 | ||
45 | struct irqaction; | 45 | struct irqaction; |
46 | struct irq_domain; | ||
47 | |||
48 | void migrate_irqs(void); | ||
49 | int xtensa_irq_domain_xlate(const u32 *intspec, unsigned int intsize, | ||
50 | unsigned long int_irq, unsigned long ext_irq, | ||
51 | unsigned long *out_hwirq, unsigned int *out_type); | ||
52 | int xtensa_irq_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw); | ||
53 | unsigned xtensa_map_ext_irq(unsigned ext_irq); | ||
54 | unsigned xtensa_get_ext_irq_no(unsigned irq); | ||
46 | 55 | ||
47 | #endif /* _XTENSA_IRQ_H */ | 56 | #endif /* _XTENSA_IRQ_H */ |
diff --git a/arch/xtensa/include/asm/mmu.h b/arch/xtensa/include/asm/mmu.h index 8554b2c8b17a..71afe418d0e5 100644 --- a/arch/xtensa/include/asm/mmu.h +++ b/arch/xtensa/include/asm/mmu.h | |||
@@ -1,11 +1,9 @@ | |||
1 | /* | 1 | /* |
2 | * include/asm-xtensa/mmu.h | ||
3 | * | ||
4 | * This file is subject to the terms and conditions of the GNU General Public | 2 | * This file is subject to the terms and conditions of the GNU General Public |
5 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
6 | * for more details. | 4 | * for more details. |
7 | * | 5 | * |
8 | * Copyright (C) 2001 - 2005 Tensilica Inc. | 6 | * Copyright (C) 2001 - 2013 Tensilica Inc. |
9 | */ | 7 | */ |
10 | 8 | ||
11 | #ifndef _XTENSA_MMU_H | 9 | #ifndef _XTENSA_MMU_H |
@@ -15,8 +13,10 @@ | |||
15 | #include <asm-generic/mmu.h> | 13 | #include <asm-generic/mmu.h> |
16 | #else | 14 | #else |
17 | 15 | ||
18 | /* Default "unsigned long" context */ | 16 | typedef struct { |
19 | typedef unsigned long mm_context_t; | 17 | unsigned long asid[NR_CPUS]; |
18 | unsigned int cpu; | ||
19 | } mm_context_t; | ||
20 | 20 | ||
21 | #endif /* CONFIG_MMU */ | 21 | #endif /* CONFIG_MMU */ |
22 | #endif /* _XTENSA_MMU_H */ | 22 | #endif /* _XTENSA_MMU_H */ |
diff --git a/arch/xtensa/include/asm/mmu_context.h b/arch/xtensa/include/asm/mmu_context.h index d43525a286bb..d33c71a8c9ec 100644 --- a/arch/xtensa/include/asm/mmu_context.h +++ b/arch/xtensa/include/asm/mmu_context.h | |||
@@ -1,13 +1,11 @@ | |||
1 | /* | 1 | /* |
2 | * include/asm-xtensa/mmu_context.h | ||
3 | * | ||
4 | * Switch an MMU context. | 2 | * Switch an MMU context. |
5 | * | 3 | * |
6 | * This file is subject to the terms and conditions of the GNU General Public | 4 | * This file is subject to the terms and conditions of the GNU General Public |
7 | * License. See the file "COPYING" in the main directory of this archive | 5 | * License. See the file "COPYING" in the main directory of this archive |
8 | * for more details. | 6 | * for more details. |
9 | * | 7 | * |
10 | * Copyright (C) 2001 - 2005 Tensilica Inc. | 8 | * Copyright (C) 2001 - 2013 Tensilica Inc. |
11 | */ | 9 | */ |
12 | 10 | ||
13 | #ifndef _XTENSA_MMU_CONTEXT_H | 11 | #ifndef _XTENSA_MMU_CONTEXT_H |
@@ -20,22 +18,25 @@ | |||
20 | #include <linux/stringify.h> | 18 | #include <linux/stringify.h> |
21 | #include <linux/sched.h> | 19 | #include <linux/sched.h> |
22 | 20 | ||
23 | #include <variant/core.h> | 21 | #include <asm/vectors.h> |
24 | 22 | ||
25 | #include <asm/pgtable.h> | 23 | #include <asm/pgtable.h> |
26 | #include <asm/cacheflush.h> | 24 | #include <asm/cacheflush.h> |
27 | #include <asm/tlbflush.h> | 25 | #include <asm/tlbflush.h> |
28 | #include <asm-generic/mm_hooks.h> | 26 | #include <asm-generic/mm_hooks.h> |
27 | #include <asm-generic/percpu.h> | ||
29 | 28 | ||
30 | #if (XCHAL_HAVE_TLBS != 1) | 29 | #if (XCHAL_HAVE_TLBS != 1) |
31 | # error "Linux must have an MMU!" | 30 | # error "Linux must have an MMU!" |
32 | #endif | 31 | #endif |
33 | 32 | ||
34 | extern unsigned long asid_cache; | 33 | DECLARE_PER_CPU(unsigned long, asid_cache); |
34 | #define cpu_asid_cache(cpu) per_cpu(asid_cache, cpu) | ||
35 | 35 | ||
36 | /* | 36 | /* |
37 | * NO_CONTEXT is the invalid ASID value that we don't ever assign to | 37 | * NO_CONTEXT is the invalid ASID value that we don't ever assign to |
38 | * any user or kernel context. | 38 | * any user or kernel context. We use the reserved values in the |
39 | * ASID_INSERT macro below. | ||
39 | * | 40 | * |
40 | * 0 invalid | 41 | * 0 invalid |
41 | * 1 kernel | 42 | * 1 kernel |
@@ -49,6 +50,12 @@ extern unsigned long asid_cache; | |||
49 | #define ASID_MASK ((1 << XCHAL_MMU_ASID_BITS) - 1) | 50 | #define ASID_MASK ((1 << XCHAL_MMU_ASID_BITS) - 1) |
50 | #define ASID_INSERT(x) (0x03020001 | (((x) & ASID_MASK) << 8)) | 51 | #define ASID_INSERT(x) (0x03020001 | (((x) & ASID_MASK) << 8)) |
51 | 52 | ||
53 | #ifdef CONFIG_MMU | ||
54 | void init_mmu(void); | ||
55 | #else | ||
56 | static inline void init_mmu(void) { } | ||
57 | #endif | ||
58 | |||
52 | static inline void set_rasid_register (unsigned long val) | 59 | static inline void set_rasid_register (unsigned long val) |
53 | { | 60 | { |
54 | __asm__ __volatile__ (" wsr %0, rasid\n\t" | 61 | __asm__ __volatile__ (" wsr %0, rasid\n\t" |
@@ -62,64 +69,77 @@ static inline unsigned long get_rasid_register (void) | |||
62 | return tmp; | 69 | return tmp; |
63 | } | 70 | } |
64 | 71 | ||
65 | static inline void | 72 | static inline void get_new_mmu_context(struct mm_struct *mm, unsigned int cpu) |
66 | __get_new_mmu_context(struct mm_struct *mm) | ||
67 | { | 73 | { |
68 | extern void flush_tlb_all(void); | 74 | unsigned long asid = cpu_asid_cache(cpu); |
69 | if (! (++asid_cache & ASID_MASK) ) { | 75 | if ((++asid & ASID_MASK) == 0) { |
70 | flush_tlb_all(); /* start new asid cycle */ | 76 | /* |
71 | asid_cache += ASID_USER_FIRST; | 77 | * Start new asid cycle; continue counting with next |
78 | * incarnation bits; skipping over 0, 1, 2, 3. | ||
79 | */ | ||
80 | local_flush_tlb_all(); | ||
81 | asid += ASID_USER_FIRST; | ||
72 | } | 82 | } |
73 | mm->context = asid_cache; | 83 | cpu_asid_cache(cpu) = asid; |
84 | mm->context.asid[cpu] = asid; | ||
85 | mm->context.cpu = cpu; | ||
74 | } | 86 | } |
75 | 87 | ||
76 | static inline void | 88 | static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu) |
77 | __load_mmu_context(struct mm_struct *mm) | ||
78 | { | 89 | { |
79 | set_rasid_register(ASID_INSERT(mm->context)); | 90 | /* |
91 | * Check if our ASID is of an older version and thus invalid. | ||
92 | */ | ||
93 | |||
94 | if (mm) { | ||
95 | unsigned long asid = mm->context.asid[cpu]; | ||
96 | |||
97 | if (asid == NO_CONTEXT || | ||
98 | ((asid ^ cpu_asid_cache(cpu)) & ~ASID_MASK)) | ||
99 | get_new_mmu_context(mm, cpu); | ||
100 | } | ||
101 | } | ||
102 | |||
103 | static inline void activate_context(struct mm_struct *mm, unsigned int cpu) | ||
104 | { | ||
105 | get_mmu_context(mm, cpu); | ||
106 | set_rasid_register(ASID_INSERT(mm->context.asid[cpu])); | ||
80 | invalidate_page_directory(); | 107 | invalidate_page_directory(); |
81 | } | 108 | } |
82 | 109 | ||
83 | /* | 110 | /* |
84 | * Initialize the context related info for a new mm_struct | 111 | * Initialize the context related info for a new mm_struct |
85 | * instance. | 112 | * instance. Valid cpu values are 0..(NR_CPUS-1), so initializing |
113 | * to -1 says the process has never run on any core. | ||
86 | */ | 114 | */ |
87 | 115 | ||
88 | static inline int | 116 | static inline int init_new_context(struct task_struct *tsk, |
89 | init_new_context(struct task_struct *tsk, struct mm_struct *mm) | 117 | struct mm_struct *mm) |
90 | { | 118 | { |
91 | mm->context = NO_CONTEXT; | 119 | int cpu; |
120 | for_each_possible_cpu(cpu) { | ||
121 | mm->context.asid[cpu] = NO_CONTEXT; | ||
122 | } | ||
123 | mm->context.cpu = -1; | ||
92 | return 0; | 124 | return 0; |
93 | } | 125 | } |
94 | 126 | ||
95 | /* | ||
96 | * After we have set current->mm to a new value, this activates | ||
97 | * the context for the new mm so we see the new mappings. | ||
98 | */ | ||
99 | static inline void | ||
100 | activate_mm(struct mm_struct *prev, struct mm_struct *next) | ||
101 | { | ||
102 | /* Unconditionally get a new ASID. */ | ||
103 | |||
104 | __get_new_mmu_context(next); | ||
105 | __load_mmu_context(next); | ||
106 | } | ||
107 | |||
108 | |||
109 | static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, | 127 | static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, |
110 | struct task_struct *tsk) | 128 | struct task_struct *tsk) |
111 | { | 129 | { |
112 | unsigned long asid = asid_cache; | 130 | unsigned int cpu = smp_processor_id(); |
113 | 131 | int migrated = next->context.cpu != cpu; | |
114 | /* Check if our ASID is of an older version and thus invalid */ | 132 | /* Flush the icache if we migrated to a new core. */ |
115 | 133 | if (migrated) { | |
116 | if (next->context == NO_CONTEXT || ((next->context^asid) & ~ASID_MASK)) | 134 | __invalidate_icache_all(); |
117 | __get_new_mmu_context(next); | 135 | next->context.cpu = cpu; |
118 | 136 | } | |
119 | __load_mmu_context(next); | 137 | if (migrated || prev != next) |
138 | activate_context(next, cpu); | ||
120 | } | 139 | } |
121 | 140 | ||
122 | #define deactivate_mm(tsk, mm) do { } while(0) | 141 | #define activate_mm(prev, next) switch_mm((prev), (next), NULL) |
142 | #define deactivate_mm(tsk, mm) do { } while (0) | ||
123 | 143 | ||
124 | /* | 144 | /* |
125 | * Destroy context related info for an mm_struct that is about | 145 | * Destroy context related info for an mm_struct that is about |
diff --git a/arch/xtensa/include/asm/mxregs.h b/arch/xtensa/include/asm/mxregs.h new file mode 100644 index 000000000000..73dcc5456f68 --- /dev/null +++ b/arch/xtensa/include/asm/mxregs.h | |||
@@ -0,0 +1,46 @@ | |||
1 | /* | ||
2 | * Xtensa MX interrupt distributor | ||
3 | * | ||
4 | * This file is subject to the terms and conditions of the GNU General Public | ||
5 | * License. See the file "COPYING" in the main directory of this archive | ||
6 | * for more details. | ||
7 | * | ||
8 | * Copyright (C) 2008 - 2013 Tensilica Inc. | ||
9 | */ | ||
10 | |||
11 | #ifndef _XTENSA_MXREGS_H | ||
12 | #define _XTENSA_MXREGS_H | ||
13 | |||
14 | /* | ||
15 | * RER/WER at, as Read/write external register | ||
16 | * at: value | ||
17 | * as: address | ||
18 | * | ||
19 | * Address Value | ||
20 | * 00nn 0...0p..p Interrupt Routing, route IRQ n to processor p | ||
21 | * 01pp 0...0d..d 16 bits (d) 'ored' as single IPI to processor p | ||
22 | * 0180 0...0m..m Clear enable specified by mask (m) | ||
23 | * 0184 0...0m..m Set enable specified by mask (m) | ||
24 | * 0190 0...0x..x 8-bit IPI partition register | ||
25 | * VVVVVVVVPPPPUUUUUUUUUUUUUUUUU | ||
26 | * V (10-bit) Release/Version | ||
27 | * P ( 4-bit) Number of cores - 1 | ||
28 | * U (18-bit) ID | ||
29 | * 01a0 i.......i 32-bit ConfigID | ||
30 | * 0200 0...0m..m RunStall core 'n' | ||
31 | * 0220 c Cache coherency enabled | ||
32 | */ | ||
33 | |||
34 | #define MIROUT(irq) (0x000 + (irq)) | ||
35 | #define MIPICAUSE(cpu) (0x100 + (cpu)) | ||
36 | #define MIPISET(cause) (0x140 + (cause)) | ||
37 | #define MIENG 0x180 | ||
38 | #define MIENGSET 0x184 | ||
39 | #define MIASG 0x188 /* Read Global Assert Register */ | ||
40 | #define MIASGSET 0x18c /* Set Global Assert Register */ | ||
41 | #define MIPIPART 0x190 | ||
42 | #define SYSCFGID 0x1a0 | ||
43 | #define MPSCORE 0x200 | ||
44 | #define CCON 0x220 | ||
45 | |||
46 | #endif /* _XTENSA_MXREGS_H */ | ||
diff --git a/arch/xtensa/include/asm/perf_event.h b/arch/xtensa/include/asm/perf_event.h new file mode 100644 index 000000000000..5aa4590acaae --- /dev/null +++ b/arch/xtensa/include/asm/perf_event.h | |||
@@ -0,0 +1,4 @@ | |||
1 | #ifndef __ASM_XTENSA_PERF_EVENT_H | ||
2 | #define __ASM_XTENSA_PERF_EVENT_H | ||
3 | |||
4 | #endif /* __ASM_XTENSA_PERF_EVENT_H */ | ||
diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h index 7e409a5b0ec5..abb59708a3b7 100644 --- a/arch/xtensa/include/asm/processor.h +++ b/arch/xtensa/include/asm/processor.h | |||
@@ -191,5 +191,25 @@ extern unsigned long get_wchan(struct task_struct *p); | |||
191 | #define set_sr(x,sr) ({unsigned int v=(unsigned int)x; WSR(v,sr);}) | 191 | #define set_sr(x,sr) ({unsigned int v=(unsigned int)x; WSR(v,sr);}) |
192 | #define get_sr(sr) ({unsigned int v; RSR(v,sr); v; }) | 192 | #define get_sr(sr) ({unsigned int v; RSR(v,sr); v; }) |
193 | 193 | ||
194 | #ifndef XCHAL_HAVE_EXTERN_REGS | ||
195 | #define XCHAL_HAVE_EXTERN_REGS 0 | ||
196 | #endif | ||
197 | |||
198 | #if XCHAL_HAVE_EXTERN_REGS | ||
199 | |||
200 | static inline void set_er(unsigned long value, unsigned long addr) | ||
201 | { | ||
202 | asm volatile ("wer %0, %1" : : "a" (value), "a" (addr) : "memory"); | ||
203 | } | ||
204 | |||
205 | static inline unsigned long get_er(unsigned long addr) | ||
206 | { | ||
207 | register unsigned long value; | ||
208 | asm volatile ("rer %0, %1" : "=a" (value) : "a" (addr) : "memory"); | ||
209 | return value; | ||
210 | } | ||
211 | |||
212 | #endif /* XCHAL_HAVE_EXTERN_REGS */ | ||
213 | |||
194 | #endif /* __ASSEMBLY__ */ | 214 | #endif /* __ASSEMBLY__ */ |
195 | #endif /* _XTENSA_PROCESSOR_H */ | 215 | #endif /* _XTENSA_PROCESSOR_H */ |
diff --git a/arch/xtensa/include/asm/ptrace.h b/arch/xtensa/include/asm/ptrace.h index 81f31bc9dde0..598e752dcbcd 100644 --- a/arch/xtensa/include/asm/ptrace.h +++ b/arch/xtensa/include/asm/ptrace.h | |||
@@ -59,9 +59,17 @@ struct pt_regs { | |||
59 | (task_stack_page(tsk) + KERNEL_STACK_SIZE - (XCHAL_NUM_AREGS-16)*4) - 1) | 59 | (task_stack_page(tsk) + KERNEL_STACK_SIZE - (XCHAL_NUM_AREGS-16)*4) - 1) |
60 | # define user_mode(regs) (((regs)->ps & 0x00000020)!=0) | 60 | # define user_mode(regs) (((regs)->ps & 0x00000020)!=0) |
61 | # define instruction_pointer(regs) ((regs)->pc) | 61 | # define instruction_pointer(regs) ((regs)->pc) |
62 | # define return_pointer(regs) (MAKE_PC_FROM_RA((regs)->areg[0], \ | ||
63 | (regs)->areg[1])) | ||
62 | 64 | ||
63 | # ifndef CONFIG_SMP | 65 | # ifndef CONFIG_SMP |
64 | # define profile_pc(regs) instruction_pointer(regs) | 66 | # define profile_pc(regs) instruction_pointer(regs) |
67 | # else | ||
68 | # define profile_pc(regs) \ | ||
69 | ({ \ | ||
70 | in_lock_functions(instruction_pointer(regs)) ? \ | ||
71 | return_pointer(regs) : instruction_pointer(regs); \ | ||
72 | }) | ||
65 | # endif | 73 | # endif |
66 | 74 | ||
67 | #define user_stack_pointer(regs) ((regs)->areg[1]) | 75 | #define user_stack_pointer(regs) ((regs)->areg[1]) |
diff --git a/arch/xtensa/include/asm/smp.h b/arch/xtensa/include/asm/smp.h index 83c569e3bdbd..4e43f5643891 100644 --- a/arch/xtensa/include/asm/smp.h +++ b/arch/xtensa/include/asm/smp.h | |||
@@ -1,27 +1,43 @@ | |||
1 | /* | 1 | /* |
2 | * include/asm-xtensa/smp.h | ||
3 | * | ||
4 | * This file is subject to the terms and conditions of the GNU General Public | 2 | * This file is subject to the terms and conditions of the GNU General Public |
5 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
6 | * for more details. | 4 | * for more details. |
7 | * | 5 | * |
8 | * Copyright (C) 2001 - 2005 Tensilica Inc. | 6 | * Copyright (C) 2001 - 2013 Tensilica Inc. |
9 | */ | 7 | */ |
10 | 8 | ||
11 | #ifndef _XTENSA_SMP_H | 9 | #ifndef _XTENSA_SMP_H |
12 | #define _XTENSA_SMP_H | 10 | #define _XTENSA_SMP_H |
13 | 11 | ||
14 | extern struct xtensa_cpuinfo boot_cpu_data; | 12 | #ifdef CONFIG_SMP |
15 | 13 | ||
16 | #define cpu_data (&boot_cpu_data) | 14 | #define raw_smp_processor_id() (current_thread_info()->cpu) |
17 | #define current_cpu_data boot_cpu_data | 15 | #define cpu_logical_map(cpu) (cpu) |
18 | 16 | ||
19 | struct xtensa_cpuinfo { | 17 | struct start_info { |
20 | unsigned long *pgd_cache; | 18 | unsigned long stack; |
21 | unsigned long *pte_cache; | ||
22 | unsigned long pgtable_cache_sz; | ||
23 | }; | 19 | }; |
20 | extern struct start_info start_info; | ||
24 | 21 | ||
25 | #define cpu_logical_map(cpu) (cpu) | 22 | struct cpumask; |
23 | void arch_send_call_function_ipi_mask(const struct cpumask *mask); | ||
24 | void arch_send_call_function_single_ipi(int cpu); | ||
25 | |||
26 | void smp_init_cpus(void); | ||
27 | void secondary_init_irq(void); | ||
28 | void ipi_init(void); | ||
29 | struct seq_file; | ||
30 | void show_ipi_list(struct seq_file *p, int prec); | ||
31 | |||
32 | #ifdef CONFIG_HOTPLUG_CPU | ||
33 | |||
34 | void __cpu_die(unsigned int cpu); | ||
35 | int __cpu_disable(void); | ||
36 | void cpu_die(void); | ||
37 | void cpu_restart(void); | ||
38 | |||
39 | #endif /* CONFIG_HOTPLUG_CPU */ | ||
40 | |||
41 | #endif /* CONFIG_SMP */ | ||
26 | 42 | ||
27 | #endif /* _XTENSA_SMP_H */ | 43 | #endif /* _XTENSA_SMP_H */ |
diff --git a/arch/xtensa/include/asm/spinlock.h b/arch/xtensa/include/asm/spinlock.h index 03975906b36f..1d95fa5dcd10 100644 --- a/arch/xtensa/include/asm/spinlock.h +++ b/arch/xtensa/include/asm/spinlock.h | |||
@@ -28,13 +28,13 @@ | |||
28 | * 1 somebody owns the spinlock | 28 | * 1 somebody owns the spinlock |
29 | */ | 29 | */ |
30 | 30 | ||
31 | #define __raw_spin_is_locked(x) ((x)->slock != 0) | 31 | #define arch_spin_is_locked(x) ((x)->slock != 0) |
32 | #define __raw_spin_unlock_wait(lock) \ | 32 | #define arch_spin_unlock_wait(lock) \ |
33 | do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) | 33 | do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0) |
34 | 34 | ||
35 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) | 35 | #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) |
36 | 36 | ||
37 | static inline void __raw_spin_lock(raw_spinlock_t *lock) | 37 | static inline void arch_spin_lock(arch_spinlock_t *lock) |
38 | { | 38 | { |
39 | unsigned long tmp; | 39 | unsigned long tmp; |
40 | 40 | ||
@@ -51,7 +51,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) | |||
51 | 51 | ||
52 | /* Returns 1 if the lock is obtained, 0 otherwise. */ | 52 | /* Returns 1 if the lock is obtained, 0 otherwise. */ |
53 | 53 | ||
54 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) | 54 | static inline int arch_spin_trylock(arch_spinlock_t *lock) |
55 | { | 55 | { |
56 | unsigned long tmp; | 56 | unsigned long tmp; |
57 | 57 | ||
@@ -67,7 +67,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock) | |||
67 | return tmp == 0 ? 1 : 0; | 67 | return tmp == 0 ? 1 : 0; |
68 | } | 68 | } |
69 | 69 | ||
70 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) | 70 | static inline void arch_spin_unlock(arch_spinlock_t *lock) |
71 | { | 71 | { |
72 | unsigned long tmp; | 72 | unsigned long tmp; |
73 | 73 | ||
@@ -96,9 +96,9 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock) | |||
96 | * 0x80000000 one writer owns the rwlock, no other writers, no readers | 96 | * 0x80000000 one writer owns the rwlock, no other writers, no readers |
97 | */ | 97 | */ |
98 | 98 | ||
99 | #define __raw_write_can_lock(x) ((x)->lock == 0) | 99 | #define arch_write_can_lock(x) ((x)->lock == 0) |
100 | 100 | ||
101 | static inline void __raw_write_lock(raw_rwlock_t *rw) | 101 | static inline void arch_write_lock(arch_rwlock_t *rw) |
102 | { | 102 | { |
103 | unsigned long tmp; | 103 | unsigned long tmp; |
104 | 104 | ||
@@ -116,7 +116,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw) | |||
116 | 116 | ||
117 | /* Returns 1 if the lock is obtained, 0 otherwise. */ | 117 | /* Returns 1 if the lock is obtained, 0 otherwise. */ |
118 | 118 | ||
119 | static inline int __raw_write_trylock(raw_rwlock_t *rw) | 119 | static inline int arch_write_trylock(arch_rwlock_t *rw) |
120 | { | 120 | { |
121 | unsigned long tmp; | 121 | unsigned long tmp; |
122 | 122 | ||
@@ -133,7 +133,7 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw) | |||
133 | return tmp == 0 ? 1 : 0; | 133 | return tmp == 0 ? 1 : 0; |
134 | } | 134 | } |
135 | 135 | ||
136 | static inline void __raw_write_unlock(raw_rwlock_t *rw) | 136 | static inline void arch_write_unlock(arch_rwlock_t *rw) |
137 | { | 137 | { |
138 | unsigned long tmp; | 138 | unsigned long tmp; |
139 | 139 | ||
@@ -145,7 +145,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw) | |||
145 | : "memory"); | 145 | : "memory"); |
146 | } | 146 | } |
147 | 147 | ||
148 | static inline void __raw_read_lock(raw_rwlock_t *rw) | 148 | static inline void arch_read_lock(arch_rwlock_t *rw) |
149 | { | 149 | { |
150 | unsigned long tmp; | 150 | unsigned long tmp; |
151 | unsigned long result; | 151 | unsigned long result; |
@@ -164,7 +164,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw) | |||
164 | 164 | ||
165 | /* Returns 1 if the lock is obtained, 0 otherwise. */ | 165 | /* Returns 1 if the lock is obtained, 0 otherwise. */ |
166 | 166 | ||
167 | static inline int __raw_read_trylock(raw_rwlock_t *rw) | 167 | static inline int arch_read_trylock(arch_rwlock_t *rw) |
168 | { | 168 | { |
169 | unsigned long result; | 169 | unsigned long result; |
170 | unsigned long tmp; | 170 | unsigned long tmp; |
@@ -184,7 +184,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw) | |||
184 | return result == 0; | 184 | return result == 0; |
185 | } | 185 | } |
186 | 186 | ||
187 | static inline void __raw_read_unlock(raw_rwlock_t *rw) | 187 | static inline void arch_read_unlock(arch_rwlock_t *rw) |
188 | { | 188 | { |
189 | unsigned long tmp1, tmp2; | 189 | unsigned long tmp1, tmp2; |
190 | 190 | ||
@@ -199,4 +199,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw) | |||
199 | : "memory"); | 199 | : "memory"); |
200 | } | 200 | } |
201 | 201 | ||
202 | #define arch_read_lock_flags(lock, flags) arch_read_lock(lock) | ||
203 | #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) | ||
204 | |||
202 | #endif /* _XTENSA_SPINLOCK_H */ | 205 | #endif /* _XTENSA_SPINLOCK_H */ |
diff --git a/arch/xtensa/include/asm/spinlock_types.h b/arch/xtensa/include/asm/spinlock_types.h new file mode 100644 index 000000000000..7ec5ce10c9e9 --- /dev/null +++ b/arch/xtensa/include/asm/spinlock_types.h | |||
@@ -0,0 +1,20 @@ | |||
1 | #ifndef __ASM_SPINLOCK_TYPES_H | ||
2 | #define __ASM_SPINLOCK_TYPES_H | ||
3 | |||
4 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
5 | # error "please don't include this file directly" | ||
6 | #endif | ||
7 | |||
8 | typedef struct { | ||
9 | volatile unsigned int slock; | ||
10 | } arch_spinlock_t; | ||
11 | |||
12 | #define __ARCH_SPIN_LOCK_UNLOCKED { 0 } | ||
13 | |||
14 | typedef struct { | ||
15 | volatile unsigned int lock; | ||
16 | } arch_rwlock_t; | ||
17 | |||
18 | #define __ARCH_RW_LOCK_UNLOCKED { 0 } | ||
19 | |||
20 | #endif | ||
diff --git a/arch/xtensa/include/asm/timex.h b/arch/xtensa/include/asm/timex.h index 27fa3c170662..ca929e6a38b5 100644 --- a/arch/xtensa/include/asm/timex.h +++ b/arch/xtensa/include/asm/timex.h | |||
@@ -1,18 +1,14 @@ | |||
1 | /* | 1 | /* |
2 | * include/asm-xtensa/timex.h | ||
3 | * | ||
4 | * This file is subject to the terms and conditions of the GNU General Public | 2 | * This file is subject to the terms and conditions of the GNU General Public |
5 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
6 | * for more details. | 4 | * for more details. |
7 | * | 5 | * |
8 | * Copyright (C) 2001 - 2008 Tensilica Inc. | 6 | * Copyright (C) 2001 - 2013 Tensilica Inc. |
9 | */ | 7 | */ |
10 | 8 | ||
11 | #ifndef _XTENSA_TIMEX_H | 9 | #ifndef _XTENSA_TIMEX_H |
12 | #define _XTENSA_TIMEX_H | 10 | #define _XTENSA_TIMEX_H |
13 | 11 | ||
14 | #ifdef __KERNEL__ | ||
15 | |||
16 | #include <asm/processor.h> | 12 | #include <asm/processor.h> |
17 | #include <linux/stringify.h> | 13 | #include <linux/stringify.h> |
18 | 14 | ||
@@ -39,14 +35,9 @@ extern unsigned long ccount_freq; | |||
39 | 35 | ||
40 | typedef unsigned long long cycles_t; | 36 | typedef unsigned long long cycles_t; |
41 | 37 | ||
42 | /* | ||
43 | * Only used for SMP. | ||
44 | */ | ||
45 | |||
46 | extern cycles_t cacheflush_time; | ||
47 | |||
48 | #define get_cycles() (0) | 38 | #define get_cycles() (0) |
49 | 39 | ||
40 | void local_timer_setup(unsigned cpu); | ||
50 | 41 | ||
51 | /* | 42 | /* |
52 | * Register access. | 43 | * Register access. |
@@ -81,5 +72,4 @@ static inline void set_linux_timer (unsigned long ccompare) | |||
81 | WSR_CCOMPARE(LINUX_TIMER, ccompare); | 72 | WSR_CCOMPARE(LINUX_TIMER, ccompare); |
82 | } | 73 | } |
83 | 74 | ||
84 | #endif /* __KERNEL__ */ | ||
85 | #endif /* _XTENSA_TIMEX_H */ | 75 | #endif /* _XTENSA_TIMEX_H */ |
diff --git a/arch/xtensa/include/asm/tlbflush.h b/arch/xtensa/include/asm/tlbflush.h index 43dd348a5a47..fc34274ce41b 100644 --- a/arch/xtensa/include/asm/tlbflush.h +++ b/arch/xtensa/include/asm/tlbflush.h | |||
@@ -1,18 +1,14 @@ | |||
1 | /* | 1 | /* |
2 | * include/asm-xtensa/tlbflush.h | ||
3 | * | ||
4 | * This file is subject to the terms and conditions of the GNU General Public | 2 | * This file is subject to the terms and conditions of the GNU General Public |
5 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
6 | * for more details. | 4 | * for more details. |
7 | * | 5 | * |
8 | * Copyright (C) 2001 - 2005 Tensilica Inc. | 6 | * Copyright (C) 2001 - 2013 Tensilica Inc. |
9 | */ | 7 | */ |
10 | 8 | ||
11 | #ifndef _XTENSA_TLBFLUSH_H | 9 | #ifndef _XTENSA_TLBFLUSH_H |
12 | #define _XTENSA_TLBFLUSH_H | 10 | #define _XTENSA_TLBFLUSH_H |
13 | 11 | ||
14 | #ifdef __KERNEL__ | ||
15 | |||
16 | #include <linux/stringify.h> | 12 | #include <linux/stringify.h> |
17 | #include <asm/processor.h> | 13 | #include <asm/processor.h> |
18 | 14 | ||
@@ -34,12 +30,37 @@ | |||
34 | * - flush_tlb_range(mm, start, end) flushes a range of pages | 30 | * - flush_tlb_range(mm, start, end) flushes a range of pages |
35 | */ | 31 | */ |
36 | 32 | ||
37 | extern void flush_tlb_all(void); | 33 | void local_flush_tlb_all(void); |
38 | extern void flush_tlb_mm(struct mm_struct*); | 34 | void local_flush_tlb_mm(struct mm_struct *mm); |
39 | extern void flush_tlb_page(struct vm_area_struct*,unsigned long); | 35 | void local_flush_tlb_page(struct vm_area_struct *vma, |
40 | extern void flush_tlb_range(struct vm_area_struct*,unsigned long,unsigned long); | 36 | unsigned long page); |
37 | void local_flush_tlb_range(struct vm_area_struct *vma, | ||
38 | unsigned long start, unsigned long end); | ||
39 | |||
40 | #ifdef CONFIG_SMP | ||
41 | |||
42 | void flush_tlb_all(void); | ||
43 | void flush_tlb_mm(struct mm_struct *); | ||
44 | void flush_tlb_page(struct vm_area_struct *, unsigned long); | ||
45 | void flush_tlb_range(struct vm_area_struct *, unsigned long, | ||
46 | unsigned long); | ||
47 | |||
48 | static inline void flush_tlb_kernel_range(unsigned long start, | ||
49 | unsigned long end) | ||
50 | { | ||
51 | flush_tlb_all(); | ||
52 | } | ||
53 | |||
54 | #else /* !CONFIG_SMP */ | ||
55 | |||
56 | #define flush_tlb_all() local_flush_tlb_all() | ||
57 | #define flush_tlb_mm(mm) local_flush_tlb_mm(mm) | ||
58 | #define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page) | ||
59 | #define flush_tlb_range(vma, vmaddr, end) local_flush_tlb_range(vma, vmaddr, \ | ||
60 | end) | ||
61 | #define flush_tlb_kernel_range(start, end) local_flush_tlb_all() | ||
41 | 62 | ||
42 | #define flush_tlb_kernel_range(start,end) flush_tlb_all() | 63 | #endif /* CONFIG_SMP */ |
43 | 64 | ||
44 | /* TLB operations. */ | 65 | /* TLB operations. */ |
45 | 66 | ||
@@ -187,5 +208,4 @@ static inline unsigned long read_itlb_translation (int way) | |||
187 | } | 208 | } |
188 | 209 | ||
189 | #endif /* __ASSEMBLY__ */ | 210 | #endif /* __ASSEMBLY__ */ |
190 | #endif /* __KERNEL__ */ | ||
191 | #endif /* _XTENSA_TLBFLUSH_H */ | 211 | #endif /* _XTENSA_TLBFLUSH_H */ |
diff --git a/arch/xtensa/include/asm/traps.h b/arch/xtensa/include/asm/traps.h index 917488a0ab00..8c194f6af45e 100644 --- a/arch/xtensa/include/asm/traps.h +++ b/arch/xtensa/include/asm/traps.h | |||
@@ -19,6 +19,7 @@ | |||
19 | */ | 19 | */ |
20 | extern void * __init trap_set_handler(int cause, void *handler); | 20 | extern void * __init trap_set_handler(int cause, void *handler); |
21 | extern void do_unhandled(struct pt_regs *regs, unsigned long exccause); | 21 | extern void do_unhandled(struct pt_regs *regs, unsigned long exccause); |
22 | void secondary_trap_init(void); | ||
22 | 23 | ||
23 | static inline void spill_registers(void) | 24 | static inline void spill_registers(void) |
24 | { | 25 | { |
diff --git a/arch/xtensa/include/asm/vectors.h b/arch/xtensa/include/asm/vectors.h index c52b656d0310..5791b45d5a5d 100644 --- a/arch/xtensa/include/asm/vectors.h +++ b/arch/xtensa/include/asm/vectors.h | |||
@@ -20,6 +20,17 @@ | |||
20 | 20 | ||
21 | #include <variant/core.h> | 21 | #include <variant/core.h> |
22 | 22 | ||
23 | #define XCHAL_KIO_CACHED_VADDR 0xe0000000 | ||
24 | #define XCHAL_KIO_BYPASS_VADDR 0xf0000000 | ||
25 | #define XCHAL_KIO_DEFAULT_PADDR 0xf0000000 | ||
26 | #define XCHAL_KIO_SIZE 0x10000000 | ||
27 | |||
28 | #if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && CONFIG_OF | ||
29 | #define XCHAL_KIO_PADDR xtensa_get_kio_paddr() | ||
30 | #else | ||
31 | #define XCHAL_KIO_PADDR XCHAL_KIO_DEFAULT_PADDR | ||
32 | #endif | ||
33 | |||
23 | #if defined(CONFIG_MMU) | 34 | #if defined(CONFIG_MMU) |
24 | 35 | ||
25 | /* Will Become VECBASE */ | 36 | /* Will Become VECBASE */ |
@@ -30,11 +41,9 @@ | |||
30 | 41 | ||
31 | #if defined(XCHAL_HAVE_PTP_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY | 42 | #if defined(XCHAL_HAVE_PTP_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY |
32 | /* MMU v3 - XCHAL_HAVE_PTP_MMU == 1 */ | 43 | /* MMU v3 - XCHAL_HAVE_PTP_MMU == 1 */ |
33 | #define PHYSICAL_MEMORY_ADDRESS 0x00000000 | ||
34 | #define LOAD_MEMORY_ADDRESS 0x00003000 | 44 | #define LOAD_MEMORY_ADDRESS 0x00003000 |
35 | #else | 45 | #else |
36 | /* MMU V2 - XCHAL_HAVE_PTP_MMU == 0 */ | 46 | /* MMU V2 - XCHAL_HAVE_PTP_MMU == 0 */ |
37 | #define PHYSICAL_MEMORY_ADDRESS 0xD0000000 | ||
38 | #define LOAD_MEMORY_ADDRESS 0xD0003000 | 47 | #define LOAD_MEMORY_ADDRESS 0xD0003000 |
39 | #endif | 48 | #endif |
40 | 49 | ||
@@ -46,7 +55,6 @@ | |||
46 | 55 | ||
47 | /* Location of the start of the kernel text, _start */ | 56 | /* Location of the start of the kernel text, _start */ |
48 | #define KERNELOFFSET 0x00003000 | 57 | #define KERNELOFFSET 0x00003000 |
49 | #define PHYSICAL_MEMORY_ADDRESS 0x00000000 | ||
50 | 58 | ||
51 | /* Loaded just above possibly live vectors */ | 59 | /* Loaded just above possibly live vectors */ |
52 | #define LOAD_MEMORY_ADDRESS 0x00003000 | 60 | #define LOAD_MEMORY_ADDRESS 0x00003000 |
@@ -54,7 +62,6 @@ | |||
54 | #endif /* CONFIG_MMU */ | 62 | #endif /* CONFIG_MMU */ |
55 | 63 | ||
56 | #define XC_VADDR(offset) (VIRTUAL_MEMORY_ADDRESS + offset) | 64 | #define XC_VADDR(offset) (VIRTUAL_MEMORY_ADDRESS + offset) |
57 | #define XC_PADDR(offset) (PHYSICAL_MEMORY_ADDRESS + offset) | ||
58 | 65 | ||
59 | /* Used to set VECBASE register */ | 66 | /* Used to set VECBASE register */ |
60 | #define VECBASE_RESET_VADDR VIRTUAL_MEMORY_ADDRESS | 67 | #define VECBASE_RESET_VADDR VIRTUAL_MEMORY_ADDRESS |
@@ -67,7 +74,7 @@ | |||
67 | VECBASE_RESET_VADDR) | 74 | VECBASE_RESET_VADDR) |
68 | #define RESET_VECTOR1_VADDR XC_VADDR(RESET_VECTOR1_VECOFS) | 75 | #define RESET_VECTOR1_VADDR XC_VADDR(RESET_VECTOR1_VECOFS) |
69 | 76 | ||
70 | #if XCHAL_HAVE_VECBASE | 77 | #if defined(XCHAL_HAVE_VECBASE) && XCHAL_HAVE_VECBASE |
71 | 78 | ||
72 | #define USER_VECTOR_VADDR XC_VADDR(XCHAL_USER_VECOFS) | 79 | #define USER_VECTOR_VADDR XC_VADDR(XCHAL_USER_VECOFS) |
73 | #define KERNEL_VECTOR_VADDR XC_VADDR(XCHAL_KERNEL_VECOFS) | 80 | #define KERNEL_VECTOR_VADDR XC_VADDR(XCHAL_KERNEL_VECOFS) |
@@ -81,11 +88,9 @@ | |||
81 | 88 | ||
82 | #define DEBUG_VECTOR_VADDR XC_VADDR(XCHAL_DEBUG_VECOFS) | 89 | #define DEBUG_VECTOR_VADDR XC_VADDR(XCHAL_DEBUG_VECOFS) |
83 | 90 | ||
84 | #undef XCHAL_NMI_VECTOR_VADDR | 91 | #define NMI_VECTOR_VADDR XC_VADDR(XCHAL_NMI_VECOFS) |
85 | #define XCHAL_NMI_VECTOR_VADDR XC_VADDR(XCHAL_NMI_VECOFS) | ||
86 | 92 | ||
87 | #undef XCHAL_INTLEVEL7_VECTOR_VADDR | 93 | #define INTLEVEL7_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL7_VECOFS) |
88 | #define XCHAL_INTLEVEL7_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL7_VECOFS) | ||
89 | 94 | ||
90 | /* | 95 | /* |
91 | * These XCHAL_* #defines from varian/core.h | 96 | * These XCHAL_* #defines from varian/core.h |