diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2014-01-25 13:49:30 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-01-25 13:49:30 -0500 |
commit | 9b83d851a2bdd021e2135999e5bce3eb8fef94e6 (patch) | |
tree | b8703e813b1d8d66fb262cd757f54f3c05966d50 /arch | |
parent | 2d08cd0ef89a24f5eb6c6801c48cd06bca230d6d (diff) | |
parent | 9ed82c6866e2ab671935a75ea454047e8bddb177 (diff) |
Merge tag 'xtensa-next-20140123' of git://github.com/czankel/xtensa-linux
Pull Xtensa patches from Chris Zankel:
"The major changes are adding support for SMP for Xtensa, fixing and
cleaning up the ISS (simulator) network driver, and better support for
device trees"
* tag 'xtensa-next-20140123' of git://github.com/czankel/xtensa-linux: (40 commits)
xtensa: implement ndelay
xtensa: clean up udelay
xtensa: enable HAVE_PERF_EVENTS
xtensa: remap io area defined in device tree
xtensa: support default device tree buses
xtensa: initialize device tree clock sources
xtensa: xtfpga: fix definitions of platform devices
xtensa: standardize devicetree cpu compatible strings
xtensa: avoid duplicate of IO range definitions
xtensa: fix ATOMCTL register documentation
xtensa: Enable irqs after cpu is set online
xtensa: ISS: raise network polling rate to 10 times/sec
xtensa: remove unused XTENSA_ISS_NETWORK Kconfig parameter
xtensa: ISS: avoid simple_strtoul usage
xtensa: Switch to sched_clock_register()
xtensa: implement CPU hotplug
xtensa: add SMP support
xtensa: add MX irqchip
xtensa: clear timer IRQ unconditionally in its handler
xtensa: clean up do_interrupt/do_IRQ
...
Diffstat (limited to 'arch')
45 files changed, 1813 insertions, 636 deletions
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index f8df0cc70cb6..ba56e11cbf77 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig | |||
@@ -9,7 +9,6 @@ config XTENSA | |||
9 | select GENERIC_CLOCKEVENTS | 9 | select GENERIC_CLOCKEVENTS |
10 | select VIRT_TO_BUS | 10 | select VIRT_TO_BUS |
11 | select GENERIC_IRQ_SHOW | 11 | select GENERIC_IRQ_SHOW |
12 | select GENERIC_CPU_DEVICES | ||
13 | select GENERIC_SCHED_CLOCK | 12 | select GENERIC_SCHED_CLOCK |
14 | select MODULES_USE_ELF_RELA | 13 | select MODULES_USE_ELF_RELA |
15 | select GENERIC_PCI_IOMAP | 14 | select GENERIC_PCI_IOMAP |
@@ -19,6 +18,8 @@ config XTENSA | |||
19 | select IRQ_DOMAIN | 18 | select IRQ_DOMAIN |
20 | select HAVE_OPROFILE | 19 | select HAVE_OPROFILE |
21 | select HAVE_FUNCTION_TRACER | 20 | select HAVE_FUNCTION_TRACER |
21 | select HAVE_IRQ_TIME_ACCOUNTING | ||
22 | select HAVE_PERF_EVENTS | ||
22 | help | 23 | help |
23 | Xtensa processors are 32-bit RISC machines designed by Tensilica | 24 | Xtensa processors are 32-bit RISC machines designed by Tensilica |
24 | primarily for embedded systems. These processors are both | 25 | primarily for embedded systems. These processors are both |
@@ -67,6 +68,9 @@ config VARIANT_IRQ_SWITCH | |||
67 | config HAVE_XTENSA_GPIO32 | 68 | config HAVE_XTENSA_GPIO32 |
68 | def_bool n | 69 | def_bool n |
69 | 70 | ||
71 | config MAY_HAVE_SMP | ||
72 | def_bool n | ||
73 | |||
70 | menu "Processor type and features" | 74 | menu "Processor type and features" |
71 | 75 | ||
72 | choice | 76 | choice |
@@ -110,6 +114,48 @@ config XTENSA_UNALIGNED_USER | |||
110 | 114 | ||
111 | source "kernel/Kconfig.preempt" | 115 | source "kernel/Kconfig.preempt" |
112 | 116 | ||
117 | config HAVE_SMP | ||
118 | bool "System Supports SMP (MX)" | ||
119 | depends on MAY_HAVE_SMP | ||
120 | select XTENSA_MX | ||
121 | help | ||
122 | This option is used to indicate that the system-on-a-chip (SOC) | ||
123 | supports Multiprocessing. Multiprocessor support implemented above | ||
124 | the CPU core definition and currently needs to be selected manually. | ||
125 | |||
126 | Multiprocessor support is implemented with external cache and | ||
127 | interrupt controllers. | ||
128 | |||
129 | The MX interrupt distributor adds Interprocessor Interrupts | ||
130 | and causes the IRQ numbers to be increased by 4 for devices | ||
131 | like the open cores ethernet driver and the serial interface. | ||
132 | |||
133 | You still have to select "Enable SMP" to enable SMP on this SOC. | ||
134 | |||
135 | config SMP | ||
136 | bool "Enable Symmetric multi-processing support" | ||
137 | depends on HAVE_SMP | ||
138 | select USE_GENERIC_SMP_HELPERS | ||
139 | select GENERIC_SMP_IDLE_THREAD | ||
140 | help | ||
141 | Enables SMP software; allows more than one CPU/CORE | ||
142 | to be activated during startup. | ||
143 | |||
144 | config NR_CPUS | ||
145 | depends on SMP | ||
146 | int "Maximum number of CPUs (2-32)" | ||
147 | range 2 32 | ||
148 | default "4" | ||
149 | |||
150 | config HOTPLUG_CPU | ||
151 | bool "Enable CPU hotplug support" | ||
152 | depends on SMP | ||
153 | help | ||
154 | Say Y here to allow turning CPUs off and on. CPUs can be | ||
155 | controlled through /sys/devices/system/cpu. | ||
156 | |||
157 | Say N if you want to disable CPU hotplug. | ||
158 | |||
113 | config MATH_EMULATION | 159 | config MATH_EMULATION |
114 | bool "Math emulation" | 160 | bool "Math emulation" |
115 | help | 161 | help |
@@ -156,9 +202,6 @@ config XTENSA_CALIBRATE_CCOUNT | |||
156 | config SERIAL_CONSOLE | 202 | config SERIAL_CONSOLE |
157 | def_bool n | 203 | def_bool n |
158 | 204 | ||
159 | config XTENSA_ISS_NETWORK | ||
160 | def_bool n | ||
161 | |||
162 | menu "Bus options" | 205 | menu "Bus options" |
163 | 206 | ||
164 | config PCI | 207 | config PCI |
@@ -185,7 +228,6 @@ config XTENSA_PLATFORM_ISS | |||
185 | depends on TTY | 228 | depends on TTY |
186 | select XTENSA_CALIBRATE_CCOUNT | 229 | select XTENSA_CALIBRATE_CCOUNT |
187 | select SERIAL_CONSOLE | 230 | select SERIAL_CONSOLE |
188 | select XTENSA_ISS_NETWORK | ||
189 | help | 231 | help |
190 | ISS is an acronym for Tensilica's Instruction Set Simulator. | 232 | ISS is an acronym for Tensilica's Instruction Set Simulator. |
191 | 233 | ||
diff --git a/arch/xtensa/boot/dts/lx60.dts b/arch/xtensa/boot/dts/lx60.dts index 2eab3658e1bd..a0f8b8ad3920 100644 --- a/arch/xtensa/boot/dts/lx60.dts +++ b/arch/xtensa/boot/dts/lx60.dts | |||
@@ -3,7 +3,7 @@ | |||
3 | /include/ "xtfpga-flash-4m.dtsi" | 3 | /include/ "xtfpga-flash-4m.dtsi" |
4 | 4 | ||
5 | / { | 5 | / { |
6 | compatible = "xtensa,lx60"; | 6 | compatible = "cdns,xtensa-lx60"; |
7 | memory@0 { | 7 | memory@0 { |
8 | device_type = "memory"; | 8 | device_type = "memory"; |
9 | reg = <0x00000000 0x04000000>; | 9 | reg = <0x00000000 0x04000000>; |
diff --git a/arch/xtensa/boot/dts/ml605.dts b/arch/xtensa/boot/dts/ml605.dts index 6ed51d6554e6..905c3a5035e9 100644 --- a/arch/xtensa/boot/dts/ml605.dts +++ b/arch/xtensa/boot/dts/ml605.dts | |||
@@ -3,7 +3,7 @@ | |||
3 | /include/ "xtfpga-flash-16m.dtsi" | 3 | /include/ "xtfpga-flash-16m.dtsi" |
4 | 4 | ||
5 | / { | 5 | / { |
6 | compatible = "xtensa,ml605"; | 6 | compatible = "cdns,xtensa-ml605"; |
7 | memory@0 { | 7 | memory@0 { |
8 | device_type = "memory"; | 8 | device_type = "memory"; |
9 | reg = <0x00000000 0x08000000>; | 9 | reg = <0x00000000 0x08000000>; |
diff --git a/arch/xtensa/boot/dts/xtfpga.dtsi b/arch/xtensa/boot/dts/xtfpga.dtsi index 7eda6ecf7eef..46b4f5eab421 100644 --- a/arch/xtensa/boot/dts/xtfpga.dtsi +++ b/arch/xtensa/boot/dts/xtfpga.dtsi | |||
@@ -1,5 +1,5 @@ | |||
1 | / { | 1 | / { |
2 | compatible = "xtensa,xtfpga"; | 2 | compatible = "cdns,xtensa-xtfpga"; |
3 | #address-cells = <1>; | 3 | #address-cells = <1>; |
4 | #size-cells = <1>; | 4 | #size-cells = <1>; |
5 | interrupt-parent = <&pic>; | 5 | interrupt-parent = <&pic>; |
@@ -17,7 +17,7 @@ | |||
17 | #address-cells = <1>; | 17 | #address-cells = <1>; |
18 | #size-cells = <0>; | 18 | #size-cells = <0>; |
19 | cpu@0 { | 19 | cpu@0 { |
20 | compatible = "xtensa,cpu"; | 20 | compatible = "cdns,xtensa-cpu"; |
21 | reg = <0>; | 21 | reg = <0>; |
22 | /* Filled in by platform_setup from FPGA register | 22 | /* Filled in by platform_setup from FPGA register |
23 | * clock-frequency = <100000000>; | 23 | * clock-frequency = <100000000>; |
@@ -26,7 +26,7 @@ | |||
26 | }; | 26 | }; |
27 | 27 | ||
28 | pic: pic { | 28 | pic: pic { |
29 | compatible = "xtensa,pic"; | 29 | compatible = "cdns,xtensa-pic"; |
30 | /* one cell: internal irq number, | 30 | /* one cell: internal irq number, |
31 | * two cells: second cell == 0: internal irq number | 31 | * two cells: second cell == 0: internal irq number |
32 | * second cell == 1: external irq number | 32 | * second cell == 1: external irq number |
diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild index 228d6aee3a16..5851db291583 100644 --- a/arch/xtensa/include/asm/Kbuild +++ b/arch/xtensa/include/asm/Kbuild | |||
@@ -8,7 +8,6 @@ generic-y += emergency-restart.h | |||
8 | generic-y += errno.h | 8 | generic-y += errno.h |
9 | generic-y += exec.h | 9 | generic-y += exec.h |
10 | generic-y += fcntl.h | 10 | generic-y += fcntl.h |
11 | generic-y += futex.h | ||
12 | generic-y += hardirq.h | 11 | generic-y += hardirq.h |
13 | generic-y += ioctl.h | 12 | generic-y += ioctl.h |
14 | generic-y += irq_regs.h | 13 | generic-y += irq_regs.h |
diff --git a/arch/xtensa/include/asm/barrier.h b/arch/xtensa/include/asm/barrier.h index e1ee6b51dfc5..0a24b04d6b21 100644 --- a/arch/xtensa/include/asm/barrier.h +++ b/arch/xtensa/include/asm/barrier.h | |||
@@ -13,10 +13,6 @@ | |||
13 | #define rmb() barrier() | 13 | #define rmb() barrier() |
14 | #define wmb() mb() | 14 | #define wmb() mb() |
15 | 15 | ||
16 | #ifdef CONFIG_SMP | ||
17 | #error smp_* not defined | ||
18 | #endif | ||
19 | |||
20 | #include <asm-generic/barrier.h> | 16 | #include <asm-generic/barrier.h> |
21 | 17 | ||
22 | #endif /* _XTENSA_SYSTEM_H */ | 18 | #endif /* _XTENSA_SYSTEM_H */ |
diff --git a/arch/xtensa/include/asm/bitops.h b/arch/xtensa/include/asm/bitops.h index 84afe58d5d37..7b6873ae84c2 100644 --- a/arch/xtensa/include/asm/bitops.h +++ b/arch/xtensa/include/asm/bitops.h | |||
@@ -22,12 +22,8 @@ | |||
22 | #include <asm/processor.h> | 22 | #include <asm/processor.h> |
23 | #include <asm/byteorder.h> | 23 | #include <asm/byteorder.h> |
24 | 24 | ||
25 | #ifdef CONFIG_SMP | 25 | #define smp_mb__before_clear_bit() smp_mb() |
26 | # error SMP not supported on this architecture | 26 | #define smp_mb__after_clear_bit() smp_mb() |
27 | #endif | ||
28 | |||
29 | #define smp_mb__before_clear_bit() barrier() | ||
30 | #define smp_mb__after_clear_bit() barrier() | ||
31 | 27 | ||
32 | #include <asm-generic/bitops/non-atomic.h> | 28 | #include <asm-generic/bitops/non-atomic.h> |
33 | 29 | ||
diff --git a/arch/xtensa/include/asm/cacheflush.h b/arch/xtensa/include/asm/cacheflush.h index 127cd48883c4..555a98a18453 100644 --- a/arch/xtensa/include/asm/cacheflush.h +++ b/arch/xtensa/include/asm/cacheflush.h | |||
@@ -1,18 +1,14 @@ | |||
1 | /* | 1 | /* |
2 | * include/asm-xtensa/cacheflush.h | ||
3 | * | ||
4 | * This file is subject to the terms and conditions of the GNU General Public | 2 | * This file is subject to the terms and conditions of the GNU General Public |
5 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
6 | * for more details. | 4 | * for more details. |
7 | * | 5 | * |
8 | * (C) 2001 - 2007 Tensilica Inc. | 6 | * (C) 2001 - 2013 Tensilica Inc. |
9 | */ | 7 | */ |
10 | 8 | ||
11 | #ifndef _XTENSA_CACHEFLUSH_H | 9 | #ifndef _XTENSA_CACHEFLUSH_H |
12 | #define _XTENSA_CACHEFLUSH_H | 10 | #define _XTENSA_CACHEFLUSH_H |
13 | 11 | ||
14 | #ifdef __KERNEL__ | ||
15 | |||
16 | #include <linux/mm.h> | 12 | #include <linux/mm.h> |
17 | #include <asm/processor.h> | 13 | #include <asm/processor.h> |
18 | #include <asm/page.h> | 14 | #include <asm/page.h> |
@@ -51,7 +47,6 @@ extern void __invalidate_icache_page(unsigned long); | |||
51 | extern void __invalidate_icache_range(unsigned long, unsigned long); | 47 | extern void __invalidate_icache_range(unsigned long, unsigned long); |
52 | extern void __invalidate_dcache_range(unsigned long, unsigned long); | 48 | extern void __invalidate_dcache_range(unsigned long, unsigned long); |
53 | 49 | ||
54 | |||
55 | #if XCHAL_DCACHE_IS_WRITEBACK | 50 | #if XCHAL_DCACHE_IS_WRITEBACK |
56 | extern void __flush_invalidate_dcache_all(void); | 51 | extern void __flush_invalidate_dcache_all(void); |
57 | extern void __flush_dcache_page(unsigned long); | 52 | extern void __flush_dcache_page(unsigned long); |
@@ -87,9 +82,22 @@ static inline void __invalidate_icache_page_alias(unsigned long virt, | |||
87 | * (see also Documentation/cachetlb.txt) | 82 | * (see also Documentation/cachetlb.txt) |
88 | */ | 83 | */ |
89 | 84 | ||
90 | #if (DCACHE_WAY_SIZE > PAGE_SIZE) | 85 | #if (DCACHE_WAY_SIZE > PAGE_SIZE) || defined(CONFIG_SMP) |
86 | |||
87 | #ifdef CONFIG_SMP | ||
88 | void flush_cache_all(void); | ||
89 | void flush_cache_range(struct vm_area_struct*, ulong, ulong); | ||
90 | void flush_icache_range(unsigned long start, unsigned long end); | ||
91 | void flush_cache_page(struct vm_area_struct*, | ||
92 | unsigned long, unsigned long); | ||
93 | #else | ||
94 | #define flush_cache_all local_flush_cache_all | ||
95 | #define flush_cache_range local_flush_cache_range | ||
96 | #define flush_icache_range local_flush_icache_range | ||
97 | #define flush_cache_page local_flush_cache_page | ||
98 | #endif | ||
91 | 99 | ||
92 | #define flush_cache_all() \ | 100 | #define local_flush_cache_all() \ |
93 | do { \ | 101 | do { \ |
94 | __flush_invalidate_dcache_all(); \ | 102 | __flush_invalidate_dcache_all(); \ |
95 | __invalidate_icache_all(); \ | 103 | __invalidate_icache_all(); \ |
@@ -103,9 +111,11 @@ static inline void __invalidate_icache_page_alias(unsigned long virt, | |||
103 | 111 | ||
104 | #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 | 112 | #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 |
105 | extern void flush_dcache_page(struct page*); | 113 | extern void flush_dcache_page(struct page*); |
106 | extern void flush_cache_range(struct vm_area_struct*, ulong, ulong); | 114 | |
107 | extern void flush_cache_page(struct vm_area_struct*, | 115 | void local_flush_cache_range(struct vm_area_struct *vma, |
108 | unsigned long, unsigned long); | 116 | unsigned long start, unsigned long end); |
117 | void local_flush_cache_page(struct vm_area_struct *vma, | ||
118 | unsigned long address, unsigned long pfn); | ||
109 | 119 | ||
110 | #else | 120 | #else |
111 | 121 | ||
@@ -119,13 +129,14 @@ extern void flush_cache_page(struct vm_area_struct*, | |||
119 | #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 | 129 | #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 |
120 | #define flush_dcache_page(page) do { } while (0) | 130 | #define flush_dcache_page(page) do { } while (0) |
121 | 131 | ||
122 | #define flush_cache_page(vma,addr,pfn) do { } while (0) | 132 | #define flush_icache_range local_flush_icache_range |
123 | #define flush_cache_range(vma,start,end) do { } while (0) | 133 | #define flush_cache_page(vma, addr, pfn) do { } while (0) |
134 | #define flush_cache_range(vma, start, end) do { } while (0) | ||
124 | 135 | ||
125 | #endif | 136 | #endif |
126 | 137 | ||
127 | /* Ensure consistency between data and instruction cache. */ | 138 | /* Ensure consistency between data and instruction cache. */ |
128 | #define flush_icache_range(start,end) \ | 139 | #define local_flush_icache_range(start, end) \ |
129 | do { \ | 140 | do { \ |
130 | __flush_dcache_range(start, (end) - (start)); \ | 141 | __flush_dcache_range(start, (end) - (start)); \ |
131 | __invalidate_icache_range(start,(end) - (start)); \ | 142 | __invalidate_icache_range(start,(end) - (start)); \ |
@@ -253,5 +264,4 @@ static inline void flush_invalidate_dcache_unaligned(u32 addr, u32 size) | |||
253 | } | 264 | } |
254 | } | 265 | } |
255 | 266 | ||
256 | #endif /* __KERNEL__ */ | ||
257 | #endif /* _XTENSA_CACHEFLUSH_H */ | 267 | #endif /* _XTENSA_CACHEFLUSH_H */ |
diff --git a/arch/xtensa/include/asm/delay.h b/arch/xtensa/include/asm/delay.h index 3899610c1dff..24304b39a5c7 100644 --- a/arch/xtensa/include/asm/delay.h +++ b/arch/xtensa/include/asm/delay.h | |||
@@ -19,23 +19,57 @@ extern unsigned long loops_per_jiffy; | |||
19 | 19 | ||
20 | static inline void __delay(unsigned long loops) | 20 | static inline void __delay(unsigned long loops) |
21 | { | 21 | { |
22 | /* 2 cycles per loop. */ | 22 | if (__builtin_constant_p(loops) && loops < 2) |
23 | __asm__ __volatile__ ("1: addi %0, %0, -2; bgeui %0, 2, 1b" | 23 | __asm__ __volatile__ ("nop"); |
24 | : "=r" (loops) : "0" (loops)); | 24 | else if (loops >= 2) |
25 | /* 2 cycles per loop. */ | ||
26 | __asm__ __volatile__ ("1: addi %0, %0, -2; bgeui %0, 2, 1b" | ||
27 | : "+r" (loops)); | ||
25 | } | 28 | } |
26 | 29 | ||
27 | /* For SMP/NUMA systems, change boot_cpu_data to something like | 30 | /* Undefined function to get compile-time error */ |
28 | * local_cpu_data->... where local_cpu_data points to the current | 31 | void __bad_udelay(void); |
29 | * cpu. */ | 32 | void __bad_ndelay(void); |
30 | 33 | ||
31 | static __inline__ void udelay (unsigned long usecs) | 34 | #define __MAX_UDELAY 30000 |
35 | #define __MAX_NDELAY 30000 | ||
36 | |||
37 | static inline void __udelay(unsigned long usecs) | ||
32 | { | 38 | { |
33 | unsigned long start = get_ccount(); | 39 | unsigned long start = get_ccount(); |
34 | unsigned long cycles = usecs * (loops_per_jiffy / (1000000UL / HZ)); | 40 | unsigned long cycles = (usecs * (ccount_freq >> 15)) >> 5; |
35 | 41 | ||
36 | /* Note: all variables are unsigned (can wrap around)! */ | 42 | /* Note: all variables are unsigned (can wrap around)! */ |
37 | while (((unsigned long)get_ccount()) - start < cycles) | 43 | while (((unsigned long)get_ccount()) - start < cycles) |
38 | ; | 44 | cpu_relax(); |
45 | } | ||
46 | |||
47 | static inline void udelay(unsigned long usec) | ||
48 | { | ||
49 | if (__builtin_constant_p(usec) && usec >= __MAX_UDELAY) | ||
50 | __bad_udelay(); | ||
51 | else | ||
52 | __udelay(usec); | ||
53 | } | ||
54 | |||
55 | static inline void __ndelay(unsigned long nsec) | ||
56 | { | ||
57 | /* | ||
58 | * Inner shift makes sure multiplication doesn't overflow | ||
59 | * for legitimate nsec values | ||
60 | */ | ||
61 | unsigned long cycles = (nsec * (ccount_freq >> 15)) >> 15; | ||
62 | __delay(cycles); | ||
63 | } | ||
64 | |||
65 | #define ndelay(n) ndelay(n) | ||
66 | |||
67 | static inline void ndelay(unsigned long nsec) | ||
68 | { | ||
69 | if (__builtin_constant_p(nsec) && nsec >= __MAX_NDELAY) | ||
70 | __bad_ndelay(); | ||
71 | else | ||
72 | __ndelay(nsec); | ||
39 | } | 73 | } |
40 | 74 | ||
41 | #endif | 75 | #endif |
diff --git a/arch/xtensa/include/asm/ftrace.h b/arch/xtensa/include/asm/ftrace.h index 73cc3f482304..736b9d214d80 100644 --- a/arch/xtensa/include/asm/ftrace.h +++ b/arch/xtensa/include/asm/ftrace.h | |||
@@ -18,7 +18,7 @@ | |||
18 | __asm__ __volatile__ ( \ | 18 | __asm__ __volatile__ ( \ |
19 | "mov %0, a0\n" \ | 19 | "mov %0, a0\n" \ |
20 | "mov %1, a1\n" \ | 20 | "mov %1, a1\n" \ |
21 | : "=r"(a0), "=r"(a1) : : ); \ | 21 | : "=r"(a0), "=r"(a1)); \ |
22 | MAKE_PC_FROM_RA(a0, a1); }) | 22 | MAKE_PC_FROM_RA(a0, a1); }) |
23 | #ifdef CONFIG_FRAME_POINTER | 23 | #ifdef CONFIG_FRAME_POINTER |
24 | extern unsigned long return_address(unsigned level); | 24 | extern unsigned long return_address(unsigned level); |
diff --git a/arch/xtensa/include/asm/futex.h b/arch/xtensa/include/asm/futex.h new file mode 100644 index 000000000000..b39531babec0 --- /dev/null +++ b/arch/xtensa/include/asm/futex.h | |||
@@ -0,0 +1,147 @@ | |||
1 | /* | ||
2 | * Atomic futex routines | ||
3 | * | ||
4 | * Based on the PowerPC implementation | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * Copyright (C) 2013 TangoTec Ltd. | ||
11 | * | ||
12 | * Baruch Siach <baruch@tkos.co.il> | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_XTENSA_FUTEX_H | ||
16 | #define _ASM_XTENSA_FUTEX_H | ||
17 | |||
18 | #ifdef __KERNEL__ | ||
19 | |||
20 | #include <linux/futex.h> | ||
21 | #include <linux/uaccess.h> | ||
22 | #include <linux/errno.h> | ||
23 | |||
24 | #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \ | ||
25 | __asm__ __volatile( \ | ||
26 | "1: l32i %0, %2, 0\n" \ | ||
27 | insn "\n" \ | ||
28 | " wsr %0, scompare1\n" \ | ||
29 | "2: s32c1i %1, %2, 0\n" \ | ||
30 | " bne %1, %0, 1b\n" \ | ||
31 | " movi %1, 0\n" \ | ||
32 | "3:\n" \ | ||
33 | " .section .fixup,\"ax\"\n" \ | ||
34 | " .align 4\n" \ | ||
35 | "4: .long 3b\n" \ | ||
36 | "5: l32r %0, 4b\n" \ | ||
37 | " movi %1, %3\n" \ | ||
38 | " jx %0\n" \ | ||
39 | " .previous\n" \ | ||
40 | " .section __ex_table,\"a\"\n" \ | ||
41 | " .long 1b,5b,2b,5b\n" \ | ||
42 | " .previous\n" \ | ||
43 | : "=&r" (oldval), "=&r" (ret) \ | ||
44 | : "r" (uaddr), "I" (-EFAULT), "r" (oparg) \ | ||
45 | : "memory") | ||
46 | |||
47 | static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) | ||
48 | { | ||
49 | int op = (encoded_op >> 28) & 7; | ||
50 | int cmp = (encoded_op >> 24) & 15; | ||
51 | int oparg = (encoded_op << 8) >> 20; | ||
52 | int cmparg = (encoded_op << 20) >> 20; | ||
53 | int oldval = 0, ret; | ||
54 | if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) | ||
55 | oparg = 1 << oparg; | ||
56 | |||
57 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) | ||
58 | return -EFAULT; | ||
59 | |||
60 | #if !XCHAL_HAVE_S32C1I | ||
61 | return -ENOSYS; | ||
62 | #endif | ||
63 | |||
64 | pagefault_disable(); | ||
65 | |||
66 | switch (op) { | ||
67 | case FUTEX_OP_SET: | ||
68 | __futex_atomic_op("mov %1, %4", ret, oldval, uaddr, oparg); | ||
69 | break; | ||
70 | case FUTEX_OP_ADD: | ||
71 | __futex_atomic_op("add %1, %0, %4", ret, oldval, uaddr, | ||
72 | oparg); | ||
73 | break; | ||
74 | case FUTEX_OP_OR: | ||
75 | __futex_atomic_op("or %1, %0, %4", ret, oldval, uaddr, | ||
76 | oparg); | ||
77 | break; | ||
78 | case FUTEX_OP_ANDN: | ||
79 | __futex_atomic_op("and %1, %0, %4", ret, oldval, uaddr, | ||
80 | ~oparg); | ||
81 | break; | ||
82 | case FUTEX_OP_XOR: | ||
83 | __futex_atomic_op("xor %1, %0, %4", ret, oldval, uaddr, | ||
84 | oparg); | ||
85 | break; | ||
86 | default: | ||
87 | ret = -ENOSYS; | ||
88 | } | ||
89 | |||
90 | pagefault_enable(); | ||
91 | |||
92 | if (ret) | ||
93 | return ret; | ||
94 | |||
95 | switch (cmp) { | ||
96 | case FUTEX_OP_CMP_EQ: return (oldval == cmparg); | ||
97 | case FUTEX_OP_CMP_NE: return (oldval != cmparg); | ||
98 | case FUTEX_OP_CMP_LT: return (oldval < cmparg); | ||
99 | case FUTEX_OP_CMP_GE: return (oldval >= cmparg); | ||
100 | case FUTEX_OP_CMP_LE: return (oldval <= cmparg); | ||
101 | case FUTEX_OP_CMP_GT: return (oldval > cmparg); | ||
102 | } | ||
103 | |||
104 | return -ENOSYS; | ||
105 | } | ||
106 | |||
107 | static inline int | ||
108 | futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, | ||
109 | u32 oldval, u32 newval) | ||
110 | { | ||
111 | int ret = 0; | ||
112 | u32 prev; | ||
113 | |||
114 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) | ||
115 | return -EFAULT; | ||
116 | |||
117 | #if !XCHAL_HAVE_S32C1I | ||
118 | return -ENOSYS; | ||
119 | #endif | ||
120 | |||
121 | __asm__ __volatile__ ( | ||
122 | " # futex_atomic_cmpxchg_inatomic\n" | ||
123 | "1: l32i %1, %3, 0\n" | ||
124 | " mov %0, %5\n" | ||
125 | " wsr %1, scompare1\n" | ||
126 | "2: s32c1i %0, %3, 0\n" | ||
127 | "3:\n" | ||
128 | " .section .fixup,\"ax\"\n" | ||
129 | " .align 4\n" | ||
130 | "4: .long 3b\n" | ||
131 | "5: l32r %1, 4b\n" | ||
132 | " movi %0, %6\n" | ||
133 | " jx %1\n" | ||
134 | " .previous\n" | ||
135 | " .section __ex_table,\"a\"\n" | ||
136 | " .long 1b,5b,2b,5b\n" | ||
137 | " .previous\n" | ||
138 | : "+r" (ret), "=&r" (prev), "+m" (*uaddr) | ||
139 | : "r" (uaddr), "r" (oldval), "r" (newval), "I" (-EFAULT) | ||
140 | : "memory"); | ||
141 | |||
142 | *uval = prev; | ||
143 | return ret; | ||
144 | } | ||
145 | |||
146 | #endif /* __KERNEL__ */ | ||
147 | #endif /* _ASM_XTENSA_FUTEX_H */ | ||
diff --git a/arch/xtensa/include/asm/initialize_mmu.h b/arch/xtensa/include/asm/initialize_mmu.h index 722553f17db3..600781edc8a3 100644 --- a/arch/xtensa/include/asm/initialize_mmu.h +++ b/arch/xtensa/include/asm/initialize_mmu.h | |||
@@ -26,6 +26,9 @@ | |||
26 | #include <asm/pgtable.h> | 26 | #include <asm/pgtable.h> |
27 | #include <asm/vectors.h> | 27 | #include <asm/vectors.h> |
28 | 28 | ||
29 | #define CA_BYPASS (_PAGE_CA_BYPASS | _PAGE_HW_WRITE | _PAGE_HW_EXEC) | ||
30 | #define CA_WRITEBACK (_PAGE_CA_WB | _PAGE_HW_WRITE | _PAGE_HW_EXEC) | ||
31 | |||
29 | #ifdef __ASSEMBLY__ | 32 | #ifdef __ASSEMBLY__ |
30 | 33 | ||
31 | #define XTENSA_HWVERSION_RC_2009_0 230000 | 34 | #define XTENSA_HWVERSION_RC_2009_0 230000 |
@@ -80,8 +83,6 @@ | |||
80 | /* Step 2: map 0x40000000..0x47FFFFFF to paddr containing this code | 83 | /* Step 2: map 0x40000000..0x47FFFFFF to paddr containing this code |
81 | * and jump to the new mapping. | 84 | * and jump to the new mapping. |
82 | */ | 85 | */ |
83 | #define CA_BYPASS (_PAGE_CA_BYPASS | _PAGE_HW_WRITE | _PAGE_HW_EXEC) | ||
84 | #define CA_WRITEBACK (_PAGE_CA_WB | _PAGE_HW_WRITE | _PAGE_HW_EXEC) | ||
85 | 86 | ||
86 | srli a3, a0, 27 | 87 | srli a3, a0, 27 |
87 | slli a3, a3, 27 | 88 | slli a3, a3, 27 |
@@ -123,13 +124,13 @@ | |||
123 | wdtlb a4, a5 | 124 | wdtlb a4, a5 |
124 | witlb a4, a5 | 125 | witlb a4, a5 |
125 | 126 | ||
126 | movi a5, 0xe0000006 | 127 | movi a5, XCHAL_KIO_CACHED_VADDR + 6 |
127 | movi a4, 0xf0000000 + CA_WRITEBACK | 128 | movi a4, XCHAL_KIO_DEFAULT_PADDR + CA_WRITEBACK |
128 | wdtlb a4, a5 | 129 | wdtlb a4, a5 |
129 | witlb a4, a5 | 130 | witlb a4, a5 |
130 | 131 | ||
131 | movi a5, 0xf0000006 | 132 | movi a5, XCHAL_KIO_BYPASS_VADDR + 6 |
132 | movi a4, 0xf0000000 + CA_BYPASS | 133 | movi a4, XCHAL_KIO_DEFAULT_PADDR + CA_BYPASS |
133 | wdtlb a4, a5 | 134 | wdtlb a4, a5 |
134 | witlb a4, a5 | 135 | witlb a4, a5 |
135 | 136 | ||
diff --git a/arch/xtensa/include/asm/io.h b/arch/xtensa/include/asm/io.h index 700c2e6f2d25..2a042d430c25 100644 --- a/arch/xtensa/include/asm/io.h +++ b/arch/xtensa/include/asm/io.h | |||
@@ -14,20 +14,26 @@ | |||
14 | #ifdef __KERNEL__ | 14 | #ifdef __KERNEL__ |
15 | #include <asm/byteorder.h> | 15 | #include <asm/byteorder.h> |
16 | #include <asm/page.h> | 16 | #include <asm/page.h> |
17 | #include <asm/vectors.h> | ||
17 | #include <linux/bug.h> | 18 | #include <linux/bug.h> |
18 | #include <linux/kernel.h> | 19 | #include <linux/kernel.h> |
19 | 20 | ||
20 | #include <linux/types.h> | 21 | #include <linux/types.h> |
21 | 22 | ||
22 | #define XCHAL_KIO_CACHED_VADDR 0xe0000000 | ||
23 | #define XCHAL_KIO_BYPASS_VADDR 0xf0000000 | ||
24 | #define XCHAL_KIO_PADDR 0xf0000000 | ||
25 | #define XCHAL_KIO_SIZE 0x10000000 | ||
26 | |||
27 | #define IOADDR(x) (XCHAL_KIO_BYPASS_VADDR + (x)) | 23 | #define IOADDR(x) (XCHAL_KIO_BYPASS_VADDR + (x)) |
28 | #define IO_SPACE_LIMIT ~0 | 24 | #define IO_SPACE_LIMIT ~0 |
29 | 25 | ||
30 | #ifdef CONFIG_MMU | 26 | #ifdef CONFIG_MMU |
27 | |||
28 | #if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && CONFIG_OF | ||
29 | extern unsigned long xtensa_kio_paddr; | ||
30 | |||
31 | static inline unsigned long xtensa_get_kio_paddr(void) | ||
32 | { | ||
33 | return xtensa_kio_paddr; | ||
34 | } | ||
35 | #endif | ||
36 | |||
31 | /* | 37 | /* |
32 | * Return the virtual address for the specified bus memory. | 38 | * Return the virtual address for the specified bus memory. |
33 | * Note that we currently don't support any address outside the KIO segment. | 39 | * Note that we currently don't support any address outside the KIO segment. |
diff --git a/arch/xtensa/include/asm/irq.h b/arch/xtensa/include/asm/irq.h index 4c0ccc9c4f4c..f71f88ea7646 100644 --- a/arch/xtensa/include/asm/irq.h +++ b/arch/xtensa/include/asm/irq.h | |||
@@ -43,5 +43,14 @@ static __inline__ int irq_canonicalize(int irq) | |||
43 | } | 43 | } |
44 | 44 | ||
45 | struct irqaction; | 45 | struct irqaction; |
46 | struct irq_domain; | ||
47 | |||
48 | void migrate_irqs(void); | ||
49 | int xtensa_irq_domain_xlate(const u32 *intspec, unsigned int intsize, | ||
50 | unsigned long int_irq, unsigned long ext_irq, | ||
51 | unsigned long *out_hwirq, unsigned int *out_type); | ||
52 | int xtensa_irq_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw); | ||
53 | unsigned xtensa_map_ext_irq(unsigned ext_irq); | ||
54 | unsigned xtensa_get_ext_irq_no(unsigned irq); | ||
46 | 55 | ||
47 | #endif /* _XTENSA_IRQ_H */ | 56 | #endif /* _XTENSA_IRQ_H */ |
diff --git a/arch/xtensa/include/asm/mmu.h b/arch/xtensa/include/asm/mmu.h index 8554b2c8b17a..71afe418d0e5 100644 --- a/arch/xtensa/include/asm/mmu.h +++ b/arch/xtensa/include/asm/mmu.h | |||
@@ -1,11 +1,9 @@ | |||
1 | /* | 1 | /* |
2 | * include/asm-xtensa/mmu.h | ||
3 | * | ||
4 | * This file is subject to the terms and conditions of the GNU General Public | 2 | * This file is subject to the terms and conditions of the GNU General Public |
5 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
6 | * for more details. | 4 | * for more details. |
7 | * | 5 | * |
8 | * Copyright (C) 2001 - 2005 Tensilica Inc. | 6 | * Copyright (C) 2001 - 2013 Tensilica Inc. |
9 | */ | 7 | */ |
10 | 8 | ||
11 | #ifndef _XTENSA_MMU_H | 9 | #ifndef _XTENSA_MMU_H |
@@ -15,8 +13,10 @@ | |||
15 | #include <asm-generic/mmu.h> | 13 | #include <asm-generic/mmu.h> |
16 | #else | 14 | #else |
17 | 15 | ||
18 | /* Default "unsigned long" context */ | 16 | typedef struct { |
19 | typedef unsigned long mm_context_t; | 17 | unsigned long asid[NR_CPUS]; |
18 | unsigned int cpu; | ||
19 | } mm_context_t; | ||
20 | 20 | ||
21 | #endif /* CONFIG_MMU */ | 21 | #endif /* CONFIG_MMU */ |
22 | #endif /* _XTENSA_MMU_H */ | 22 | #endif /* _XTENSA_MMU_H */ |
diff --git a/arch/xtensa/include/asm/mmu_context.h b/arch/xtensa/include/asm/mmu_context.h index d43525a286bb..d33c71a8c9ec 100644 --- a/arch/xtensa/include/asm/mmu_context.h +++ b/arch/xtensa/include/asm/mmu_context.h | |||
@@ -1,13 +1,11 @@ | |||
1 | /* | 1 | /* |
2 | * include/asm-xtensa/mmu_context.h | ||
3 | * | ||
4 | * Switch an MMU context. | 2 | * Switch an MMU context. |
5 | * | 3 | * |
6 | * This file is subject to the terms and conditions of the GNU General Public | 4 | * This file is subject to the terms and conditions of the GNU General Public |
7 | * License. See the file "COPYING" in the main directory of this archive | 5 | * License. See the file "COPYING" in the main directory of this archive |
8 | * for more details. | 6 | * for more details. |
9 | * | 7 | * |
10 | * Copyright (C) 2001 - 2005 Tensilica Inc. | 8 | * Copyright (C) 2001 - 2013 Tensilica Inc. |
11 | */ | 9 | */ |
12 | 10 | ||
13 | #ifndef _XTENSA_MMU_CONTEXT_H | 11 | #ifndef _XTENSA_MMU_CONTEXT_H |
@@ -20,22 +18,25 @@ | |||
20 | #include <linux/stringify.h> | 18 | #include <linux/stringify.h> |
21 | #include <linux/sched.h> | 19 | #include <linux/sched.h> |
22 | 20 | ||
23 | #include <variant/core.h> | 21 | #include <asm/vectors.h> |
24 | 22 | ||
25 | #include <asm/pgtable.h> | 23 | #include <asm/pgtable.h> |
26 | #include <asm/cacheflush.h> | 24 | #include <asm/cacheflush.h> |
27 | #include <asm/tlbflush.h> | 25 | #include <asm/tlbflush.h> |
28 | #include <asm-generic/mm_hooks.h> | 26 | #include <asm-generic/mm_hooks.h> |
27 | #include <asm-generic/percpu.h> | ||
29 | 28 | ||
30 | #if (XCHAL_HAVE_TLBS != 1) | 29 | #if (XCHAL_HAVE_TLBS != 1) |
31 | # error "Linux must have an MMU!" | 30 | # error "Linux must have an MMU!" |
32 | #endif | 31 | #endif |
33 | 32 | ||
34 | extern unsigned long asid_cache; | 33 | DECLARE_PER_CPU(unsigned long, asid_cache); |
34 | #define cpu_asid_cache(cpu) per_cpu(asid_cache, cpu) | ||
35 | 35 | ||
36 | /* | 36 | /* |
37 | * NO_CONTEXT is the invalid ASID value that we don't ever assign to | 37 | * NO_CONTEXT is the invalid ASID value that we don't ever assign to |
38 | * any user or kernel context. | 38 | * any user or kernel context. We use the reserved values in the |
39 | * ASID_INSERT macro below. | ||
39 | * | 40 | * |
40 | * 0 invalid | 41 | * 0 invalid |
41 | * 1 kernel | 42 | * 1 kernel |
@@ -49,6 +50,12 @@ extern unsigned long asid_cache; | |||
49 | #define ASID_MASK ((1 << XCHAL_MMU_ASID_BITS) - 1) | 50 | #define ASID_MASK ((1 << XCHAL_MMU_ASID_BITS) - 1) |
50 | #define ASID_INSERT(x) (0x03020001 | (((x) & ASID_MASK) << 8)) | 51 | #define ASID_INSERT(x) (0x03020001 | (((x) & ASID_MASK) << 8)) |
51 | 52 | ||
53 | #ifdef CONFIG_MMU | ||
54 | void init_mmu(void); | ||
55 | #else | ||
56 | static inline void init_mmu(void) { } | ||
57 | #endif | ||
58 | |||
52 | static inline void set_rasid_register (unsigned long val) | 59 | static inline void set_rasid_register (unsigned long val) |
53 | { | 60 | { |
54 | __asm__ __volatile__ (" wsr %0, rasid\n\t" | 61 | __asm__ __volatile__ (" wsr %0, rasid\n\t" |
@@ -62,64 +69,77 @@ static inline unsigned long get_rasid_register (void) | |||
62 | return tmp; | 69 | return tmp; |
63 | } | 70 | } |
64 | 71 | ||
65 | static inline void | 72 | static inline void get_new_mmu_context(struct mm_struct *mm, unsigned int cpu) |
66 | __get_new_mmu_context(struct mm_struct *mm) | ||
67 | { | 73 | { |
68 | extern void flush_tlb_all(void); | 74 | unsigned long asid = cpu_asid_cache(cpu); |
69 | if (! (++asid_cache & ASID_MASK) ) { | 75 | if ((++asid & ASID_MASK) == 0) { |
70 | flush_tlb_all(); /* start new asid cycle */ | 76 | /* |
71 | asid_cache += ASID_USER_FIRST; | 77 | * Start new asid cycle; continue counting with next |
78 | * incarnation bits; skipping over 0, 1, 2, 3. | ||
79 | */ | ||
80 | local_flush_tlb_all(); | ||
81 | asid += ASID_USER_FIRST; | ||
72 | } | 82 | } |
73 | mm->context = asid_cache; | 83 | cpu_asid_cache(cpu) = asid; |
84 | mm->context.asid[cpu] = asid; | ||
85 | mm->context.cpu = cpu; | ||
74 | } | 86 | } |
75 | 87 | ||
76 | static inline void | 88 | static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu) |
77 | __load_mmu_context(struct mm_struct *mm) | ||
78 | { | 89 | { |
79 | set_rasid_register(ASID_INSERT(mm->context)); | 90 | /* |
91 | * Check if our ASID is of an older version and thus invalid. | ||
92 | */ | ||
93 | |||
94 | if (mm) { | ||
95 | unsigned long asid = mm->context.asid[cpu]; | ||
96 | |||
97 | if (asid == NO_CONTEXT || | ||
98 | ((asid ^ cpu_asid_cache(cpu)) & ~ASID_MASK)) | ||
99 | get_new_mmu_context(mm, cpu); | ||
100 | } | ||
101 | } | ||
102 | |||
103 | static inline void activate_context(struct mm_struct *mm, unsigned int cpu) | ||
104 | { | ||
105 | get_mmu_context(mm, cpu); | ||
106 | set_rasid_register(ASID_INSERT(mm->context.asid[cpu])); | ||
80 | invalidate_page_directory(); | 107 | invalidate_page_directory(); |
81 | } | 108 | } |
82 | 109 | ||
83 | /* | 110 | /* |
84 | * Initialize the context related info for a new mm_struct | 111 | * Initialize the context related info for a new mm_struct |
85 | * instance. | 112 | * instance. Valid cpu values are 0..(NR_CPUS-1), so initializing |
113 | * to -1 says the process has never run on any core. | ||
86 | */ | 114 | */ |
87 | 115 | ||
88 | static inline int | 116 | static inline int init_new_context(struct task_struct *tsk, |
89 | init_new_context(struct task_struct *tsk, struct mm_struct *mm) | 117 | struct mm_struct *mm) |
90 | { | 118 | { |
91 | mm->context = NO_CONTEXT; | 119 | int cpu; |
120 | for_each_possible_cpu(cpu) { | ||
121 | mm->context.asid[cpu] = NO_CONTEXT; | ||
122 | } | ||
123 | mm->context.cpu = -1; | ||
92 | return 0; | 124 | return 0; |
93 | } | 125 | } |
94 | 126 | ||
95 | /* | ||
96 | * After we have set current->mm to a new value, this activates | ||
97 | * the context for the new mm so we see the new mappings. | ||
98 | */ | ||
99 | static inline void | ||
100 | activate_mm(struct mm_struct *prev, struct mm_struct *next) | ||
101 | { | ||
102 | /* Unconditionally get a new ASID. */ | ||
103 | |||
104 | __get_new_mmu_context(next); | ||
105 | __load_mmu_context(next); | ||
106 | } | ||
107 | |||
108 | |||
109 | static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, | 127 | static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, |
110 | struct task_struct *tsk) | 128 | struct task_struct *tsk) |
111 | { | 129 | { |
112 | unsigned long asid = asid_cache; | 130 | unsigned int cpu = smp_processor_id(); |
113 | 131 | int migrated = next->context.cpu != cpu; | |
114 | /* Check if our ASID is of an older version and thus invalid */ | 132 | /* Flush the icache if we migrated to a new core. */ |
115 | 133 | if (migrated) { | |
116 | if (next->context == NO_CONTEXT || ((next->context^asid) & ~ASID_MASK)) | 134 | __invalidate_icache_all(); |
117 | __get_new_mmu_context(next); | 135 | next->context.cpu = cpu; |
118 | 136 | } | |
119 | __load_mmu_context(next); | 137 | if (migrated || prev != next) |
138 | activate_context(next, cpu); | ||
120 | } | 139 | } |
121 | 140 | ||
122 | #define deactivate_mm(tsk, mm) do { } while(0) | 141 | #define activate_mm(prev, next) switch_mm((prev), (next), NULL) |
142 | #define deactivate_mm(tsk, mm) do { } while (0) | ||
123 | 143 | ||
124 | /* | 144 | /* |
125 | * Destroy context related info for an mm_struct that is about | 145 | * Destroy context related info for an mm_struct that is about |
diff --git a/arch/xtensa/include/asm/mxregs.h b/arch/xtensa/include/asm/mxregs.h new file mode 100644 index 000000000000..73dcc5456f68 --- /dev/null +++ b/arch/xtensa/include/asm/mxregs.h | |||
@@ -0,0 +1,46 @@ | |||
1 | /* | ||
2 | * Xtensa MX interrupt distributor | ||
3 | * | ||
4 | * This file is subject to the terms and conditions of the GNU General Public | ||
5 | * License. See the file "COPYING" in the main directory of this archive | ||
6 | * for more details. | ||
7 | * | ||
8 | * Copyright (C) 2008 - 2013 Tensilica Inc. | ||
9 | */ | ||
10 | |||
11 | #ifndef _XTENSA_MXREGS_H | ||
12 | #define _XTENSA_MXREGS_H | ||
13 | |||
14 | /* | ||
15 | * RER/WER at, as Read/write external register | ||
16 | * at: value | ||
17 | * as: address | ||
18 | * | ||
19 | * Address Value | ||
20 | * 00nn 0...0p..p Interrupt Routing, route IRQ n to processor p | ||
21 | * 01pp 0...0d..d 16 bits (d) 'ored' as single IPI to processor p | ||
22 | * 0180 0...0m..m Clear enable specified by mask (m) | ||
23 | * 0184 0...0m..m Set enable specified by mask (m) | ||
24 | * 0190 0...0x..x 8-bit IPI partition register | ||
25 | * VVVVVVVVPPPPUUUUUUUUUUUUUUUUU | ||
26 | * V (10-bit) Release/Version | ||
27 | * P ( 4-bit) Number of cores - 1 | ||
28 | * U (18-bit) ID | ||
29 | * 01a0 i.......i 32-bit ConfigID | ||
30 | * 0200 0...0m..m RunStall core 'n' | ||
31 | * 0220 c Cache coherency enabled | ||
32 | */ | ||
33 | |||
34 | #define MIROUT(irq) (0x000 + (irq)) | ||
35 | #define MIPICAUSE(cpu) (0x100 + (cpu)) | ||
36 | #define MIPISET(cause) (0x140 + (cause)) | ||
37 | #define MIENG 0x180 | ||
38 | #define MIENGSET 0x184 | ||
39 | #define MIASG 0x188 /* Read Global Assert Register */ | ||
40 | #define MIASGSET 0x18c /* Set Global Addert Regiter */ | ||
41 | #define MIPIPART 0x190 | ||
42 | #define SYSCFGID 0x1a0 | ||
43 | #define MPSCORE 0x200 | ||
44 | #define CCON 0x220 | ||
45 | |||
46 | #endif /* _XTENSA_MXREGS_H */ | ||
diff --git a/arch/xtensa/include/asm/perf_event.h b/arch/xtensa/include/asm/perf_event.h new file mode 100644 index 000000000000..5aa4590acaae --- /dev/null +++ b/arch/xtensa/include/asm/perf_event.h | |||
@@ -0,0 +1,4 @@ | |||
1 | #ifndef __ASM_XTENSA_PERF_EVENT_H | ||
2 | #define __ASM_XTENSA_PERF_EVENT_H | ||
3 | |||
4 | #endif /* __ASM_XTENSA_PERF_EVENT_H */ | ||
diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h index 7e409a5b0ec5..abb59708a3b7 100644 --- a/arch/xtensa/include/asm/processor.h +++ b/arch/xtensa/include/asm/processor.h | |||
@@ -191,5 +191,25 @@ extern unsigned long get_wchan(struct task_struct *p); | |||
191 | #define set_sr(x,sr) ({unsigned int v=(unsigned int)x; WSR(v,sr);}) | 191 | #define set_sr(x,sr) ({unsigned int v=(unsigned int)x; WSR(v,sr);}) |
192 | #define get_sr(sr) ({unsigned int v; RSR(v,sr); v; }) | 192 | #define get_sr(sr) ({unsigned int v; RSR(v,sr); v; }) |
193 | 193 | ||
194 | #ifndef XCHAL_HAVE_EXTERN_REGS | ||
195 | #define XCHAL_HAVE_EXTERN_REGS 0 | ||
196 | #endif | ||
197 | |||
198 | #if XCHAL_HAVE_EXTERN_REGS | ||
199 | |||
200 | static inline void set_er(unsigned long value, unsigned long addr) | ||
201 | { | ||
202 | asm volatile ("wer %0, %1" : : "a" (value), "a" (addr) : "memory"); | ||
203 | } | ||
204 | |||
205 | static inline unsigned long get_er(unsigned long addr) | ||
206 | { | ||
207 | register unsigned long value; | ||
208 | asm volatile ("rer %0, %1" : "=a" (value) : "a" (addr) : "memory"); | ||
209 | return value; | ||
210 | } | ||
211 | |||
212 | #endif /* XCHAL_HAVE_EXTERN_REGS */ | ||
213 | |||
194 | #endif /* __ASSEMBLY__ */ | 214 | #endif /* __ASSEMBLY__ */ |
195 | #endif /* _XTENSA_PROCESSOR_H */ | 215 | #endif /* _XTENSA_PROCESSOR_H */ |
diff --git a/arch/xtensa/include/asm/ptrace.h b/arch/xtensa/include/asm/ptrace.h index 81f31bc9dde0..598e752dcbcd 100644 --- a/arch/xtensa/include/asm/ptrace.h +++ b/arch/xtensa/include/asm/ptrace.h | |||
@@ -59,9 +59,17 @@ struct pt_regs { | |||
59 | (task_stack_page(tsk) + KERNEL_STACK_SIZE - (XCHAL_NUM_AREGS-16)*4) - 1) | 59 | (task_stack_page(tsk) + KERNEL_STACK_SIZE - (XCHAL_NUM_AREGS-16)*4) - 1) |
60 | # define user_mode(regs) (((regs)->ps & 0x00000020)!=0) | 60 | # define user_mode(regs) (((regs)->ps & 0x00000020)!=0) |
61 | # define instruction_pointer(regs) ((regs)->pc) | 61 | # define instruction_pointer(regs) ((regs)->pc) |
62 | # define return_pointer(regs) (MAKE_PC_FROM_RA((regs)->areg[0], \ | ||
63 | (regs)->areg[1])) | ||
62 | 64 | ||
63 | # ifndef CONFIG_SMP | 65 | # ifndef CONFIG_SMP |
64 | # define profile_pc(regs) instruction_pointer(regs) | 66 | # define profile_pc(regs) instruction_pointer(regs) |
67 | # else | ||
68 | # define profile_pc(regs) \ | ||
69 | ({ \ | ||
70 | in_lock_functions(instruction_pointer(regs)) ? \ | ||
71 | return_pointer(regs) : instruction_pointer(regs); \ | ||
72 | }) | ||
65 | # endif | 73 | # endif |
66 | 74 | ||
67 | #define user_stack_pointer(regs) ((regs)->areg[1]) | 75 | #define user_stack_pointer(regs) ((regs)->areg[1]) |
diff --git a/arch/xtensa/include/asm/smp.h b/arch/xtensa/include/asm/smp.h index 83c569e3bdbd..4e43f5643891 100644 --- a/arch/xtensa/include/asm/smp.h +++ b/arch/xtensa/include/asm/smp.h | |||
@@ -1,27 +1,43 @@ | |||
1 | /* | 1 | /* |
2 | * include/asm-xtensa/smp.h | ||
3 | * | ||
4 | * This file is subject to the terms and conditions of the GNU General Public | 2 | * This file is subject to the terms and conditions of the GNU General Public |
5 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
6 | * for more details. | 4 | * for more details. |
7 | * | 5 | * |
8 | * Copyright (C) 2001 - 2005 Tensilica Inc. | 6 | * Copyright (C) 2001 - 2013 Tensilica Inc. |
9 | */ | 7 | */ |
10 | 8 | ||
11 | #ifndef _XTENSA_SMP_H | 9 | #ifndef _XTENSA_SMP_H |
12 | #define _XTENSA_SMP_H | 10 | #define _XTENSA_SMP_H |
13 | 11 | ||
14 | extern struct xtensa_cpuinfo boot_cpu_data; | 12 | #ifdef CONFIG_SMP |
15 | 13 | ||
16 | #define cpu_data (&boot_cpu_data) | 14 | #define raw_smp_processor_id() (current_thread_info()->cpu) |
17 | #define current_cpu_data boot_cpu_data | 15 | #define cpu_logical_map(cpu) (cpu) |
18 | 16 | ||
19 | struct xtensa_cpuinfo { | 17 | struct start_info { |
20 | unsigned long *pgd_cache; | 18 | unsigned long stack; |
21 | unsigned long *pte_cache; | ||
22 | unsigned long pgtable_cache_sz; | ||
23 | }; | 19 | }; |
20 | extern struct start_info start_info; | ||
24 | 21 | ||
25 | #define cpu_logical_map(cpu) (cpu) | 22 | struct cpumask; |
23 | void arch_send_call_function_ipi_mask(const struct cpumask *mask); | ||
24 | void arch_send_call_function_single_ipi(int cpu); | ||
25 | |||
26 | void smp_init_cpus(void); | ||
27 | void secondary_init_irq(void); | ||
28 | void ipi_init(void); | ||
29 | struct seq_file; | ||
30 | void show_ipi_list(struct seq_file *p, int prec); | ||
31 | |||
32 | #ifdef CONFIG_HOTPLUG_CPU | ||
33 | |||
34 | void __cpu_die(unsigned int cpu); | ||
35 | int __cpu_disable(void); | ||
36 | void cpu_die(void); | ||
37 | void cpu_restart(void); | ||
38 | |||
39 | #endif /* CONFIG_HOTPLUG_CPU */ | ||
40 | |||
41 | #endif /* CONFIG_SMP */ | ||
26 | 42 | ||
27 | #endif /* _XTENSA_SMP_H */ | 43 | #endif /* _XTENSA_SMP_H */ |
diff --git a/arch/xtensa/include/asm/spinlock.h b/arch/xtensa/include/asm/spinlock.h index 03975906b36f..1d95fa5dcd10 100644 --- a/arch/xtensa/include/asm/spinlock.h +++ b/arch/xtensa/include/asm/spinlock.h | |||
@@ -28,13 +28,13 @@ | |||
28 | * 1 somebody owns the spinlock | 28 | * 1 somebody owns the spinlock |
29 | */ | 29 | */ |
30 | 30 | ||
31 | #define __raw_spin_is_locked(x) ((x)->slock != 0) | 31 | #define arch_spin_is_locked(x) ((x)->slock != 0) |
32 | #define __raw_spin_unlock_wait(lock) \ | 32 | #define arch_spin_unlock_wait(lock) \ |
33 | do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) | 33 | do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0) |
34 | 34 | ||
35 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) | 35 | #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) |
36 | 36 | ||
37 | static inline void __raw_spin_lock(raw_spinlock_t *lock) | 37 | static inline void arch_spin_lock(arch_spinlock_t *lock) |
38 | { | 38 | { |
39 | unsigned long tmp; | 39 | unsigned long tmp; |
40 | 40 | ||
@@ -51,7 +51,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) | |||
51 | 51 | ||
52 | /* Returns 1 if the lock is obtained, 0 otherwise. */ | 52 | /* Returns 1 if the lock is obtained, 0 otherwise. */ |
53 | 53 | ||
54 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) | 54 | static inline int arch_spin_trylock(arch_spinlock_t *lock) |
55 | { | 55 | { |
56 | unsigned long tmp; | 56 | unsigned long tmp; |
57 | 57 | ||
@@ -67,7 +67,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock) | |||
67 | return tmp == 0 ? 1 : 0; | 67 | return tmp == 0 ? 1 : 0; |
68 | } | 68 | } |
69 | 69 | ||
70 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) | 70 | static inline void arch_spin_unlock(arch_spinlock_t *lock) |
71 | { | 71 | { |
72 | unsigned long tmp; | 72 | unsigned long tmp; |
73 | 73 | ||
@@ -96,9 +96,9 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock) | |||
96 | * 0x80000000 one writer owns the rwlock, no other writers, no readers | 96 | * 0x80000000 one writer owns the rwlock, no other writers, no readers |
97 | */ | 97 | */ |
98 | 98 | ||
99 | #define __raw_write_can_lock(x) ((x)->lock == 0) | 99 | #define arch_write_can_lock(x) ((x)->lock == 0) |
100 | 100 | ||
101 | static inline void __raw_write_lock(raw_rwlock_t *rw) | 101 | static inline void arch_write_lock(arch_rwlock_t *rw) |
102 | { | 102 | { |
103 | unsigned long tmp; | 103 | unsigned long tmp; |
104 | 104 | ||
@@ -116,7 +116,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw) | |||
116 | 116 | ||
117 | /* Returns 1 if the lock is obtained, 0 otherwise. */ | 117 | /* Returns 1 if the lock is obtained, 0 otherwise. */ |
118 | 118 | ||
119 | static inline int __raw_write_trylock(raw_rwlock_t *rw) | 119 | static inline int arch_write_trylock(arch_rwlock_t *rw) |
120 | { | 120 | { |
121 | unsigned long tmp; | 121 | unsigned long tmp; |
122 | 122 | ||
@@ -133,7 +133,7 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw) | |||
133 | return tmp == 0 ? 1 : 0; | 133 | return tmp == 0 ? 1 : 0; |
134 | } | 134 | } |
135 | 135 | ||
136 | static inline void __raw_write_unlock(raw_rwlock_t *rw) | 136 | static inline void arch_write_unlock(arch_rwlock_t *rw) |
137 | { | 137 | { |
138 | unsigned long tmp; | 138 | unsigned long tmp; |
139 | 139 | ||
@@ -145,7 +145,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw) | |||
145 | : "memory"); | 145 | : "memory"); |
146 | } | 146 | } |
147 | 147 | ||
148 | static inline void __raw_read_lock(raw_rwlock_t *rw) | 148 | static inline void arch_read_lock(arch_rwlock_t *rw) |
149 | { | 149 | { |
150 | unsigned long tmp; | 150 | unsigned long tmp; |
151 | unsigned long result; | 151 | unsigned long result; |
@@ -164,7 +164,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw) | |||
164 | 164 | ||
165 | /* Returns 1 if the lock is obtained, 0 otherwise. */ | 165 | /* Returns 1 if the lock is obtained, 0 otherwise. */ |
166 | 166 | ||
167 | static inline int __raw_read_trylock(raw_rwlock_t *rw) | 167 | static inline int arch_read_trylock(arch_rwlock_t *rw) |
168 | { | 168 | { |
169 | unsigned long result; | 169 | unsigned long result; |
170 | unsigned long tmp; | 170 | unsigned long tmp; |
@@ -184,7 +184,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw) | |||
184 | return result == 0; | 184 | return result == 0; |
185 | } | 185 | } |
186 | 186 | ||
187 | static inline void __raw_read_unlock(raw_rwlock_t *rw) | 187 | static inline void arch_read_unlock(arch_rwlock_t *rw) |
188 | { | 188 | { |
189 | unsigned long tmp1, tmp2; | 189 | unsigned long tmp1, tmp2; |
190 | 190 | ||
@@ -199,4 +199,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw) | |||
199 | : "memory"); | 199 | : "memory"); |
200 | } | 200 | } |
201 | 201 | ||
202 | #define arch_read_lock_flags(lock, flags) arch_read_lock(lock) | ||
203 | #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) | ||
204 | |||
202 | #endif /* _XTENSA_SPINLOCK_H */ | 205 | #endif /* _XTENSA_SPINLOCK_H */ |
diff --git a/arch/xtensa/include/asm/spinlock_types.h b/arch/xtensa/include/asm/spinlock_types.h new file mode 100644 index 000000000000..7ec5ce10c9e9 --- /dev/null +++ b/arch/xtensa/include/asm/spinlock_types.h | |||
@@ -0,0 +1,20 @@ | |||
1 | #ifndef __ASM_SPINLOCK_TYPES_H | ||
2 | #define __ASM_SPINLOCK_TYPES_H | ||
3 | |||
4 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
5 | # error "please don't include this file directly" | ||
6 | #endif | ||
7 | |||
8 | typedef struct { | ||
9 | volatile unsigned int slock; | ||
10 | } arch_spinlock_t; | ||
11 | |||
12 | #define __ARCH_SPIN_LOCK_UNLOCKED { 0 } | ||
13 | |||
14 | typedef struct { | ||
15 | volatile unsigned int lock; | ||
16 | } arch_rwlock_t; | ||
17 | |||
18 | #define __ARCH_RW_LOCK_UNLOCKED { 0 } | ||
19 | |||
20 | #endif | ||
diff --git a/arch/xtensa/include/asm/timex.h b/arch/xtensa/include/asm/timex.h index 27fa3c170662..ca929e6a38b5 100644 --- a/arch/xtensa/include/asm/timex.h +++ b/arch/xtensa/include/asm/timex.h | |||
@@ -1,18 +1,14 @@ | |||
1 | /* | 1 | /* |
2 | * include/asm-xtensa/timex.h | ||
3 | * | ||
4 | * This file is subject to the terms and conditions of the GNU General Public | 2 | * This file is subject to the terms and conditions of the GNU General Public |
5 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
6 | * for more details. | 4 | * for more details. |
7 | * | 5 | * |
8 | * Copyright (C) 2001 - 2008 Tensilica Inc. | 6 | * Copyright (C) 2001 - 2013 Tensilica Inc. |
9 | */ | 7 | */ |
10 | 8 | ||
11 | #ifndef _XTENSA_TIMEX_H | 9 | #ifndef _XTENSA_TIMEX_H |
12 | #define _XTENSA_TIMEX_H | 10 | #define _XTENSA_TIMEX_H |
13 | 11 | ||
14 | #ifdef __KERNEL__ | ||
15 | |||
16 | #include <asm/processor.h> | 12 | #include <asm/processor.h> |
17 | #include <linux/stringify.h> | 13 | #include <linux/stringify.h> |
18 | 14 | ||
@@ -39,14 +35,9 @@ extern unsigned long ccount_freq; | |||
39 | 35 | ||
40 | typedef unsigned long long cycles_t; | 36 | typedef unsigned long long cycles_t; |
41 | 37 | ||
42 | /* | ||
43 | * Only used for SMP. | ||
44 | */ | ||
45 | |||
46 | extern cycles_t cacheflush_time; | ||
47 | |||
48 | #define get_cycles() (0) | 38 | #define get_cycles() (0) |
49 | 39 | ||
40 | void local_timer_setup(unsigned cpu); | ||
50 | 41 | ||
51 | /* | 42 | /* |
52 | * Register access. | 43 | * Register access. |
@@ -81,5 +72,4 @@ static inline void set_linux_timer (unsigned long ccompare) | |||
81 | WSR_CCOMPARE(LINUX_TIMER, ccompare); | 72 | WSR_CCOMPARE(LINUX_TIMER, ccompare); |
82 | } | 73 | } |
83 | 74 | ||
84 | #endif /* __KERNEL__ */ | ||
85 | #endif /* _XTENSA_TIMEX_H */ | 75 | #endif /* _XTENSA_TIMEX_H */ |
diff --git a/arch/xtensa/include/asm/tlbflush.h b/arch/xtensa/include/asm/tlbflush.h index 43dd348a5a47..fc34274ce41b 100644 --- a/arch/xtensa/include/asm/tlbflush.h +++ b/arch/xtensa/include/asm/tlbflush.h | |||
@@ -1,18 +1,14 @@ | |||
1 | /* | 1 | /* |
2 | * include/asm-xtensa/tlbflush.h | ||
3 | * | ||
4 | * This file is subject to the terms and conditions of the GNU General Public | 2 | * This file is subject to the terms and conditions of the GNU General Public |
5 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
6 | * for more details. | 4 | * for more details. |
7 | * | 5 | * |
8 | * Copyright (C) 2001 - 2005 Tensilica Inc. | 6 | * Copyright (C) 2001 - 2013 Tensilica Inc. |
9 | */ | 7 | */ |
10 | 8 | ||
11 | #ifndef _XTENSA_TLBFLUSH_H | 9 | #ifndef _XTENSA_TLBFLUSH_H |
12 | #define _XTENSA_TLBFLUSH_H | 10 | #define _XTENSA_TLBFLUSH_H |
13 | 11 | ||
14 | #ifdef __KERNEL__ | ||
15 | |||
16 | #include <linux/stringify.h> | 12 | #include <linux/stringify.h> |
17 | #include <asm/processor.h> | 13 | #include <asm/processor.h> |
18 | 14 | ||
@@ -34,12 +30,37 @@ | |||
34 | * - flush_tlb_range(mm, start, end) flushes a range of pages | 30 | * - flush_tlb_range(mm, start, end) flushes a range of pages |
35 | */ | 31 | */ |
36 | 32 | ||
37 | extern void flush_tlb_all(void); | 33 | void local_flush_tlb_all(void); |
38 | extern void flush_tlb_mm(struct mm_struct*); | 34 | void local_flush_tlb_mm(struct mm_struct *mm); |
39 | extern void flush_tlb_page(struct vm_area_struct*,unsigned long); | 35 | void local_flush_tlb_page(struct vm_area_struct *vma, |
40 | extern void flush_tlb_range(struct vm_area_struct*,unsigned long,unsigned long); | 36 | unsigned long page); |
37 | void local_flush_tlb_range(struct vm_area_struct *vma, | ||
38 | unsigned long start, unsigned long end); | ||
39 | |||
40 | #ifdef CONFIG_SMP | ||
41 | |||
42 | void flush_tlb_all(void); | ||
43 | void flush_tlb_mm(struct mm_struct *); | ||
44 | void flush_tlb_page(struct vm_area_struct *, unsigned long); | ||
45 | void flush_tlb_range(struct vm_area_struct *, unsigned long, | ||
46 | unsigned long); | ||
47 | |||
48 | static inline void flush_tlb_kernel_range(unsigned long start, | ||
49 | unsigned long end) | ||
50 | { | ||
51 | flush_tlb_all(); | ||
52 | } | ||
53 | |||
54 | #else /* !CONFIG_SMP */ | ||
55 | |||
56 | #define flush_tlb_all() local_flush_tlb_all() | ||
57 | #define flush_tlb_mm(mm) local_flush_tlb_mm(mm) | ||
58 | #define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page) | ||
59 | #define flush_tlb_range(vma, vmaddr, end) local_flush_tlb_range(vma, vmaddr, \ | ||
60 | end) | ||
61 | #define flush_tlb_kernel_range(start, end) local_flush_tlb_all() | ||
41 | 62 | ||
42 | #define flush_tlb_kernel_range(start,end) flush_tlb_all() | 63 | #endif /* CONFIG_SMP */ |
43 | 64 | ||
44 | /* TLB operations. */ | 65 | /* TLB operations. */ |
45 | 66 | ||
@@ -187,5 +208,4 @@ static inline unsigned long read_itlb_translation (int way) | |||
187 | } | 208 | } |
188 | 209 | ||
189 | #endif /* __ASSEMBLY__ */ | 210 | #endif /* __ASSEMBLY__ */ |
190 | #endif /* __KERNEL__ */ | ||
191 | #endif /* _XTENSA_TLBFLUSH_H */ | 211 | #endif /* _XTENSA_TLBFLUSH_H */ |
diff --git a/arch/xtensa/include/asm/traps.h b/arch/xtensa/include/asm/traps.h index 917488a0ab00..8c194f6af45e 100644 --- a/arch/xtensa/include/asm/traps.h +++ b/arch/xtensa/include/asm/traps.h | |||
@@ -19,6 +19,7 @@ | |||
19 | */ | 19 | */ |
20 | extern void * __init trap_set_handler(int cause, void *handler); | 20 | extern void * __init trap_set_handler(int cause, void *handler); |
21 | extern void do_unhandled(struct pt_regs *regs, unsigned long exccause); | 21 | extern void do_unhandled(struct pt_regs *regs, unsigned long exccause); |
22 | void secondary_trap_init(void); | ||
22 | 23 | ||
23 | static inline void spill_registers(void) | 24 | static inline void spill_registers(void) |
24 | { | 25 | { |
diff --git a/arch/xtensa/include/asm/vectors.h b/arch/xtensa/include/asm/vectors.h index c52b656d0310..5791b45d5a5d 100644 --- a/arch/xtensa/include/asm/vectors.h +++ b/arch/xtensa/include/asm/vectors.h | |||
@@ -20,6 +20,17 @@ | |||
20 | 20 | ||
21 | #include <variant/core.h> | 21 | #include <variant/core.h> |
22 | 22 | ||
23 | #define XCHAL_KIO_CACHED_VADDR 0xe0000000 | ||
24 | #define XCHAL_KIO_BYPASS_VADDR 0xf0000000 | ||
25 | #define XCHAL_KIO_DEFAULT_PADDR 0xf0000000 | ||
26 | #define XCHAL_KIO_SIZE 0x10000000 | ||
27 | |||
28 | #if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && CONFIG_OF | ||
29 | #define XCHAL_KIO_PADDR xtensa_get_kio_paddr() | ||
30 | #else | ||
31 | #define XCHAL_KIO_PADDR XCHAL_KIO_DEFAULT_PADDR | ||
32 | #endif | ||
33 | |||
23 | #if defined(CONFIG_MMU) | 34 | #if defined(CONFIG_MMU) |
24 | 35 | ||
25 | /* Will Become VECBASE */ | 36 | /* Will Become VECBASE */ |
@@ -30,11 +41,9 @@ | |||
30 | 41 | ||
31 | #if defined(XCHAL_HAVE_PTP_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY | 42 | #if defined(XCHAL_HAVE_PTP_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY |
32 | /* MMU v3 - XCHAL_HAVE_PTP_MMU == 1 */ | 43 | /* MMU v3 - XCHAL_HAVE_PTP_MMU == 1 */ |
33 | #define PHYSICAL_MEMORY_ADDRESS 0x00000000 | ||
34 | #define LOAD_MEMORY_ADDRESS 0x00003000 | 44 | #define LOAD_MEMORY_ADDRESS 0x00003000 |
35 | #else | 45 | #else |
36 | /* MMU V2 - XCHAL_HAVE_PTP_MMU == 0 */ | 46 | /* MMU V2 - XCHAL_HAVE_PTP_MMU == 0 */ |
37 | #define PHYSICAL_MEMORY_ADDRESS 0xD0000000 | ||
38 | #define LOAD_MEMORY_ADDRESS 0xD0003000 | 47 | #define LOAD_MEMORY_ADDRESS 0xD0003000 |
39 | #endif | 48 | #endif |
40 | 49 | ||
@@ -46,7 +55,6 @@ | |||
46 | 55 | ||
47 | /* Location of the start of the kernel text, _start */ | 56 | /* Location of the start of the kernel text, _start */ |
48 | #define KERNELOFFSET 0x00003000 | 57 | #define KERNELOFFSET 0x00003000 |
49 | #define PHYSICAL_MEMORY_ADDRESS 0x00000000 | ||
50 | 58 | ||
51 | /* Loaded just above possibly live vectors */ | 59 | /* Loaded just above possibly live vectors */ |
52 | #define LOAD_MEMORY_ADDRESS 0x00003000 | 60 | #define LOAD_MEMORY_ADDRESS 0x00003000 |
@@ -54,7 +62,6 @@ | |||
54 | #endif /* CONFIG_MMU */ | 62 | #endif /* CONFIG_MMU */ |
55 | 63 | ||
56 | #define XC_VADDR(offset) (VIRTUAL_MEMORY_ADDRESS + offset) | 64 | #define XC_VADDR(offset) (VIRTUAL_MEMORY_ADDRESS + offset) |
57 | #define XC_PADDR(offset) (PHYSICAL_MEMORY_ADDRESS + offset) | ||
58 | 65 | ||
59 | /* Used to set VECBASE register */ | 66 | /* Used to set VECBASE register */ |
60 | #define VECBASE_RESET_VADDR VIRTUAL_MEMORY_ADDRESS | 67 | #define VECBASE_RESET_VADDR VIRTUAL_MEMORY_ADDRESS |
@@ -67,7 +74,7 @@ | |||
67 | VECBASE_RESET_VADDR) | 74 | VECBASE_RESET_VADDR) |
68 | #define RESET_VECTOR1_VADDR XC_VADDR(RESET_VECTOR1_VECOFS) | 75 | #define RESET_VECTOR1_VADDR XC_VADDR(RESET_VECTOR1_VECOFS) |
69 | 76 | ||
70 | #if XCHAL_HAVE_VECBASE | 77 | #if defined(XCHAL_HAVE_VECBASE) && XCHAL_HAVE_VECBASE |
71 | 78 | ||
72 | #define USER_VECTOR_VADDR XC_VADDR(XCHAL_USER_VECOFS) | 79 | #define USER_VECTOR_VADDR XC_VADDR(XCHAL_USER_VECOFS) |
73 | #define KERNEL_VECTOR_VADDR XC_VADDR(XCHAL_KERNEL_VECOFS) | 80 | #define KERNEL_VECTOR_VADDR XC_VADDR(XCHAL_KERNEL_VECOFS) |
@@ -81,11 +88,9 @@ | |||
81 | 88 | ||
82 | #define DEBUG_VECTOR_VADDR XC_VADDR(XCHAL_DEBUG_VECOFS) | 89 | #define DEBUG_VECTOR_VADDR XC_VADDR(XCHAL_DEBUG_VECOFS) |
83 | 90 | ||
84 | #undef XCHAL_NMI_VECTOR_VADDR | 91 | #define NMI_VECTOR_VADDR XC_VADDR(XCHAL_NMI_VECOFS) |
85 | #define XCHAL_NMI_VECTOR_VADDR XC_VADDR(XCHAL_NMI_VECOFS) | ||
86 | 92 | ||
87 | #undef XCHAL_INTLEVEL7_VECTOR_VADDR | 93 | #define INTLEVEL7_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL7_VECOFS) |
88 | #define XCHAL_INTLEVEL7_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL7_VECOFS) | ||
89 | 94 | ||
90 | /* | 95 | /* |
91 | * These XCHAL_* #defines from varian/core.h | 96 | * These XCHAL_* #defines from varian/core.h |
diff --git a/arch/xtensa/kernel/Makefile b/arch/xtensa/kernel/Makefile index f90265ec1ccc..18d962a8c0c2 100644 --- a/arch/xtensa/kernel/Makefile +++ b/arch/xtensa/kernel/Makefile | |||
@@ -12,6 +12,7 @@ obj-$(CONFIG_KGDB) += xtensa-stub.o | |||
12 | obj-$(CONFIG_PCI) += pci.o | 12 | obj-$(CONFIG_PCI) += pci.o |
13 | obj-$(CONFIG_MODULES) += xtensa_ksyms.o module.o | 13 | obj-$(CONFIG_MODULES) += xtensa_ksyms.o module.o |
14 | obj-$(CONFIG_FUNCTION_TRACER) += mcount.o | 14 | obj-$(CONFIG_FUNCTION_TRACER) += mcount.o |
15 | obj-$(CONFIG_SMP) += smp.o mxhead.o | ||
15 | 16 | ||
16 | AFLAGS_head.o += -mtext-section-literals | 17 | AFLAGS_head.o += -mtext-section-literals |
17 | 18 | ||
diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S index 7d740ebbe198..aeeb3cc8a410 100644 --- a/arch/xtensa/kernel/head.S +++ b/arch/xtensa/kernel/head.S | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <asm/page.h> | 19 | #include <asm/page.h> |
20 | #include <asm/cacheasm.h> | 20 | #include <asm/cacheasm.h> |
21 | #include <asm/initialize_mmu.h> | 21 | #include <asm/initialize_mmu.h> |
22 | #include <asm/mxregs.h> | ||
22 | 23 | ||
23 | #include <linux/init.h> | 24 | #include <linux/init.h> |
24 | #include <linux/linkage.h> | 25 | #include <linux/linkage.h> |
@@ -54,7 +55,7 @@ ENTRY(_start) | |||
54 | 55 | ||
55 | /* Preserve the pointer to the boot parameter list in EXCSAVE_1 */ | 56 | /* Preserve the pointer to the boot parameter list in EXCSAVE_1 */ |
56 | wsr a2, excsave1 | 57 | wsr a2, excsave1 |
57 | _j _SetupMMU | 58 | _j _SetupOCD |
58 | 59 | ||
59 | .align 4 | 60 | .align 4 |
60 | .literal_position | 61 | .literal_position |
@@ -62,6 +63,23 @@ ENTRY(_start) | |||
62 | .word _startup | 63 | .word _startup |
63 | 64 | ||
64 | .align 4 | 65 | .align 4 |
66 | _SetupOCD: | ||
67 | /* | ||
68 | * Initialize WB, WS, and clear PS.EXCM (to allow loop instructions). | ||
69 | * Set Interrupt Level just below XCHAL_DEBUGLEVEL to allow | ||
70 | * xt-gdb to single step via DEBUG exceptions received directly | ||
71 | * by ocd. | ||
72 | */ | ||
73 | movi a1, 1 | ||
74 | movi a0, 0 | ||
75 | wsr a1, windowstart | ||
76 | wsr a0, windowbase | ||
77 | rsync | ||
78 | |||
79 | movi a1, LOCKLEVEL | ||
80 | wsr a1, ps | ||
81 | rsync | ||
82 | |||
65 | .global _SetupMMU | 83 | .global _SetupMMU |
66 | _SetupMMU: | 84 | _SetupMMU: |
67 | Offset = _SetupMMU - _start | 85 | Offset = _SetupMMU - _start |
@@ -85,24 +103,11 @@ _SetupMMU: | |||
85 | 103 | ||
86 | ENDPROC(_start) | 104 | ENDPROC(_start) |
87 | 105 | ||
88 | __INIT | 106 | __REF |
89 | .literal_position | 107 | .literal_position |
90 | 108 | ||
91 | ENTRY(_startup) | 109 | ENTRY(_startup) |
92 | 110 | ||
93 | /* Disable interrupts and exceptions. */ | ||
94 | |||
95 | movi a0, LOCKLEVEL | ||
96 | wsr a0, ps | ||
97 | |||
98 | /* Start with a fresh windowbase and windowstart. */ | ||
99 | |||
100 | movi a1, 1 | ||
101 | movi a0, 0 | ||
102 | wsr a1, windowstart | ||
103 | wsr a0, windowbase | ||
104 | rsync | ||
105 | |||
106 | /* Set a0 to 0 for the remaining initialization. */ | 111 | /* Set a0 to 0 for the remaining initialization. */ |
107 | 112 | ||
108 | movi a0, 0 | 113 | movi a0, 0 |
@@ -154,17 +159,6 @@ ENTRY(_startup) | |||
154 | wsr a0, cpenable | 159 | wsr a0, cpenable |
155 | #endif | 160 | #endif |
156 | 161 | ||
157 | /* Set PS.INTLEVEL=LOCKLEVEL, PS.WOE=0, kernel stack, PS.EXCM=0 | ||
158 | * | ||
159 | * Note: PS.EXCM must be cleared before using any loop | ||
160 | * instructions; otherwise, they are silently disabled, and | ||
161 | * at most one iteration of the loop is executed. | ||
162 | */ | ||
163 | |||
164 | movi a1, LOCKLEVEL | ||
165 | wsr a1, ps | ||
166 | rsync | ||
167 | |||
168 | /* Initialize the caches. | 162 | /* Initialize the caches. |
169 | * a2, a3 are just working registers (clobbered). | 163 | * a2, a3 are just working registers (clobbered). |
170 | */ | 164 | */ |
@@ -182,6 +176,37 @@ ENTRY(_startup) | |||
182 | 176 | ||
183 | isync | 177 | isync |
184 | 178 | ||
179 | #ifdef CONFIG_HAVE_SMP | ||
180 | movi a2, CCON # MX External Register to Configure Cache | ||
181 | movi a3, 1 | ||
182 | wer a3, a2 | ||
183 | #endif | ||
184 | |||
185 | /* Setup stack and enable window exceptions (keep irqs disabled) */ | ||
186 | |||
187 | movi a1, start_info | ||
188 | l32i a1, a1, 0 | ||
189 | |||
190 | movi a2, (1 << PS_WOE_BIT) | LOCKLEVEL | ||
191 | # WOE=1, INTLEVEL=LOCKLEVEL, UM=0 | ||
192 | wsr a2, ps # (enable reg-windows; progmode stack) | ||
193 | rsync | ||
194 | |||
195 | /* Set up EXCSAVE[DEBUGLEVEL] to point to the Debug Exception Handler.*/ | ||
196 | |||
197 | movi a2, debug_exception | ||
198 | wsr a2, SREG_EXCSAVE + XCHAL_DEBUGLEVEL | ||
199 | |||
200 | #ifdef CONFIG_SMP | ||
201 | /* | ||
202 | * Notice that we assume with SMP that cores have PRID | ||
203 | * supported by the cores. | ||
204 | */ | ||
205 | rsr a2, prid | ||
206 | bnez a2, .Lboot_secondary | ||
207 | |||
208 | #endif /* CONFIG_SMP */ | ||
209 | |||
185 | /* Unpack data sections | 210 | /* Unpack data sections |
186 | * | 211 | * |
187 | * The linker script used to build the Linux kernel image | 212 | * The linker script used to build the Linux kernel image |
@@ -234,24 +259,7 @@ ENTRY(_startup) | |||
234 | ___invalidate_icache_all a2 a3 | 259 | ___invalidate_icache_all a2 a3 |
235 | isync | 260 | isync |
236 | 261 | ||
237 | /* Setup stack and enable window exceptions (keep irqs disabled) */ | 262 | movi a6, 0 |
238 | |||
239 | movi a1, init_thread_union | ||
240 | addi a1, a1, KERNEL_STACK_SIZE | ||
241 | |||
242 | movi a2, (1 << PS_WOE_BIT) | LOCKLEVEL | ||
243 | # WOE=1, INTLEVEL=LOCKLEVEL, UM=0 | ||
244 | wsr a2, ps # (enable reg-windows; progmode stack) | ||
245 | rsync | ||
246 | |||
247 | /* Set up EXCSAVE[DEBUGLEVEL] to point to the Debug Exception Handler.*/ | ||
248 | |||
249 | movi a2, debug_exception | ||
250 | wsr a2, SREG_EXCSAVE + XCHAL_DEBUGLEVEL | ||
251 | |||
252 | /* Set up EXCSAVE[1] to point to the exc_table. */ | ||
253 | |||
254 | movi a6, exc_table | ||
255 | xsr a6, excsave1 | 263 | xsr a6, excsave1 |
256 | 264 | ||
257 | /* init_arch kick-starts the linux kernel */ | 265 | /* init_arch kick-starts the linux kernel */ |
@@ -265,8 +273,93 @@ ENTRY(_startup) | |||
265 | should_never_return: | 273 | should_never_return: |
266 | j should_never_return | 274 | j should_never_return |
267 | 275 | ||
276 | #ifdef CONFIG_SMP | ||
277 | .Lboot_secondary: | ||
278 | |||
279 | movi a2, cpu_start_ccount | ||
280 | 1: | ||
281 | l32i a3, a2, 0 | ||
282 | beqi a3, 0, 1b | ||
283 | movi a3, 0 | ||
284 | s32i a3, a2, 0 | ||
285 | memw | ||
286 | 1: | ||
287 | l32i a3, a2, 0 | ||
288 | beqi a3, 0, 1b | ||
289 | wsr a3, ccount | ||
290 | movi a3, 0 | ||
291 | s32i a3, a2, 0 | ||
292 | memw | ||
293 | |||
294 | movi a6, 0 | ||
295 | wsr a6, excsave1 | ||
296 | |||
297 | movi a4, secondary_start_kernel | ||
298 | callx4 a4 | ||
299 | j should_never_return | ||
300 | |||
301 | #endif /* CONFIG_SMP */ | ||
302 | |||
268 | ENDPROC(_startup) | 303 | ENDPROC(_startup) |
269 | 304 | ||
305 | #ifdef CONFIG_HOTPLUG_CPU | ||
306 | |||
307 | ENTRY(cpu_restart) | ||
308 | |||
309 | #if XCHAL_DCACHE_IS_WRITEBACK | ||
310 | ___flush_invalidate_dcache_all a2 a3 | ||
311 | #else | ||
312 | ___invalidate_dcache_all a2 a3 | ||
313 | #endif | ||
314 | memw | ||
315 | movi a2, CCON # MX External Register to Configure Cache | ||
316 | movi a3, 0 | ||
317 | wer a3, a2 | ||
318 | extw | ||
319 | |||
320 | rsr a0, prid | ||
321 | neg a2, a0 | ||
322 | movi a3, cpu_start_id | ||
323 | s32i a2, a3, 0 | ||
324 | #if XCHAL_DCACHE_IS_WRITEBACK | ||
325 | dhwbi a3, 0 | ||
326 | #endif | ||
327 | 1: | ||
328 | l32i a2, a3, 0 | ||
329 | dhi a3, 0 | ||
330 | bne a2, a0, 1b | ||
331 | |||
332 | /* | ||
333 | * Initialize WB, WS, and clear PS.EXCM (to allow loop instructions). | ||
334 | * Set Interrupt Level just below XCHAL_DEBUGLEVEL to allow | ||
335 | * xt-gdb to single step via DEBUG exceptions received directly | ||
336 | * by ocd. | ||
337 | */ | ||
338 | movi a1, 1 | ||
339 | movi a0, 0 | ||
340 | wsr a1, windowstart | ||
341 | wsr a0, windowbase | ||
342 | rsync | ||
343 | |||
344 | movi a1, LOCKLEVEL | ||
345 | wsr a1, ps | ||
346 | rsync | ||
347 | |||
348 | j _startup | ||
349 | |||
350 | ENDPROC(cpu_restart) | ||
351 | |||
352 | #endif /* CONFIG_HOTPLUG_CPU */ | ||
353 | |||
354 | /* | ||
355 | * DATA section | ||
356 | */ | ||
357 | |||
358 | .section ".data.init.refok" | ||
359 | .align 4 | ||
360 | ENTRY(start_info) | ||
361 | .long init_thread_union + KERNEL_STACK_SIZE | ||
362 | |||
270 | /* | 363 | /* |
271 | * BSS section | 364 | * BSS section |
272 | */ | 365 | */ |
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c index 6f4f9749cff7..482868a2de6e 100644 --- a/arch/xtensa/kernel/irq.c +++ b/arch/xtensa/kernel/irq.c | |||
@@ -4,7 +4,7 @@ | |||
4 | * Xtensa built-in interrupt controller and some generic functions copied | 4 | * Xtensa built-in interrupt controller and some generic functions copied |
5 | * from i386. | 5 | * from i386. |
6 | * | 6 | * |
7 | * Copyright (C) 2002 - 2006 Tensilica, Inc. | 7 | * Copyright (C) 2002 - 2013 Tensilica, Inc. |
8 | * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar | 8 | * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar |
9 | * | 9 | * |
10 | * | 10 | * |
@@ -18,36 +18,27 @@ | |||
18 | #include <linux/interrupt.h> | 18 | #include <linux/interrupt.h> |
19 | #include <linux/irq.h> | 19 | #include <linux/irq.h> |
20 | #include <linux/kernel_stat.h> | 20 | #include <linux/kernel_stat.h> |
21 | #include <linux/irqchip.h> | ||
22 | #include <linux/irqchip/xtensa-mx.h> | ||
23 | #include <linux/irqchip/xtensa-pic.h> | ||
21 | #include <linux/irqdomain.h> | 24 | #include <linux/irqdomain.h> |
22 | #include <linux/of.h> | 25 | #include <linux/of.h> |
23 | 26 | ||
27 | #include <asm/mxregs.h> | ||
24 | #include <asm/uaccess.h> | 28 | #include <asm/uaccess.h> |
25 | #include <asm/platform.h> | 29 | #include <asm/platform.h> |
26 | 30 | ||
27 | static unsigned int cached_irq_mask; | ||
28 | |||
29 | atomic_t irq_err_count; | 31 | atomic_t irq_err_count; |
30 | 32 | ||
31 | static struct irq_domain *root_domain; | ||
32 | |||
33 | /* | ||
34 | * do_IRQ handles all normal device IRQ's (the special | ||
35 | * SMP cross-CPU interrupts have their own specific | ||
36 | * handlers). | ||
37 | */ | ||
38 | |||
39 | asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs) | 33 | asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs) |
40 | { | 34 | { |
41 | struct pt_regs *old_regs = set_irq_regs(regs); | 35 | int irq = irq_find_mapping(NULL, hwirq); |
42 | int irq = irq_find_mapping(root_domain, hwirq); | ||
43 | 36 | ||
44 | if (hwirq >= NR_IRQS) { | 37 | if (hwirq >= NR_IRQS) { |
45 | printk(KERN_EMERG "%s: cannot handle IRQ %d\n", | 38 | printk(KERN_EMERG "%s: cannot handle IRQ %d\n", |
46 | __func__, hwirq); | 39 | __func__, hwirq); |
47 | } | 40 | } |
48 | 41 | ||
49 | irq_enter(); | ||
50 | |||
51 | #ifdef CONFIG_DEBUG_STACKOVERFLOW | 42 | #ifdef CONFIG_DEBUG_STACKOVERFLOW |
52 | /* Debugging check for stack overflow: is there less than 1KB free? */ | 43 | /* Debugging check for stack overflow: is there less than 1KB free? */ |
53 | { | 44 | { |
@@ -62,95 +53,69 @@ asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs) | |||
62 | } | 53 | } |
63 | #endif | 54 | #endif |
64 | generic_handle_irq(irq); | 55 | generic_handle_irq(irq); |
65 | |||
66 | irq_exit(); | ||
67 | set_irq_regs(old_regs); | ||
68 | } | 56 | } |
69 | 57 | ||
70 | int arch_show_interrupts(struct seq_file *p, int prec) | 58 | int arch_show_interrupts(struct seq_file *p, int prec) |
71 | { | 59 | { |
60 | #ifdef CONFIG_SMP | ||
61 | show_ipi_list(p, prec); | ||
62 | #endif | ||
72 | seq_printf(p, "%*s: ", prec, "ERR"); | 63 | seq_printf(p, "%*s: ", prec, "ERR"); |
73 | seq_printf(p, "%10u\n", atomic_read(&irq_err_count)); | 64 | seq_printf(p, "%10u\n", atomic_read(&irq_err_count)); |
74 | return 0; | 65 | return 0; |
75 | } | 66 | } |
76 | 67 | ||
77 | static void xtensa_irq_mask(struct irq_data *d) | 68 | int xtensa_irq_domain_xlate(const u32 *intspec, unsigned int intsize, |
78 | { | 69 | unsigned long int_irq, unsigned long ext_irq, |
79 | cached_irq_mask &= ~(1 << d->hwirq); | 70 | unsigned long *out_hwirq, unsigned int *out_type) |
80 | set_sr (cached_irq_mask, intenable); | ||
81 | } | ||
82 | |||
83 | static void xtensa_irq_unmask(struct irq_data *d) | ||
84 | { | ||
85 | cached_irq_mask |= 1 << d->hwirq; | ||
86 | set_sr (cached_irq_mask, intenable); | ||
87 | } | ||
88 | |||
89 | static void xtensa_irq_enable(struct irq_data *d) | ||
90 | { | ||
91 | variant_irq_enable(d->hwirq); | ||
92 | xtensa_irq_unmask(d); | ||
93 | } | ||
94 | |||
95 | static void xtensa_irq_disable(struct irq_data *d) | ||
96 | { | ||
97 | xtensa_irq_mask(d); | ||
98 | variant_irq_disable(d->hwirq); | ||
99 | } | ||
100 | |||
101 | static void xtensa_irq_ack(struct irq_data *d) | ||
102 | { | ||
103 | set_sr(1 << d->hwirq, intclear); | ||
104 | } | ||
105 | |||
106 | static int xtensa_irq_retrigger(struct irq_data *d) | ||
107 | { | 71 | { |
108 | set_sr(1 << d->hwirq, intset); | 72 | if (WARN_ON(intsize < 1 || intsize > 2)) |
109 | return 1; | 73 | return -EINVAL; |
74 | if (intsize == 2 && intspec[1] == 1) { | ||
75 | int_irq = xtensa_map_ext_irq(ext_irq); | ||
76 | if (int_irq < XCHAL_NUM_INTERRUPTS) | ||
77 | *out_hwirq = int_irq; | ||
78 | else | ||
79 | return -EINVAL; | ||
80 | } else { | ||
81 | *out_hwirq = int_irq; | ||
82 | } | ||
83 | *out_type = IRQ_TYPE_NONE; | ||
84 | return 0; | ||
110 | } | 85 | } |
111 | 86 | ||
112 | static struct irq_chip xtensa_irq_chip = { | 87 | int xtensa_irq_map(struct irq_domain *d, unsigned int irq, |
113 | .name = "xtensa", | ||
114 | .irq_enable = xtensa_irq_enable, | ||
115 | .irq_disable = xtensa_irq_disable, | ||
116 | .irq_mask = xtensa_irq_mask, | ||
117 | .irq_unmask = xtensa_irq_unmask, | ||
118 | .irq_ack = xtensa_irq_ack, | ||
119 | .irq_retrigger = xtensa_irq_retrigger, | ||
120 | }; | ||
121 | |||
122 | static int xtensa_irq_map(struct irq_domain *d, unsigned int irq, | ||
123 | irq_hw_number_t hw) | 88 | irq_hw_number_t hw) |
124 | { | 89 | { |
90 | struct irq_chip *irq_chip = d->host_data; | ||
125 | u32 mask = 1 << hw; | 91 | u32 mask = 1 << hw; |
126 | 92 | ||
127 | if (mask & XCHAL_INTTYPE_MASK_SOFTWARE) { | 93 | if (mask & XCHAL_INTTYPE_MASK_SOFTWARE) { |
128 | irq_set_chip_and_handler_name(irq, &xtensa_irq_chip, | 94 | irq_set_chip_and_handler_name(irq, irq_chip, |
129 | handle_simple_irq, "level"); | 95 | handle_simple_irq, "level"); |
130 | irq_set_status_flags(irq, IRQ_LEVEL); | 96 | irq_set_status_flags(irq, IRQ_LEVEL); |
131 | } else if (mask & XCHAL_INTTYPE_MASK_EXTERN_EDGE) { | 97 | } else if (mask & XCHAL_INTTYPE_MASK_EXTERN_EDGE) { |
132 | irq_set_chip_and_handler_name(irq, &xtensa_irq_chip, | 98 | irq_set_chip_and_handler_name(irq, irq_chip, |
133 | handle_edge_irq, "edge"); | 99 | handle_edge_irq, "edge"); |
134 | irq_clear_status_flags(irq, IRQ_LEVEL); | 100 | irq_clear_status_flags(irq, IRQ_LEVEL); |
135 | } else if (mask & XCHAL_INTTYPE_MASK_EXTERN_LEVEL) { | 101 | } else if (mask & XCHAL_INTTYPE_MASK_EXTERN_LEVEL) { |
136 | irq_set_chip_and_handler_name(irq, &xtensa_irq_chip, | 102 | irq_set_chip_and_handler_name(irq, irq_chip, |
137 | handle_level_irq, "level"); | 103 | handle_level_irq, "level"); |
138 | irq_set_status_flags(irq, IRQ_LEVEL); | 104 | irq_set_status_flags(irq, IRQ_LEVEL); |
139 | } else if (mask & XCHAL_INTTYPE_MASK_TIMER) { | 105 | } else if (mask & XCHAL_INTTYPE_MASK_TIMER) { |
140 | irq_set_chip_and_handler_name(irq, &xtensa_irq_chip, | 106 | irq_set_chip_and_handler_name(irq, irq_chip, |
141 | handle_edge_irq, "edge"); | 107 | handle_percpu_irq, "timer"); |
142 | irq_clear_status_flags(irq, IRQ_LEVEL); | 108 | irq_clear_status_flags(irq, IRQ_LEVEL); |
143 | } else {/* XCHAL_INTTYPE_MASK_WRITE_ERROR */ | 109 | } else {/* XCHAL_INTTYPE_MASK_WRITE_ERROR */ |
144 | /* XCHAL_INTTYPE_MASK_NMI */ | 110 | /* XCHAL_INTTYPE_MASK_NMI */ |
145 | 111 | irq_set_chip_and_handler_name(irq, irq_chip, | |
146 | irq_set_chip_and_handler_name(irq, &xtensa_irq_chip, | ||
147 | handle_level_irq, "level"); | 112 | handle_level_irq, "level"); |
148 | irq_set_status_flags(irq, IRQ_LEVEL); | 113 | irq_set_status_flags(irq, IRQ_LEVEL); |
149 | } | 114 | } |
150 | return 0; | 115 | return 0; |
151 | } | 116 | } |
152 | 117 | ||
153 | static unsigned map_ext_irq(unsigned ext_irq) | 118 | unsigned xtensa_map_ext_irq(unsigned ext_irq) |
154 | { | 119 | { |
155 | unsigned mask = XCHAL_INTTYPE_MASK_EXTERN_EDGE | | 120 | unsigned mask = XCHAL_INTTYPE_MASK_EXTERN_EDGE | |
156 | XCHAL_INTTYPE_MASK_EXTERN_LEVEL; | 121 | XCHAL_INTTYPE_MASK_EXTERN_LEVEL; |
@@ -163,55 +128,77 @@ static unsigned map_ext_irq(unsigned ext_irq) | |||
163 | return XCHAL_NUM_INTERRUPTS; | 128 | return XCHAL_NUM_INTERRUPTS; |
164 | } | 129 | } |
165 | 130 | ||
166 | /* | 131 | unsigned xtensa_get_ext_irq_no(unsigned irq) |
167 | * Device Tree IRQ specifier translation function which works with one or | ||
168 | * two cell bindings. First cell value maps directly to the hwirq number. | ||
169 | * Second cell if present specifies whether hwirq number is external (1) or | ||
170 | * internal (0). | ||
171 | */ | ||
172 | int xtensa_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr, | ||
173 | const u32 *intspec, unsigned int intsize, | ||
174 | unsigned long *out_hwirq, unsigned int *out_type) | ||
175 | { | 132 | { |
176 | if (WARN_ON(intsize < 1 || intsize > 2)) | 133 | unsigned mask = (XCHAL_INTTYPE_MASK_EXTERN_EDGE | |
177 | return -EINVAL; | 134 | XCHAL_INTTYPE_MASK_EXTERN_LEVEL) & |
178 | if (intsize == 2 && intspec[1] == 1) { | 135 | ((1u << irq) - 1); |
179 | unsigned int_irq = map_ext_irq(intspec[0]); | 136 | return hweight32(mask); |
180 | if (int_irq < XCHAL_NUM_INTERRUPTS) | ||
181 | *out_hwirq = int_irq; | ||
182 | else | ||
183 | return -EINVAL; | ||
184 | } else { | ||
185 | *out_hwirq = intspec[0]; | ||
186 | } | ||
187 | *out_type = IRQ_TYPE_NONE; | ||
188 | return 0; | ||
189 | } | 137 | } |
190 | 138 | ||
191 | static const struct irq_domain_ops xtensa_irq_domain_ops = { | ||
192 | .xlate = xtensa_irq_domain_xlate, | ||
193 | .map = xtensa_irq_map, | ||
194 | }; | ||
195 | |||
196 | void __init init_IRQ(void) | 139 | void __init init_IRQ(void) |
197 | { | 140 | { |
198 | struct device_node *intc = NULL; | ||
199 | |||
200 | cached_irq_mask = 0; | ||
201 | set_sr(~0, intclear); | ||
202 | |||
203 | #ifdef CONFIG_OF | 141 | #ifdef CONFIG_OF |
204 | /* The interrupt controller device node is mandatory */ | 142 | irqchip_init(); |
205 | intc = of_find_compatible_node(NULL, NULL, "xtensa,pic"); | 143 | #else |
206 | BUG_ON(!intc); | 144 | #ifdef CONFIG_HAVE_SMP |
207 | 145 | xtensa_mx_init_legacy(NULL); | |
208 | root_domain = irq_domain_add_linear(intc, NR_IRQS, | ||
209 | &xtensa_irq_domain_ops, NULL); | ||
210 | #else | 146 | #else |
211 | root_domain = irq_domain_add_legacy(intc, NR_IRQS, 0, 0, | 147 | xtensa_pic_init_legacy(NULL); |
212 | &xtensa_irq_domain_ops, NULL); | 148 | #endif |
213 | #endif | 149 | #endif |
214 | irq_set_default_host(root_domain); | ||
215 | 150 | ||
151 | #ifdef CONFIG_SMP | ||
152 | ipi_init(); | ||
153 | #endif | ||
216 | variant_init_irq(); | 154 | variant_init_irq(); |
217 | } | 155 | } |
156 | |||
157 | #ifdef CONFIG_HOTPLUG_CPU | ||
158 | static void route_irq(struct irq_data *data, unsigned int irq, unsigned int cpu) | ||
159 | { | ||
160 | struct irq_desc *desc = irq_to_desc(irq); | ||
161 | struct irq_chip *chip = irq_data_get_irq_chip(data); | ||
162 | unsigned long flags; | ||
163 | |||
164 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
165 | if (chip->irq_set_affinity) | ||
166 | chip->irq_set_affinity(data, cpumask_of(cpu), false); | ||
167 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
168 | } | ||
169 | |||
170 | /* | ||
171 | * The CPU has been marked offline. Migrate IRQs off this CPU. If | ||
172 | * the affinity settings do not allow other CPUs, force them onto any | ||
173 | * available CPU. | ||
174 | */ | ||
175 | void migrate_irqs(void) | ||
176 | { | ||
177 | unsigned int i, cpu = smp_processor_id(); | ||
178 | struct irq_desc *desc; | ||
179 | |||
180 | for_each_irq_desc(i, desc) { | ||
181 | struct irq_data *data = irq_desc_get_irq_data(desc); | ||
182 | unsigned int newcpu; | ||
183 | |||
184 | if (irqd_is_per_cpu(data)) | ||
185 | continue; | ||
186 | |||
187 | if (!cpumask_test_cpu(cpu, data->affinity)) | ||
188 | continue; | ||
189 | |||
190 | newcpu = cpumask_any_and(data->affinity, cpu_online_mask); | ||
191 | |||
192 | if (newcpu >= nr_cpu_ids) { | ||
193 | pr_info_ratelimited("IRQ%u no longer affine to CPU%u\n", | ||
194 | i, cpu); | ||
195 | |||
196 | cpumask_setall(data->affinity); | ||
197 | newcpu = cpumask_any_and(data->affinity, | ||
198 | cpu_online_mask); | ||
199 | } | ||
200 | |||
201 | route_irq(data, i, newcpu); | ||
202 | } | ||
203 | } | ||
204 | #endif /* CONFIG_HOTPLUG_CPU */ | ||
diff --git a/arch/xtensa/kernel/mxhead.S b/arch/xtensa/kernel/mxhead.S new file mode 100644 index 000000000000..77a161a112c5 --- /dev/null +++ b/arch/xtensa/kernel/mxhead.S | |||
@@ -0,0 +1,85 @@ | |||
1 | /* | ||
2 | * Xtensa Secondary Processors startup code. | ||
3 | * | ||
4 | * This file is subject to the terms and conditions of the GNU General Public | ||
5 | * License. See the file "COPYING" in the main directory of this archive | ||
6 | * for more details. | ||
7 | * | ||
8 | * Copyright (C) 2001 - 2013 Tensilica Inc. | ||
9 | * | ||
10 | * Joe Taylor <joe@tensilica.com> | ||
11 | * Chris Zankel <chris@zankel.net> | ||
12 | * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca> | ||
13 | * Pete Delaney <piet@tensilica.com> | ||
14 | */ | ||
15 | |||
16 | #include <linux/linkage.h> | ||
17 | |||
18 | #include <asm/cacheasm.h> | ||
19 | #include <asm/initialize_mmu.h> | ||
20 | #include <asm/mxregs.h> | ||
21 | #include <asm/regs.h> | ||
22 | |||
23 | |||
24 | .section .SecondaryResetVector.text, "ax" | ||
25 | |||
26 | |||
27 | ENTRY(_SecondaryResetVector) | ||
28 | _j _SetupOCD | ||
29 | |||
30 | .begin no-absolute-literals | ||
31 | .literal_position | ||
32 | |||
33 | _SetupOCD: | ||
34 | /* | ||
35 | * Initialize WB, WS, and clear PS.EXCM (to allow loop instructions). | ||
36 | * Set Interrupt Level just below XCHAL_DEBUGLEVEL to allow | ||
37 | * xt-gdb to single step via DEBUG exceptions received directly | ||
38 | * by ocd. | ||
39 | */ | ||
40 | movi a1, 1 | ||
41 | movi a0, 0 | ||
42 | wsr a1, windowstart | ||
43 | wsr a0, windowbase | ||
44 | rsync | ||
45 | |||
46 | movi a1, LOCKLEVEL | ||
47 | wsr a1, ps | ||
48 | rsync | ||
49 | |||
50 | _SetupMMU: | ||
51 | Offset = _SetupMMU - _SecondaryResetVector | ||
52 | |||
53 | #ifdef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX | ||
54 | initialize_mmu | ||
55 | #endif | ||
56 | |||
57 | /* | ||
58 | * Start Secondary Processors with NULL pointer to boot params. | ||
59 | */ | ||
60 | movi a2, 0 # a2 == NULL | ||
61 | movi a3, _startup | ||
62 | jx a3 | ||
63 | |||
64 | .end no-absolute-literals | ||
65 | |||
66 | |||
67 | .section .SecondaryResetVector.remapped_text, "ax" | ||
68 | .global _RemappedSecondaryResetVector | ||
69 | |||
70 | .org 0 # Need to do org before literals | ||
71 | |||
72 | _RemappedSecondaryResetVector: | ||
73 | .begin no-absolute-literals | ||
74 | .literal_position | ||
75 | |||
76 | _j _RemappedSetupMMU | ||
77 | . = _RemappedSecondaryResetVector + Offset | ||
78 | |||
79 | _RemappedSetupMMU: | ||
80 | |||
81 | #ifdef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX | ||
82 | initialize_mmu | ||
83 | #endif | ||
84 | |||
85 | .end no-absolute-literals | ||
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c index 6e2b6638122d..7d12af1317f1 100644 --- a/arch/xtensa/kernel/setup.c +++ b/arch/xtensa/kernel/setup.c | |||
@@ -21,6 +21,8 @@ | |||
21 | #include <linux/screen_info.h> | 21 | #include <linux/screen_info.h> |
22 | #include <linux/bootmem.h> | 22 | #include <linux/bootmem.h> |
23 | #include <linux/kernel.h> | 23 | #include <linux/kernel.h> |
24 | #include <linux/percpu.h> | ||
25 | #include <linux/cpu.h> | ||
24 | #include <linux/of_fdt.h> | 26 | #include <linux/of_fdt.h> |
25 | #include <linux/of_platform.h> | 27 | #include <linux/of_platform.h> |
26 | 28 | ||
@@ -37,6 +39,7 @@ | |||
37 | #endif | 39 | #endif |
38 | 40 | ||
39 | #include <asm/bootparam.h> | 41 | #include <asm/bootparam.h> |
42 | #include <asm/mmu_context.h> | ||
40 | #include <asm/pgtable.h> | 43 | #include <asm/pgtable.h> |
41 | #include <asm/processor.h> | 44 | #include <asm/processor.h> |
42 | #include <asm/timex.h> | 45 | #include <asm/timex.h> |
@@ -45,6 +48,7 @@ | |||
45 | #include <asm/setup.h> | 48 | #include <asm/setup.h> |
46 | #include <asm/param.h> | 49 | #include <asm/param.h> |
47 | #include <asm/traps.h> | 50 | #include <asm/traps.h> |
51 | #include <asm/smp.h> | ||
48 | 52 | ||
49 | #include <platform/hardware.h> | 53 | #include <platform/hardware.h> |
50 | 54 | ||
@@ -85,12 +89,6 @@ static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE; | |||
85 | 89 | ||
86 | sysmem_info_t __initdata sysmem; | 90 | sysmem_info_t __initdata sysmem; |
87 | 91 | ||
88 | #ifdef CONFIG_MMU | ||
89 | extern void init_mmu(void); | ||
90 | #else | ||
91 | static inline void init_mmu(void) { } | ||
92 | #endif | ||
93 | |||
94 | extern int mem_reserve(unsigned long, unsigned long, int); | 92 | extern int mem_reserve(unsigned long, unsigned long, int); |
95 | extern void bootmem_init(void); | 93 | extern void bootmem_init(void); |
96 | extern void zones_init(void); | 94 | extern void zones_init(void); |
@@ -214,6 +212,42 @@ static int __init parse_bootparam(const bp_tag_t* tag) | |||
214 | #ifdef CONFIG_OF | 212 | #ifdef CONFIG_OF |
215 | bool __initdata dt_memory_scan = false; | 213 | bool __initdata dt_memory_scan = false; |
216 | 214 | ||
215 | #if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY | ||
216 | unsigned long xtensa_kio_paddr = XCHAL_KIO_DEFAULT_PADDR; | ||
217 | EXPORT_SYMBOL(xtensa_kio_paddr); | ||
218 | |||
219 | static int __init xtensa_dt_io_area(unsigned long node, const char *uname, | ||
220 | int depth, void *data) | ||
221 | { | ||
222 | const __be32 *ranges; | ||
223 | unsigned long len; | ||
224 | |||
225 | if (depth > 1) | ||
226 | return 0; | ||
227 | |||
228 | if (!of_flat_dt_is_compatible(node, "simple-bus")) | ||
229 | return 0; | ||
230 | |||
231 | ranges = of_get_flat_dt_prop(node, "ranges", &len); | ||
232 | if (!ranges) | ||
233 | return 1; | ||
234 | if (len == 0) | ||
235 | return 1; | ||
236 | |||
237 | xtensa_kio_paddr = of_read_ulong(ranges+1, 1); | ||
238 | /* round down to nearest 256MB boundary */ | ||
239 | xtensa_kio_paddr &= 0xf0000000; | ||
240 | |||
241 | return 1; | ||
242 | } | ||
243 | #else | ||
244 | static int __init xtensa_dt_io_area(unsigned long node, const char *uname, | ||
245 | int depth, void *data) | ||
246 | { | ||
247 | return 1; | ||
248 | } | ||
249 | #endif | ||
250 | |||
217 | void __init early_init_dt_add_memory_arch(u64 base, u64 size) | 251 | void __init early_init_dt_add_memory_arch(u64 base, u64 size) |
218 | { | 252 | { |
219 | if (!dt_memory_scan) | 253 | if (!dt_memory_scan) |
@@ -234,6 +268,7 @@ void __init early_init_devtree(void *params) | |||
234 | dt_memory_scan = true; | 268 | dt_memory_scan = true; |
235 | 269 | ||
236 | early_init_dt_scan(params); | 270 | early_init_dt_scan(params); |
271 | of_scan_flat_dt(xtensa_dt_io_area, NULL); | ||
237 | 272 | ||
238 | if (!command_line[0]) | 273 | if (!command_line[0]) |
239 | strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE); | 274 | strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE); |
@@ -241,7 +276,7 @@ void __init early_init_devtree(void *params) | |||
241 | 276 | ||
242 | static int __init xtensa_device_probe(void) | 277 | static int __init xtensa_device_probe(void) |
243 | { | 278 | { |
244 | of_platform_populate(NULL, NULL, NULL, NULL); | 279 | of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); |
245 | return 0; | 280 | return 0; |
246 | } | 281 | } |
247 | 282 | ||
@@ -354,7 +389,8 @@ static inline int probed_compare_swap(int *v, int cmp, int set) | |||
354 | 389 | ||
355 | /* Handle probed exception */ | 390 | /* Handle probed exception */ |
356 | 391 | ||
357 | void __init do_probed_exception(struct pt_regs *regs, unsigned long exccause) | 392 | static void __init do_probed_exception(struct pt_regs *regs, |
393 | unsigned long exccause) | ||
358 | { | 394 | { |
359 | if (regs->pc == rcw_probe_pc) { /* exception on s32c1i ? */ | 395 | if (regs->pc == rcw_probe_pc) { /* exception on s32c1i ? */ |
360 | regs->pc += 3; /* skip the s32c1i instruction */ | 396 | regs->pc += 3; /* skip the s32c1i instruction */ |
@@ -366,7 +402,7 @@ void __init do_probed_exception(struct pt_regs *regs, unsigned long exccause) | |||
366 | 402 | ||
367 | /* Simple test of S32C1I (soc bringup assist) */ | 403 | /* Simple test of S32C1I (soc bringup assist) */ |
368 | 404 | ||
369 | void __init check_s32c1i(void) | 405 | static int __init check_s32c1i(void) |
370 | { | 406 | { |
371 | int n, cause1, cause2; | 407 | int n, cause1, cause2; |
372 | void *handbus, *handdata, *handaddr; /* temporarily saved handlers */ | 408 | void *handbus, *handdata, *handaddr; /* temporarily saved handlers */ |
@@ -421,24 +457,21 @@ void __init check_s32c1i(void) | |||
421 | trap_set_handler(EXCCAUSE_LOAD_STORE_ERROR, handbus); | 457 | trap_set_handler(EXCCAUSE_LOAD_STORE_ERROR, handbus); |
422 | trap_set_handler(EXCCAUSE_LOAD_STORE_DATA_ERROR, handdata); | 458 | trap_set_handler(EXCCAUSE_LOAD_STORE_DATA_ERROR, handdata); |
423 | trap_set_handler(EXCCAUSE_LOAD_STORE_ADDR_ERROR, handaddr); | 459 | trap_set_handler(EXCCAUSE_LOAD_STORE_ADDR_ERROR, handaddr); |
460 | return 0; | ||
424 | } | 461 | } |
425 | 462 | ||
426 | #else /* XCHAL_HAVE_S32C1I */ | 463 | #else /* XCHAL_HAVE_S32C1I */ |
427 | 464 | ||
428 | /* This condition should not occur with a commercially deployed processor. | 465 | /* This condition should not occur with a commercially deployed processor. |
429 | Display reminder for early engr test or demo chips / FPGA bitstreams */ | 466 | Display reminder for early engr test or demo chips / FPGA bitstreams */ |
430 | void __init check_s32c1i(void) | 467 | static int __init check_s32c1i(void) |
431 | { | 468 | { |
432 | pr_warn("Processor configuration lacks atomic compare-and-swap support!\n"); | 469 | pr_warn("Processor configuration lacks atomic compare-and-swap support!\n"); |
470 | return 0; | ||
433 | } | 471 | } |
434 | 472 | ||
435 | #endif /* XCHAL_HAVE_S32C1I */ | 473 | #endif /* XCHAL_HAVE_S32C1I */ |
436 | #else /* CONFIG_S32C1I_SELFTEST */ | 474 | early_initcall(check_s32c1i); |
437 | |||
438 | void __init check_s32c1i(void) | ||
439 | { | ||
440 | } | ||
441 | |||
442 | #endif /* CONFIG_S32C1I_SELFTEST */ | 475 | #endif /* CONFIG_S32C1I_SELFTEST */ |
443 | 476 | ||
444 | 477 | ||
@@ -447,8 +480,6 @@ void __init setup_arch(char **cmdline_p) | |||
447 | strlcpy(boot_command_line, command_line, COMMAND_LINE_SIZE); | 480 | strlcpy(boot_command_line, command_line, COMMAND_LINE_SIZE); |
448 | *cmdline_p = command_line; | 481 | *cmdline_p = command_line; |
449 | 482 | ||
450 | check_s32c1i(); | ||
451 | |||
452 | /* Reserve some memory regions */ | 483 | /* Reserve some memory regions */ |
453 | 484 | ||
454 | #ifdef CONFIG_BLK_DEV_INITRD | 485 | #ifdef CONFIG_BLK_DEV_INITRD |
@@ -505,6 +536,10 @@ void __init setup_arch(char **cmdline_p) | |||
505 | 536 | ||
506 | platform_setup(cmdline_p); | 537 | platform_setup(cmdline_p); |
507 | 538 | ||
539 | #ifdef CONFIG_SMP | ||
540 | smp_init_cpus(); | ||
541 | #endif | ||
542 | |||
508 | paging_init(); | 543 | paging_init(); |
509 | zones_init(); | 544 | zones_init(); |
510 | 545 | ||
@@ -521,6 +556,22 @@ void __init setup_arch(char **cmdline_p) | |||
521 | #endif | 556 | #endif |
522 | } | 557 | } |
523 | 558 | ||
559 | static DEFINE_PER_CPU(struct cpu, cpu_data); | ||
560 | |||
561 | static int __init topology_init(void) | ||
562 | { | ||
563 | int i; | ||
564 | |||
565 | for_each_possible_cpu(i) { | ||
566 | struct cpu *cpu = &per_cpu(cpu_data, i); | ||
567 | cpu->hotpluggable = !!i; | ||
568 | register_cpu(cpu, i); | ||
569 | } | ||
570 | |||
571 | return 0; | ||
572 | } | ||
573 | subsys_initcall(topology_init); | ||
574 | |||
524 | void machine_restart(char * cmd) | 575 | void machine_restart(char * cmd) |
525 | { | 576 | { |
526 | platform_restart(); | 577 | platform_restart(); |
@@ -546,21 +597,27 @@ void machine_power_off(void) | |||
546 | static int | 597 | static int |
547 | c_show(struct seq_file *f, void *slot) | 598 | c_show(struct seq_file *f, void *slot) |
548 | { | 599 | { |
600 | char buf[NR_CPUS * 5]; | ||
601 | |||
602 | cpulist_scnprintf(buf, sizeof(buf), cpu_online_mask); | ||
549 | /* high-level stuff */ | 603 | /* high-level stuff */ |
550 | seq_printf(f,"processor\t: 0\n" | 604 | seq_printf(f, "CPU count\t: %u\n" |
551 | "vendor_id\t: Tensilica\n" | 605 | "CPU list\t: %s\n" |
552 | "model\t\t: Xtensa " XCHAL_HW_VERSION_NAME "\n" | 606 | "vendor_id\t: Tensilica\n" |
553 | "core ID\t\t: " XCHAL_CORE_ID "\n" | 607 | "model\t\t: Xtensa " XCHAL_HW_VERSION_NAME "\n" |
554 | "build ID\t: 0x%x\n" | 608 | "core ID\t\t: " XCHAL_CORE_ID "\n" |
555 | "byte order\t: %s\n" | 609 | "build ID\t: 0x%x\n" |
556 | "cpu MHz\t\t: %lu.%02lu\n" | 610 | "byte order\t: %s\n" |
557 | "bogomips\t: %lu.%02lu\n", | 611 | "cpu MHz\t\t: %lu.%02lu\n" |
558 | XCHAL_BUILD_UNIQUE_ID, | 612 | "bogomips\t: %lu.%02lu\n", |
559 | XCHAL_HAVE_BE ? "big" : "little", | 613 | num_online_cpus(), |
560 | ccount_freq/1000000, | 614 | buf, |
561 | (ccount_freq/10000) % 100, | 615 | XCHAL_BUILD_UNIQUE_ID, |
562 | loops_per_jiffy/(500000/HZ), | 616 | XCHAL_HAVE_BE ? "big" : "little", |
563 | (loops_per_jiffy/(5000/HZ)) % 100); | 617 | ccount_freq/1000000, |
618 | (ccount_freq/10000) % 100, | ||
619 | loops_per_jiffy/(500000/HZ), | ||
620 | (loops_per_jiffy/(5000/HZ)) % 100); | ||
564 | 621 | ||
565 | seq_printf(f,"flags\t\t: " | 622 | seq_printf(f,"flags\t\t: " |
566 | #if XCHAL_HAVE_NMI | 623 | #if XCHAL_HAVE_NMI |
@@ -672,7 +729,7 @@ c_show(struct seq_file *f, void *slot) | |||
672 | static void * | 729 | static void * |
673 | c_start(struct seq_file *f, loff_t *pos) | 730 | c_start(struct seq_file *f, loff_t *pos) |
674 | { | 731 | { |
675 | return (void *) ((*pos == 0) ? (void *)1 : NULL); | 732 | return (*pos == 0) ? (void *)1 : NULL; |
676 | } | 733 | } |
677 | 734 | ||
678 | static void * | 735 | static void * |
@@ -688,10 +745,10 @@ c_stop(struct seq_file *f, void *v) | |||
688 | 745 | ||
689 | const struct seq_operations cpuinfo_op = | 746 | const struct seq_operations cpuinfo_op = |
690 | { | 747 | { |
691 | start: c_start, | 748 | .start = c_start, |
692 | next: c_next, | 749 | .next = c_next, |
693 | stop: c_stop, | 750 | .stop = c_stop, |
694 | show: c_show | 751 | .show = c_show, |
695 | }; | 752 | }; |
696 | 753 | ||
697 | #endif /* CONFIG_PROC_FS */ | 754 | #endif /* CONFIG_PROC_FS */ |
diff --git a/arch/xtensa/kernel/smp.c b/arch/xtensa/kernel/smp.c new file mode 100644 index 000000000000..aa8bd8717927 --- /dev/null +++ b/arch/xtensa/kernel/smp.c | |||
@@ -0,0 +1,592 @@ | |||
1 | /* | ||
2 | * Xtensa SMP support functions. | ||
3 | * | ||
4 | * This file is subject to the terms and conditions of the GNU General Public | ||
5 | * License. See the file "COPYING" in the main directory of this archive | ||
6 | * for more details. | ||
7 | * | ||
8 | * Copyright (C) 2008 - 2013 Tensilica Inc. | ||
9 | * | ||
10 | * Chris Zankel <chris@zankel.net> | ||
11 | * Joe Taylor <joe@tensilica.com> | ||
12 | * Pete Delaney <piet@tensilica.com | ||
13 | */ | ||
14 | |||
15 | #include <linux/cpu.h> | ||
16 | #include <linux/cpumask.h> | ||
17 | #include <linux/delay.h> | ||
18 | #include <linux/init.h> | ||
19 | #include <linux/interrupt.h> | ||
20 | #include <linux/irqdomain.h> | ||
21 | #include <linux/irq.h> | ||
22 | #include <linux/kdebug.h> | ||
23 | #include <linux/module.h> | ||
24 | #include <linux/reboot.h> | ||
25 | #include <linux/seq_file.h> | ||
26 | #include <linux/smp.h> | ||
27 | #include <linux/thread_info.h> | ||
28 | |||
29 | #include <asm/cacheflush.h> | ||
30 | #include <asm/kdebug.h> | ||
31 | #include <asm/mmu_context.h> | ||
32 | #include <asm/mxregs.h> | ||
33 | #include <asm/platform.h> | ||
34 | #include <asm/tlbflush.h> | ||
35 | #include <asm/traps.h> | ||
36 | |||
37 | #ifdef CONFIG_SMP | ||
38 | # if XCHAL_HAVE_S32C1I == 0 | ||
39 | # error "The S32C1I option is required for SMP." | ||
40 | # endif | ||
41 | #endif | ||
42 | |||
43 | static void system_invalidate_dcache_range(unsigned long start, | ||
44 | unsigned long size); | ||
45 | static void system_flush_invalidate_dcache_range(unsigned long start, | ||
46 | unsigned long size); | ||
47 | |||
48 | /* IPI (Inter Process Interrupt) */ | ||
49 | |||
50 | #define IPI_IRQ 0 | ||
51 | |||
52 | static irqreturn_t ipi_interrupt(int irq, void *dev_id); | ||
53 | static struct irqaction ipi_irqaction = { | ||
54 | .handler = ipi_interrupt, | ||
55 | .flags = IRQF_PERCPU, | ||
56 | .name = "ipi", | ||
57 | }; | ||
58 | |||
59 | void ipi_init(void) | ||
60 | { | ||
61 | unsigned irq = irq_create_mapping(NULL, IPI_IRQ); | ||
62 | setup_irq(irq, &ipi_irqaction); | ||
63 | } | ||
64 | |||
65 | static inline unsigned int get_core_count(void) | ||
66 | { | ||
67 | /* Bits 18..21 of SYSCFGID contain the core count minus 1. */ | ||
68 | unsigned int syscfgid = get_er(SYSCFGID); | ||
69 | return ((syscfgid >> 18) & 0xf) + 1; | ||
70 | } | ||
71 | |||
72 | static inline int get_core_id(void) | ||
73 | { | ||
74 | /* Bits 0...18 of SYSCFGID contain the core id */ | ||
75 | unsigned int core_id = get_er(SYSCFGID); | ||
76 | return core_id & 0x3fff; | ||
77 | } | ||
78 | |||
79 | void __init smp_prepare_cpus(unsigned int max_cpus) | ||
80 | { | ||
81 | unsigned i; | ||
82 | |||
83 | for (i = 0; i < max_cpus; ++i) | ||
84 | set_cpu_present(i, true); | ||
85 | } | ||
86 | |||
87 | void __init smp_init_cpus(void) | ||
88 | { | ||
89 | unsigned i; | ||
90 | unsigned int ncpus = get_core_count(); | ||
91 | unsigned int core_id = get_core_id(); | ||
92 | |||
93 | pr_info("%s: Core Count = %d\n", __func__, ncpus); | ||
94 | pr_info("%s: Core Id = %d\n", __func__, core_id); | ||
95 | |||
96 | for (i = 0; i < ncpus; ++i) | ||
97 | set_cpu_possible(i, true); | ||
98 | } | ||
99 | |||
100 | void __init smp_prepare_boot_cpu(void) | ||
101 | { | ||
102 | unsigned int cpu = smp_processor_id(); | ||
103 | BUG_ON(cpu != 0); | ||
104 | cpu_asid_cache(cpu) = ASID_USER_FIRST; | ||
105 | } | ||
106 | |||
107 | void __init smp_cpus_done(unsigned int max_cpus) | ||
108 | { | ||
109 | } | ||
110 | |||
111 | static int boot_secondary_processors = 1; /* Set with xt-gdb via .xt-gdb */ | ||
112 | static DECLARE_COMPLETION(cpu_running); | ||
113 | |||
114 | void secondary_start_kernel(void) | ||
115 | { | ||
116 | struct mm_struct *mm = &init_mm; | ||
117 | unsigned int cpu = smp_processor_id(); | ||
118 | |||
119 | init_mmu(); | ||
120 | |||
121 | #ifdef CONFIG_DEBUG_KERNEL | ||
122 | if (boot_secondary_processors == 0) { | ||
123 | pr_debug("%s: boot_secondary_processors:%d; Hanging cpu:%d\n", | ||
124 | __func__, boot_secondary_processors, cpu); | ||
125 | for (;;) | ||
126 | __asm__ __volatile__ ("waiti " __stringify(LOCKLEVEL)); | ||
127 | } | ||
128 | |||
129 | pr_debug("%s: boot_secondary_processors:%d; Booting cpu:%d\n", | ||
130 | __func__, boot_secondary_processors, cpu); | ||
131 | #endif | ||
132 | /* Init EXCSAVE1 */ | ||
133 | |||
134 | secondary_trap_init(); | ||
135 | |||
136 | /* All kernel threads share the same mm context. */ | ||
137 | |||
138 | atomic_inc(&mm->mm_users); | ||
139 | atomic_inc(&mm->mm_count); | ||
140 | current->active_mm = mm; | ||
141 | cpumask_set_cpu(cpu, mm_cpumask(mm)); | ||
142 | enter_lazy_tlb(mm, current); | ||
143 | |||
144 | preempt_disable(); | ||
145 | trace_hardirqs_off(); | ||
146 | |||
147 | calibrate_delay(); | ||
148 | |||
149 | notify_cpu_starting(cpu); | ||
150 | |||
151 | secondary_init_irq(); | ||
152 | local_timer_setup(cpu); | ||
153 | |||
154 | set_cpu_online(cpu, true); | ||
155 | |||
156 | local_irq_enable(); | ||
157 | |||
158 | complete(&cpu_running); | ||
159 | |||
160 | cpu_startup_entry(CPUHP_ONLINE); | ||
161 | } | ||
162 | |||
163 | static void mx_cpu_start(void *p) | ||
164 | { | ||
165 | unsigned cpu = (unsigned)p; | ||
166 | unsigned long run_stall_mask = get_er(MPSCORE); | ||
167 | |||
168 | set_er(run_stall_mask & ~(1u << cpu), MPSCORE); | ||
169 | pr_debug("%s: cpu: %d, run_stall_mask: %lx ---> %lx\n", | ||
170 | __func__, cpu, run_stall_mask, get_er(MPSCORE)); | ||
171 | } | ||
172 | |||
173 | static void mx_cpu_stop(void *p) | ||
174 | { | ||
175 | unsigned cpu = (unsigned)p; | ||
176 | unsigned long run_stall_mask = get_er(MPSCORE); | ||
177 | |||
178 | set_er(run_stall_mask | (1u << cpu), MPSCORE); | ||
179 | pr_debug("%s: cpu: %d, run_stall_mask: %lx ---> %lx\n", | ||
180 | __func__, cpu, run_stall_mask, get_er(MPSCORE)); | ||
181 | } | ||
182 | |||
183 | #ifdef CONFIG_HOTPLUG_CPU | ||
184 | unsigned long cpu_start_id __cacheline_aligned; | ||
185 | #endif | ||
186 | unsigned long cpu_start_ccount; | ||
187 | |||
188 | static int boot_secondary(unsigned int cpu, struct task_struct *ts) | ||
189 | { | ||
190 | unsigned long timeout = jiffies + msecs_to_jiffies(1000); | ||
191 | unsigned long ccount; | ||
192 | int i; | ||
193 | |||
194 | #ifdef CONFIG_HOTPLUG_CPU | ||
195 | cpu_start_id = cpu; | ||
196 | system_flush_invalidate_dcache_range( | ||
197 | (unsigned long)&cpu_start_id, sizeof(cpu_start_id)); | ||
198 | #endif | ||
199 | smp_call_function_single(0, mx_cpu_start, (void *)cpu, 1); | ||
200 | |||
201 | for (i = 0; i < 2; ++i) { | ||
202 | do | ||
203 | ccount = get_ccount(); | ||
204 | while (!ccount); | ||
205 | |||
206 | cpu_start_ccount = ccount; | ||
207 | |||
208 | while (time_before(jiffies, timeout)) { | ||
209 | mb(); | ||
210 | if (!cpu_start_ccount) | ||
211 | break; | ||
212 | } | ||
213 | |||
214 | if (cpu_start_ccount) { | ||
215 | smp_call_function_single(0, mx_cpu_stop, | ||
216 | (void *)cpu, 1); | ||
217 | cpu_start_ccount = 0; | ||
218 | return -EIO; | ||
219 | } | ||
220 | } | ||
221 | return 0; | ||
222 | } | ||
223 | |||
224 | int __cpu_up(unsigned int cpu, struct task_struct *idle) | ||
225 | { | ||
226 | int ret = 0; | ||
227 | |||
228 | if (cpu_asid_cache(cpu) == 0) | ||
229 | cpu_asid_cache(cpu) = ASID_USER_FIRST; | ||
230 | |||
231 | start_info.stack = (unsigned long)task_pt_regs(idle); | ||
232 | wmb(); | ||
233 | |||
234 | pr_debug("%s: Calling wakeup_secondary(cpu:%d, idle:%p, sp: %08lx)\n", | ||
235 | __func__, cpu, idle, start_info.stack); | ||
236 | |||
237 | ret = boot_secondary(cpu, idle); | ||
238 | if (ret == 0) { | ||
239 | wait_for_completion_timeout(&cpu_running, | ||
240 | msecs_to_jiffies(1000)); | ||
241 | if (!cpu_online(cpu)) | ||
242 | ret = -EIO; | ||
243 | } | ||
244 | |||
245 | if (ret) | ||
246 | pr_err("CPU %u failed to boot\n", cpu); | ||
247 | |||
248 | return ret; | ||
249 | } | ||
250 | |||
251 | #ifdef CONFIG_HOTPLUG_CPU | ||
252 | |||
253 | /* | ||
254 | * __cpu_disable runs on the processor to be shutdown. | ||
255 | */ | ||
256 | int __cpu_disable(void) | ||
257 | { | ||
258 | unsigned int cpu = smp_processor_id(); | ||
259 | |||
260 | /* | ||
261 | * Take this CPU offline. Once we clear this, we can't return, | ||
262 | * and we must not schedule until we're ready to give up the cpu. | ||
263 | */ | ||
264 | set_cpu_online(cpu, false); | ||
265 | |||
266 | /* | ||
267 | * OK - migrate IRQs away from this CPU | ||
268 | */ | ||
269 | migrate_irqs(); | ||
270 | |||
271 | /* | ||
272 | * Flush user cache and TLB mappings, and then remove this CPU | ||
273 | * from the vm mask set of all processes. | ||
274 | */ | ||
275 | local_flush_cache_all(); | ||
276 | local_flush_tlb_all(); | ||
277 | invalidate_page_directory(); | ||
278 | |||
279 | clear_tasks_mm_cpumask(cpu); | ||
280 | |||
281 | return 0; | ||
282 | } | ||
283 | |||
284 | static void platform_cpu_kill(unsigned int cpu) | ||
285 | { | ||
286 | smp_call_function_single(0, mx_cpu_stop, (void *)cpu, true); | ||
287 | } | ||
288 | |||
289 | /* | ||
290 | * called on the thread which is asking for a CPU to be shutdown - | ||
291 | * waits until shutdown has completed, or it is timed out. | ||
292 | */ | ||
293 | void __cpu_die(unsigned int cpu) | ||
294 | { | ||
295 | unsigned long timeout = jiffies + msecs_to_jiffies(1000); | ||
296 | while (time_before(jiffies, timeout)) { | ||
297 | system_invalidate_dcache_range((unsigned long)&cpu_start_id, | ||
298 | sizeof(cpu_start_id)); | ||
299 | if (cpu_start_id == -cpu) { | ||
300 | platform_cpu_kill(cpu); | ||
301 | return; | ||
302 | } | ||
303 | } | ||
304 | pr_err("CPU%u: unable to kill\n", cpu); | ||
305 | } | ||
306 | |||
307 | void arch_cpu_idle_dead(void) | ||
308 | { | ||
309 | cpu_die(); | ||
310 | } | ||
311 | /* | ||
312 | * Called from the idle thread for the CPU which has been shutdown. | ||
313 | * | ||
314 | * Note that we disable IRQs here, but do not re-enable them | ||
315 | * before returning to the caller. This is also the behaviour | ||
316 | * of the other hotplug-cpu capable cores, so presumably coming | ||
317 | * out of idle fixes this. | ||
318 | */ | ||
319 | void __ref cpu_die(void) | ||
320 | { | ||
321 | idle_task_exit(); | ||
322 | local_irq_disable(); | ||
323 | __asm__ __volatile__( | ||
324 | " movi a2, cpu_restart\n" | ||
325 | " jx a2\n"); | ||
326 | } | ||
327 | |||
328 | #endif /* CONFIG_HOTPLUG_CPU */ | ||
329 | |||
330 | enum ipi_msg_type { | ||
331 | IPI_RESCHEDULE = 0, | ||
332 | IPI_CALL_FUNC, | ||
333 | IPI_CPU_STOP, | ||
334 | IPI_MAX | ||
335 | }; | ||
336 | |||
337 | static const struct { | ||
338 | const char *short_text; | ||
339 | const char *long_text; | ||
340 | } ipi_text[] = { | ||
341 | { .short_text = "RES", .long_text = "Rescheduling interrupts" }, | ||
342 | { .short_text = "CAL", .long_text = "Function call interrupts" }, | ||
343 | { .short_text = "DIE", .long_text = "CPU shutdown interrupts" }, | ||
344 | }; | ||
345 | |||
346 | struct ipi_data { | ||
347 | unsigned long ipi_count[IPI_MAX]; | ||
348 | }; | ||
349 | |||
350 | static DEFINE_PER_CPU(struct ipi_data, ipi_data); | ||
351 | |||
352 | static void send_ipi_message(const struct cpumask *callmask, | ||
353 | enum ipi_msg_type msg_id) | ||
354 | { | ||
355 | int index; | ||
356 | unsigned long mask = 0; | ||
357 | |||
358 | for_each_cpu(index, callmask) | ||
359 | if (index != smp_processor_id()) | ||
360 | mask |= 1 << index; | ||
361 | |||
362 | set_er(mask, MIPISET(msg_id)); | ||
363 | } | ||
364 | |||
365 | void arch_send_call_function_ipi_mask(const struct cpumask *mask) | ||
366 | { | ||
367 | send_ipi_message(mask, IPI_CALL_FUNC); | ||
368 | } | ||
369 | |||
370 | void arch_send_call_function_single_ipi(int cpu) | ||
371 | { | ||
372 | send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC); | ||
373 | } | ||
374 | |||
375 | void smp_send_reschedule(int cpu) | ||
376 | { | ||
377 | send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE); | ||
378 | } | ||
379 | |||
380 | void smp_send_stop(void) | ||
381 | { | ||
382 | struct cpumask targets; | ||
383 | |||
384 | cpumask_copy(&targets, cpu_online_mask); | ||
385 | cpumask_clear_cpu(smp_processor_id(), &targets); | ||
386 | send_ipi_message(&targets, IPI_CPU_STOP); | ||
387 | } | ||
388 | |||
389 | static void ipi_cpu_stop(unsigned int cpu) | ||
390 | { | ||
391 | set_cpu_online(cpu, false); | ||
392 | machine_halt(); | ||
393 | } | ||
394 | |||
395 | irqreturn_t ipi_interrupt(int irq, void *dev_id) | ||
396 | { | ||
397 | unsigned int cpu = smp_processor_id(); | ||
398 | struct ipi_data *ipi = &per_cpu(ipi_data, cpu); | ||
399 | unsigned int msg; | ||
400 | unsigned i; | ||
401 | |||
402 | msg = get_er(MIPICAUSE(cpu)); | ||
403 | for (i = 0; i < IPI_MAX; i++) | ||
404 | if (msg & (1 << i)) { | ||
405 | set_er(1 << i, MIPICAUSE(cpu)); | ||
406 | ++ipi->ipi_count[i]; | ||
407 | } | ||
408 | |||
409 | if (msg & (1 << IPI_RESCHEDULE)) | ||
410 | scheduler_ipi(); | ||
411 | if (msg & (1 << IPI_CALL_FUNC)) | ||
412 | generic_smp_call_function_interrupt(); | ||
413 | if (msg & (1 << IPI_CPU_STOP)) | ||
414 | ipi_cpu_stop(cpu); | ||
415 | |||
416 | return IRQ_HANDLED; | ||
417 | } | ||
418 | |||
419 | void show_ipi_list(struct seq_file *p, int prec) | ||
420 | { | ||
421 | unsigned int cpu; | ||
422 | unsigned i; | ||
423 | |||
424 | for (i = 0; i < IPI_MAX; ++i) { | ||
425 | seq_printf(p, "%*s:", prec, ipi_text[i].short_text); | ||
426 | for_each_online_cpu(cpu) | ||
427 | seq_printf(p, " %10lu", | ||
428 | per_cpu(ipi_data, cpu).ipi_count[i]); | ||
429 | seq_printf(p, " %s\n", ipi_text[i].long_text); | ||
430 | } | ||
431 | } | ||
432 | |||
433 | int setup_profiling_timer(unsigned int multiplier) | ||
434 | { | ||
435 | pr_debug("setup_profiling_timer %d\n", multiplier); | ||
436 | return 0; | ||
437 | } | ||
438 | |||
439 | /* TLB flush functions */ | ||
440 | |||
441 | struct flush_data { | ||
442 | struct vm_area_struct *vma; | ||
443 | unsigned long addr1; | ||
444 | unsigned long addr2; | ||
445 | }; | ||
446 | |||
447 | static void ipi_flush_tlb_all(void *arg) | ||
448 | { | ||
449 | local_flush_tlb_all(); | ||
450 | } | ||
451 | |||
452 | void flush_tlb_all(void) | ||
453 | { | ||
454 | on_each_cpu(ipi_flush_tlb_all, NULL, 1); | ||
455 | } | ||
456 | |||
457 | static void ipi_flush_tlb_mm(void *arg) | ||
458 | { | ||
459 | local_flush_tlb_mm(arg); | ||
460 | } | ||
461 | |||
462 | void flush_tlb_mm(struct mm_struct *mm) | ||
463 | { | ||
464 | on_each_cpu(ipi_flush_tlb_mm, mm, 1); | ||
465 | } | ||
466 | |||
467 | static void ipi_flush_tlb_page(void *arg) | ||
468 | { | ||
469 | struct flush_data *fd = arg; | ||
470 | local_flush_tlb_page(fd->vma, fd->addr1); | ||
471 | } | ||
472 | |||
473 | void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) | ||
474 | { | ||
475 | struct flush_data fd = { | ||
476 | .vma = vma, | ||
477 | .addr1 = addr, | ||
478 | }; | ||
479 | on_each_cpu(ipi_flush_tlb_page, &fd, 1); | ||
480 | } | ||
481 | |||
482 | static void ipi_flush_tlb_range(void *arg) | ||
483 | { | ||
484 | struct flush_data *fd = arg; | ||
485 | local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2); | ||
486 | } | ||
487 | |||
488 | void flush_tlb_range(struct vm_area_struct *vma, | ||
489 | unsigned long start, unsigned long end) | ||
490 | { | ||
491 | struct flush_data fd = { | ||
492 | .vma = vma, | ||
493 | .addr1 = start, | ||
494 | .addr2 = end, | ||
495 | }; | ||
496 | on_each_cpu(ipi_flush_tlb_range, &fd, 1); | ||
497 | } | ||
498 | |||
499 | /* Cache flush functions */ | ||
500 | |||
501 | static void ipi_flush_cache_all(void *arg) | ||
502 | { | ||
503 | local_flush_cache_all(); | ||
504 | } | ||
505 | |||
506 | void flush_cache_all(void) | ||
507 | { | ||
508 | on_each_cpu(ipi_flush_cache_all, NULL, 1); | ||
509 | } | ||
510 | |||
511 | static void ipi_flush_cache_page(void *arg) | ||
512 | { | ||
513 | struct flush_data *fd = arg; | ||
514 | local_flush_cache_page(fd->vma, fd->addr1, fd->addr2); | ||
515 | } | ||
516 | |||
517 | void flush_cache_page(struct vm_area_struct *vma, | ||
518 | unsigned long address, unsigned long pfn) | ||
519 | { | ||
520 | struct flush_data fd = { | ||
521 | .vma = vma, | ||
522 | .addr1 = address, | ||
523 | .addr2 = pfn, | ||
524 | }; | ||
525 | on_each_cpu(ipi_flush_cache_page, &fd, 1); | ||
526 | } | ||
527 | |||
528 | static void ipi_flush_cache_range(void *arg) | ||
529 | { | ||
530 | struct flush_data *fd = arg; | ||
531 | local_flush_cache_range(fd->vma, fd->addr1, fd->addr2); | ||
532 | } | ||
533 | |||
534 | void flush_cache_range(struct vm_area_struct *vma, | ||
535 | unsigned long start, unsigned long end) | ||
536 | { | ||
537 | struct flush_data fd = { | ||
538 | .vma = vma, | ||
539 | .addr1 = start, | ||
540 | .addr2 = end, | ||
541 | }; | ||
542 | on_each_cpu(ipi_flush_cache_range, &fd, 1); | ||
543 | } | ||
544 | |||
545 | static void ipi_flush_icache_range(void *arg) | ||
546 | { | ||
547 | struct flush_data *fd = arg; | ||
548 | local_flush_icache_range(fd->addr1, fd->addr2); | ||
549 | } | ||
550 | |||
551 | void flush_icache_range(unsigned long start, unsigned long end) | ||
552 | { | ||
553 | struct flush_data fd = { | ||
554 | .addr1 = start, | ||
555 | .addr2 = end, | ||
556 | }; | ||
557 | on_each_cpu(ipi_flush_icache_range, &fd, 1); | ||
558 | } | ||
559 | |||
560 | /* ------------------------------------------------------------------------- */ | ||
561 | |||
562 | static void ipi_invalidate_dcache_range(void *arg) | ||
563 | { | ||
564 | struct flush_data *fd = arg; | ||
565 | __invalidate_dcache_range(fd->addr1, fd->addr2); | ||
566 | } | ||
567 | |||
568 | static void system_invalidate_dcache_range(unsigned long start, | ||
569 | unsigned long size) | ||
570 | { | ||
571 | struct flush_data fd = { | ||
572 | .addr1 = start, | ||
573 | .addr2 = size, | ||
574 | }; | ||
575 | on_each_cpu(ipi_invalidate_dcache_range, &fd, 1); | ||
576 | } | ||
577 | |||
578 | static void ipi_flush_invalidate_dcache_range(void *arg) | ||
579 | { | ||
580 | struct flush_data *fd = arg; | ||
581 | __flush_invalidate_dcache_range(fd->addr1, fd->addr2); | ||
582 | } | ||
583 | |||
584 | static void system_flush_invalidate_dcache_range(unsigned long start, | ||
585 | unsigned long size) | ||
586 | { | ||
587 | struct flush_data fd = { | ||
588 | .addr1 = start, | ||
589 | .addr2 = size, | ||
590 | }; | ||
591 | on_each_cpu(ipi_flush_invalidate_dcache_range, &fd, 1); | ||
592 | } | ||
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c index 9af3dd88ad7e..08b769d3b3a1 100644 --- a/arch/xtensa/kernel/time.c +++ b/arch/xtensa/kernel/time.c | |||
@@ -36,7 +36,7 @@ static cycle_t ccount_read(struct clocksource *cs) | |||
36 | return (cycle_t)get_ccount(); | 36 | return (cycle_t)get_ccount(); |
37 | } | 37 | } |
38 | 38 | ||
39 | static u32 notrace ccount_sched_clock_read(void) | 39 | static u64 notrace ccount_sched_clock_read(void) |
40 | { | 40 | { |
41 | return get_ccount(); | 41 | return get_ccount(); |
42 | } | 42 | } |
@@ -46,24 +46,19 @@ static struct clocksource ccount_clocksource = { | |||
46 | .rating = 200, | 46 | .rating = 200, |
47 | .read = ccount_read, | 47 | .read = ccount_read, |
48 | .mask = CLOCKSOURCE_MASK(32), | 48 | .mask = CLOCKSOURCE_MASK(32), |
49 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | ||
49 | }; | 50 | }; |
50 | 51 | ||
51 | static int ccount_timer_set_next_event(unsigned long delta, | 52 | static int ccount_timer_set_next_event(unsigned long delta, |
52 | struct clock_event_device *dev); | 53 | struct clock_event_device *dev); |
53 | static void ccount_timer_set_mode(enum clock_event_mode mode, | 54 | static void ccount_timer_set_mode(enum clock_event_mode mode, |
54 | struct clock_event_device *evt); | 55 | struct clock_event_device *evt); |
55 | static struct ccount_timer_t { | 56 | struct ccount_timer { |
56 | struct clock_event_device evt; | 57 | struct clock_event_device evt; |
57 | int irq_enabled; | 58 | int irq_enabled; |
58 | } ccount_timer = { | 59 | char name[24]; |
59 | .evt = { | ||
60 | .name = "ccount_clockevent", | ||
61 | .features = CLOCK_EVT_FEAT_ONESHOT, | ||
62 | .rating = 300, | ||
63 | .set_next_event = ccount_timer_set_next_event, | ||
64 | .set_mode = ccount_timer_set_mode, | ||
65 | }, | ||
66 | }; | 60 | }; |
61 | static DEFINE_PER_CPU(struct ccount_timer, ccount_timer); | ||
67 | 62 | ||
68 | static int ccount_timer_set_next_event(unsigned long delta, | 63 | static int ccount_timer_set_next_event(unsigned long delta, |
69 | struct clock_event_device *dev) | 64 | struct clock_event_device *dev) |
@@ -84,8 +79,8 @@ static int ccount_timer_set_next_event(unsigned long delta, | |||
84 | static void ccount_timer_set_mode(enum clock_event_mode mode, | 79 | static void ccount_timer_set_mode(enum clock_event_mode mode, |
85 | struct clock_event_device *evt) | 80 | struct clock_event_device *evt) |
86 | { | 81 | { |
87 | struct ccount_timer_t *timer = | 82 | struct ccount_timer *timer = |
88 | container_of(evt, struct ccount_timer_t, evt); | 83 | container_of(evt, struct ccount_timer, evt); |
89 | 84 | ||
90 | /* | 85 | /* |
91 | * There is no way to disable the timer interrupt at the device level, | 86 | * There is no way to disable the timer interrupt at the device level, |
@@ -117,9 +112,28 @@ static struct irqaction timer_irqaction = { | |||
117 | .handler = timer_interrupt, | 112 | .handler = timer_interrupt, |
118 | .flags = IRQF_TIMER, | 113 | .flags = IRQF_TIMER, |
119 | .name = "timer", | 114 | .name = "timer", |
120 | .dev_id = &ccount_timer, | ||
121 | }; | 115 | }; |
122 | 116 | ||
117 | void local_timer_setup(unsigned cpu) | ||
118 | { | ||
119 | struct ccount_timer *timer = &per_cpu(ccount_timer, cpu); | ||
120 | struct clock_event_device *clockevent = &timer->evt; | ||
121 | |||
122 | timer->irq_enabled = 1; | ||
123 | clockevent->name = timer->name; | ||
124 | snprintf(timer->name, sizeof(timer->name), "ccount_clockevent_%u", cpu); | ||
125 | clockevent->features = CLOCK_EVT_FEAT_ONESHOT; | ||
126 | clockevent->rating = 300; | ||
127 | clockevent->set_next_event = ccount_timer_set_next_event; | ||
128 | clockevent->set_mode = ccount_timer_set_mode; | ||
129 | clockevent->cpumask = cpumask_of(cpu); | ||
130 | clockevent->irq = irq_create_mapping(NULL, LINUX_TIMER_INT); | ||
131 | if (WARN(!clockevent->irq, "error: can't map timer irq")) | ||
132 | return; | ||
133 | clockevents_config_and_register(clockevent, ccount_freq, | ||
134 | 0xf, 0xffffffff); | ||
135 | } | ||
136 | |||
123 | void __init time_init(void) | 137 | void __init time_init(void) |
124 | { | 138 | { |
125 | #ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT | 139 | #ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT |
@@ -131,28 +145,21 @@ void __init time_init(void) | |||
131 | ccount_freq = CONFIG_XTENSA_CPU_CLOCK*1000000UL; | 145 | ccount_freq = CONFIG_XTENSA_CPU_CLOCK*1000000UL; |
132 | #endif | 146 | #endif |
133 | clocksource_register_hz(&ccount_clocksource, ccount_freq); | 147 | clocksource_register_hz(&ccount_clocksource, ccount_freq); |
134 | 148 | local_timer_setup(0); | |
135 | ccount_timer.evt.cpumask = cpumask_of(0); | 149 | setup_irq(this_cpu_ptr(&ccount_timer)->evt.irq, &timer_irqaction); |
136 | ccount_timer.evt.irq = irq_create_mapping(NULL, LINUX_TIMER_INT); | 150 | sched_clock_register(ccount_sched_clock_read, 32, ccount_freq); |
137 | if (WARN(!ccount_timer.evt.irq, "error: can't map timer irq")) | 151 | clocksource_of_init(); |
138 | return; | ||
139 | clockevents_config_and_register(&ccount_timer.evt, ccount_freq, 0xf, | ||
140 | 0xffffffff); | ||
141 | setup_irq(ccount_timer.evt.irq, &timer_irqaction); | ||
142 | ccount_timer.irq_enabled = 1; | ||
143 | |||
144 | setup_sched_clock(ccount_sched_clock_read, 32, ccount_freq); | ||
145 | } | 152 | } |
146 | 153 | ||
147 | /* | 154 | /* |
148 | * The timer interrupt is called HZ times per second. | 155 | * The timer interrupt is called HZ times per second. |
149 | */ | 156 | */ |
150 | 157 | ||
151 | irqreturn_t timer_interrupt (int irq, void *dev_id) | 158 | irqreturn_t timer_interrupt(int irq, void *dev_id) |
152 | { | 159 | { |
153 | struct ccount_timer_t *timer = dev_id; | 160 | struct clock_event_device *evt = &this_cpu_ptr(&ccount_timer)->evt; |
154 | struct clock_event_device *evt = &timer->evt; | ||
155 | 161 | ||
162 | set_linux_timer(get_linux_timer()); | ||
156 | evt->event_handler(evt); | 163 | evt->event_handler(evt); |
157 | 164 | ||
158 | /* Allow platform to do something useful (Wdog). */ | 165 | /* Allow platform to do something useful (Wdog). */ |
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c index 3e8a05c874cd..eebbfd8c26fc 100644 --- a/arch/xtensa/kernel/traps.c +++ b/arch/xtensa/kernel/traps.c | |||
@@ -157,7 +157,7 @@ COPROCESSOR(7), | |||
157 | * 2. it is a temporary memory buffer for the exception handlers. | 157 | * 2. it is a temporary memory buffer for the exception handlers. |
158 | */ | 158 | */ |
159 | 159 | ||
160 | unsigned long exc_table[EXC_TABLE_SIZE/4]; | 160 | DEFINE_PER_CPU(unsigned long, exc_table[EXC_TABLE_SIZE/4]); |
161 | 161 | ||
162 | void die(const char*, struct pt_regs*, long); | 162 | void die(const char*, struct pt_regs*, long); |
163 | 163 | ||
@@ -212,6 +212,9 @@ void do_interrupt(struct pt_regs *regs) | |||
212 | XCHAL_INTLEVEL6_MASK, | 212 | XCHAL_INTLEVEL6_MASK, |
213 | XCHAL_INTLEVEL7_MASK, | 213 | XCHAL_INTLEVEL7_MASK, |
214 | }; | 214 | }; |
215 | struct pt_regs *old_regs = set_irq_regs(regs); | ||
216 | |||
217 | irq_enter(); | ||
215 | 218 | ||
216 | for (;;) { | 219 | for (;;) { |
217 | unsigned intread = get_sr(interrupt); | 220 | unsigned intread = get_sr(interrupt); |
@@ -227,21 +230,13 @@ void do_interrupt(struct pt_regs *regs) | |||
227 | } | 230 | } |
228 | 231 | ||
229 | if (level == 0) | 232 | if (level == 0) |
230 | return; | 233 | break; |
231 | 234 | ||
232 | /* | 235 | do_IRQ(__ffs(int_at_level), regs); |
233 | * Clear the interrupt before processing, in case it's | ||
234 | * edge-triggered or software-generated | ||
235 | */ | ||
236 | while (int_at_level) { | ||
237 | unsigned i = __ffs(int_at_level); | ||
238 | unsigned mask = 1 << i; | ||
239 | |||
240 | int_at_level ^= mask; | ||
241 | set_sr(mask, intclear); | ||
242 | do_IRQ(i, regs); | ||
243 | } | ||
244 | } | 236 | } |
237 | |||
238 | irq_exit(); | ||
239 | set_irq_regs(old_regs); | ||
245 | } | 240 | } |
246 | 241 | ||
247 | /* | 242 | /* |
@@ -318,17 +313,31 @@ do_debug(struct pt_regs *regs) | |||
318 | } | 313 | } |
319 | 314 | ||
320 | 315 | ||
316 | static void set_handler(int idx, void *handler) | ||
317 | { | ||
318 | unsigned int cpu; | ||
319 | |||
320 | for_each_possible_cpu(cpu) | ||
321 | per_cpu(exc_table, cpu)[idx] = (unsigned long)handler; | ||
322 | } | ||
323 | |||
321 | /* Set exception C handler - for temporary use when probing exceptions */ | 324 | /* Set exception C handler - for temporary use when probing exceptions */ |
322 | 325 | ||
323 | void * __init trap_set_handler(int cause, void *handler) | 326 | void * __init trap_set_handler(int cause, void *handler) |
324 | { | 327 | { |
325 | unsigned long *entry = &exc_table[EXC_TABLE_DEFAULT / 4 + cause]; | 328 | void *previous = (void *)per_cpu(exc_table, 0)[ |
326 | void *previous = (void *)*entry; | 329 | EXC_TABLE_DEFAULT / 4 + cause]; |
327 | *entry = (unsigned long)handler; | 330 | set_handler(EXC_TABLE_DEFAULT / 4 + cause, handler); |
328 | return previous; | 331 | return previous; |
329 | } | 332 | } |
330 | 333 | ||
331 | 334 | ||
335 | static void trap_init_excsave(void) | ||
336 | { | ||
337 | unsigned long excsave1 = (unsigned long)this_cpu_ptr(exc_table); | ||
338 | __asm__ __volatile__("wsr %0, excsave1\n" : : "a" (excsave1)); | ||
339 | } | ||
340 | |||
332 | /* | 341 | /* |
333 | * Initialize dispatch tables. | 342 | * Initialize dispatch tables. |
334 | * | 343 | * |
@@ -342,8 +351,6 @@ void * __init trap_set_handler(int cause, void *handler) | |||
342 | * See vectors.S for more details. | 351 | * See vectors.S for more details. |
343 | */ | 352 | */ |
344 | 353 | ||
345 | #define set_handler(idx,handler) (exc_table[idx] = (unsigned long) (handler)) | ||
346 | |||
347 | void __init trap_init(void) | 354 | void __init trap_init(void) |
348 | { | 355 | { |
349 | int i; | 356 | int i; |
@@ -373,10 +380,15 @@ void __init trap_init(void) | |||
373 | } | 380 | } |
374 | 381 | ||
375 | /* Initialize EXCSAVE_1 to hold the address of the exception table. */ | 382 | /* Initialize EXCSAVE_1 to hold the address of the exception table. */ |
383 | trap_init_excsave(); | ||
384 | } | ||
376 | 385 | ||
377 | i = (unsigned long)exc_table; | 386 | #ifdef CONFIG_SMP |
378 | __asm__ __volatile__("wsr %0, excsave1\n" : : "a" (i)); | 387 | void secondary_trap_init(void) |
388 | { | ||
389 | trap_init_excsave(); | ||
379 | } | 390 | } |
391 | #endif | ||
380 | 392 | ||
381 | /* | 393 | /* |
382 | * This function dumps the current valid window frame and other base registers. | 394 | * This function dumps the current valid window frame and other base registers. |
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S index 21acd11b5df2..ee32c0085dff 100644 --- a/arch/xtensa/kernel/vmlinux.lds.S +++ b/arch/xtensa/kernel/vmlinux.lds.S | |||
@@ -165,6 +165,13 @@ SECTIONS | |||
165 | .DoubleExceptionVector.text); | 165 | .DoubleExceptionVector.text); |
166 | RELOCATE_ENTRY(_DebugInterruptVector_text, | 166 | RELOCATE_ENTRY(_DebugInterruptVector_text, |
167 | .DebugInterruptVector.text); | 167 | .DebugInterruptVector.text); |
168 | #if defined(CONFIG_SMP) | ||
169 | RELOCATE_ENTRY(_SecondaryResetVector_literal, | ||
170 | .SecondaryResetVector.literal); | ||
171 | RELOCATE_ENTRY(_SecondaryResetVector_text, | ||
172 | .SecondaryResetVector.text); | ||
173 | #endif | ||
174 | |||
168 | 175 | ||
169 | __boot_reloc_table_end = ABSOLUTE(.) ; | 176 | __boot_reloc_table_end = ABSOLUTE(.) ; |
170 | 177 | ||
@@ -272,6 +279,25 @@ SECTIONS | |||
272 | .DoubleExceptionVector.literal) | 279 | .DoubleExceptionVector.literal) |
273 | 280 | ||
274 | . = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3; | 281 | . = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3; |
282 | |||
283 | #if defined(CONFIG_SMP) | ||
284 | |||
285 | SECTION_VECTOR (_SecondaryResetVector_literal, | ||
286 | .SecondaryResetVector.literal, | ||
287 | RESET_VECTOR1_VADDR - 4, | ||
288 | SIZEOF(.DoubleExceptionVector.text), | ||
289 | .DoubleExceptionVector.text) | ||
290 | |||
291 | SECTION_VECTOR (_SecondaryResetVector_text, | ||
292 | .SecondaryResetVector.text, | ||
293 | RESET_VECTOR1_VADDR, | ||
294 | 4, | ||
295 | .SecondaryResetVector.literal) | ||
296 | |||
297 | . = LOADADDR(.SecondaryResetVector.text)+SIZEOF(.SecondaryResetVector.text); | ||
298 | |||
299 | #endif | ||
300 | |||
275 | . = ALIGN(PAGE_SIZE); | 301 | . = ALIGN(PAGE_SIZE); |
276 | 302 | ||
277 | __init_end = .; | 303 | __init_end = .; |
diff --git a/arch/xtensa/mm/cache.c b/arch/xtensa/mm/cache.c index 81edeab82d17..ba4c47f291b1 100644 --- a/arch/xtensa/mm/cache.c +++ b/arch/xtensa/mm/cache.c | |||
@@ -118,7 +118,7 @@ void flush_dcache_page(struct page *page) | |||
118 | * For now, flush the whole cache. FIXME?? | 118 | * For now, flush the whole cache. FIXME?? |
119 | */ | 119 | */ |
120 | 120 | ||
121 | void flush_cache_range(struct vm_area_struct* vma, | 121 | void local_flush_cache_range(struct vm_area_struct *vma, |
122 | unsigned long start, unsigned long end) | 122 | unsigned long start, unsigned long end) |
123 | { | 123 | { |
124 | __flush_invalidate_dcache_all(); | 124 | __flush_invalidate_dcache_all(); |
@@ -132,7 +132,7 @@ void flush_cache_range(struct vm_area_struct* vma, | |||
132 | * alias versions of the cache flush functions. | 132 | * alias versions of the cache flush functions. |
133 | */ | 133 | */ |
134 | 134 | ||
135 | void flush_cache_page(struct vm_area_struct* vma, unsigned long address, | 135 | void local_flush_cache_page(struct vm_area_struct *vma, unsigned long address, |
136 | unsigned long pfn) | 136 | unsigned long pfn) |
137 | { | 137 | { |
138 | /* Note that we have to use the 'alias' address to avoid multi-hit */ | 138 | /* Note that we have to use the 'alias' address to avoid multi-hit */ |
@@ -159,8 +159,7 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep) | |||
159 | 159 | ||
160 | /* Invalidate old entry in TLBs */ | 160 | /* Invalidate old entry in TLBs */ |
161 | 161 | ||
162 | invalidate_itlb_mapping(addr); | 162 | flush_tlb_page(vma, addr); |
163 | invalidate_dtlb_mapping(addr); | ||
164 | 163 | ||
165 | #if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK | 164 | #if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK |
166 | 165 | ||
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c index 70fa7bc42b4a..b57c4f91f487 100644 --- a/arch/xtensa/mm/fault.c +++ b/arch/xtensa/mm/fault.c | |||
@@ -21,7 +21,7 @@ | |||
21 | #include <asm/uaccess.h> | 21 | #include <asm/uaccess.h> |
22 | #include <asm/pgalloc.h> | 22 | #include <asm/pgalloc.h> |
23 | 23 | ||
24 | unsigned long asid_cache = ASID_USER_FIRST; | 24 | DEFINE_PER_CPU(unsigned long, asid_cache) = ASID_USER_FIRST; |
25 | void bad_page_fault(struct pt_regs*, unsigned long, int); | 25 | void bad_page_fault(struct pt_regs*, unsigned long, int); |
26 | 26 | ||
27 | #undef DEBUG_PAGE_FAULT | 27 | #undef DEBUG_PAGE_FAULT |
diff --git a/arch/xtensa/mm/misc.S b/arch/xtensa/mm/misc.S index d97ed1ba7b0a..1f68558dbcc2 100644 --- a/arch/xtensa/mm/misc.S +++ b/arch/xtensa/mm/misc.S | |||
@@ -140,7 +140,7 @@ ENTRY(clear_user_page) | |||
140 | 140 | ||
141 | /* Setup a temporary DTLB with the color of the VPN */ | 141 | /* Setup a temporary DTLB with the color of the VPN */ |
142 | 142 | ||
143 | movi a4, -PAGE_OFFSET + (PAGE_KERNEL | _PAGE_HW_WRITE) | 143 | movi a4, ((PAGE_KERNEL | _PAGE_HW_WRITE) - PAGE_OFFSET) & 0xffffffff |
144 | movi a5, TLBTEMP_BASE_1 # virt | 144 | movi a5, TLBTEMP_BASE_1 # virt |
145 | add a6, a2, a4 # ppn | 145 | add a6, a2, a4 # ppn |
146 | add a2, a5, a3 # add 'color' | 146 | add a2, a5, a3 # add 'color' |
@@ -194,7 +194,7 @@ ENTRY(copy_user_page) | |||
194 | or a9, a9, a8 | 194 | or a9, a9, a8 |
195 | slli a4, a4, PAGE_SHIFT | 195 | slli a4, a4, PAGE_SHIFT |
196 | s32i a9, a5, PAGE_FLAGS | 196 | s32i a9, a5, PAGE_FLAGS |
197 | movi a5, -PAGE_OFFSET + (PAGE_KERNEL | _PAGE_HW_WRITE) | 197 | movi a5, ((PAGE_KERNEL | _PAGE_HW_WRITE) - PAGE_OFFSET) & 0xffffffff |
198 | 198 | ||
199 | beqz a6, 1f | 199 | beqz a6, 1f |
200 | 200 | ||
diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c index c43771c974be..36ec171698b8 100644 --- a/arch/xtensa/mm/mmu.c +++ b/arch/xtensa/mm/mmu.c | |||
@@ -13,6 +13,8 @@ | |||
13 | #include <asm/tlbflush.h> | 13 | #include <asm/tlbflush.h> |
14 | #include <asm/mmu_context.h> | 14 | #include <asm/mmu_context.h> |
15 | #include <asm/page.h> | 15 | #include <asm/page.h> |
16 | #include <asm/initialize_mmu.h> | ||
17 | #include <asm/io.h> | ||
16 | 18 | ||
17 | void __init paging_init(void) | 19 | void __init paging_init(void) |
18 | { | 20 | { |
@@ -22,7 +24,7 @@ void __init paging_init(void) | |||
22 | /* | 24 | /* |
23 | * Flush the mmu and reset associated register to default values. | 25 | * Flush the mmu and reset associated register to default values. |
24 | */ | 26 | */ |
25 | void __init init_mmu(void) | 27 | void init_mmu(void) |
26 | { | 28 | { |
27 | #if !(XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY) | 29 | #if !(XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY) |
28 | /* | 30 | /* |
@@ -37,7 +39,21 @@ void __init init_mmu(void) | |||
37 | set_itlbcfg_register(0); | 39 | set_itlbcfg_register(0); |
38 | set_dtlbcfg_register(0); | 40 | set_dtlbcfg_register(0); |
39 | #endif | 41 | #endif |
40 | flush_tlb_all(); | 42 | #if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && CONFIG_OF |
43 | /* | ||
44 | * Update the IO area mapping in case xtensa_kio_paddr has changed | ||
45 | */ | ||
46 | write_dtlb_entry(__pte(xtensa_kio_paddr + CA_WRITEBACK), | ||
47 | XCHAL_KIO_CACHED_VADDR + 6); | ||
48 | write_itlb_entry(__pte(xtensa_kio_paddr + CA_WRITEBACK), | ||
49 | XCHAL_KIO_CACHED_VADDR + 6); | ||
50 | write_dtlb_entry(__pte(xtensa_kio_paddr + CA_BYPASS), | ||
51 | XCHAL_KIO_BYPASS_VADDR + 6); | ||
52 | write_itlb_entry(__pte(xtensa_kio_paddr + CA_BYPASS), | ||
53 | XCHAL_KIO_BYPASS_VADDR + 6); | ||
54 | #endif | ||
55 | |||
56 | local_flush_tlb_all(); | ||
41 | 57 | ||
42 | /* Set rasid register to a known value. */ | 58 | /* Set rasid register to a known value. */ |
43 | 59 | ||
diff --git a/arch/xtensa/mm/tlb.c b/arch/xtensa/mm/tlb.c index ca9d2366bf12..ade623826788 100644 --- a/arch/xtensa/mm/tlb.c +++ b/arch/xtensa/mm/tlb.c | |||
@@ -48,7 +48,7 @@ static inline void __flush_dtlb_all (void) | |||
48 | } | 48 | } |
49 | 49 | ||
50 | 50 | ||
51 | void flush_tlb_all (void) | 51 | void local_flush_tlb_all(void) |
52 | { | 52 | { |
53 | __flush_itlb_all(); | 53 | __flush_itlb_all(); |
54 | __flush_dtlb_all(); | 54 | __flush_dtlb_all(); |
@@ -60,19 +60,23 @@ void flush_tlb_all (void) | |||
60 | * a new context will be assigned to it. | 60 | * a new context will be assigned to it. |
61 | */ | 61 | */ |
62 | 62 | ||
63 | void flush_tlb_mm(struct mm_struct *mm) | 63 | void local_flush_tlb_mm(struct mm_struct *mm) |
64 | { | 64 | { |
65 | int cpu = smp_processor_id(); | ||
66 | |||
65 | if (mm == current->active_mm) { | 67 | if (mm == current->active_mm) { |
66 | unsigned long flags; | 68 | unsigned long flags; |
67 | local_irq_save(flags); | 69 | local_irq_save(flags); |
68 | __get_new_mmu_context(mm); | 70 | mm->context.asid[cpu] = NO_CONTEXT; |
69 | __load_mmu_context(mm); | 71 | activate_context(mm, cpu); |
70 | local_irq_restore(flags); | 72 | local_irq_restore(flags); |
73 | } else { | ||
74 | mm->context.asid[cpu] = NO_CONTEXT; | ||
75 | mm->context.cpu = -1; | ||
71 | } | 76 | } |
72 | else | ||
73 | mm->context = 0; | ||
74 | } | 77 | } |
75 | 78 | ||
79 | |||
76 | #define _ITLB_ENTRIES (ITLB_ARF_WAYS << XCHAL_ITLB_ARF_ENTRIES_LOG2) | 80 | #define _ITLB_ENTRIES (ITLB_ARF_WAYS << XCHAL_ITLB_ARF_ENTRIES_LOG2) |
77 | #define _DTLB_ENTRIES (DTLB_ARF_WAYS << XCHAL_DTLB_ARF_ENTRIES_LOG2) | 81 | #define _DTLB_ENTRIES (DTLB_ARF_WAYS << XCHAL_DTLB_ARF_ENTRIES_LOG2) |
78 | #if _ITLB_ENTRIES > _DTLB_ENTRIES | 82 | #if _ITLB_ENTRIES > _DTLB_ENTRIES |
@@ -81,24 +85,26 @@ void flush_tlb_mm(struct mm_struct *mm) | |||
81 | # define _TLB_ENTRIES _DTLB_ENTRIES | 85 | # define _TLB_ENTRIES _DTLB_ENTRIES |
82 | #endif | 86 | #endif |
83 | 87 | ||
84 | void flush_tlb_range (struct vm_area_struct *vma, | 88 | void local_flush_tlb_range(struct vm_area_struct *vma, |
85 | unsigned long start, unsigned long end) | 89 | unsigned long start, unsigned long end) |
86 | { | 90 | { |
91 | int cpu = smp_processor_id(); | ||
87 | struct mm_struct *mm = vma->vm_mm; | 92 | struct mm_struct *mm = vma->vm_mm; |
88 | unsigned long flags; | 93 | unsigned long flags; |
89 | 94 | ||
90 | if (mm->context == NO_CONTEXT) | 95 | if (mm->context.asid[cpu] == NO_CONTEXT) |
91 | return; | 96 | return; |
92 | 97 | ||
93 | #if 0 | 98 | #if 0 |
94 | printk("[tlbrange<%02lx,%08lx,%08lx>]\n", | 99 | printk("[tlbrange<%02lx,%08lx,%08lx>]\n", |
95 | (unsigned long)mm->context, start, end); | 100 | (unsigned long)mm->context.asid[cpu], start, end); |
96 | #endif | 101 | #endif |
97 | local_irq_save(flags); | 102 | local_irq_save(flags); |
98 | 103 | ||
99 | if (end-start + (PAGE_SIZE-1) <= _TLB_ENTRIES << PAGE_SHIFT) { | 104 | if (end-start + (PAGE_SIZE-1) <= _TLB_ENTRIES << PAGE_SHIFT) { |
100 | int oldpid = get_rasid_register(); | 105 | int oldpid = get_rasid_register(); |
101 | set_rasid_register (ASID_INSERT(mm->context)); | 106 | |
107 | set_rasid_register(ASID_INSERT(mm->context.asid[cpu])); | ||
102 | start &= PAGE_MASK; | 108 | start &= PAGE_MASK; |
103 | if (vma->vm_flags & VM_EXEC) | 109 | if (vma->vm_flags & VM_EXEC) |
104 | while(start < end) { | 110 | while(start < end) { |
@@ -114,24 +120,25 @@ void flush_tlb_range (struct vm_area_struct *vma, | |||
114 | 120 | ||
115 | set_rasid_register(oldpid); | 121 | set_rasid_register(oldpid); |
116 | } else { | 122 | } else { |
117 | flush_tlb_mm(mm); | 123 | local_flush_tlb_mm(mm); |
118 | } | 124 | } |
119 | local_irq_restore(flags); | 125 | local_irq_restore(flags); |
120 | } | 126 | } |
121 | 127 | ||
122 | void flush_tlb_page (struct vm_area_struct *vma, unsigned long page) | 128 | void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) |
123 | { | 129 | { |
130 | int cpu = smp_processor_id(); | ||
124 | struct mm_struct* mm = vma->vm_mm; | 131 | struct mm_struct* mm = vma->vm_mm; |
125 | unsigned long flags; | 132 | unsigned long flags; |
126 | int oldpid; | 133 | int oldpid; |
127 | 134 | ||
128 | if(mm->context == NO_CONTEXT) | 135 | if (mm->context.asid[cpu] == NO_CONTEXT) |
129 | return; | 136 | return; |
130 | 137 | ||
131 | local_irq_save(flags); | 138 | local_irq_save(flags); |
132 | 139 | ||
133 | oldpid = get_rasid_register(); | 140 | oldpid = get_rasid_register(); |
134 | set_rasid_register(ASID_INSERT(mm->context)); | 141 | set_rasid_register(ASID_INSERT(mm->context.asid[cpu])); |
135 | 142 | ||
136 | if (vma->vm_flags & VM_EXEC) | 143 | if (vma->vm_flags & VM_EXEC) |
137 | invalidate_itlb_mapping(page); | 144 | invalidate_itlb_mapping(page); |
diff --git a/arch/xtensa/platforms/iss/network.c b/arch/xtensa/platforms/iss/network.c index e9e1aad8c271..d05f8feeb8d7 100644 --- a/arch/xtensa/platforms/iss/network.c +++ b/arch/xtensa/platforms/iss/network.c | |||
@@ -38,7 +38,7 @@ | |||
38 | #define DRIVER_NAME "iss-netdev" | 38 | #define DRIVER_NAME "iss-netdev" |
39 | #define ETH_MAX_PACKET 1500 | 39 | #define ETH_MAX_PACKET 1500 |
40 | #define ETH_HEADER_OTHER 14 | 40 | #define ETH_HEADER_OTHER 14 |
41 | #define ISS_NET_TIMER_VALUE (2 * HZ) | 41 | #define ISS_NET_TIMER_VALUE (HZ / 10) |
42 | 42 | ||
43 | 43 | ||
44 | static DEFINE_SPINLOCK(opened_lock); | 44 | static DEFINE_SPINLOCK(opened_lock); |
@@ -56,8 +56,6 @@ static LIST_HEAD(devices); | |||
56 | 56 | ||
57 | struct tuntap_info { | 57 | struct tuntap_info { |
58 | char dev_name[IFNAMSIZ]; | 58 | char dev_name[IFNAMSIZ]; |
59 | int fixed_config; | ||
60 | unsigned char gw[ETH_ALEN]; | ||
61 | int fd; | 59 | int fd; |
62 | }; | 60 | }; |
63 | 61 | ||
@@ -67,7 +65,6 @@ struct tuntap_info { | |||
67 | /* This structure contains out private information for the driver. */ | 65 | /* This structure contains out private information for the driver. */ |
68 | 66 | ||
69 | struct iss_net_private { | 67 | struct iss_net_private { |
70 | |||
71 | struct list_head device_list; | 68 | struct list_head device_list; |
72 | struct list_head opened_list; | 69 | struct list_head opened_list; |
73 | 70 | ||
@@ -83,9 +80,6 @@ struct iss_net_private { | |||
83 | int index; | 80 | int index; |
84 | int mtu; | 81 | int mtu; |
85 | 82 | ||
86 | unsigned char mac[ETH_ALEN]; | ||
87 | int have_mac; | ||
88 | |||
89 | struct { | 83 | struct { |
90 | union { | 84 | union { |
91 | struct tuntap_info tuntap; | 85 | struct tuntap_info tuntap; |
@@ -118,68 +112,48 @@ static char *split_if_spec(char *str, ...) | |||
118 | *arg = str; | 112 | *arg = str; |
119 | if (end == NULL) | 113 | if (end == NULL) |
120 | return NULL; | 114 | return NULL; |
121 | *end ++ = '\0'; | 115 | *end++ = '\0'; |
122 | str = end; | 116 | str = end; |
123 | } | 117 | } |
124 | va_end(ap); | 118 | va_end(ap); |
125 | return str; | 119 | return str; |
126 | } | 120 | } |
127 | 121 | ||
122 | /* Set Ethernet address of the specified device. */ | ||
128 | 123 | ||
129 | #if 0 | 124 | static void setup_etheraddr(struct net_device *dev, char *str) |
130 | /* Adjust SKB. */ | ||
131 | |||
132 | struct sk_buff *ether_adjust_skb(struct sk_buff *skb, int extra) | ||
133 | { | 125 | { |
134 | if ((skb != NULL) && (skb_tailroom(skb) < extra)) { | 126 | unsigned char *addr = dev->dev_addr; |
135 | struct sk_buff *skb2; | ||
136 | |||
137 | skb2 = skb_copy_expand(skb, 0, extra, GFP_ATOMIC); | ||
138 | dev_kfree_skb(skb); | ||
139 | skb = skb2; | ||
140 | } | ||
141 | if (skb != NULL) | ||
142 | skb_put(skb, extra); | ||
143 | |||
144 | return skb; | ||
145 | } | ||
146 | #endif | ||
147 | 127 | ||
148 | /* Return the IP address as a string for a given device. */ | 128 | if (str == NULL) |
129 | goto random; | ||
149 | 130 | ||
150 | static void dev_ip_addr(void *d, char *buf, char *bin_buf) | 131 | if (!mac_pton(str, addr)) { |
151 | { | 132 | pr_err("%s: failed to parse '%s' as an ethernet address\n", |
152 | struct net_device *dev = d; | 133 | dev->name, str); |
153 | struct in_device *ip = dev->ip_ptr; | 134 | goto random; |
154 | struct in_ifaddr *in; | ||
155 | __be32 addr; | ||
156 | |||
157 | if ((ip == NULL) || ((in = ip->ifa_list) == NULL)) { | ||
158 | printk(KERN_WARNING "Device not assigned an IP address!\n"); | ||
159 | return; | ||
160 | } | 135 | } |
161 | 136 | if (is_multicast_ether_addr(addr)) { | |
162 | addr = in->ifa_address; | 137 | pr_err("%s: attempt to assign a multicast ethernet address\n", |
163 | sprintf(buf, "%d.%d.%d.%d", addr & 0xff, (addr >> 8) & 0xff, | 138 | dev->name); |
164 | (addr >> 16) & 0xff, addr >> 24); | 139 | goto random; |
165 | |||
166 | if (bin_buf) { | ||
167 | bin_buf[0] = addr & 0xff; | ||
168 | bin_buf[1] = (addr >> 8) & 0xff; | ||
169 | bin_buf[2] = (addr >> 16) & 0xff; | ||
170 | bin_buf[3] = addr >> 24; | ||
171 | } | 140 | } |
141 | if (!is_valid_ether_addr(addr)) { | ||
142 | pr_err("%s: attempt to assign an invalid ethernet address\n", | ||
143 | dev->name); | ||
144 | goto random; | ||
145 | } | ||
146 | if (!is_local_ether_addr(addr)) | ||
147 | pr_warn("%s: assigning a globally valid ethernet address\n", | ||
148 | dev->name); | ||
149 | return; | ||
150 | |||
151 | random: | ||
152 | pr_info("%s: choosing a random ethernet address\n", | ||
153 | dev->name); | ||
154 | eth_hw_addr_random(dev); | ||
172 | } | 155 | } |
173 | 156 | ||
174 | /* Set Ethernet address of the specified device. */ | ||
175 | |||
176 | static void inline set_ether_mac(void *d, unsigned char *addr) | ||
177 | { | ||
178 | struct net_device *dev = d; | ||
179 | memcpy(dev->dev_addr, addr, ETH_ALEN); | ||
180 | } | ||
181 | |||
182 | |||
183 | /* ======================= TUNTAP TRANSPORT INTERFACE ====================== */ | 157 | /* ======================= TUNTAP TRANSPORT INTERFACE ====================== */ |
184 | 158 | ||
185 | static int tuntap_open(struct iss_net_private *lp) | 159 | static int tuntap_open(struct iss_net_private *lp) |
@@ -189,24 +163,21 @@ static int tuntap_open(struct iss_net_private *lp) | |||
189 | int err = -EINVAL; | 163 | int err = -EINVAL; |
190 | int fd; | 164 | int fd; |
191 | 165 | ||
192 | /* We currently only support a fixed configuration. */ | 166 | fd = simc_open("/dev/net/tun", 02, 0); /* O_RDWR */ |
193 | 167 | if (fd < 0) { | |
194 | if (!lp->tp.info.tuntap.fixed_config) | 168 | pr_err("%s: failed to open /dev/net/tun, returned %d (errno = %d)\n", |
195 | return -EINVAL; | 169 | lp->dev->name, fd, errno); |
196 | |||
197 | if ((fd = simc_open("/dev/net/tun", 02, 0)) < 0) { /* O_RDWR */ | ||
198 | printk("Failed to open /dev/net/tun, returned %d " | ||
199 | "(errno = %d)\n", fd, errno); | ||
200 | return fd; | 170 | return fd; |
201 | } | 171 | } |
202 | 172 | ||
203 | memset(&ifr, 0, sizeof ifr); | 173 | memset(&ifr, 0, sizeof(ifr)); |
204 | ifr.ifr_flags = IFF_TAP | IFF_NO_PI; | 174 | ifr.ifr_flags = IFF_TAP | IFF_NO_PI; |
205 | strlcpy(ifr.ifr_name, dev_name, sizeof ifr.ifr_name); | 175 | strlcpy(ifr.ifr_name, dev_name, sizeof(ifr.ifr_name)); |
206 | 176 | ||
207 | if ((err = simc_ioctl(fd, TUNSETIFF, (void*) &ifr)) < 0) { | 177 | err = simc_ioctl(fd, TUNSETIFF, &ifr); |
208 | printk("Failed to set interface, returned %d " | 178 | if (err < 0) { |
209 | "(errno = %d)\n", err, errno); | 179 | pr_err("%s: failed to set interface %s, returned %d (errno = %d)\n", |
180 | lp->dev->name, dev_name, err, errno); | ||
210 | simc_close(fd); | 181 | simc_close(fd); |
211 | return err; | 182 | return err; |
212 | } | 183 | } |
@@ -217,27 +188,17 @@ static int tuntap_open(struct iss_net_private *lp) | |||
217 | 188 | ||
218 | static void tuntap_close(struct iss_net_private *lp) | 189 | static void tuntap_close(struct iss_net_private *lp) |
219 | { | 190 | { |
220 | #if 0 | ||
221 | if (lp->tp.info.tuntap.fixed_config) | ||
222 | iter_addresses(lp->tp.info.tuntap.dev, close_addr, lp->host.dev_name); | ||
223 | #endif | ||
224 | simc_close(lp->tp.info.tuntap.fd); | 191 | simc_close(lp->tp.info.tuntap.fd); |
225 | lp->tp.info.tuntap.fd = -1; | 192 | lp->tp.info.tuntap.fd = -1; |
226 | } | 193 | } |
227 | 194 | ||
228 | static int tuntap_read (struct iss_net_private *lp, struct sk_buff **skb) | 195 | static int tuntap_read(struct iss_net_private *lp, struct sk_buff **skb) |
229 | { | 196 | { |
230 | #if 0 | ||
231 | *skb = ether_adjust_skb(*skb, ETH_HEADER_OTHER); | ||
232 | if (*skb == NULL) | ||
233 | return -ENOMEM; | ||
234 | #endif | ||
235 | |||
236 | return simc_read(lp->tp.info.tuntap.fd, | 197 | return simc_read(lp->tp.info.tuntap.fd, |
237 | (*skb)->data, (*skb)->dev->mtu + ETH_HEADER_OTHER); | 198 | (*skb)->data, (*skb)->dev->mtu + ETH_HEADER_OTHER); |
238 | } | 199 | } |
239 | 200 | ||
240 | static int tuntap_write (struct iss_net_private *lp, struct sk_buff **skb) | 201 | static int tuntap_write(struct iss_net_private *lp, struct sk_buff **skb) |
241 | { | 202 | { |
242 | return simc_write(lp->tp.info.tuntap.fd, (*skb)->data, (*skb)->len); | 203 | return simc_write(lp->tp.info.tuntap.fd, (*skb)->data, (*skb)->len); |
243 | } | 204 | } |
@@ -253,45 +214,45 @@ static int tuntap_poll(struct iss_net_private *lp) | |||
253 | } | 214 | } |
254 | 215 | ||
255 | /* | 216 | /* |
256 | * Currently only a device name is supported. | 217 | * ethX=tuntap,[mac address],device name |
257 | * ethX=tuntap[,[mac address][,[device name]]] | ||
258 | */ | 218 | */ |
259 | 219 | ||
260 | static int tuntap_probe(struct iss_net_private *lp, int index, char *init) | 220 | static int tuntap_probe(struct iss_net_private *lp, int index, char *init) |
261 | { | 221 | { |
262 | const int len = strlen(TRANSPORT_TUNTAP_NAME); | 222 | struct net_device *dev = lp->dev; |
263 | char *dev_name = NULL, *mac_str = NULL, *rem = NULL; | 223 | char *dev_name = NULL, *mac_str = NULL, *rem = NULL; |
264 | 224 | ||
265 | /* Transport should be 'tuntap': ethX=tuntap,mac,dev_name */ | 225 | /* Transport should be 'tuntap': ethX=tuntap,mac,dev_name */ |
266 | 226 | ||
267 | if (strncmp(init, TRANSPORT_TUNTAP_NAME, len)) | 227 | if (strncmp(init, TRANSPORT_TUNTAP_NAME, |
228 | sizeof(TRANSPORT_TUNTAP_NAME) - 1)) | ||
268 | return 0; | 229 | return 0; |
269 | 230 | ||
270 | if (*(init += strlen(TRANSPORT_TUNTAP_NAME)) == ',') { | 231 | init += sizeof(TRANSPORT_TUNTAP_NAME) - 1; |
271 | if ((rem=split_if_spec(init+1, &mac_str, &dev_name)) != NULL) { | 232 | if (*init == ',') { |
272 | printk("Extra garbage on specification : '%s'\n", rem); | 233 | rem = split_if_spec(init + 1, &mac_str, &dev_name); |
234 | if (rem != NULL) { | ||
235 | pr_err("%s: extra garbage on specification : '%s'\n", | ||
236 | dev->name, rem); | ||
273 | return 0; | 237 | return 0; |
274 | } | 238 | } |
275 | } else if (*init != '\0') { | 239 | } else if (*init != '\0') { |
276 | printk("Invalid argument: %s. Skipping device!\n", init); | 240 | pr_err("%s: invalid argument: %s. Skipping device!\n", |
241 | dev->name, init); | ||
277 | return 0; | 242 | return 0; |
278 | } | 243 | } |
279 | 244 | ||
280 | if (dev_name) { | 245 | if (!dev_name) { |
281 | strncpy(lp->tp.info.tuntap.dev_name, dev_name, | 246 | pr_err("%s: missing tuntap device name\n", dev->name); |
282 | sizeof lp->tp.info.tuntap.dev_name); | 247 | return 0; |
283 | lp->tp.info.tuntap.fixed_config = 1; | 248 | } |
284 | } else | ||
285 | strcpy(lp->tp.info.tuntap.dev_name, TRANSPORT_TUNTAP_NAME); | ||
286 | 249 | ||
250 | strlcpy(lp->tp.info.tuntap.dev_name, dev_name, | ||
251 | sizeof(lp->tp.info.tuntap.dev_name)); | ||
287 | 252 | ||
288 | #if 0 | 253 | setup_etheraddr(dev, mac_str); |
289 | if (setup_etheraddr(mac_str, lp->mac)) | ||
290 | lp->have_mac = 1; | ||
291 | #endif | ||
292 | lp->mtu = TRANSPORT_TUNTAP_MTU; | ||
293 | 254 | ||
294 | //lp->info.tuntap.gate_addr = gate_addr; | 255 | lp->mtu = TRANSPORT_TUNTAP_MTU; |
295 | 256 | ||
296 | lp->tp.info.tuntap.fd = -1; | 257 | lp->tp.info.tuntap.fd = -1; |
297 | 258 | ||
@@ -302,13 +263,6 @@ static int tuntap_probe(struct iss_net_private *lp, int index, char *init) | |||
302 | lp->tp.protocol = tuntap_protocol; | 263 | lp->tp.protocol = tuntap_protocol; |
303 | lp->tp.poll = tuntap_poll; | 264 | lp->tp.poll = tuntap_poll; |
304 | 265 | ||
305 | printk("TUN/TAP backend - "); | ||
306 | #if 0 | ||
307 | if (lp->host.gate_addr != NULL) | ||
308 | printk("IP = %s", lp->host.gate_addr); | ||
309 | #endif | ||
310 | printk("\n"); | ||
311 | |||
312 | return 1; | 266 | return 1; |
313 | } | 267 | } |
314 | 268 | ||
@@ -327,7 +281,8 @@ static int iss_net_rx(struct net_device *dev) | |||
327 | 281 | ||
328 | /* Try to allocate memory, if it fails, try again next round. */ | 282 | /* Try to allocate memory, if it fails, try again next round. */ |
329 | 283 | ||
330 | if ((skb = dev_alloc_skb(dev->mtu + 2 + ETH_HEADER_OTHER)) == NULL) { | 284 | skb = dev_alloc_skb(dev->mtu + 2 + ETH_HEADER_OTHER); |
285 | if (skb == NULL) { | ||
331 | lp->stats.rx_dropped++; | 286 | lp->stats.rx_dropped++; |
332 | return 0; | 287 | return 0; |
333 | } | 288 | } |
@@ -347,7 +302,6 @@ static int iss_net_rx(struct net_device *dev) | |||
347 | 302 | ||
348 | lp->stats.rx_bytes += skb->len; | 303 | lp->stats.rx_bytes += skb->len; |
349 | lp->stats.rx_packets++; | 304 | lp->stats.rx_packets++; |
350 | // netif_rx(skb); | ||
351 | netif_rx_ni(skb); | 305 | netif_rx_ni(skb); |
352 | return pkt_len; | 306 | return pkt_len; |
353 | } | 307 | } |
@@ -378,11 +332,11 @@ static int iss_net_poll(void) | |||
378 | spin_unlock(&lp->lock); | 332 | spin_unlock(&lp->lock); |
379 | 333 | ||
380 | if (err < 0) { | 334 | if (err < 0) { |
381 | printk(KERN_ERR "Device '%s' read returned %d, " | 335 | pr_err("Device '%s' read returned %d, shutting it down\n", |
382 | "shutting it down\n", lp->dev->name, err); | 336 | lp->dev->name, err); |
383 | dev_close(lp->dev); | 337 | dev_close(lp->dev); |
384 | } else { | 338 | } else { |
385 | // FIXME reactivate_fd(lp->fd, ISS_ETH_IRQ); | 339 | /* FIXME reactivate_fd(lp->fd, ISS_ETH_IRQ); */ |
386 | } | 340 | } |
387 | } | 341 | } |
388 | 342 | ||
@@ -393,14 +347,11 @@ static int iss_net_poll(void) | |||
393 | 347 | ||
394 | static void iss_net_timer(unsigned long priv) | 348 | static void iss_net_timer(unsigned long priv) |
395 | { | 349 | { |
396 | struct iss_net_private* lp = (struct iss_net_private*) priv; | 350 | struct iss_net_private *lp = (struct iss_net_private *)priv; |
397 | 351 | ||
398 | spin_lock(&lp->lock); | 352 | spin_lock(&lp->lock); |
399 | |||
400 | iss_net_poll(); | 353 | iss_net_poll(); |
401 | |||
402 | mod_timer(&lp->timer, jiffies + lp->timer_val); | 354 | mod_timer(&lp->timer, jiffies + lp->timer_val); |
403 | |||
404 | spin_unlock(&lp->lock); | 355 | spin_unlock(&lp->lock); |
405 | } | 356 | } |
406 | 357 | ||
@@ -408,19 +359,14 @@ static void iss_net_timer(unsigned long priv) | |||
408 | static int iss_net_open(struct net_device *dev) | 359 | static int iss_net_open(struct net_device *dev) |
409 | { | 360 | { |
410 | struct iss_net_private *lp = netdev_priv(dev); | 361 | struct iss_net_private *lp = netdev_priv(dev); |
411 | char addr[sizeof "255.255.255.255\0"]; | ||
412 | int err; | 362 | int err; |
413 | 363 | ||
414 | spin_lock(&lp->lock); | 364 | spin_lock(&lp->lock); |
415 | 365 | ||
416 | if ((err = lp->tp.open(lp)) < 0) | 366 | err = lp->tp.open(lp); |
367 | if (err < 0) | ||
417 | goto out; | 368 | goto out; |
418 | 369 | ||
419 | if (!lp->have_mac) { | ||
420 | dev_ip_addr(dev, addr, &lp->mac[2]); | ||
421 | set_ether_mac(dev, lp->mac); | ||
422 | } | ||
423 | |||
424 | netif_start_queue(dev); | 370 | netif_start_queue(dev); |
425 | 371 | ||
426 | /* clear buffer - it can happen that the host side of the interface | 372 | /* clear buffer - it can happen that the host side of the interface |
@@ -448,7 +394,6 @@ out: | |||
448 | static int iss_net_close(struct net_device *dev) | 394 | static int iss_net_close(struct net_device *dev) |
449 | { | 395 | { |
450 | struct iss_net_private *lp = netdev_priv(dev); | 396 | struct iss_net_private *lp = netdev_priv(dev); |
451 | printk("iss_net_close!\n"); | ||
452 | netif_stop_queue(dev); | 397 | netif_stop_queue(dev); |
453 | spin_lock(&lp->lock); | 398 | spin_lock(&lp->lock); |
454 | 399 | ||
@@ -490,7 +435,7 @@ static int iss_net_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
490 | 435 | ||
491 | } else { | 436 | } else { |
492 | netif_start_queue(dev); | 437 | netif_start_queue(dev); |
493 | printk(KERN_ERR "iss_net_start_xmit: failed(%d)\n", len); | 438 | pr_err("%s: %s failed(%d)\n", dev->name, __func__, len); |
494 | } | 439 | } |
495 | 440 | ||
496 | spin_unlock_irqrestore(&lp->lock, flags); | 441 | spin_unlock_irqrestore(&lp->lock, flags); |
@@ -508,56 +453,27 @@ static struct net_device_stats *iss_net_get_stats(struct net_device *dev) | |||
508 | 453 | ||
509 | static void iss_net_set_multicast_list(struct net_device *dev) | 454 | static void iss_net_set_multicast_list(struct net_device *dev) |
510 | { | 455 | { |
511 | #if 0 | ||
512 | if (dev->flags & IFF_PROMISC) | ||
513 | return; | ||
514 | else if (!netdev_mc_empty(dev)) | ||
515 | dev->flags |= IFF_ALLMULTI; | ||
516 | else | ||
517 | dev->flags &= ~IFF_ALLMULTI; | ||
518 | #endif | ||
519 | } | 456 | } |
520 | 457 | ||
521 | static void iss_net_tx_timeout(struct net_device *dev) | 458 | static void iss_net_tx_timeout(struct net_device *dev) |
522 | { | 459 | { |
523 | #if 0 | ||
524 | dev->trans_start = jiffies; | ||
525 | netif_wake_queue(dev); | ||
526 | #endif | ||
527 | } | 460 | } |
528 | 461 | ||
529 | static int iss_net_set_mac(struct net_device *dev, void *addr) | 462 | static int iss_net_set_mac(struct net_device *dev, void *addr) |
530 | { | 463 | { |
531 | #if 0 | ||
532 | struct iss_net_private *lp = netdev_priv(dev); | 464 | struct iss_net_private *lp = netdev_priv(dev); |
533 | struct sockaddr *hwaddr = addr; | 465 | struct sockaddr *hwaddr = addr; |
534 | 466 | ||
467 | if (!is_valid_ether_addr(hwaddr->sa_data)) | ||
468 | return -EADDRNOTAVAIL; | ||
535 | spin_lock(&lp->lock); | 469 | spin_lock(&lp->lock); |
536 | memcpy(dev->dev_addr, hwaddr->sa_data, ETH_ALEN); | 470 | memcpy(dev->dev_addr, hwaddr->sa_data, ETH_ALEN); |
537 | spin_unlock(&lp->lock); | 471 | spin_unlock(&lp->lock); |
538 | #endif | ||
539 | |||
540 | return 0; | 472 | return 0; |
541 | } | 473 | } |
542 | 474 | ||
543 | static int iss_net_change_mtu(struct net_device *dev, int new_mtu) | 475 | static int iss_net_change_mtu(struct net_device *dev, int new_mtu) |
544 | { | 476 | { |
545 | #if 0 | ||
546 | struct iss_net_private *lp = netdev_priv(dev); | ||
547 | int err = 0; | ||
548 | |||
549 | spin_lock(&lp->lock); | ||
550 | |||
551 | // FIXME not needed new_mtu = transport_set_mtu(new_mtu, &lp->user); | ||
552 | |||
553 | if (new_mtu < 0) | ||
554 | err = new_mtu; | ||
555 | else | ||
556 | dev->mtu = new_mtu; | ||
557 | |||
558 | spin_unlock(&lp->lock); | ||
559 | return err; | ||
560 | #endif | ||
561 | return -EINVAL; | 477 | return -EINVAL; |
562 | } | 478 | } |
563 | 479 | ||
@@ -582,7 +498,6 @@ static const struct net_device_ops iss_netdev_ops = { | |||
582 | .ndo_validate_addr = eth_validate_addr, | 498 | .ndo_validate_addr = eth_validate_addr, |
583 | .ndo_change_mtu = iss_net_change_mtu, | 499 | .ndo_change_mtu = iss_net_change_mtu, |
584 | .ndo_set_mac_address = iss_net_set_mac, | 500 | .ndo_set_mac_address = iss_net_set_mac, |
585 | //.ndo_do_ioctl = iss_net_ioctl, | ||
586 | .ndo_tx_timeout = iss_net_tx_timeout, | 501 | .ndo_tx_timeout = iss_net_tx_timeout, |
587 | .ndo_set_rx_mode = iss_net_set_multicast_list, | 502 | .ndo_set_rx_mode = iss_net_set_multicast_list, |
588 | }; | 503 | }; |
@@ -593,24 +508,29 @@ static int iss_net_configure(int index, char *init) | |||
593 | struct iss_net_private *lp; | 508 | struct iss_net_private *lp; |
594 | int err; | 509 | int err; |
595 | 510 | ||
596 | if ((dev = alloc_etherdev(sizeof *lp)) == NULL) { | 511 | dev = alloc_etherdev(sizeof(*lp)); |
597 | printk(KERN_ERR "eth_configure: failed to allocate device\n"); | 512 | if (dev == NULL) { |
513 | pr_err("eth_configure: failed to allocate device\n"); | ||
598 | return 1; | 514 | return 1; |
599 | } | 515 | } |
600 | 516 | ||
601 | /* Initialize private element. */ | 517 | /* Initialize private element. */ |
602 | 518 | ||
603 | lp = netdev_priv(dev); | 519 | lp = netdev_priv(dev); |
604 | *lp = ((struct iss_net_private) { | 520 | *lp = (struct iss_net_private) { |
605 | .device_list = LIST_HEAD_INIT(lp->device_list), | 521 | .device_list = LIST_HEAD_INIT(lp->device_list), |
606 | .opened_list = LIST_HEAD_INIT(lp->opened_list), | 522 | .opened_list = LIST_HEAD_INIT(lp->opened_list), |
607 | .lock = __SPIN_LOCK_UNLOCKED(lp.lock), | 523 | .lock = __SPIN_LOCK_UNLOCKED(lp.lock), |
608 | .dev = dev, | 524 | .dev = dev, |
609 | .index = index, | 525 | .index = index, |
610 | //.fd = -1, | 526 | }; |
611 | .mac = { 0xfe, 0xfd, 0x0, 0x0, 0x0, 0x0 }, | 527 | |
612 | .have_mac = 0, | 528 | /* |
613 | }); | 529 | * If this name ends up conflicting with an existing registered |
530 | * netdevice, that is OK, register_netdev{,ice}() will notice this | ||
531 | * and fail. | ||
532 | */ | ||
533 | snprintf(dev->name, sizeof(dev->name), "eth%d", index); | ||
614 | 534 | ||
615 | /* | 535 | /* |
616 | * Try all transport protocols. | 536 | * Try all transport protocols. |
@@ -618,14 +538,12 @@ static int iss_net_configure(int index, char *init) | |||
618 | */ | 538 | */ |
619 | 539 | ||
620 | if (!tuntap_probe(lp, index, init)) { | 540 | if (!tuntap_probe(lp, index, init)) { |
621 | printk("Invalid arguments. Skipping device!\n"); | 541 | pr_err("%s: invalid arguments. Skipping device!\n", |
542 | dev->name); | ||
622 | goto errout; | 543 | goto errout; |
623 | } | 544 | } |
624 | 545 | ||
625 | printk(KERN_INFO "Netdevice %d ", index); | 546 | pr_info("Netdevice %d (%pM)\n", index, dev->dev_addr); |
626 | if (lp->have_mac) | ||
627 | printk("(%pM) ", lp->mac); | ||
628 | printk(": "); | ||
629 | 547 | ||
630 | /* sysfs register */ | 548 | /* sysfs register */ |
631 | 549 | ||
@@ -641,14 +559,7 @@ static int iss_net_configure(int index, char *init) | |||
641 | lp->pdev.id = index; | 559 | lp->pdev.id = index; |
642 | lp->pdev.name = DRIVER_NAME; | 560 | lp->pdev.name = DRIVER_NAME; |
643 | platform_device_register(&lp->pdev); | 561 | platform_device_register(&lp->pdev); |
644 | SET_NETDEV_DEV(dev,&lp->pdev.dev); | 562 | SET_NETDEV_DEV(dev, &lp->pdev.dev); |
645 | |||
646 | /* | ||
647 | * If this name ends up conflicting with an existing registered | ||
648 | * netdevice, that is OK, register_netdev{,ice}() will notice this | ||
649 | * and fail. | ||
650 | */ | ||
651 | snprintf(dev->name, sizeof dev->name, "eth%d", index); | ||
652 | 563 | ||
653 | dev->netdev_ops = &iss_netdev_ops; | 564 | dev->netdev_ops = &iss_netdev_ops; |
654 | dev->mtu = lp->mtu; | 565 | dev->mtu = lp->mtu; |
@@ -660,7 +571,7 @@ static int iss_net_configure(int index, char *init) | |||
660 | rtnl_unlock(); | 571 | rtnl_unlock(); |
661 | 572 | ||
662 | if (err) { | 573 | if (err) { |
663 | printk("Error registering net device!\n"); | 574 | pr_err("%s: error registering net device!\n", dev->name); |
664 | /* XXX: should we call ->remove() here? */ | 575 | /* XXX: should we call ->remove() here? */ |
665 | free_netdev(dev); | 576 | free_netdev(dev); |
666 | return 1; | 577 | return 1; |
@@ -669,16 +580,11 @@ static int iss_net_configure(int index, char *init) | |||
669 | init_timer(&lp->tl); | 580 | init_timer(&lp->tl); |
670 | lp->tl.function = iss_net_user_timer_expire; | 581 | lp->tl.function = iss_net_user_timer_expire; |
671 | 582 | ||
672 | #if 0 | ||
673 | if (lp->have_mac) | ||
674 | set_ether_mac(dev, lp->mac); | ||
675 | #endif | ||
676 | return 0; | 583 | return 0; |
677 | 584 | ||
678 | errout: | 585 | errout: |
679 | // FIXME: unregister; free, etc.. | 586 | /* FIXME: unregister; free, etc.. */ |
680 | return -EIO; | 587 | return -EIO; |
681 | |||
682 | } | 588 | } |
683 | 589 | ||
684 | /* ------------------------------------------------------------------------- */ | 590 | /* ------------------------------------------------------------------------- */ |
@@ -706,21 +612,22 @@ static int __init iss_net_setup(char *str) | |||
706 | struct iss_net_init *new; | 612 | struct iss_net_init *new; |
707 | struct list_head *ele; | 613 | struct list_head *ele; |
708 | char *end; | 614 | char *end; |
709 | int n; | 615 | int rc; |
616 | unsigned n; | ||
710 | 617 | ||
711 | n = simple_strtoul(str, &end, 0); | 618 | end = strchr(str, '='); |
712 | if (end == str) { | 619 | if (!end) { |
713 | printk(ERR "Failed to parse '%s'\n", str); | 620 | printk(ERR "Expected '=' after device number\n"); |
714 | return 1; | ||
715 | } | ||
716 | if (n < 0) { | ||
717 | printk(ERR "Device %d is negative\n", n); | ||
718 | return 1; | 621 | return 1; |
719 | } | 622 | } |
720 | if (*(str = end) != '=') { | 623 | *end = 0; |
721 | printk(ERR "Expected '=' after device number\n"); | 624 | rc = kstrtouint(str, 0, &n); |
625 | *end = '='; | ||
626 | if (rc < 0) { | ||
627 | printk(ERR "Failed to parse '%s'\n", str); | ||
722 | return 1; | 628 | return 1; |
723 | } | 629 | } |
630 | str = end; | ||
724 | 631 | ||
725 | spin_lock(&devices_lock); | 632 | spin_lock(&devices_lock); |
726 | 633 | ||
@@ -733,13 +640,13 @@ static int __init iss_net_setup(char *str) | |||
733 | spin_unlock(&devices_lock); | 640 | spin_unlock(&devices_lock); |
734 | 641 | ||
735 | if (device && device->index == n) { | 642 | if (device && device->index == n) { |
736 | printk(ERR "Device %d already configured\n", n); | 643 | printk(ERR "Device %u already configured\n", n); |
737 | return 1; | 644 | return 1; |
738 | } | 645 | } |
739 | 646 | ||
740 | new = alloc_bootmem(sizeof(*new)); | 647 | new = alloc_bootmem(sizeof(*new)); |
741 | if (new == NULL) { | 648 | if (new == NULL) { |
742 | printk("Alloc_bootmem failed\n"); | 649 | printk(ERR "Alloc_bootmem failed\n"); |
743 | return 1; | 650 | return 1; |
744 | } | 651 | } |
745 | 652 | ||
@@ -753,7 +660,7 @@ static int __init iss_net_setup(char *str) | |||
753 | 660 | ||
754 | #undef ERR | 661 | #undef ERR |
755 | 662 | ||
756 | __setup("eth=", iss_net_setup); | 663 | __setup("eth", iss_net_setup); |
757 | 664 | ||
758 | /* | 665 | /* |
759 | * Initialize all ISS Ethernet devices previously registered in iss_net_setup. | 666 | * Initialize all ISS Ethernet devices previously registered in iss_net_setup. |
diff --git a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h index 4416773cbde5..aeb316b7ff88 100644 --- a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h +++ b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h | |||
@@ -15,10 +15,6 @@ | |||
15 | #ifndef __XTENSA_XTAVNET_HARDWARE_H | 15 | #ifndef __XTENSA_XTAVNET_HARDWARE_H |
16 | #define __XTENSA_XTAVNET_HARDWARE_H | 16 | #define __XTENSA_XTAVNET_HARDWARE_H |
17 | 17 | ||
18 | /* By default NO_IRQ is defined to 0 in Linux, but we use the | ||
19 | interrupt 0 for UART... */ | ||
20 | #define NO_IRQ -1 | ||
21 | |||
22 | /* Memory configuration. */ | 18 | /* Memory configuration. */ |
23 | 19 | ||
24 | #define PLATFORM_DEFAULT_MEM_START 0x00000000 | 20 | #define PLATFORM_DEFAULT_MEM_START 0x00000000 |
@@ -30,7 +26,7 @@ | |||
30 | 26 | ||
31 | /* Default assignment of LX60 devices to external interrupts. */ | 27 | /* Default assignment of LX60 devices to external interrupts. */ |
32 | 28 | ||
33 | #ifdef CONFIG_ARCH_HAS_SMP | 29 | #ifdef CONFIG_XTENSA_MX |
34 | #define DUART16552_INTNUM XCHAL_EXTINT3_NUM | 30 | #define DUART16552_INTNUM XCHAL_EXTINT3_NUM |
35 | #define OETH_IRQ XCHAL_EXTINT4_NUM | 31 | #define OETH_IRQ XCHAL_EXTINT4_NUM |
36 | #else | 32 | #else |
diff --git a/arch/xtensa/platforms/xtfpga/setup.c b/arch/xtensa/platforms/xtfpga/setup.c index 74bb74fa3f87..800227862fe8 100644 --- a/arch/xtensa/platforms/xtfpga/setup.c +++ b/arch/xtensa/platforms/xtfpga/setup.c | |||
@@ -168,7 +168,7 @@ void __init platform_calibrate_ccount(void) | |||
168 | long clk_freq = 0; | 168 | long clk_freq = 0; |
169 | #ifdef CONFIG_OF | 169 | #ifdef CONFIG_OF |
170 | struct device_node *cpu = | 170 | struct device_node *cpu = |
171 | of_find_compatible_node(NULL, NULL, "xtensa,cpu"); | 171 | of_find_compatible_node(NULL, NULL, "cdns,xtensa-cpu"); |
172 | if (cpu) { | 172 | if (cpu) { |
173 | u32 freq; | 173 | u32 freq; |
174 | update_clock_frequency(cpu); | 174 | update_clock_frequency(cpu); |
@@ -194,7 +194,7 @@ void __init platform_calibrate_ccount(void) | |||
194 | * Ethernet -- OpenCores Ethernet MAC (ethoc driver) | 194 | * Ethernet -- OpenCores Ethernet MAC (ethoc driver) |
195 | */ | 195 | */ |
196 | 196 | ||
197 | static struct resource ethoc_res[] __initdata = { | 197 | static struct resource ethoc_res[] = { |
198 | [0] = { /* register space */ | 198 | [0] = { /* register space */ |
199 | .start = OETH_REGS_PADDR, | 199 | .start = OETH_REGS_PADDR, |
200 | .end = OETH_REGS_PADDR + OETH_REGS_SIZE - 1, | 200 | .end = OETH_REGS_PADDR + OETH_REGS_SIZE - 1, |
@@ -212,7 +212,7 @@ static struct resource ethoc_res[] __initdata = { | |||
212 | }, | 212 | }, |
213 | }; | 213 | }; |
214 | 214 | ||
215 | static struct ethoc_platform_data ethoc_pdata __initdata = { | 215 | static struct ethoc_platform_data ethoc_pdata = { |
216 | /* | 216 | /* |
217 | * The MAC address for these boards is 00:50:c2:13:6f:xx. | 217 | * The MAC address for these boards is 00:50:c2:13:6f:xx. |
218 | * The last byte (here as zero) is read from the DIP switches on the | 218 | * The last byte (here as zero) is read from the DIP switches on the |
@@ -222,7 +222,7 @@ static struct ethoc_platform_data ethoc_pdata __initdata = { | |||
222 | .phy_id = -1, | 222 | .phy_id = -1, |
223 | }; | 223 | }; |
224 | 224 | ||
225 | static struct platform_device ethoc_device __initdata = { | 225 | static struct platform_device ethoc_device = { |
226 | .name = "ethoc", | 226 | .name = "ethoc", |
227 | .id = -1, | 227 | .id = -1, |
228 | .num_resources = ARRAY_SIZE(ethoc_res), | 228 | .num_resources = ARRAY_SIZE(ethoc_res), |
@@ -236,13 +236,13 @@ static struct platform_device ethoc_device __initdata = { | |||
236 | * UART | 236 | * UART |
237 | */ | 237 | */ |
238 | 238 | ||
239 | static struct resource serial_resource __initdata = { | 239 | static struct resource serial_resource = { |
240 | .start = DUART16552_PADDR, | 240 | .start = DUART16552_PADDR, |
241 | .end = DUART16552_PADDR + 0x1f, | 241 | .end = DUART16552_PADDR + 0x1f, |
242 | .flags = IORESOURCE_MEM, | 242 | .flags = IORESOURCE_MEM, |
243 | }; | 243 | }; |
244 | 244 | ||
245 | static struct plat_serial8250_port serial_platform_data[] __initdata = { | 245 | static struct plat_serial8250_port serial_platform_data[] = { |
246 | [0] = { | 246 | [0] = { |
247 | .mapbase = DUART16552_PADDR, | 247 | .mapbase = DUART16552_PADDR, |
248 | .irq = DUART16552_INTNUM, | 248 | .irq = DUART16552_INTNUM, |
@@ -255,7 +255,7 @@ static struct plat_serial8250_port serial_platform_data[] __initdata = { | |||
255 | { }, | 255 | { }, |
256 | }; | 256 | }; |
257 | 257 | ||
258 | static struct platform_device xtavnet_uart __initdata = { | 258 | static struct platform_device xtavnet_uart = { |
259 | .name = "serial8250", | 259 | .name = "serial8250", |
260 | .id = PLAT8250_DEV_PLATFORM, | 260 | .id = PLAT8250_DEV_PLATFORM, |
261 | .dev = { | 261 | .dev = { |
diff --git a/arch/xtensa/variants/s6000/include/variant/irq.h b/arch/xtensa/variants/s6000/include/variant/irq.h index 97d6fc48deff..39ca751a6255 100644 --- a/arch/xtensa/variants/s6000/include/variant/irq.h +++ b/arch/xtensa/variants/s6000/include/variant/irq.h | |||
@@ -1,7 +1,6 @@ | |||
1 | #ifndef _XTENSA_S6000_IRQ_H | 1 | #ifndef _XTENSA_S6000_IRQ_H |
2 | #define _XTENSA_S6000_IRQ_H | 2 | #define _XTENSA_S6000_IRQ_H |
3 | 3 | ||
4 | #define NO_IRQ (-1) | ||
5 | #define VARIANT_NR_IRQS 8 /* GPIO interrupts */ | 4 | #define VARIANT_NR_IRQS 8 /* GPIO interrupts */ |
6 | 5 | ||
7 | extern void variant_irq_enable(unsigned int irq); | 6 | extern void variant_irq_enable(unsigned int irq); |