Diffstat (limited to 'arch')
94 files changed, 2378 insertions, 1210 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 4ed24b4aa714..62079d434581 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -59,6 +59,7 @@ config ARM
 	select CLONE_BACKWARDS
 	select OLD_SIGSUSPEND3
 	select OLD_SIGACTION
+	select HAVE_CONTEXT_TRACKING
 	help
 	  The ARM series is a line of low-power-consumption RISC chip designs
 	  licensed by ARM Ltd and targeted at embedded applications and
@@ -1479,6 +1480,14 @@ config HAVE_ARM_TWD
 	help
 	  This options enables support for the ARM timer and watchdog unit
 
+config MCPM
+	bool "Multi-Cluster Power Management"
+	depends on CPU_V7 && SMP
+	help
+	  This option provides the common power management infrastructure
+	  for (multi-)cluster based systems, such as big.LITTLE based
+	  systems.
+
 choice
 	prompt "Memory split"
 	default VMSPLIT_3G
@@ -1565,8 +1574,9 @@ config SCHED_HRTICK
 	def_bool HIGH_RES_TIMERS
 
 config THUMB2_KERNEL
-	bool "Compile the kernel in Thumb-2 mode"
+	bool "Compile the kernel in Thumb-2 mode" if !CPU_THUMBONLY
 	depends on CPU_V7 && !CPU_V6 && !CPU_V6K
+	default y if CPU_THUMBONLY
 	select AEABI
 	select ARM_ASM_UNIFIED
 	select ARM_UNWIND
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 54d6fdc03e04..5c8e59f6a6f4 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -641,6 +641,17 @@ config DEBUG_LL_INCLUDE
 	default "debug/zynq.S" if DEBUG_ZYNQ_UART0 || DEBUG_ZYNQ_UART1
 	default "mach/debug-macro.S"
 
+config DEBUG_UNCOMPRESS
+	bool
+	default y if ARCH_MULTIPLATFORM && DEBUG_LL && \
+		     !DEBUG_OMAP2PLUS_UART && \
+		     !DEBUG_TEGRA_UART
+
+config UNCOMPRESS_INCLUDE
+	string
+	default "debug/uncompress.h" if ARCH_MULTIPLATFORM
+	default "mach/uncompress.h"
+
 config EARLY_PRINTK
 	bool "Early printk"
 	depends on DEBUG_LL
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index afed28e37ea5..3580d57ea218 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -24,6 +24,9 @@ endif
 AFLAGS_head.o += -DTEXT_OFFSET=$(TEXT_OFFSET)
 HEAD	= head.o
 OBJS	+= misc.o decompress.o
+ifeq ($(CONFIG_DEBUG_UNCOMPRESS),y)
+OBJS	+= debug.o
+endif
 FONTC	= $(srctree)/drivers/video/console/font_acorn_8x8.c
 
 # string library code (-Os is enforced to keep it much smaller)
diff --git a/arch/arm/boot/compressed/debug.S b/arch/arm/boot/compressed/debug.S
new file mode 100644
index 000000000000..6e8382d5b7a4
--- /dev/null
+++ b/arch/arm/boot/compressed/debug.S
@@ -0,0 +1,12 @@
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+#include CONFIG_DEBUG_LL_INCLUDE
+
+ENTRY(putc)
+	addruart r1, r2, r3
+	waituart r3, r1
+	senduart r0, r1
+	busyuart r3, r1
+	mov	pc, lr
+ENDPROC(putc)
diff --git a/arch/arm/boot/compressed/misc.c b/arch/arm/boot/compressed/misc.c
index df899834d84e..31bd43b82095 100644
--- a/arch/arm/boot/compressed/misc.c
+++ b/arch/arm/boot/compressed/misc.c
@@ -25,13 +25,7 @@ unsigned int __machine_arch_type;
 static void putstr(const char *ptr);
 extern void error(char *x);
 
-#ifdef CONFIG_ARCH_MULTIPLATFORM
-static inline void putc(int c) {}
-static inline void flush(void) {}
-static inline void arch_decomp_setup(void) {}
-#else
-#include <mach/uncompress.h>
-#endif
+#include CONFIG_UNCOMPRESS_INCLUDE
 
 #ifdef CONFIG_DEBUG_ICEDCC
 
diff --git a/arch/arm/common/Makefile b/arch/arm/common/Makefile
index dc8dd0de5c0f..53e68b163196 100644
--- a/arch/arm/common/Makefile
+++ b/arch/arm/common/Makefile
@@ -11,3 +11,6 @@ obj-$(CONFIG_SHARP_PARAM)	+= sharpsl_param.o
 obj-$(CONFIG_SHARP_SCOOP)	+= scoop.o
 obj-$(CONFIG_PCI_HOST_ITE8152)	+= it8152.o
 obj-$(CONFIG_ARM_TIMER_SP804)	+= timer-sp.o
+obj-$(CONFIG_MCPM)		+= mcpm_head.o mcpm_entry.o mcpm_platsmp.o vlock.o
+AFLAGS_mcpm_head.o		:= -march=armv7-a
+AFLAGS_vlock.o			:= -march=armv7-a
diff --git a/arch/arm/common/mcpm_entry.c b/arch/arm/common/mcpm_entry.c
new file mode 100644
index 000000000000..370236dd1a03
--- /dev/null
+++ b/arch/arm/common/mcpm_entry.c
@@ -0,0 +1,263 @@
1 | /* | ||
2 | * arch/arm/common/mcpm_entry.c -- entry point for multi-cluster PM | ||
3 | * | ||
4 | * Created by: Nicolas Pitre, March 2012 | ||
5 | * Copyright: (C) 2012-2013 Linaro Limited | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | */ | ||
11 | |||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/init.h> | ||
14 | #include <linux/irqflags.h> | ||
15 | |||
16 | #include <asm/mcpm.h> | ||
17 | #include <asm/cacheflush.h> | ||
18 | #include <asm/idmap.h> | ||
19 | #include <asm/cputype.h> | ||
20 | |||
21 | extern unsigned long mcpm_entry_vectors[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER]; | ||
22 | |||
23 | void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr) | ||
24 | { | ||
25 | unsigned long val = ptr ? virt_to_phys(ptr) : 0; | ||
26 | mcpm_entry_vectors[cluster][cpu] = val; | ||
27 | sync_cache_w(&mcpm_entry_vectors[cluster][cpu]); | ||
28 | } | ||
29 | |||
30 | static const struct mcpm_platform_ops *platform_ops; | ||
31 | |||
32 | int __init mcpm_platform_register(const struct mcpm_platform_ops *ops) | ||
33 | { | ||
34 | if (platform_ops) | ||
35 | return -EBUSY; | ||
36 | platform_ops = ops; | ||
37 | return 0; | ||
38 | } | ||
39 | |||
40 | int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster) | ||
41 | { | ||
42 | if (!platform_ops) | ||
43 | return -EUNATCH; /* try not to shadow power_up errors */ | ||
44 | might_sleep(); | ||
45 | return platform_ops->power_up(cpu, cluster); | ||
46 | } | ||
47 | |||
48 | typedef void (*phys_reset_t)(unsigned long); | ||
49 | |||
50 | void mcpm_cpu_power_down(void) | ||
51 | { | ||
52 | phys_reset_t phys_reset; | ||
53 | |||
54 | BUG_ON(!platform_ops); | ||
55 | BUG_ON(!irqs_disabled()); | ||
56 | |||
57 | /* | ||
58 | * Do this before calling into the power_down method, | ||
59 | * as it might not always be safe to do afterwards. | ||
60 | */ | ||
61 | setup_mm_for_reboot(); | ||
62 | |||
63 | platform_ops->power_down(); | ||
64 | |||
65 | /* | ||
66 | * It is possible for a power_up request to happen concurrently | ||
67 | * with a power_down request for the same CPU. In this case the | ||
68 | * power_down method might not be able to actually enter a | ||
69 | * powered down state with the WFI instruction if the power_up | ||
70 | * method has removed the required reset condition. The | ||
71 | * power_down method is then allowed to return. We must perform | ||
72 | * a re-entry in the kernel as if the power_up method just had | ||
73 | * deasserted reset on the CPU. | ||
74 | * | ||
75 | * To simplify race issues, the platform specific implementation | ||
76 | * must accommodate the possibility of unordered calls to | ||
77 | * power_down and power_up with a usage count. Therefore, if a | ||
78 | * call to power_up is issued for a CPU that is not down, then | ||
79 | * the next call to power_down must not attempt a full shutdown | ||
80 | * but only do the minimum (normally disabling L1 cache and CPU | ||
81 | * coherency) and return just as if a concurrent power_up request | ||
82 | * had happened as described above. | ||
83 | */ | ||
84 | |||
85 | phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset); | ||
86 | phys_reset(virt_to_phys(mcpm_entry_point)); | ||
87 | |||
88 | /* should never get here */ | ||
89 | BUG(); | ||
90 | } | ||
91 | |||
92 | void mcpm_cpu_suspend(u64 expected_residency) | ||
93 | { | ||
94 | phys_reset_t phys_reset; | ||
95 | |||
96 | BUG_ON(!platform_ops); | ||
97 | BUG_ON(!irqs_disabled()); | ||
98 | |||
99 | /* Very similar to mcpm_cpu_power_down() */ | ||
100 | setup_mm_for_reboot(); | ||
101 | platform_ops->suspend(expected_residency); | ||
102 | phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset); | ||
103 | phys_reset(virt_to_phys(mcpm_entry_point)); | ||
104 | BUG(); | ||
105 | } | ||
106 | |||
107 | int mcpm_cpu_powered_up(void) | ||
108 | { | ||
109 | if (!platform_ops) | ||
110 | return -EUNATCH; | ||
111 | if (platform_ops->powered_up) | ||
112 | platform_ops->powered_up(); | ||
113 | return 0; | ||
114 | } | ||
115 | |||
116 | struct sync_struct mcpm_sync; | ||
117 | |||
118 | /* | ||
119 | * __mcpm_cpu_going_down: Indicates that the cpu is being torn down. | ||
120 | * This must be called at the point of committing to teardown of a CPU. | ||
121 | * The CPU cache (SCTLR.C bit) is expected to still be active. | ||
122 | */ | ||
123 | void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster) | ||
124 | { | ||
125 | mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN; | ||
126 | sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu); | ||
127 | } | ||
128 | |||
129 | /* | ||
130 | * __mcpm_cpu_down: Indicates that cpu teardown is complete and that the | ||
131 | * cluster can be torn down without disrupting this CPU. | ||
132 | * To avoid deadlocks, this must be called before a CPU is powered down. | ||
133 | * The CPU cache (SCTLR.C bit) is expected to be off. | ||
134 | * However L2 cache might or might not be active. | ||
135 | */ | ||
136 | void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster) | ||
137 | { | ||
138 | dmb(); | ||
139 | mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN; | ||
140 | sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu); | ||
141 | dsb_sev(); | ||
142 | } | ||
143 | |||
144 | /* | ||
145 | * __mcpm_outbound_leave_critical: Leave the cluster teardown critical section. | ||
146 | * @state: the final state of the cluster: | ||
147 | * CLUSTER_UP: no destructive teardown was done and the cluster has been | ||
148 | * restored to the previous state (CPU cache still active); or | ||
149 | * CLUSTER_DOWN: the cluster has been torn-down, ready for power-off | ||
150 | * (CPU cache disabled, L2 cache either enabled or disabled). | ||
151 | */ | ||
152 | void __mcpm_outbound_leave_critical(unsigned int cluster, int state) | ||
153 | { | ||
154 | dmb(); | ||
155 | mcpm_sync.clusters[cluster].cluster = state; | ||
156 | sync_cache_w(&mcpm_sync.clusters[cluster].cluster); | ||
157 | dsb_sev(); | ||
158 | } | ||
159 | |||
160 | /* | ||
161 | * __mcpm_outbound_enter_critical: Enter the cluster teardown critical section. | ||
162 | * This function should be called by the last man, after local CPU teardown | ||
163 | * is complete. CPU cache expected to be active. | ||
164 | * | ||
165 | * Returns: | ||
166 | * false: the critical section was not entered because an inbound CPU was | ||
167 | * observed, or the cluster is already being set up; | ||
168 | * true: the critical section was entered: it is now safe to tear down the | ||
169 | * cluster. | ||
170 | */ | ||
171 | bool __mcpm_outbound_enter_critical(unsigned int cpu, unsigned int cluster) | ||
172 | { | ||
173 | unsigned int i; | ||
174 | struct mcpm_sync_struct *c = &mcpm_sync.clusters[cluster]; | ||
175 | |||
176 | /* Warn inbound CPUs that the cluster is being torn down: */ | ||
177 | c->cluster = CLUSTER_GOING_DOWN; | ||
178 | sync_cache_w(&c->cluster); | ||
179 | |||
180 | /* Back out if the inbound cluster is already in the critical region: */ | ||
181 | sync_cache_r(&c->inbound); | ||
182 | if (c->inbound == INBOUND_COMING_UP) | ||
183 | goto abort; | ||
184 | |||
185 | /* | ||
186 | * Wait for all CPUs to get out of the GOING_DOWN state, so that local | ||
187 | * teardown is complete on each CPU before tearing down the cluster. | ||
188 | * | ||
189 | * If any CPU has been woken up again from the DOWN state, then we | ||
190 | * shouldn't be taking the cluster down at all: abort in that case. | ||
191 | */ | ||
192 | sync_cache_r(&c->cpus); | ||
193 | for (i = 0; i < MAX_CPUS_PER_CLUSTER; i++) { | ||
194 | int cpustate; | ||
195 | |||
196 | if (i == cpu) | ||
197 | continue; | ||
198 | |||
199 | while (1) { | ||
200 | cpustate = c->cpus[i].cpu; | ||
201 | if (cpustate != CPU_GOING_DOWN) | ||
202 | break; | ||
203 | |||
204 | wfe(); | ||
205 | sync_cache_r(&c->cpus[i].cpu); | ||
206 | } | ||
207 | |||
208 | switch (cpustate) { | ||
209 | case CPU_DOWN: | ||
210 | continue; | ||
211 | |||
212 | default: | ||
213 | goto abort; | ||
214 | } | ||
215 | } | ||
216 | |||
217 | return true; | ||
218 | |||
219 | abort: | ||
220 | __mcpm_outbound_leave_critical(cluster, CLUSTER_UP); | ||
221 | return false; | ||
222 | } | ||
223 | |||
224 | int __mcpm_cluster_state(unsigned int cluster) | ||
225 | { | ||
226 | sync_cache_r(&mcpm_sync.clusters[cluster].cluster); | ||
227 | return mcpm_sync.clusters[cluster].cluster; | ||
228 | } | ||
229 | |||
230 | extern unsigned long mcpm_power_up_setup_phys; | ||
231 | |||
232 | int __init mcpm_sync_init( | ||
233 | void (*power_up_setup)(unsigned int affinity_level)) | ||
234 | { | ||
235 | unsigned int i, j, mpidr, this_cluster; | ||
236 | |||
237 | BUILD_BUG_ON(MCPM_SYNC_CLUSTER_SIZE * MAX_NR_CLUSTERS != sizeof mcpm_sync); | ||
238 | BUG_ON((unsigned long)&mcpm_sync & (__CACHE_WRITEBACK_GRANULE - 1)); | ||
239 | |||
240 | /* | ||
241 | * Set initial CPU and cluster states. | ||
242 | * Only one cluster is assumed to be active at this point. | ||
243 | */ | ||
244 | for (i = 0; i < MAX_NR_CLUSTERS; i++) { | ||
245 | mcpm_sync.clusters[i].cluster = CLUSTER_DOWN; | ||
246 | mcpm_sync.clusters[i].inbound = INBOUND_NOT_COMING_UP; | ||
247 | for (j = 0; j < MAX_CPUS_PER_CLUSTER; j++) | ||
248 | mcpm_sync.clusters[i].cpus[j].cpu = CPU_DOWN; | ||
249 | } | ||
250 | mpidr = read_cpuid_mpidr(); | ||
251 | this_cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); | ||
252 | for_each_online_cpu(i) | ||
253 | mcpm_sync.clusters[this_cluster].cpus[i].cpu = CPU_UP; | ||
254 | mcpm_sync.clusters[this_cluster].cluster = CLUSTER_UP; | ||
255 | sync_cache_w(&mcpm_sync); | ||
256 | |||
257 | if (power_up_setup) { | ||
258 | mcpm_power_up_setup_phys = virt_to_phys(power_up_setup); | ||
259 | sync_cache_w(&mcpm_power_up_setup_phys); | ||
260 | } | ||
261 | |||
262 | return 0; | ||
263 | } | ||
diff --git a/arch/arm/common/mcpm_head.S b/arch/arm/common/mcpm_head.S
new file mode 100644
index 000000000000..8178705c4b24
--- /dev/null
+++ b/arch/arm/common/mcpm_head.S
@@ -0,0 +1,219 @@
1 | /* | ||
2 | * arch/arm/common/mcpm_head.S -- kernel entry point for multi-cluster PM | ||
3 | * | ||
4 | * Created by: Nicolas Pitre, March 2012 | ||
5 | * Copyright: (C) 2012-2013 Linaro Limited | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | * | ||
11 | * | ||
12 | * Refer to Documentation/arm/cluster-pm-race-avoidance.txt | ||
13 | * for details of the synchronisation algorithms used here. | ||
14 | */ | ||
15 | |||
16 | #include <linux/linkage.h> | ||
17 | #include <asm/mcpm.h> | ||
18 | |||
19 | #include "vlock.h" | ||
20 | |||
21 | .if MCPM_SYNC_CLUSTER_CPUS | ||
22 | .error "cpus must be the first member of struct mcpm_sync_struct" | ||
23 | .endif | ||
24 | |||
25 | .macro pr_dbg string | ||
26 | #if defined(CONFIG_DEBUG_LL) && defined(DEBUG) | ||
27 | b 1901f | ||
28 | 1902: .asciz "CPU" | ||
29 | 1903: .asciz " cluster" | ||
30 | 1904: .asciz ": \string" | ||
31 | .align | ||
32 | 1901: adr r0, 1902b | ||
33 | bl printascii | ||
34 | mov r0, r9 | ||
35 | bl printhex8 | ||
36 | adr r0, 1903b | ||
37 | bl printascii | ||
38 | mov r0, r10 | ||
39 | bl printhex8 | ||
40 | adr r0, 1904b | ||
41 | bl printascii | ||
42 | #endif | ||
43 | .endm | ||
44 | |||
45 | .arm | ||
46 | .align | ||
47 | |||
48 | ENTRY(mcpm_entry_point) | ||
49 | |||
50 | THUMB( adr r12, BSYM(1f) ) | ||
51 | THUMB( bx r12 ) | ||
52 | THUMB( .thumb ) | ||
53 | 1: | ||
54 | mrc p15, 0, r0, c0, c0, 5 @ MPIDR | ||
55 | ubfx r9, r0, #0, #8 @ r9 = cpu | ||
56 | ubfx r10, r0, #8, #8 @ r10 = cluster | ||
57 | mov r3, #MAX_CPUS_PER_CLUSTER | ||
58 | mla r4, r3, r10, r9 @ r4 = canonical CPU index | ||
59 | cmp r4, #(MAX_CPUS_PER_CLUSTER * MAX_NR_CLUSTERS) | ||
60 | blo 2f | ||
61 | |||
62 | /* We didn't expect this CPU. Try to cheaply make it quiet. */ | ||
63 | 1: wfi | ||
64 | wfe | ||
65 | b 1b | ||
66 | |||
67 | 2: pr_dbg "kernel mcpm_entry_point\n" | ||
68 | |||
69 | /* | ||
70 | * MMU is off so we need to get to various variables in a | ||
71 | * position independent way. | ||
72 | */ | ||
73 | adr r5, 3f | ||
74 | ldmia r5, {r6, r7, r8, r11} | ||
75 | add r6, r5, r6 @ r6 = mcpm_entry_vectors | ||
76 | ldr r7, [r5, r7] @ r7 = mcpm_power_up_setup_phys | ||
77 | add r8, r5, r8 @ r8 = mcpm_sync | ||
78 | add r11, r5, r11 @ r11 = first_man_locks | ||
79 | |||
80 | mov r0, #MCPM_SYNC_CLUSTER_SIZE | ||
81 | mla r8, r0, r10, r8 @ r8 = sync cluster base | ||
82 | |||
83 | @ Signal that this CPU is coming UP: | ||
84 | mov r0, #CPU_COMING_UP | ||
85 | mov r5, #MCPM_SYNC_CPU_SIZE | ||
86 | mla r5, r9, r5, r8 @ r5 = sync cpu address | ||
87 | strb r0, [r5] | ||
88 | |||
89 | @ At this point, the cluster cannot unexpectedly enter the GOING_DOWN | ||
90 | @ state, because there is at least one active CPU (this CPU). | ||
91 | |||
92 | mov r0, #VLOCK_SIZE | ||
93 | mla r11, r0, r10, r11 @ r11 = cluster first man lock | ||
94 | mov r0, r11 | ||
95 | mov r1, r9 @ cpu | ||
96 | bl vlock_trylock @ implies DMB | ||
97 | |||
98 | cmp r0, #0 @ failed to get the lock? | ||
99 | bne mcpm_setup_wait @ wait for cluster setup if so | ||
100 | |||
101 | ldrb r0, [r8, #MCPM_SYNC_CLUSTER_CLUSTER] | ||
102 | cmp r0, #CLUSTER_UP @ cluster already up? | ||
103 | bne mcpm_setup @ if not, set up the cluster | ||
104 | |||
105 | @ Otherwise, release the first man lock and skip setup: | ||
106 | mov r0, r11 | ||
107 | bl vlock_unlock | ||
108 | b mcpm_setup_complete | ||
109 | |||
110 | mcpm_setup: | ||
111 | @ Control dependency implies strb not observable before previous ldrb. | ||
112 | |||
113 | @ Signal that the cluster is being brought up: | ||
114 | mov r0, #INBOUND_COMING_UP | ||
115 | strb r0, [r8, #MCPM_SYNC_CLUSTER_INBOUND] | ||
116 | dmb | ||
117 | |||
118 | @ Any CPU trying to take the cluster into CLUSTER_GOING_DOWN from this | ||
119 | @ point onwards will observe INBOUND_COMING_UP and abort. | ||
120 | |||
121 | @ Wait for any previously-pending cluster teardown operations to abort | ||
122 | @ or complete: | ||
123 | mcpm_teardown_wait: | ||
124 | ldrb r0, [r8, #MCPM_SYNC_CLUSTER_CLUSTER] | ||
125 | cmp r0, #CLUSTER_GOING_DOWN | ||
126 | bne first_man_setup | ||
127 | wfe | ||
128 | b mcpm_teardown_wait | ||
129 | |||
130 | first_man_setup: | ||
131 | dmb | ||
132 | |||
133 | @ If the outbound gave up before teardown started, skip cluster setup: | ||
134 | |||
135 | cmp r0, #CLUSTER_UP | ||
136 | beq mcpm_setup_leave | ||
137 | |||
138 | @ power_up_setup is now responsible for setting up the cluster: | ||
139 | |||
140 | cmp r7, #0 | ||
141 | mov r0, #1 @ second (cluster) affinity level | ||
142 | blxne r7 @ Call power_up_setup if defined | ||
143 | dmb | ||
144 | |||
145 | mov r0, #CLUSTER_UP | ||
146 | strb r0, [r8, #MCPM_SYNC_CLUSTER_CLUSTER] | ||
147 | dmb | ||
148 | |||
149 | mcpm_setup_leave: | ||
150 | @ Leave the cluster setup critical section: | ||
151 | |||
152 | mov r0, #INBOUND_NOT_COMING_UP | ||
153 | strb r0, [r8, #MCPM_SYNC_CLUSTER_INBOUND] | ||
154 | dsb | ||
155 | sev | ||
156 | |||
157 | mov r0, r11 | ||
158 | bl vlock_unlock @ implies DMB | ||
159 | b mcpm_setup_complete | ||
160 | |||
161 | @ In the contended case, non-first men wait here for cluster setup | ||
162 | @ to complete: | ||
163 | mcpm_setup_wait: | ||
164 | ldrb r0, [r8, #MCPM_SYNC_CLUSTER_CLUSTER] | ||
165 | cmp r0, #CLUSTER_UP | ||
166 | wfene | ||
167 | bne mcpm_setup_wait | ||
168 | dmb | ||
169 | |||
170 | mcpm_setup_complete: | ||
171 | @ If a platform-specific CPU setup hook is needed, it is | ||
172 | @ called from here. | ||
173 | |||
174 | cmp r7, #0 | ||
175 | mov r0, #0 @ first (CPU) affinity level | ||
176 | blxne r7 @ Call power_up_setup if defined | ||
177 | dmb | ||
178 | |||
179 | @ Mark the CPU as up: | ||
180 | |||
181 | mov r0, #CPU_UP | ||
182 | strb r0, [r5] | ||
183 | |||
184 | @ Observability order of CPU_UP and opening of the gate does not matter. | ||
185 | |||
186 | mcpm_entry_gated: | ||
187 | ldr r5, [r6, r4, lsl #2] @ r5 = CPU entry vector | ||
188 | cmp r5, #0 | ||
189 | wfeeq | ||
190 | beq mcpm_entry_gated | ||
191 | dmb | ||
192 | |||
193 | pr_dbg "released\n" | ||
194 | bx r5 | ||
195 | |||
196 | .align 2 | ||
197 | |||
198 | 3: .word mcpm_entry_vectors - . | ||
199 | .word mcpm_power_up_setup_phys - 3b | ||
200 | .word mcpm_sync - 3b | ||
201 | .word first_man_locks - 3b | ||
202 | |||
203 | ENDPROC(mcpm_entry_point) | ||
204 | |||
205 | .bss | ||
206 | |||
207 | .align CACHE_WRITEBACK_ORDER | ||
208 | .type first_man_locks, #object | ||
209 | first_man_locks: | ||
210 | .space VLOCK_SIZE * MAX_NR_CLUSTERS | ||
211 | .align CACHE_WRITEBACK_ORDER | ||
212 | |||
213 | .type mcpm_entry_vectors, #object | ||
214 | ENTRY(mcpm_entry_vectors) | ||
215 | .space 4 * MAX_NR_CLUSTERS * MAX_CPUS_PER_CLUSTER | ||
216 | |||
217 | .type mcpm_power_up_setup_phys, #object | ||
218 | ENTRY(mcpm_power_up_setup_phys) | ||
219 | .space 4 @ set by mcpm_sync_init() | ||
diff --git a/arch/arm/common/mcpm_platsmp.c b/arch/arm/common/mcpm_platsmp.c
new file mode 100644
index 000000000000..52b88d81b7bb
--- /dev/null
+++ b/arch/arm/common/mcpm_platsmp.c
@@ -0,0 +1,92 @@
1 | /* | ||
2 | * linux/arch/arm/common/mcpm_platsmp.c | ||
3 | * | ||
4 | * Created by: Nicolas Pitre, November 2012 | ||
5 | * Copyright: (C) 2012-2013 Linaro Limited | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | * | ||
11 | * Code to handle secondary CPU bringup and hotplug for the cluster power API. | ||
12 | */ | ||
13 | |||
14 | #include <linux/init.h> | ||
15 | #include <linux/smp.h> | ||
16 | #include <linux/spinlock.h> | ||
17 | |||
18 | #include <linux/irqchip/arm-gic.h> | ||
19 | |||
20 | #include <asm/mcpm.h> | ||
21 | #include <asm/smp.h> | ||
22 | #include <asm/smp_plat.h> | ||
23 | |||
24 | static void __init simple_smp_init_cpus(void) | ||
25 | { | ||
26 | } | ||
27 | |||
28 | static int __cpuinit mcpm_boot_secondary(unsigned int cpu, struct task_struct *idle) | ||
29 | { | ||
30 | unsigned int mpidr, pcpu, pcluster, ret; | ||
31 | extern void secondary_startup(void); | ||
32 | |||
33 | mpidr = cpu_logical_map(cpu); | ||
34 | pcpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); | ||
35 | pcluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); | ||
36 | pr_debug("%s: logical CPU %d is physical CPU %d cluster %d\n", | ||
37 | __func__, cpu, pcpu, pcluster); | ||
38 | |||
39 | mcpm_set_entry_vector(pcpu, pcluster, NULL); | ||
40 | ret = mcpm_cpu_power_up(pcpu, pcluster); | ||
41 | if (ret) | ||
42 | return ret; | ||
43 | mcpm_set_entry_vector(pcpu, pcluster, secondary_startup); | ||
44 | arch_send_wakeup_ipi_mask(cpumask_of(cpu)); | ||
45 | dsb_sev(); | ||
46 | return 0; | ||
47 | } | ||
48 | |||
49 | static void __cpuinit mcpm_secondary_init(unsigned int cpu) | ||
50 | { | ||
51 | mcpm_cpu_powered_up(); | ||
52 | gic_secondary_init(0); | ||
53 | } | ||
54 | |||
55 | #ifdef CONFIG_HOTPLUG_CPU | ||
56 | |||
57 | static int mcpm_cpu_disable(unsigned int cpu) | ||
58 | { | ||
59 | /* | ||
60 | * We assume all CPUs may be shut down. | ||
61 | * This would be the hook to use for eventual Secure | ||
62 | * OS migration requests as described in the PSCI spec. | ||
63 | */ | ||
64 | return 0; | ||
65 | } | ||
66 | |||
67 | static void mcpm_cpu_die(unsigned int cpu) | ||
68 | { | ||
69 | unsigned int mpidr, pcpu, pcluster; | ||
70 | mpidr = read_cpuid_mpidr(); | ||
71 | pcpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); | ||
72 | pcluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); | ||
73 | mcpm_set_entry_vector(pcpu, pcluster, NULL); | ||
74 | mcpm_cpu_power_down(); | ||
75 | } | ||
76 | |||
77 | #endif | ||
78 | |||
79 | static struct smp_operations __initdata mcpm_smp_ops = { | ||
80 | .smp_init_cpus = simple_smp_init_cpus, | ||
81 | .smp_boot_secondary = mcpm_boot_secondary, | ||
82 | .smp_secondary_init = mcpm_secondary_init, | ||
83 | #ifdef CONFIG_HOTPLUG_CPU | ||
84 | .cpu_disable = mcpm_cpu_disable, | ||
85 | .cpu_die = mcpm_cpu_die, | ||
86 | #endif | ||
87 | }; | ||
88 | |||
89 | void __init mcpm_smp_set_ops(void) | ||
90 | { | ||
91 | smp_set_ops(&mcpm_smp_ops); | ||
92 | } | ||
diff --git a/arch/arm/common/vlock.S b/arch/arm/common/vlock.S
new file mode 100644
index 000000000000..ff198583f683
--- /dev/null
+++ b/arch/arm/common/vlock.S
@@ -0,0 +1,108 @@
1 | /* | ||
2 | * vlock.S - simple voting lock implementation for ARM | ||
3 | * | ||
4 | * Created by: Dave Martin, 2012-08-16 | ||
5 | * Copyright: (C) 2012-2013 Linaro Limited | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * | ||
17 | * This algorithm is described in more detail in | ||
18 | * Documentation/arm/vlocks.txt. | ||
19 | */ | ||
20 | |||
21 | #include <linux/linkage.h> | ||
22 | #include "vlock.h" | ||
23 | |||
24 | /* Select different code if voting flags can fit in a single word. */ | ||
25 | #if VLOCK_VOTING_SIZE > 4 | ||
26 | #define FEW(x...) | ||
27 | #define MANY(x...) x | ||
28 | #else | ||
29 | #define FEW(x...) x | ||
30 | #define MANY(x...) | ||
31 | #endif | ||
32 | |||
33 | @ voting lock for first-man coordination | ||
34 | |||
35 | .macro voting_begin rbase:req, rcpu:req, rscratch:req | ||
36 | mov \rscratch, #1 | ||
37 | strb \rscratch, [\rbase, \rcpu] | ||
38 | dmb | ||
39 | .endm | ||
40 | |||
41 | .macro voting_end rbase:req, rcpu:req, rscratch:req | ||
42 | dmb | ||
43 | mov \rscratch, #0 | ||
44 | strb \rscratch, [\rbase, \rcpu] | ||
45 | dsb | ||
46 | sev | ||
47 | .endm | ||
48 | |||
49 | /* | ||
50 | * The vlock structure must reside in Strongly-Ordered or Device memory. | ||
51 | * This implementation deliberately eliminates most of the barriers which | ||
52 | * would be required for other memory types, and assumes that independent | ||
53 | * writes to neighbouring locations within a cacheline do not interfere | ||
54 | * with one another. | ||
55 | */ | ||
56 | |||
57 | @ r0: lock structure base | ||
58 | @ r1: CPU ID (0-based index within cluster) | ||
59 | ENTRY(vlock_trylock) | ||
60 | add r1, r1, #VLOCK_VOTING_OFFSET | ||
61 | |||
62 | voting_begin r0, r1, r2 | ||
63 | |||
64 | ldrb r2, [r0, #VLOCK_OWNER_OFFSET] @ check whether lock is held | ||
65 | cmp r2, #VLOCK_OWNER_NONE | ||
66 | bne trylock_fail @ fail if so | ||
67 | |||
68 | @ Control dependency implies strb not observable before previous ldrb. | ||
69 | |||
70 | strb r1, [r0, #VLOCK_OWNER_OFFSET] @ submit my vote | ||
71 | |||
72 | voting_end r0, r1, r2 @ implies DMB | ||
73 | |||
74 | @ Wait for the current round of voting to finish: | ||
75 | |||
76 | MANY( mov r3, #VLOCK_VOTING_OFFSET ) | ||
77 | 0: | ||
78 | MANY( ldr r2, [r0, r3] ) | ||
79 | FEW( ldr r2, [r0, #VLOCK_VOTING_OFFSET] ) | ||
80 | cmp r2, #0 | ||
81 | wfene | ||
82 | bne 0b | ||
83 | MANY( add r3, r3, #4 ) | ||
84 | MANY( cmp r3, #VLOCK_VOTING_OFFSET + VLOCK_VOTING_SIZE ) | ||
85 | MANY( bne 0b ) | ||
86 | |||
87 | @ Check who won: | ||
88 | |||
89 | dmb | ||
90 | ldrb r2, [r0, #VLOCK_OWNER_OFFSET] | ||
91 | eor r0, r1, r2 @ zero if I won, else nonzero | ||
92 | bx lr | ||
93 | |||
94 | trylock_fail: | ||
95 | voting_end r0, r1, r2 | ||
96 | mov r0, #1 @ nonzero indicates that I lost | ||
97 | bx lr | ||
98 | ENDPROC(vlock_trylock) | ||
99 | |||
100 | @ r0: lock structure base | ||
101 | ENTRY(vlock_unlock) | ||
102 | dmb | ||
103 | mov r1, #VLOCK_OWNER_NONE | ||
104 | strb r1, [r0, #VLOCK_OWNER_OFFSET] | ||
105 | dsb | ||
106 | sev | ||
107 | bx lr | ||
108 | ENDPROC(vlock_unlock) | ||
diff --git a/arch/arm/common/vlock.h b/arch/arm/common/vlock.h
new file mode 100644
index 000000000000..3b441475a59b
--- /dev/null
+++ b/arch/arm/common/vlock.h
@@ -0,0 +1,29 @@
+/*
+ * vlock.h - simple voting lock implementation
+ *
+ * Created by: Dave Martin, 2012-08-16
+ * Copyright: (C) 2012-2013 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __VLOCK_H
+#define __VLOCK_H
+
+#include <asm/mcpm.h>
+
+/* Offsets and sizes are rounded to a word (4 bytes) */
+#define VLOCK_OWNER_OFFSET	0
+#define VLOCK_VOTING_OFFSET	4
+#define VLOCK_VOTING_SIZE	((MAX_CPUS_PER_CLUSTER + 3) / 4 * 4)
+#define VLOCK_SIZE		(VLOCK_VOTING_OFFSET + VLOCK_VOTING_SIZE)
+#define VLOCK_OWNER_NONE	0
+
+#endif /* ! __VLOCK_H */
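For readability, here is a plain-C restatement of the voting-lock algorithm that vlock.S implements on top of these offsets. It is illustrative only: the real lock lives in Strongly-Ordered memory and relies on the explicit DMB/DSB/SEV sequencing shown in the assembly, which ordinary C code does not provide, and the struct and function names here are hypothetical.

/* Illustrative restatement only -- not a drop-in replacement for vlock.S. */
struct vlock {
	unsigned char owner;				/* VLOCK_OWNER_OFFSET */
	unsigned char voting[MAX_CPUS_PER_CLUSTER];	/* VLOCK_VOTING_OFFSET onwards */
};

/* Returns 0 if this CPU won the lock, nonzero if it lost. */
static int vlock_trylock_c(struct vlock *v, unsigned int cpu)
{
	unsigned int i, me = cpu + VLOCK_VOTING_OFFSET;

	v->voting[cpu] = 1;			/* voting_begin: raise my flag */

	if (v->owner != VLOCK_OWNER_NONE) {	/* lock already claimed */
		v->voting[cpu] = 0;		/* voting_end */
		return 1;
	}

	v->owner = me;				/* submit my vote */
	v->voting[cpu] = 0;			/* voting_end */

	/* Wait for the current round of voting to finish. */
	for (;;) {
		unsigned int pending = 0;

		for (i = 0; i < MAX_CPUS_PER_CLUSTER; i++)
			pending |= v->voting[i];
		if (!pending)
			break;
	}

	return v->owner != me;			/* 0 if I won, else nonzero */
}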
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index c79f61faa3a5..da1c77d39327 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -243,6 +243,29 @@ typedef struct {
 
 #define ATOMIC64_INIT(i) { (i) }
 
+#ifdef CONFIG_ARM_LPAE
+static inline u64 atomic64_read(const atomic64_t *v)
+{
+	u64 result;
+
+	__asm__ __volatile__("@ atomic64_read\n"
+"	ldrd	%0, %H0, [%1]"
+	: "=&r" (result)
+	: "r" (&v->counter), "Qo" (v->counter)
+	);
+
+	return result;
+}
+
+static inline void atomic64_set(atomic64_t *v, u64 i)
+{
+	__asm__ __volatile__("@ atomic64_set\n"
+"	strd	%2, %H2, [%1]"
+	: "=Qo" (v->counter)
+	: "r" (&v->counter), "r" (i)
+	);
+}
+#else
 static inline u64 atomic64_read(const atomic64_t *v)
 {
 	u64 result;
@@ -269,6 +292,7 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
 	: "r" (&v->counter), "r" (i)
 	: "cc");
 }
+#endif
 
 static inline void atomic64_add(u64 i, atomic64_t *v)
 {
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index e1489c54cd12..bff71388e72a 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -363,4 +363,79 @@ static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
 	flush_cache_all();
 }
 
+/*
+ * Memory synchronization helpers for mixed cached vs non cached accesses.
+ *
+ * Some synchronization algorithms have to set states in memory with the
+ * cache enabled or disabled depending on the code path. It is crucial
+ * to always ensure proper cache maintenance to update main memory right
+ * away in that case.
+ *
+ * Any cached write must be followed by a cache clean operation.
+ * Any cached read must be preceded by a cache invalidate operation.
+ * Yet, in the read case, a cache flush i.e. atomic clean+invalidate
+ * operation is needed to avoid discarding possible concurrent writes to the
+ * accessed memory.
+ *
+ * Also, in order to prevent a cached writer from interfering with an
+ * adjacent non-cached writer, each state variable must be located to
+ * a separate cache line.
+ */
+
+/*
+ * This needs to be >= the max cache writeback size of all
+ * supported platforms included in the current kernel configuration.
+ * This is used to align state variables to their own cache lines.
+ */
+#define __CACHE_WRITEBACK_ORDER 6  /* guessed from existing platforms */
+#define __CACHE_WRITEBACK_GRANULE (1 << __CACHE_WRITEBACK_ORDER)
+
+/*
+ * There is no __cpuc_clean_dcache_area but we use it anyway for
+ * code intent clarity, and alias it to __cpuc_flush_dcache_area.
+ */
+#define __cpuc_clean_dcache_area __cpuc_flush_dcache_area
+
+/*
+ * Ensure preceding writes to *p by this CPU are visible to
+ * subsequent reads by other CPUs:
+ */
+static inline void __sync_cache_range_w(volatile void *p, size_t size)
+{
+	char *_p = (char *)p;
+
+	__cpuc_clean_dcache_area(_p, size);
+	outer_clean_range(__pa(_p), __pa(_p + size));
+}
+
+/*
+ * Ensure preceding writes to *p by other CPUs are visible to
+ * subsequent reads by this CPU. We must be careful not to
+ * discard data simultaneously written by another CPU, hence the
+ * usage of flush rather than invalidate operations.
+ */
+static inline void __sync_cache_range_r(volatile void *p, size_t size)
+{
+	char *_p = (char *)p;
+
+#ifdef CONFIG_OUTER_CACHE
+	if (outer_cache.flush_range) {
+		/*
+		 * Ensure dirty data migrated from other CPUs into our cache
+		 * are cleaned out safely before the outer cache is cleaned:
+		 */
+		__cpuc_clean_dcache_area(_p, size);
+
+		/* Clean and invalidate stale data for *p from outer ... */
+		outer_flush_range(__pa(_p), __pa(_p + size));
+	}
+#endif
+
+	/* ... and inner cache: */
+	__cpuc_flush_dcache_area(_p, size);
+}
+
+#define sync_cache_w(ptr) __sync_cache_range_w(ptr, sizeof *(ptr))
+#define sync_cache_r(ptr) __sync_cache_range_r(ptr, sizeof *(ptr))
+
 #endif
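The sync_cache_w()/sync_cache_r() helpers added above are what the MCPM code uses to publish state between CPUs that may be running with their caches on or off. A minimal usage sketch, with hypothetical variable and function names, might look like this:

/* Illustrative sketch only -- the names here are hypothetical. */
#include <asm/cacheflush.h>

/*
 * Give the state variable its own writeback granule, as the comment above
 * requires, so a cached writer cannot interfere with a non-cached neighbour.
 */
static int boot_release_flag __attribute__((__aligned__(__CACHE_WRITEBACK_GRANULE)));

static void publish_release(void)		/* runs with the cache enabled */
{
	boot_release_flag = 1;
	sync_cache_w(&boot_release_flag);	/* clean the write out to main memory */
}

static int observe_release(void)		/* may run before the cache is enabled */
{
	sync_cache_r(&boot_release_flag);	/* flush any stale cached copy first */
	return boot_release_flag;
}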
diff --git a/arch/arm/include/asm/cp15.h b/arch/arm/include/asm/cp15.h
index 5ef4d8015a60..1f3262e99d81 100644
--- a/arch/arm/include/asm/cp15.h
+++ b/arch/arm/include/asm/cp15.h
@@ -42,6 +42,8 @@
 #define vectors_high()	(0)
 #endif
 
+#ifdef CONFIG_CPU_CP15
+
 extern unsigned long cr_no_alignment;	/* defined in entry-armv.S */
 extern unsigned long cr_alignment;	/* defined in entry-armv.S */
 
@@ -82,6 +84,18 @@ static inline void set_copro_access(unsigned int val)
 	isb();
 }
 
-#endif
+#else /* ifdef CONFIG_CPU_CP15 */
+
+/*
+ * cr_alignment and cr_no_alignment are tightly coupled to cp15 (at least in the
+ * minds of the developers). Yielding 0 for machines without a cp15 (and making
+ * it read-only) is fine for most cases and saves quite some #ifdeffery.
+ */
+#define cr_no_alignment	UL(0)
+#define cr_alignment	UL(0)
+
+#endif /* ifdef CONFIG_CPU_CP15 / else */
+
+#endif /* ifndef __ASSEMBLY__ */
 
 #endif
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index ad41ec2471e8..7652712d1d14 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -38,6 +38,24 @@
38 | #define MPIDR_AFFINITY_LEVEL(mpidr, level) \ | 38 | #define MPIDR_AFFINITY_LEVEL(mpidr, level) \ |
39 | ((mpidr >> (MPIDR_LEVEL_BITS * level)) & MPIDR_LEVEL_MASK) | 39 | ((mpidr >> (MPIDR_LEVEL_BITS * level)) & MPIDR_LEVEL_MASK) |
40 | 40 | ||
41 | #define ARM_CPU_IMP_ARM 0x41 | ||
42 | #define ARM_CPU_IMP_INTEL 0x69 | ||
43 | |||
44 | #define ARM_CPU_PART_ARM1136 0xB360 | ||
45 | #define ARM_CPU_PART_ARM1156 0xB560 | ||
46 | #define ARM_CPU_PART_ARM1176 0xB760 | ||
47 | #define ARM_CPU_PART_ARM11MPCORE 0xB020 | ||
48 | #define ARM_CPU_PART_CORTEX_A8 0xC080 | ||
49 | #define ARM_CPU_PART_CORTEX_A9 0xC090 | ||
50 | #define ARM_CPU_PART_CORTEX_A5 0xC050 | ||
51 | #define ARM_CPU_PART_CORTEX_A15 0xC0F0 | ||
52 | #define ARM_CPU_PART_CORTEX_A7 0xC070 | ||
53 | |||
54 | #define ARM_CPU_XSCALE_ARCH_MASK 0xe000 | ||
55 | #define ARM_CPU_XSCALE_ARCH_V1 0x2000 | ||
56 | #define ARM_CPU_XSCALE_ARCH_V2 0x4000 | ||
57 | #define ARM_CPU_XSCALE_ARCH_V3 0x6000 | ||
58 | |||
41 | extern unsigned int processor_id; | 59 | extern unsigned int processor_id; |
42 | 60 | ||
43 | #ifdef CONFIG_CPU_CP15 | 61 | #ifdef CONFIG_CPU_CP15 |
@@ -50,6 +68,7 @@ extern unsigned int processor_id; | |||
50 | : "cc"); \ | 68 | : "cc"); \ |
51 | __val; \ | 69 | __val; \ |
52 | }) | 70 | }) |
71 | |||
53 | #define read_cpuid_ext(ext_reg) \ | 72 | #define read_cpuid_ext(ext_reg) \ |
54 | ({ \ | 73 | ({ \ |
55 | unsigned int __val; \ | 74 | unsigned int __val; \ |
@@ -59,29 +78,24 @@ extern unsigned int processor_id; | |||
59 | : "cc"); \ | 78 | : "cc"); \ |
60 | __val; \ | 79 | __val; \ |
61 | }) | 80 | }) |
62 | #else | ||
63 | #define read_cpuid(reg) (processor_id) | ||
64 | #define read_cpuid_ext(reg) 0 | ||
65 | #endif | ||
66 | 81 | ||
67 | #define ARM_CPU_IMP_ARM 0x41 | 82 | #else /* ifdef CONFIG_CPU_CP15 */ |
68 | #define ARM_CPU_IMP_INTEL 0x69 | ||
69 | 83 | ||
70 | #define ARM_CPU_PART_ARM1136 0xB360 | 84 | /* |
71 | #define ARM_CPU_PART_ARM1156 0xB560 | 85 | * read_cpuid and read_cpuid_ext should only ever be called on machines that |
72 | #define ARM_CPU_PART_ARM1176 0xB760 | 86 | * have cp15 so warn on other usages. |
73 | #define ARM_CPU_PART_ARM11MPCORE 0xB020 | 87 | */ |
74 | #define ARM_CPU_PART_CORTEX_A8 0xC080 | 88 | #define read_cpuid(reg) \ |
75 | #define ARM_CPU_PART_CORTEX_A9 0xC090 | 89 | ({ \ |
76 | #define ARM_CPU_PART_CORTEX_A5 0xC050 | 90 | WARN_ON_ONCE(1); \ |
77 | #define ARM_CPU_PART_CORTEX_A15 0xC0F0 | 91 | 0; \ |
78 | #define ARM_CPU_PART_CORTEX_A7 0xC070 | 92 | }) |
79 | 93 | ||
80 | #define ARM_CPU_XSCALE_ARCH_MASK 0xe000 | 94 | #define read_cpuid_ext(reg) read_cpuid(reg) |
81 | #define ARM_CPU_XSCALE_ARCH_V1 0x2000 | 95 | |
82 | #define ARM_CPU_XSCALE_ARCH_V2 0x4000 | 96 | #endif /* ifdef CONFIG_CPU_CP15 / else */ |
83 | #define ARM_CPU_XSCALE_ARCH_V3 0x6000 | ||
84 | 97 | ||
98 | #ifdef CONFIG_CPU_CP15 | ||
85 | /* | 99 | /* |
86 | * The CPU ID never changes at run time, so we might as well tell the | 100 | * The CPU ID never changes at run time, so we might as well tell the |
87 | * compiler that it's constant. Use this function to read the CPU ID | 101 | * compiler that it's constant. Use this function to read the CPU ID |
@@ -92,6 +106,15 @@ static inline unsigned int __attribute_const__ read_cpuid_id(void) | |||
92 | return read_cpuid(CPUID_ID); | 106 | return read_cpuid(CPUID_ID); |
93 | } | 107 | } |
94 | 108 | ||
109 | #else /* ifdef CONFIG_CPU_CP15 */ | ||
110 | |||
111 | static inline unsigned int __attribute_const__ read_cpuid_id(void) | ||
112 | { | ||
113 | return processor_id; | ||
114 | } | ||
115 | |||
116 | #endif /* ifdef CONFIG_CPU_CP15 / else */ | ||
117 | |||
95 | static inline unsigned int __attribute_const__ read_cpuid_implementor(void) | 118 | static inline unsigned int __attribute_const__ read_cpuid_implementor(void) |
96 | { | 119 | { |
97 | return (read_cpuid_id() & 0xFF000000) >> 24; | 120 | return (read_cpuid_id() & 0xFF000000) >> 24; |
diff --git a/arch/arm/include/asm/glue-df.h b/arch/arm/include/asm/glue-df.h
index 8cacbcda76da..b6e9f2c108b5 100644
--- a/arch/arm/include/asm/glue-df.h
+++ b/arch/arm/include/asm/glue-df.h
@@ -18,12 +18,12 @@
18 | * ================ | 18 | * ================ |
19 | * | 19 | * |
20 | * We have the following to choose from: | 20 | * We have the following to choose from: |
21 | * arm6 - ARM6 style | ||
22 | * arm7 - ARM7 style | 21 | * arm7 - ARM7 style |
23 | * v4_early - ARMv4 without Thumb early abort handler | 22 | * v4_early - ARMv4 without Thumb early abort handler |
24 | * v4t_late - ARMv4 with Thumb late abort handler | 23 | * v4t_late - ARMv4 with Thumb late abort handler |
25 | * v4t_early - ARMv4 with Thumb early abort handler | 24 | * v4t_early - ARMv4 with Thumb early abort handler |
26 | * v5tej_early - ARMv5 with Thumb and Java early abort handler | 25 | * v5t_early - ARMv5 with Thumb early abort handler |
26 | * v5tj_early - ARMv5 with Thumb and Java early abort handler | ||
27 | * xscale - ARMv5 with Thumb with Xscale extensions | 27 | * xscale - ARMv5 with Thumb with Xscale extensions |
28 | * v6_early - ARMv6 generic early abort handler | 28 | * v6_early - ARMv6 generic early abort handler |
29 | * v7_early - ARMv7 generic early abort handler | 29 | * v7_early - ARMv7 generic early abort handler |
@@ -39,19 +39,19 @@ | |||
39 | # endif | 39 | # endif |
40 | #endif | 40 | #endif |
41 | 41 | ||
42 | #ifdef CONFIG_CPU_ABRT_LV4T | 42 | #ifdef CONFIG_CPU_ABRT_EV4 |
43 | # ifdef CPU_DABORT_HANDLER | 43 | # ifdef CPU_DABORT_HANDLER |
44 | # define MULTI_DABORT 1 | 44 | # define MULTI_DABORT 1 |
45 | # else | 45 | # else |
46 | # define CPU_DABORT_HANDLER v4t_late_abort | 46 | # define CPU_DABORT_HANDLER v4_early_abort |
47 | # endif | 47 | # endif |
48 | #endif | 48 | #endif |
49 | 49 | ||
50 | #ifdef CONFIG_CPU_ABRT_EV4 | 50 | #ifdef CONFIG_CPU_ABRT_LV4T |
51 | # ifdef CPU_DABORT_HANDLER | 51 | # ifdef CPU_DABORT_HANDLER |
52 | # define MULTI_DABORT 1 | 52 | # define MULTI_DABORT 1 |
53 | # else | 53 | # else |
54 | # define CPU_DABORT_HANDLER v4_early_abort | 54 | # define CPU_DABORT_HANDLER v4t_late_abort |
55 | # endif | 55 | # endif |
56 | #endif | 56 | #endif |
57 | 57 | ||
@@ -63,19 +63,19 @@ | |||
63 | # endif | 63 | # endif |
64 | #endif | 64 | #endif |
65 | 65 | ||
66 | #ifdef CONFIG_CPU_ABRT_EV5TJ | 66 | #ifdef CONFIG_CPU_ABRT_EV5T |
67 | # ifdef CPU_DABORT_HANDLER | 67 | # ifdef CPU_DABORT_HANDLER |
68 | # define MULTI_DABORT 1 | 68 | # define MULTI_DABORT 1 |
69 | # else | 69 | # else |
70 | # define CPU_DABORT_HANDLER v5tj_early_abort | 70 | # define CPU_DABORT_HANDLER v5t_early_abort |
71 | # endif | 71 | # endif |
72 | #endif | 72 | #endif |
73 | 73 | ||
74 | #ifdef CONFIG_CPU_ABRT_EV5T | 74 | #ifdef CONFIG_CPU_ABRT_EV5TJ |
75 | # ifdef CPU_DABORT_HANDLER | 75 | # ifdef CPU_DABORT_HANDLER |
76 | # define MULTI_DABORT 1 | 76 | # define MULTI_DABORT 1 |
77 | # else | 77 | # else |
78 | # define CPU_DABORT_HANDLER v5t_early_abort | 78 | # define CPU_DABORT_HANDLER v5tj_early_abort |
79 | # endif | 79 | # endif |
80 | #endif | 80 | #endif |
81 | 81 | ||
diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
index 7c3d813e15df..124623e5ef14 100644
--- a/arch/arm/include/asm/kvm_arm.h
+++ b/arch/arm/include/asm/kvm_arm.h
@@ -211,4 +211,8 @@
 
 #define HSR_HVC_IMM_MASK	((1UL << 16) - 1)
 
+#define HSR_DABT_S1PTW		(1U << 7)
+#define HSR_DABT_CM		(1U << 8)
+#define HSR_DABT_EA		(1U << 9)
+
 #endif /* __ARM_KVM_ARM_H__ */
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
index e4956f4e23e1..18d50322a9e2 100644
--- a/arch/arm/include/asm/kvm_asm.h
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -75,7 +75,7 @@ extern char __kvm_hyp_code_end[];
 extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
 
 extern void __kvm_flush_vm_context(void);
-extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
+extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
 
 extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 #endif
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index fd611996bfb5..82b4babead2c 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -22,11 +22,12 @@
22 | #include <linux/kvm_host.h> | 22 | #include <linux/kvm_host.h> |
23 | #include <asm/kvm_asm.h> | 23 | #include <asm/kvm_asm.h> |
24 | #include <asm/kvm_mmio.h> | 24 | #include <asm/kvm_mmio.h> |
25 | #include <asm/kvm_arm.h> | ||
25 | 26 | ||
26 | u32 *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num); | 27 | unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num); |
27 | u32 *vcpu_spsr(struct kvm_vcpu *vcpu); | 28 | unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu); |
28 | 29 | ||
29 | int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run); | 30 | bool kvm_condition_valid(struct kvm_vcpu *vcpu); |
30 | void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr); | 31 | void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr); |
31 | void kvm_inject_undefined(struct kvm_vcpu *vcpu); | 32 | void kvm_inject_undefined(struct kvm_vcpu *vcpu); |
32 | void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr); | 33 | void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr); |
@@ -37,14 +38,14 @@ static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu) | |||
37 | return 1; | 38 | return 1; |
38 | } | 39 | } |
39 | 40 | ||
40 | static inline u32 *vcpu_pc(struct kvm_vcpu *vcpu) | 41 | static inline unsigned long *vcpu_pc(struct kvm_vcpu *vcpu) |
41 | { | 42 | { |
42 | return (u32 *)&vcpu->arch.regs.usr_regs.ARM_pc; | 43 | return &vcpu->arch.regs.usr_regs.ARM_pc; |
43 | } | 44 | } |
44 | 45 | ||
45 | static inline u32 *vcpu_cpsr(struct kvm_vcpu *vcpu) | 46 | static inline unsigned long *vcpu_cpsr(struct kvm_vcpu *vcpu) |
46 | { | 47 | { |
47 | return (u32 *)&vcpu->arch.regs.usr_regs.ARM_cpsr; | 48 | return &vcpu->arch.regs.usr_regs.ARM_cpsr; |
48 | } | 49 | } |
49 | 50 | ||
50 | static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu) | 51 | static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu) |
@@ -69,4 +70,96 @@ static inline bool kvm_vcpu_reg_is_pc(struct kvm_vcpu *vcpu, int reg) | |||
69 | return reg == 15; | 70 | return reg == 15; |
70 | } | 71 | } |
71 | 72 | ||
73 | static inline u32 kvm_vcpu_get_hsr(struct kvm_vcpu *vcpu) | ||
74 | { | ||
75 | return vcpu->arch.fault.hsr; | ||
76 | } | ||
77 | |||
78 | static inline unsigned long kvm_vcpu_get_hfar(struct kvm_vcpu *vcpu) | ||
79 | { | ||
80 | return vcpu->arch.fault.hxfar; | ||
81 | } | ||
82 | |||
83 | static inline phys_addr_t kvm_vcpu_get_fault_ipa(struct kvm_vcpu *vcpu) | ||
84 | { | ||
85 | return ((phys_addr_t)vcpu->arch.fault.hpfar & HPFAR_MASK) << 8; | ||
86 | } | ||
87 | |||
88 | static inline unsigned long kvm_vcpu_get_hyp_pc(struct kvm_vcpu *vcpu) | ||
89 | { | ||
90 | return vcpu->arch.fault.hyp_pc; | ||
91 | } | ||
92 | |||
93 | static inline bool kvm_vcpu_dabt_isvalid(struct kvm_vcpu *vcpu) | ||
94 | { | ||
95 | return kvm_vcpu_get_hsr(vcpu) & HSR_ISV; | ||
96 | } | ||
97 | |||
98 | static inline bool kvm_vcpu_dabt_iswrite(struct kvm_vcpu *vcpu) | ||
99 | { | ||
100 | return kvm_vcpu_get_hsr(vcpu) & HSR_WNR; | ||
101 | } | ||
102 | |||
103 | static inline bool kvm_vcpu_dabt_issext(struct kvm_vcpu *vcpu) | ||
104 | { | ||
105 | return kvm_vcpu_get_hsr(vcpu) & HSR_SSE; | ||
106 | } | ||
107 | |||
108 | static inline int kvm_vcpu_dabt_get_rd(struct kvm_vcpu *vcpu) | ||
109 | { | ||
110 | return (kvm_vcpu_get_hsr(vcpu) & HSR_SRT_MASK) >> HSR_SRT_SHIFT; | ||
111 | } | ||
112 | |||
113 | static inline bool kvm_vcpu_dabt_isextabt(struct kvm_vcpu *vcpu) | ||
114 | { | ||
115 | return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_EA; | ||
116 | } | ||
117 | |||
118 | static inline bool kvm_vcpu_dabt_iss1tw(struct kvm_vcpu *vcpu) | ||
119 | { | ||
120 | return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_S1PTW; | ||
121 | } | ||
122 | |||
123 | /* Get Access Size from a data abort */ | ||
124 | static inline int kvm_vcpu_dabt_get_as(struct kvm_vcpu *vcpu) | ||
125 | { | ||
126 | switch ((kvm_vcpu_get_hsr(vcpu) >> 22) & 0x3) { | ||
127 | case 0: | ||
128 | return 1; | ||
129 | case 1: | ||
130 | return 2; | ||
131 | case 2: | ||
132 | return 4; | ||
133 | default: | ||
134 | kvm_err("Hardware is weird: SAS 0b11 is reserved\n"); | ||
135 | return -EFAULT; | ||
136 | } | ||
137 | } | ||
138 | |||
139 | /* This one is not specific to Data Abort */ | ||
140 | static inline bool kvm_vcpu_trap_il_is32bit(struct kvm_vcpu *vcpu) | ||
141 | { | ||
142 | return kvm_vcpu_get_hsr(vcpu) & HSR_IL; | ||
143 | } | ||
144 | |||
145 | static inline u8 kvm_vcpu_trap_get_class(struct kvm_vcpu *vcpu) | ||
146 | { | ||
147 | return kvm_vcpu_get_hsr(vcpu) >> HSR_EC_SHIFT; | ||
148 | } | ||
149 | |||
150 | static inline bool kvm_vcpu_trap_is_iabt(struct kvm_vcpu *vcpu) | ||
151 | { | ||
152 | return kvm_vcpu_trap_get_class(vcpu) == HSR_EC_IABT; | ||
153 | } | ||
154 | |||
155 | static inline u8 kvm_vcpu_trap_get_fault(struct kvm_vcpu *vcpu) | ||
156 | { | ||
157 | return kvm_vcpu_get_hsr(vcpu) & HSR_FSC_TYPE; | ||
158 | } | ||
159 | |||
160 | static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu) | ||
161 | { | ||
162 | return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK; | ||
163 | } | ||
164 | |||
72 | #endif /* __ARM_KVM_EMULATE_H__ */ | 165 | #endif /* __ARM_KVM_EMULATE_H__ */ |
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index d1736a53b12d..0c4e643d939e 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -80,6 +80,15 @@ struct kvm_mmu_memory_cache {
80 | void *objects[KVM_NR_MEM_OBJS]; | 80 | void *objects[KVM_NR_MEM_OBJS]; |
81 | }; | 81 | }; |
82 | 82 | ||
83 | struct kvm_vcpu_fault_info { | ||
84 | u32 hsr; /* Hyp Syndrome Register */ | ||
85 | u32 hxfar; /* Hyp Data/Inst. Fault Address Register */ | ||
86 | u32 hpfar; /* Hyp IPA Fault Address Register */ | ||
87 | u32 hyp_pc; /* PC when exception was taken from Hyp mode */ | ||
88 | }; | ||
89 | |||
90 | typedef struct vfp_hard_struct kvm_kernel_vfp_t; | ||
91 | |||
83 | struct kvm_vcpu_arch { | 92 | struct kvm_vcpu_arch { |
84 | struct kvm_regs regs; | 93 | struct kvm_regs regs; |
85 | 94 | ||
@@ -93,13 +102,11 @@ struct kvm_vcpu_arch { | |||
93 | u32 midr; | 102 | u32 midr; |
94 | 103 | ||
95 | /* Exception Information */ | 104 | /* Exception Information */ |
96 | u32 hsr; /* Hyp Syndrome Register */ | 105 | struct kvm_vcpu_fault_info fault; |
97 | u32 hxfar; /* Hyp Data/Inst Fault Address Register */ | ||
98 | u32 hpfar; /* Hyp IPA Fault Address Register */ | ||
99 | 106 | ||
100 | /* Floating point registers (VFP and Advanced SIMD/NEON) */ | 107 | /* Floating point registers (VFP and Advanced SIMD/NEON) */ |
101 | struct vfp_hard_struct vfp_guest; | 108 | kvm_kernel_vfp_t vfp_guest; |
102 | struct vfp_hard_struct *vfp_host; | 109 | kvm_kernel_vfp_t *vfp_host; |
103 | 110 | ||
104 | /* VGIC state */ | 111 | /* VGIC state */ |
105 | struct vgic_cpu vgic_cpu; | 112 | struct vgic_cpu vgic_cpu; |
@@ -122,9 +129,6 @@ struct kvm_vcpu_arch { | |||
122 | /* Interrupt related fields */ | 129 | /* Interrupt related fields */ |
123 | u32 irq_lines; /* IRQ and FIQ levels */ | 130 | u32 irq_lines; /* IRQ and FIQ levels */ |
124 | 131 | ||
125 | /* Hyp exception information */ | ||
126 | u32 hyp_pc; /* PC when exception was taken from Hyp mode */ | ||
127 | |||
128 | /* Cache some mmu pages needed inside spinlock regions */ | 132 | /* Cache some mmu pages needed inside spinlock regions */ |
129 | struct kvm_mmu_memory_cache mmu_page_cache; | 133 | struct kvm_mmu_memory_cache mmu_page_cache; |
130 | 134 | ||
@@ -181,4 +185,26 @@ struct kvm_one_reg; | |||
181 | int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *); | 185 | int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *); |
182 | int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *); | 186 | int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *); |
183 | 187 | ||
188 | int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, | ||
189 | int exception_index); | ||
190 | |||
191 | static inline void __cpu_init_hyp_mode(unsigned long long pgd_ptr, | ||
192 | unsigned long hyp_stack_ptr, | ||
193 | unsigned long vector_ptr) | ||
194 | { | ||
195 | unsigned long pgd_low, pgd_high; | ||
196 | |||
197 | pgd_low = (pgd_ptr & ((1ULL << 32) - 1)); | ||
198 | pgd_high = (pgd_ptr >> 32ULL); | ||
199 | |||
200 | /* | ||
201 | * Call initialization code, and switch to the full blown | ||
202 | * HYP code. The init code doesn't need to preserve these registers as | ||
203 | * r1-r3 and r12 are already callee save according to the AAPCS. | ||
204 | * Note that we slightly misuse the prototype by casting the pgd_low to | ||
205 | * a void *. | ||
206 | */ | ||
207 | kvm_call_hyp((void *)pgd_low, pgd_high, hyp_stack_ptr, vector_ptr); | ||
208 | } | ||
209 | |||
184 | #endif /* __ARM_KVM_HOST_H__ */ | 210 | #endif /* __ARM_KVM_HOST_H__ */ |
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 421a20b34874..970f3b5fa109 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -19,6 +19,18 @@
19 | #ifndef __ARM_KVM_MMU_H__ | 19 | #ifndef __ARM_KVM_MMU_H__ |
20 | #define __ARM_KVM_MMU_H__ | 20 | #define __ARM_KVM_MMU_H__ |
21 | 21 | ||
22 | #include <asm/cacheflush.h> | ||
23 | #include <asm/pgalloc.h> | ||
24 | #include <asm/idmap.h> | ||
25 | |||
26 | /* | ||
27 | * We directly use the kernel VA for the HYP, as we can directly share | ||
28 | * the mapping (HTTBR "covers" TTBR1). | ||
29 | */ | ||
30 | #define HYP_PAGE_OFFSET_MASK (~0UL) | ||
31 | #define HYP_PAGE_OFFSET PAGE_OFFSET | ||
32 | #define KERN_TO_HYP(kva) (kva) | ||
33 | |||
22 | int create_hyp_mappings(void *from, void *to); | 34 | int create_hyp_mappings(void *from, void *to); |
23 | int create_hyp_io_mappings(void *from, void *to, phys_addr_t); | 35 | int create_hyp_io_mappings(void *from, void *to, phys_addr_t); |
24 | void free_hyp_pmds(void); | 36 | void free_hyp_pmds(void); |
@@ -36,6 +48,16 @@ phys_addr_t kvm_mmu_get_httbr(void); | |||
36 | int kvm_mmu_init(void); | 48 | int kvm_mmu_init(void); |
37 | void kvm_clear_hyp_idmap(void); | 49 | void kvm_clear_hyp_idmap(void); |
38 | 50 | ||
51 | static inline void kvm_set_pte(pte_t *pte, pte_t new_pte) | ||
52 | { | ||
53 | pte_val(*pte) = new_pte; | ||
54 | /* | ||
55 | * flush_pmd_entry just takes a void pointer and cleans the necessary | ||
56 | * cache entries, so we can reuse the function for ptes. | ||
57 | */ | ||
58 | flush_pmd_entry(pte); | ||
59 | } | ||
60 | |||
39 | static inline bool kvm_is_write_fault(unsigned long hsr) | 61 | static inline bool kvm_is_write_fault(unsigned long hsr) |
40 | { | 62 | { |
41 | unsigned long hsr_ec = hsr >> HSR_EC_SHIFT; | 63 | unsigned long hsr_ec = hsr >> HSR_EC_SHIFT; |
@@ -47,4 +69,49 @@ static inline bool kvm_is_write_fault(unsigned long hsr) | |||
47 | return true; | 69 | return true; |
48 | } | 70 | } |
49 | 71 | ||
72 | static inline void kvm_clean_pgd(pgd_t *pgd) | ||
73 | { | ||
74 | clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t)); | ||
75 | } | ||
76 | |||
77 | static inline void kvm_clean_pmd_entry(pmd_t *pmd) | ||
78 | { | ||
79 | clean_pmd_entry(pmd); | ||
80 | } | ||
81 | |||
82 | static inline void kvm_clean_pte(pte_t *pte) | ||
83 | { | ||
84 | clean_pte_table(pte); | ||
85 | } | ||
86 | |||
87 | static inline void kvm_set_s2pte_writable(pte_t *pte) | ||
88 | { | ||
89 | pte_val(*pte) |= L_PTE_S2_RDWR; | ||
90 | } | ||
91 | |||
92 | struct kvm; | ||
93 | |||
94 | static inline void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn) | ||
95 | { | ||
96 | /* | ||
97 | * If we are going to insert an instruction page and the icache is | ||
98 | * either VIPT or PIPT, there is a potential problem where the host | ||
99 | * (or another VM) may have used the same page as this guest, and we | ||
100 | * read incorrect data from the icache. If we're using a PIPT cache, | ||
101 | * we can invalidate just that page, but if we are using a VIPT cache | ||
102 | * we need to invalidate the entire icache - damn shame - as written | ||
103 | * in the ARM ARM (DDI 0406C.b - Page B3-1393). | ||
104 | * | ||
105 | * VIVT caches are tagged using both the ASID and the VMID and don't | ||
106 | * need any kind of flushing (DDI 0406C.b - Page B3-1392). | ||
107 | */ | ||
108 | if (icache_is_pipt()) { | ||
109 | unsigned long hva = gfn_to_hva(kvm, gfn); | ||
110 | __cpuc_coherent_user_range(hva, hva + PAGE_SIZE); | ||
111 | } else if (!icache_is_vivt_asid_tagged()) { | ||
112 | /* any kind of VIPT cache */ | ||
113 | __flush_icache_all(); | ||
114 | } | ||
115 | } | ||
116 | |||
50 | #endif /* __ARM_KVM_MMU_H__ */ | 117 | #endif /* __ARM_KVM_MMU_H__ */ |
diff --git a/arch/arm/include/asm/kvm_vgic.h b/arch/arm/include/asm/kvm_vgic.h index ab97207d9cd3..343744e4809c 100644 --- a/arch/arm/include/asm/kvm_vgic.h +++ b/arch/arm/include/asm/kvm_vgic.h | |||
@@ -21,7 +21,6 @@ | |||
21 | 21 | ||
22 | #include <linux/kernel.h> | 22 | #include <linux/kernel.h> |
23 | #include <linux/kvm.h> | 23 | #include <linux/kvm.h> |
24 | #include <linux/kvm_host.h> | ||
25 | #include <linux/irqreturn.h> | 24 | #include <linux/irqreturn.h> |
26 | #include <linux/spinlock.h> | 25 | #include <linux/spinlock.h> |
27 | #include <linux/types.h> | 26 | #include <linux/types.h> |
diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h index 5cf2e979b4be..7d2c3c843801 100644 --- a/arch/arm/include/asm/mach/pci.h +++ b/arch/arm/include/asm/mach/pci.h | |||
@@ -30,6 +30,11 @@ struct hw_pci { | |||
30 | void (*postinit)(void); | 30 | void (*postinit)(void); |
31 | u8 (*swizzle)(struct pci_dev *dev, u8 *pin); | 31 | u8 (*swizzle)(struct pci_dev *dev, u8 *pin); |
32 | int (*map_irq)(const struct pci_dev *dev, u8 slot, u8 pin); | 32 | int (*map_irq)(const struct pci_dev *dev, u8 slot, u8 pin); |
33 | resource_size_t (*align_resource)(struct pci_dev *dev, | ||
34 | const struct resource *res, | ||
35 | resource_size_t start, | ||
36 | resource_size_t size, | ||
37 | resource_size_t align); | ||
33 | }; | 38 | }; |
34 | 39 | ||
35 | /* | 40 | /* |
@@ -51,6 +56,12 @@ struct pci_sys_data { | |||
51 | u8 (*swizzle)(struct pci_dev *, u8 *); | 56 | u8 (*swizzle)(struct pci_dev *, u8 *); |
52 | /* IRQ mapping */ | 57 | /* IRQ mapping */ |
53 | int (*map_irq)(const struct pci_dev *, u8, u8); | 58 | int (*map_irq)(const struct pci_dev *, u8, u8); |
59 | /* Resource alignment requirements */ | ||
60 | resource_size_t (*align_resource)(struct pci_dev *dev, | ||
61 | const struct resource *res, | ||
62 | resource_size_t start, | ||
63 | resource_size_t size, | ||
64 | resource_size_t align); | ||
54 | void *private_data; /* platform controller private data */ | 65 | void *private_data; /* platform controller private data */ |
55 | }; | 66 | }; |
56 | 67 | ||
diff --git a/arch/arm/include/asm/mcpm.h b/arch/arm/include/asm/mcpm.h new file mode 100644 index 000000000000..0f7b7620e9a5 --- /dev/null +++ b/arch/arm/include/asm/mcpm.h | |||
@@ -0,0 +1,209 @@ | |||
1 | /* | ||
2 | * arch/arm/include/asm/mcpm.h | ||
3 | * | ||
4 | * Created by: Nicolas Pitre, April 2012 | ||
5 | * Copyright: (C) 2012-2013 Linaro Limited | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | */ | ||
11 | |||
12 | #ifndef MCPM_H | ||
13 | #define MCPM_H | ||
14 | |||
15 | /* | ||
16 | * Maximum number of possible clusters / CPUs per cluster. | ||
17 | * | ||
18 | * This should be sufficient for quite a while and keeps the | ||
19 | * (assembly) code simpler. When this starts to grow we'll have | ||
20 | * to consider dynamic allocation. | ||
21 | */ | ||
22 | #define MAX_CPUS_PER_CLUSTER 4 | ||
23 | #define MAX_NR_CLUSTERS 2 | ||
24 | |||
25 | #ifndef __ASSEMBLY__ | ||
26 | |||
27 | #include <linux/types.h> | ||
28 | #include <asm/cacheflush.h> | ||
29 | |||
30 | /* | ||
31 | * Platform specific code should use this symbol to set up the secondary | ||
32 | * entry location for processors to use when released from reset. | ||
33 | */ | ||
34 | extern void mcpm_entry_point(void); | ||
35 | |||
36 | /* | ||
37 | * This is used to indicate, via ptr, where the given CPU from the given | ||
38 | * cluster should branch once it is ready to re-enter the kernel, or | ||
39 | * NULL if it should be gated. A gated CPU is held in a WFE loop until | ||
40 | * its vector becomes non-NULL. | ||
41 | */ | ||
42 | void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr); | ||
43 | |||
44 | /* | ||
45 | * CPU/cluster power operations API for higher subsystems to use. | ||
46 | */ | ||
47 | |||
48 | /** | ||
49 | * mcpm_cpu_power_up - make the given CPU in the given cluster runnable | ||
50 | * | ||
51 | * @cpu: CPU number within given cluster | ||
52 | * @cluster: cluster number for the CPU | ||
53 | * | ||
54 | * The identified CPU is brought out of reset. If the cluster was powered | ||
55 | * down then it is brought up as well, taking care not to let the other CPUs | ||
56 | * in the cluster run, and ensuring appropriate cluster setup. | ||
57 | * | ||
58 | * Caller must ensure the appropriate entry vector is initialized with | ||
59 | * mcpm_set_entry_vector() prior to calling this. | ||
60 | * | ||
61 | * This must be called in a sleepable context. However, the implementation | ||
62 | * is strongly encouraged to return early and let the operation happen | ||
63 | * asynchronously, especially when significant delays are expected. | ||
64 | * | ||
65 | * If the operation cannot be performed then an error code is returned. | ||
66 | */ | ||
67 | int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster); | ||
68 | |||
69 | /** | ||
70 | * mcpm_cpu_power_down - power the calling CPU down | ||
71 | * | ||
72 | * The calling CPU is powered down. | ||
73 | * | ||
74 | * If this CPU is found to be the "last man standing" in the cluster | ||
75 | * then the cluster is prepared for power-down too. | ||
76 | * | ||
77 | * This must be called with interrupts disabled. | ||
78 | * | ||
79 | * This does not return. Re-entry in the kernel is expected via | ||
80 | * mcpm_entry_point. | ||
81 | */ | ||
82 | void mcpm_cpu_power_down(void); | ||
83 | |||
84 | /** | ||
85 | * mcpm_cpu_suspend - bring the calling CPU into a suspended state | ||
86 | * | ||
87 | * @expected_residency: duration in microseconds the CPU is expected | ||
88 | * to remain suspended, or 0 if unknown/infinity. | ||
89 | * | ||
90 | * The calling CPU is suspended. The expected residency argument is used | ||
91 | * as a hint by the platform specific backend to implement the appropriate | ||
92 | * sleep state level according to its knowledge of wake-up latency | ||
93 | * for the given hardware. | ||
94 | * | ||
95 | * If this CPU is found to be the "last man standing" in the cluster | ||
96 | * then the cluster may be prepared for power-down too, if the expected | ||
97 | * residency makes it worthwhile. | ||
98 | * | ||
99 | * This must be called with interrupts disabled. | ||
100 | * | ||
101 | * This does not return. Re-entry in the kernel is expected via | ||
102 | * mcpm_entry_point. | ||
103 | */ | ||
104 | void mcpm_cpu_suspend(u64 expected_residency); | ||
105 | |||
106 | /** | ||
107 | * mcpm_cpu_powered_up - housekeeping work after a CPU has been powered up | ||
108 | * | ||
109 | * This lets the platform specific backend code perform needed housekeeping | ||
110 | * work. This must be called by the newly activated CPU as soon as it is | ||
111 | * fully operational in kernel space, before it enables interrupts. | ||
112 | * | ||
113 | * If the operation cannot be performed then an error code is returned. | ||
114 | */ | ||
115 | int mcpm_cpu_powered_up(void); | ||
116 | |||
117 | /* | ||
118 | * Platform specific methods used in the implementation of the above API. | ||
119 | */ | ||
120 | struct mcpm_platform_ops { | ||
121 | int (*power_up)(unsigned int cpu, unsigned int cluster); | ||
122 | void (*power_down)(void); | ||
123 | void (*suspend)(u64); | ||
124 | void (*powered_up)(void); | ||
125 | }; | ||
126 | |||
127 | /** | ||
128 | * mcpm_platform_register - register platform specific power methods | ||
129 | * | ||
130 | * @ops: mcpm_platform_ops structure to register | ||
131 | * | ||
132 | * An error is returned if the registration has been done previously. | ||
133 | */ | ||
134 | int __init mcpm_platform_register(const struct mcpm_platform_ops *ops); | ||
135 | |||
136 | /* Synchronisation structures for coordinating safe cluster setup/teardown: */ | ||
137 | |||
138 | /* | ||
139 | * When modifying this structure, make sure you update the MCPM_SYNC_ defines | ||
140 | * to match. | ||
141 | */ | ||
142 | struct mcpm_sync_struct { | ||
143 | /* individual CPU states */ | ||
144 | struct { | ||
145 | s8 cpu __aligned(__CACHE_WRITEBACK_GRANULE); | ||
146 | } cpus[MAX_CPUS_PER_CLUSTER]; | ||
147 | |||
148 | /* cluster state */ | ||
149 | s8 cluster __aligned(__CACHE_WRITEBACK_GRANULE); | ||
150 | |||
151 | /* inbound-side state */ | ||
152 | s8 inbound __aligned(__CACHE_WRITEBACK_GRANULE); | ||
153 | }; | ||
154 | |||
155 | struct sync_struct { | ||
156 | struct mcpm_sync_struct clusters[MAX_NR_CLUSTERS]; | ||
157 | }; | ||
158 | |||
159 | extern unsigned long sync_phys; /* physical address of *mcpm_sync */ | ||
160 | |||
161 | void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster); | ||
162 | void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster); | ||
163 | void __mcpm_outbound_leave_critical(unsigned int cluster, int state); | ||
164 | bool __mcpm_outbound_enter_critical(unsigned int this_cpu, unsigned int cluster); | ||
165 | int __mcpm_cluster_state(unsigned int cluster); | ||
166 | |||
167 | int __init mcpm_sync_init( | ||
168 | void (*power_up_setup)(unsigned int affinity_level)); | ||
169 | |||
170 | void __init mcpm_smp_set_ops(void); | ||
171 | |||
172 | #else | ||
173 | |||
174 | /* | ||
175 | * asm-offsets.h causes trouble when included in .c files, and cacheflush.h | ||
176 | * cannot be included in asm files. Let's work around the conflict like this. | ||
177 | */ | ||
178 | #include <asm/asm-offsets.h> | ||
179 | #define __CACHE_WRITEBACK_GRANULE CACHE_WRITEBACK_GRANULE | ||
180 | |||
181 | #endif /* ! __ASSEMBLY__ */ | ||
182 | |||
183 | /* Definitions for mcpm_sync_struct */ | ||
184 | #define CPU_DOWN 0x11 | ||
185 | #define CPU_COMING_UP 0x12 | ||
186 | #define CPU_UP 0x13 | ||
187 | #define CPU_GOING_DOWN 0x14 | ||
188 | |||
189 | #define CLUSTER_DOWN 0x21 | ||
190 | #define CLUSTER_UP 0x22 | ||
191 | #define CLUSTER_GOING_DOWN 0x23 | ||
192 | |||
193 | #define INBOUND_NOT_COMING_UP 0x31 | ||
194 | #define INBOUND_COMING_UP 0x32 | ||
195 | |||
196 | /* | ||
197 | * Offsets for the mcpm_sync_struct members, for use in asm. | ||
198 | * We don't want to make them global to the kernel via asm-offsets.c. | ||
199 | */ | ||
200 | #define MCPM_SYNC_CLUSTER_CPUS 0 | ||
201 | #define MCPM_SYNC_CPU_SIZE __CACHE_WRITEBACK_GRANULE | ||
202 | #define MCPM_SYNC_CLUSTER_CLUSTER \ | ||
203 | (MCPM_SYNC_CLUSTER_CPUS + MCPM_SYNC_CPU_SIZE * MAX_CPUS_PER_CLUSTER) | ||
204 | #define MCPM_SYNC_CLUSTER_INBOUND \ | ||
205 | (MCPM_SYNC_CLUSTER_CLUSTER + __CACHE_WRITEBACK_GRANULE) | ||
206 | #define MCPM_SYNC_CLUSTER_SIZE \ | ||
207 | (MCPM_SYNC_CLUSTER_INBOUND + __CACHE_WRITEBACK_GRANULE) | ||
208 | |||
209 | #endif | ||
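As a usage illustration of the API declared above (not part of this patch; the my_* names and the secondary entry symbol are hypothetical placeholders), a platform backend would register its mcpm_platform_ops once and then release secondary CPUs through the common layer, roughly like this:

	#include <linux/init.h>
	#include <asm/mcpm.h>

	static int my_pmu_power_up(unsigned int cpu, unsigned int cluster)
	{
		/* poke the (hypothetical) power controller to release the CPU from reset */
		return 0;
	}

	static void my_pmu_power_down(void)
	{
		/* clean caches, exit coherency, then let the power controller act */
	}

	static const struct mcpm_platform_ops my_pmu_ops = {
		.power_up	= my_pmu_power_up,
		.power_down	= my_pmu_power_down,
	};

	static int __init my_pmu_init(void)
	{
		extern void my_secondary_entry(void);	/* hypothetical kernel re-entry point */
		int ret;

		ret = mcpm_platform_register(&my_pmu_ops);
		if (ret)
			return ret;

		/*
		 * The platform boot register is assumed to already point CPUs
		 * released from reset at mcpm_entry_point; the vector below is
		 * where that code branches once the CPU may enter the kernel.
		 */
		mcpm_set_entry_vector(1, 0, my_secondary_entry);
		return mcpm_cpu_power_up(1, 0);
	}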
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h index cddda1f41f0f..1995d1a84060 100644 --- a/arch/arm/include/asm/thread_info.h +++ b/arch/arm/include/asm/thread_info.h | |||
@@ -152,6 +152,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *, | |||
152 | #define TIF_SYSCALL_AUDIT 9 | 152 | #define TIF_SYSCALL_AUDIT 9 |
153 | #define TIF_SYSCALL_TRACEPOINT 10 | 153 | #define TIF_SYSCALL_TRACEPOINT 10 |
154 | #define TIF_SECCOMP 11 /* seccomp syscall filtering active */ | 154 | #define TIF_SECCOMP 11 /* seccomp syscall filtering active */ |
155 | #define TIF_NOHZ 12 /* in adaptive nohz mode */ | ||
155 | #define TIF_USING_IWMMXT 17 | 156 | #define TIF_USING_IWMMXT 17 |
156 | #define TIF_MEMDIE 18 /* is terminating due to OOM killer */ | 157 | #define TIF_MEMDIE 18 /* is terminating due to OOM killer */ |
157 | #define TIF_RESTORE_SIGMASK 20 | 158 | #define TIF_RESTORE_SIGMASK 20 |
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h index ab865e65a84c..a3625d141c1d 100644 --- a/arch/arm/include/asm/tlbflush.h +++ b/arch/arm/include/asm/tlbflush.h | |||
@@ -166,7 +166,7 @@ | |||
166 | # define v6wbi_always_flags (-1UL) | 166 | # define v6wbi_always_flags (-1UL) |
167 | #endif | 167 | #endif |
168 | 168 | ||
169 | #define v7wbi_tlb_flags_smp (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \ | 169 | #define v7wbi_tlb_flags_smp (TLB_WB | TLB_BARRIER | \ |
170 | TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | \ | 170 | TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | \ |
171 | TLB_V7_UIS_ASID | TLB_V7_UIS_BP) | 171 | TLB_V7_UIS_ASID | TLB_V7_UIS_BP) |
172 | #define v7wbi_tlb_flags_up (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \ | 172 | #define v7wbi_tlb_flags_up (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \ |
diff --git a/arch/arm/include/debug/uncompress.h b/arch/arm/include/debug/uncompress.h new file mode 100644 index 000000000000..0e2949b0fae9 --- /dev/null +++ b/arch/arm/include/debug/uncompress.h | |||
@@ -0,0 +1,7 @@ | |||
1 | #ifdef CONFIG_DEBUG_UNCOMPRESS | ||
2 | extern void putc(int c); | ||
3 | #else | ||
4 | static inline void putc(int c) {} | ||
5 | #endif | ||
6 | static inline void flush(void) {} | ||
7 | static inline void arch_decomp_setup(void) {} | ||
diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h index 023bfeb367bf..c1ee007523d7 100644 --- a/arch/arm/include/uapi/asm/kvm.h +++ b/arch/arm/include/uapi/asm/kvm.h | |||
@@ -53,12 +53,12 @@ | |||
53 | #define KVM_ARM_FIQ_spsr fiq_regs[7] | 53 | #define KVM_ARM_FIQ_spsr fiq_regs[7] |
54 | 54 | ||
55 | struct kvm_regs { | 55 | struct kvm_regs { |
56 | struct pt_regs usr_regs;/* R0_usr - R14_usr, PC, CPSR */ | 56 | struct pt_regs usr_regs; /* R0_usr - R14_usr, PC, CPSR */ |
57 | __u32 svc_regs[3]; /* SP_svc, LR_svc, SPSR_svc */ | 57 | unsigned long svc_regs[3]; /* SP_svc, LR_svc, SPSR_svc */ |
58 | __u32 abt_regs[3]; /* SP_abt, LR_abt, SPSR_abt */ | 58 | unsigned long abt_regs[3]; /* SP_abt, LR_abt, SPSR_abt */ |
59 | __u32 und_regs[3]; /* SP_und, LR_und, SPSR_und */ | 59 | unsigned long und_regs[3]; /* SP_und, LR_und, SPSR_und */ |
60 | __u32 irq_regs[3]; /* SP_irq, LR_irq, SPSR_irq */ | 60 | unsigned long irq_regs[3]; /* SP_irq, LR_irq, SPSR_irq */ |
61 | __u32 fiq_regs[8]; /* R8_fiq - R14_fiq, SPSR_fiq */ | 61 | unsigned long fiq_regs[8]; /* R8_fiq - R14_fiq, SPSR_fiq */ |
62 | }; | 62 | }; |
63 | 63 | ||
64 | /* Supported Processor Types */ | 64 | /* Supported Processor Types */ |
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index 923eec7105cf..a53efa993690 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c | |||
@@ -149,6 +149,10 @@ int main(void) | |||
149 | DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL); | 149 | DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL); |
150 | DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE); | 150 | DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE); |
151 | DEFINE(DMA_FROM_DEVICE, DMA_FROM_DEVICE); | 151 | DEFINE(DMA_FROM_DEVICE, DMA_FROM_DEVICE); |
152 | BLANK(); | ||
153 | DEFINE(CACHE_WRITEBACK_ORDER, __CACHE_WRITEBACK_ORDER); | ||
154 | DEFINE(CACHE_WRITEBACK_GRANULE, __CACHE_WRITEBACK_GRANULE); | ||
155 | BLANK(); | ||
152 | #ifdef CONFIG_KVM_ARM_HOST | 156 | #ifdef CONFIG_KVM_ARM_HOST |
153 | DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm)); | 157 | DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm)); |
154 | DEFINE(VCPU_MIDR, offsetof(struct kvm_vcpu, arch.midr)); | 158 | DEFINE(VCPU_MIDR, offsetof(struct kvm_vcpu, arch.midr)); |
@@ -165,10 +169,10 @@ int main(void) | |||
165 | DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_pc)); | 169 | DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_pc)); |
166 | DEFINE(VCPU_CPSR, offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_cpsr)); | 170 | DEFINE(VCPU_CPSR, offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_cpsr)); |
167 | DEFINE(VCPU_IRQ_LINES, offsetof(struct kvm_vcpu, arch.irq_lines)); | 171 | DEFINE(VCPU_IRQ_LINES, offsetof(struct kvm_vcpu, arch.irq_lines)); |
168 | DEFINE(VCPU_HSR, offsetof(struct kvm_vcpu, arch.hsr)); | 172 | DEFINE(VCPU_HSR, offsetof(struct kvm_vcpu, arch.fault.hsr)); |
169 | DEFINE(VCPU_HxFAR, offsetof(struct kvm_vcpu, arch.hxfar)); | 173 | DEFINE(VCPU_HxFAR, offsetof(struct kvm_vcpu, arch.fault.hxfar)); |
170 | DEFINE(VCPU_HPFAR, offsetof(struct kvm_vcpu, arch.hpfar)); | 174 | DEFINE(VCPU_HPFAR, offsetof(struct kvm_vcpu, arch.fault.hpfar)); |
171 | DEFINE(VCPU_HYP_PC, offsetof(struct kvm_vcpu, arch.hyp_pc)); | 175 | DEFINE(VCPU_HYP_PC, offsetof(struct kvm_vcpu, arch.fault.hyp_pc)); |
172 | #ifdef CONFIG_KVM_ARM_VGIC | 176 | #ifdef CONFIG_KVM_ARM_VGIC |
173 | DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu)); | 177 | DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu)); |
174 | DEFINE(VGIC_CPU_HCR, offsetof(struct vgic_cpu, vgic_hcr)); | 178 | DEFINE(VGIC_CPU_HCR, offsetof(struct vgic_cpu, vgic_hcr)); |
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c index a1f73b502ef0..b2ed73c45489 100644 --- a/arch/arm/kernel/bios32.c +++ b/arch/arm/kernel/bios32.c | |||
@@ -462,6 +462,7 @@ static void pcibios_init_hw(struct hw_pci *hw, struct list_head *head) | |||
462 | sys->busnr = busnr; | 462 | sys->busnr = busnr; |
463 | sys->swizzle = hw->swizzle; | 463 | sys->swizzle = hw->swizzle; |
464 | sys->map_irq = hw->map_irq; | 464 | sys->map_irq = hw->map_irq; |
465 | sys->align_resource = hw->align_resource; | ||
465 | INIT_LIST_HEAD(&sys->resources); | 466 | INIT_LIST_HEAD(&sys->resources); |
466 | 467 | ||
467 | if (hw->private_data) | 468 | if (hw->private_data) |
@@ -574,6 +575,8 @@ char * __init pcibios_setup(char *str) | |||
574 | resource_size_t pcibios_align_resource(void *data, const struct resource *res, | 575 | resource_size_t pcibios_align_resource(void *data, const struct resource *res, |
575 | resource_size_t size, resource_size_t align) | 576 | resource_size_t size, resource_size_t align) |
576 | { | 577 | { |
578 | struct pci_dev *dev = data; | ||
579 | struct pci_sys_data *sys = dev->sysdata; | ||
577 | resource_size_t start = res->start; | 580 | resource_size_t start = res->start; |
578 | 581 | ||
579 | if (res->flags & IORESOURCE_IO && start & 0x300) | 582 | if (res->flags & IORESOURCE_IO && start & 0x300) |
@@ -581,6 +584,9 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res, | |||
581 | 584 | ||
582 | start = (start + align - 1) & ~(align - 1); | 585 | start = (start + align - 1) & ~(align - 1); |
583 | 586 | ||
587 | if (sys->align_resource) | ||
588 | return sys->align_resource(dev, res, start, size, align); | ||
589 | |||
584 | return start; | 590 | return start; |
585 | } | 591 | } |
586 | 592 | ||
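To show how the new hook is meant to be consumed (a sketch only; the my_pci_* names are hypothetical and not from this patch), a host bridge driver could force I/O windows onto 1K boundaries like this, with pcibios_align_resource() above deferring to its callback:

	#include <linux/pci.h>
	#include <linux/sizes.h>
	#include <asm/mach/pci.h>

	static resource_size_t my_pci_align_resource(struct pci_dev *dev,
						     const struct resource *res,
						     resource_size_t start,
						     resource_size_t size,
						     resource_size_t align)
	{
		if (res->flags & IORESOURCE_IO)
			return ALIGN(start, SZ_1K);	/* stricter than the default */
		return start;
	}

	static int my_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
	{
		return 0;	/* hypothetical: no legacy INTx routing */
	}

	/* registered from machine init code via pci_common_init(&my_pci) */
	static struct hw_pci my_pci __initdata = {
		.nr_controllers	= 1,
		.map_irq	= my_pci_map_irq,
		.align_resource	= my_pci_align_resource,
	};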
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index 0f82098c9bfe..582b405befc5 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S | |||
@@ -192,18 +192,6 @@ __dabt_svc: | |||
192 | svc_entry | 192 | svc_entry |
193 | mov r2, sp | 193 | mov r2, sp |
194 | dabt_helper | 194 | dabt_helper |
195 | |||
196 | @ | ||
197 | @ IRQs off again before pulling preserved data off the stack | ||
198 | @ | ||
199 | disable_irq_notrace | ||
200 | |||
201 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
202 | tst r5, #PSR_I_BIT | ||
203 | bleq trace_hardirqs_on | ||
204 | tst r5, #PSR_I_BIT | ||
205 | blne trace_hardirqs_off | ||
206 | #endif | ||
207 | svc_exit r5 @ return from exception | 195 | svc_exit r5 @ return from exception |
208 | UNWIND(.fnend ) | 196 | UNWIND(.fnend ) |
209 | ENDPROC(__dabt_svc) | 197 | ENDPROC(__dabt_svc) |
@@ -223,12 +211,7 @@ __irq_svc: | |||
223 | blne svc_preempt | 211 | blne svc_preempt |
224 | #endif | 212 | #endif |
225 | 213 | ||
226 | #ifdef CONFIG_TRACE_IRQFLAGS | 214 | svc_exit r5, irq = 1 @ return from exception |
227 | @ The parent context IRQs must have been enabled to get here in | ||
228 | @ the first place, so there's no point checking the PSR I bit. | ||
229 | bl trace_hardirqs_on | ||
230 | #endif | ||
231 | svc_exit r5 @ return from exception | ||
232 | UNWIND(.fnend ) | 215 | UNWIND(.fnend ) |
233 | ENDPROC(__irq_svc) | 216 | ENDPROC(__irq_svc) |
234 | 217 | ||
@@ -295,22 +278,8 @@ __und_svc_fault: | |||
295 | mov r0, sp @ struct pt_regs *regs | 278 | mov r0, sp @ struct pt_regs *regs |
296 | bl __und_fault | 279 | bl __und_fault |
297 | 280 | ||
298 | @ | ||
299 | @ IRQs off again before pulling preserved data off the stack | ||
300 | @ | ||
301 | __und_svc_finish: | 281 | __und_svc_finish: |
302 | disable_irq_notrace | ||
303 | |||
304 | @ | ||
305 | @ restore SPSR and restart the instruction | ||
306 | @ | ||
307 | ldr r5, [sp, #S_PSR] @ Get SVC cpsr | 282 | ldr r5, [sp, #S_PSR] @ Get SVC cpsr |
308 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
309 | tst r5, #PSR_I_BIT | ||
310 | bleq trace_hardirqs_on | ||
311 | tst r5, #PSR_I_BIT | ||
312 | blne trace_hardirqs_off | ||
313 | #endif | ||
314 | svc_exit r5 @ return from exception | 283 | svc_exit r5 @ return from exception |
315 | UNWIND(.fnend ) | 284 | UNWIND(.fnend ) |
316 | ENDPROC(__und_svc) | 285 | ENDPROC(__und_svc) |
@@ -320,18 +289,6 @@ __pabt_svc: | |||
320 | svc_entry | 289 | svc_entry |
321 | mov r2, sp @ regs | 290 | mov r2, sp @ regs |
322 | pabt_helper | 291 | pabt_helper |
323 | |||
324 | @ | ||
325 | @ IRQs off again before pulling preserved data off the stack | ||
326 | @ | ||
327 | disable_irq_notrace | ||
328 | |||
329 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
330 | tst r5, #PSR_I_BIT | ||
331 | bleq trace_hardirqs_on | ||
332 | tst r5, #PSR_I_BIT | ||
333 | blne trace_hardirqs_off | ||
334 | #endif | ||
335 | svc_exit r5 @ return from exception | 292 | svc_exit r5 @ return from exception |
336 | UNWIND(.fnend ) | 293 | UNWIND(.fnend ) |
337 | ENDPROC(__pabt_svc) | 294 | ENDPROC(__pabt_svc) |
@@ -396,6 +353,7 @@ ENDPROC(__pabt_svc) | |||
396 | #ifdef CONFIG_IRQSOFF_TRACER | 353 | #ifdef CONFIG_IRQSOFF_TRACER |
397 | bl trace_hardirqs_off | 354 | bl trace_hardirqs_off |
398 | #endif | 355 | #endif |
356 | ct_user_exit save = 0 | ||
399 | .endm | 357 | .endm |
400 | 358 | ||
401 | .macro kuser_cmpxchg_check | 359 | .macro kuser_cmpxchg_check |
@@ -562,21 +520,21 @@ ENDPROC(__und_usr) | |||
562 | @ Fall-through from Thumb-2 __und_usr | 520 | @ Fall-through from Thumb-2 __und_usr |
563 | @ | 521 | @ |
564 | #ifdef CONFIG_NEON | 522 | #ifdef CONFIG_NEON |
523 | get_thread_info r10 @ get current thread | ||
565 | adr r6, .LCneon_thumb_opcodes | 524 | adr r6, .LCneon_thumb_opcodes |
566 | b 2f | 525 | b 2f |
567 | #endif | 526 | #endif |
568 | call_fpe: | 527 | call_fpe: |
528 | get_thread_info r10 @ get current thread | ||
569 | #ifdef CONFIG_NEON | 529 | #ifdef CONFIG_NEON |
570 | adr r6, .LCneon_arm_opcodes | 530 | adr r6, .LCneon_arm_opcodes |
571 | 2: | 531 | 2: ldr r5, [r6], #4 @ mask value |
572 | ldr r7, [r6], #4 @ mask value | ||
573 | cmp r7, #0 @ end mask? | ||
574 | beq 1f | ||
575 | and r8, r0, r7 | ||
576 | ldr r7, [r6], #4 @ opcode bits matching in mask | 532 | ldr r7, [r6], #4 @ opcode bits matching in mask |
533 | cmp r5, #0 @ end mask? | ||
534 | beq 1f | ||
535 | and r8, r0, r5 | ||
577 | cmp r8, r7 @ NEON instruction? | 536 | cmp r8, r7 @ NEON instruction? |
578 | bne 2b | 537 | bne 2b |
579 | get_thread_info r10 | ||
580 | mov r7, #1 | 538 | mov r7, #1 |
581 | strb r7, [r10, #TI_USED_CP + 10] @ mark CP#10 as used | 539 | strb r7, [r10, #TI_USED_CP + 10] @ mark CP#10 as used |
582 | strb r7, [r10, #TI_USED_CP + 11] @ mark CP#11 as used | 540 | strb r7, [r10, #TI_USED_CP + 11] @ mark CP#11 as used |
@@ -586,7 +544,6 @@ call_fpe: | |||
586 | tst r0, #0x08000000 @ only CDP/CPRT/LDC/STC have bit 27 | 544 | tst r0, #0x08000000 @ only CDP/CPRT/LDC/STC have bit 27 |
587 | tstne r0, #0x04000000 @ bit 26 set on both ARM and Thumb-2 | 545 | tstne r0, #0x04000000 @ bit 26 set on both ARM and Thumb-2 |
588 | moveq pc, lr | 546 | moveq pc, lr |
589 | get_thread_info r10 @ get current thread | ||
590 | and r8, r0, #0x00000f00 @ mask out CP number | 547 | and r8, r0, #0x00000f00 @ mask out CP number |
591 | THUMB( lsr r8, r8, #8 ) | 548 | THUMB( lsr r8, r8, #8 ) |
592 | mov r7, #1 | 549 | mov r7, #1 |
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index fefd7f971437..bc5bc0a97131 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S | |||
@@ -35,12 +35,11 @@ ret_fast_syscall: | |||
35 | ldr r1, [tsk, #TI_FLAGS] | 35 | ldr r1, [tsk, #TI_FLAGS] |
36 | tst r1, #_TIF_WORK_MASK | 36 | tst r1, #_TIF_WORK_MASK |
37 | bne fast_work_pending | 37 | bne fast_work_pending |
38 | #if defined(CONFIG_IRQSOFF_TRACER) | ||
39 | asm_trace_hardirqs_on | 38 | asm_trace_hardirqs_on |
40 | #endif | ||
41 | 39 | ||
42 | /* perform architecture specific actions before user return */ | 40 | /* perform architecture specific actions before user return */ |
43 | arch_ret_to_user r1, lr | 41 | arch_ret_to_user r1, lr |
42 | ct_user_enter | ||
44 | 43 | ||
45 | restore_user_regs fast = 1, offset = S_OFF | 44 | restore_user_regs fast = 1, offset = S_OFF |
46 | UNWIND(.fnend ) | 45 | UNWIND(.fnend ) |
@@ -71,11 +70,11 @@ ENTRY(ret_to_user_from_irq) | |||
71 | tst r1, #_TIF_WORK_MASK | 70 | tst r1, #_TIF_WORK_MASK |
72 | bne work_pending | 71 | bne work_pending |
73 | no_work_pending: | 72 | no_work_pending: |
74 | #if defined(CONFIG_IRQSOFF_TRACER) | ||
75 | asm_trace_hardirqs_on | 73 | asm_trace_hardirqs_on |
76 | #endif | 74 | |
77 | /* perform architecture specific actions before user return */ | 75 | /* perform architecture specific actions before user return */ |
78 | arch_ret_to_user r1, lr | 76 | arch_ret_to_user r1, lr |
77 | ct_user_enter save = 0 | ||
79 | 78 | ||
80 | restore_user_regs fast = 0, offset = 0 | 79 | restore_user_regs fast = 0, offset = 0 |
81 | ENDPROC(ret_to_user_from_irq) | 80 | ENDPROC(ret_to_user_from_irq) |
@@ -406,6 +405,7 @@ ENTRY(vector_swi) | |||
406 | mcr p15, 0, ip, c1, c0 @ update control register | 405 | mcr p15, 0, ip, c1, c0 @ update control register |
407 | #endif | 406 | #endif |
408 | enable_irq | 407 | enable_irq |
408 | ct_user_exit | ||
409 | 409 | ||
410 | get_thread_info tsk | 410 | get_thread_info tsk |
411 | adr tbl, sys_call_table @ load syscall table pointer | 411 | adr tbl, sys_call_table @ load syscall table pointer |
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S index 9a8531eadd3d..160f3376ba6d 100644 --- a/arch/arm/kernel/entry-header.S +++ b/arch/arm/kernel/entry-header.S | |||
@@ -74,7 +74,24 @@ | |||
74 | .endm | 74 | .endm |
75 | 75 | ||
76 | #ifndef CONFIG_THUMB2_KERNEL | 76 | #ifndef CONFIG_THUMB2_KERNEL |
77 | .macro svc_exit, rpsr | 77 | .macro svc_exit, rpsr, irq = 0 |
78 | .if \irq != 0 | ||
79 | @ IRQs already off | ||
80 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
81 | @ The parent context IRQs must have been enabled to get here in | ||
82 | @ the first place, so there's no point checking the PSR I bit. | ||
83 | bl trace_hardirqs_on | ||
84 | #endif | ||
85 | .else | ||
86 | @ IRQs off again before pulling preserved data off the stack | ||
87 | disable_irq_notrace | ||
88 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
89 | tst \rpsr, #PSR_I_BIT | ||
90 | bleq trace_hardirqs_on | ||
91 | tst \rpsr, #PSR_I_BIT | ||
92 | blne trace_hardirqs_off | ||
93 | #endif | ||
94 | .endif | ||
78 | msr spsr_cxsf, \rpsr | 95 | msr spsr_cxsf, \rpsr |
79 | #if defined(CONFIG_CPU_V6) | 96 | #if defined(CONFIG_CPU_V6) |
80 | ldr r0, [sp] | 97 | ldr r0, [sp] |
@@ -120,7 +137,24 @@ | |||
120 | mov pc, \reg | 137 | mov pc, \reg |
121 | .endm | 138 | .endm |
122 | #else /* CONFIG_THUMB2_KERNEL */ | 139 | #else /* CONFIG_THUMB2_KERNEL */ |
123 | .macro svc_exit, rpsr | 140 | .macro svc_exit, rpsr, irq = 0 |
141 | .if \irq != 0 | ||
142 | @ IRQs already off | ||
143 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
144 | @ The parent context IRQs must have been enabled to get here in | ||
145 | @ the first place, so there's no point checking the PSR I bit. | ||
146 | bl trace_hardirqs_on | ||
147 | #endif | ||
148 | .else | ||
149 | @ IRQs off again before pulling preserved data off the stack | ||
150 | disable_irq_notrace | ||
151 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
152 | tst \rpsr, #PSR_I_BIT | ||
153 | bleq trace_hardirqs_on | ||
154 | tst \rpsr, #PSR_I_BIT | ||
155 | blne trace_hardirqs_off | ||
156 | #endif | ||
157 | .endif | ||
124 | ldr lr, [sp, #S_SP] @ top of the stack | 158 | ldr lr, [sp, #S_SP] @ top of the stack |
125 | ldrd r0, r1, [sp, #S_LR] @ calling lr and pc | 159 | ldrd r0, r1, [sp, #S_LR] @ calling lr and pc |
126 | clrex @ clear the exclusive monitor | 160 | clrex @ clear the exclusive monitor |
@@ -164,6 +198,34 @@ | |||
164 | #endif /* !CONFIG_THUMB2_KERNEL */ | 198 | #endif /* !CONFIG_THUMB2_KERNEL */ |
165 | 199 | ||
166 | /* | 200 | /* |
201 | * Context tracking subsystem. Used to instrument transitions | ||
202 | * between user and kernel mode. | ||
203 | */ | ||
204 | .macro ct_user_exit, save = 1 | ||
205 | #ifdef CONFIG_CONTEXT_TRACKING | ||
206 | .if \save | ||
207 | stmdb sp!, {r0-r3, ip, lr} | ||
208 | bl user_exit | ||
209 | ldmia sp!, {r0-r3, ip, lr} | ||
210 | .else | ||
211 | bl user_exit | ||
212 | .endif | ||
213 | #endif | ||
214 | .endm | ||
215 | |||
216 | .macro ct_user_enter, save = 1 | ||
217 | #ifdef CONFIG_CONTEXT_TRACKING | ||
218 | .if \save | ||
219 | stmdb sp!, {r0-r3, ip, lr} | ||
220 | bl user_enter | ||
221 | ldmia sp!, {r0-r3, ip, lr} | ||
222 | .else | ||
223 | bl user_enter | ||
224 | .endif | ||
225 | #endif | ||
226 | .endm | ||
227 | |||
228 | /* | ||
167 | * These are the registers used in the syscall handler, and allow us to | 229 | * These are the registers used in the syscall handler, and allow us to |
168 | * have in theory up to 7 arguments to a function - r0 to r6. | 230 | * have in theory up to 7 arguments to a function - r0 to r6. |
169 | * | 231 | * |
diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S index 854bd22380d3..5b391a689b47 100644 --- a/arch/arm/kernel/head-common.S +++ b/arch/arm/kernel/head-common.S | |||
@@ -98,8 +98,9 @@ __mmap_switched: | |||
98 | str r9, [r4] @ Save processor ID | 98 | str r9, [r4] @ Save processor ID |
99 | str r1, [r5] @ Save machine type | 99 | str r1, [r5] @ Save machine type |
100 | str r2, [r6] @ Save atags pointer | 100 | str r2, [r6] @ Save atags pointer |
101 | bic r4, r0, #CR_A @ Clear 'A' bit | 101 | cmp r7, #0 |
102 | stmia r7, {r0, r4} @ Save control register values | 102 | bicne r4, r0, #CR_A @ Clear 'A' bit |
103 | stmneia r7, {r0, r4} @ Save control register values | ||
103 | b start_kernel | 104 | b start_kernel |
104 | ENDPROC(__mmap_switched) | 105 | ENDPROC(__mmap_switched) |
105 | 106 | ||
@@ -113,7 +114,11 @@ __mmap_switched_data: | |||
113 | .long processor_id @ r4 | 114 | .long processor_id @ r4 |
114 | .long __machine_arch_type @ r5 | 115 | .long __machine_arch_type @ r5 |
115 | .long __atags_pointer @ r6 | 116 | .long __atags_pointer @ r6 |
117 | #ifdef CONFIG_CPU_CP15 | ||
116 | .long cr_alignment @ r7 | 118 | .long cr_alignment @ r7 |
119 | #else | ||
120 | .long 0 @ r7 | ||
121 | #endif | ||
117 | .long init_thread_union + THREAD_START_SP @ sp | 122 | .long init_thread_union + THREAD_START_SP @ sp |
118 | .size __mmap_switched_data, . - __mmap_switched_data | 123 | .size __mmap_switched_data, . - __mmap_switched_data |
119 | 124 | ||
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S index 2c228a07e58c..6a2e09c952c7 100644 --- a/arch/arm/kernel/head-nommu.S +++ b/arch/arm/kernel/head-nommu.S | |||
@@ -32,15 +32,21 @@ | |||
32 | * numbers for r1. | 32 | * numbers for r1. |
33 | * | 33 | * |
34 | */ | 34 | */ |
35 | .arm | ||
36 | 35 | ||
37 | __HEAD | 36 | __HEAD |
37 | |||
38 | #ifdef CONFIG_CPU_THUMBONLY | ||
39 | .thumb | ||
40 | ENTRY(stext) | ||
41 | #else | ||
42 | .arm | ||
38 | ENTRY(stext) | 43 | ENTRY(stext) |
39 | 44 | ||
40 | THUMB( adr r9, BSYM(1f) ) @ Kernel is always entered in ARM. | 45 | THUMB( adr r9, BSYM(1f) ) @ Kernel is always entered in ARM. |
41 | THUMB( bx r9 ) @ If this is a Thumb-2 kernel, | 46 | THUMB( bx r9 ) @ If this is a Thumb-2 kernel, |
42 | THUMB( .thumb ) @ switch to Thumb now. | 47 | THUMB( .thumb ) @ switch to Thumb now. |
43 | THUMB(1: ) | 48 | THUMB(1: ) |
49 | #endif | ||
44 | 50 | ||
45 | setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode | 51 | setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode |
46 | @ and irqs disabled | 52 | @ and irqs disabled |
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index ae58d3b37d9d..f21970316836 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c | |||
@@ -407,15 +407,16 @@ unsigned long arch_randomize_brk(struct mm_struct *mm) | |||
407 | * atomic helpers and the signal restart code. Insert it into the | 407 | * atomic helpers and the signal restart code. Insert it into the |
408 | * gate_vma so that it is visible through ptrace and /proc/<pid>/mem. | 408 | * gate_vma so that it is visible through ptrace and /proc/<pid>/mem. |
409 | */ | 409 | */ |
410 | static struct vm_area_struct gate_vma; | 410 | static struct vm_area_struct gate_vma = { |
411 | .vm_start = 0xffff0000, | ||
412 | .vm_end = 0xffff0000 + PAGE_SIZE, | ||
413 | .vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC, | ||
414 | .vm_mm = &init_mm, | ||
415 | }; | ||
411 | 416 | ||
412 | static int __init gate_vma_init(void) | 417 | static int __init gate_vma_init(void) |
413 | { | 418 | { |
414 | gate_vma.vm_start = 0xffff0000; | 419 | gate_vma.vm_page_prot = PAGE_READONLY_EXEC; |
415 | gate_vma.vm_end = 0xffff0000 + PAGE_SIZE; | ||
416 | gate_vma.vm_page_prot = PAGE_READONLY_EXEC; | ||
417 | gate_vma.vm_flags = VM_READ | VM_EXEC | | ||
418 | VM_MAYREAD | VM_MAYEXEC; | ||
419 | return 0; | 420 | return 0; |
420 | } | 421 | } |
421 | arch_initcall(gate_vma_init); | 422 | arch_initcall(gate_vma_init); |
diff --git a/arch/arm/kernel/return_address.c b/arch/arm/kernel/return_address.c index 8085417555dd..fafedd86885d 100644 --- a/arch/arm/kernel/return_address.c +++ b/arch/arm/kernel/return_address.c | |||
@@ -26,7 +26,7 @@ static int save_return_addr(struct stackframe *frame, void *d) | |||
26 | struct return_address_data *data = d; | 26 | struct return_address_data *data = d; |
27 | 27 | ||
28 | if (!data->level) { | 28 | if (!data->level) { |
29 | data->addr = (void *)frame->lr; | 29 | data->addr = (void *)frame->pc; |
30 | 30 | ||
31 | return 1; | 31 | return 1; |
32 | } else { | 32 | } else { |
@@ -41,7 +41,8 @@ void *return_address(unsigned int level) | |||
41 | struct stackframe frame; | 41 | struct stackframe frame; |
42 | register unsigned long current_sp asm ("sp"); | 42 | register unsigned long current_sp asm ("sp"); |
43 | 43 | ||
44 | data.level = level + 1; | 44 | data.level = level + 2; |
45 | data.addr = NULL; | ||
45 | 46 | ||
46 | frame.fp = (unsigned long)__builtin_frame_address(0); | 47 | frame.fp = (unsigned long)__builtin_frame_address(0); |
47 | frame.sp = current_sp; | 48 | frame.sp = current_sp; |
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 234e339196c0..728007c4a2b7 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c | |||
@@ -290,10 +290,10 @@ static int cpu_has_aliasing_icache(unsigned int arch) | |||
290 | 290 | ||
291 | static void __init cacheid_init(void) | 291 | static void __init cacheid_init(void) |
292 | { | 292 | { |
293 | unsigned int cachetype = read_cpuid_cachetype(); | ||
294 | unsigned int arch = cpu_architecture(); | 293 | unsigned int arch = cpu_architecture(); |
295 | 294 | ||
296 | if (arch >= CPU_ARCH_ARMv6) { | 295 | if (arch >= CPU_ARCH_ARMv6) { |
296 | unsigned int cachetype = read_cpuid_cachetype(); | ||
297 | if ((cachetype & (7 << 29)) == 4 << 29) { | 297 | if ((cachetype & (7 << 29)) == 4 << 29) { |
298 | /* ARMv7 register format */ | 298 | /* ARMv7 register format */ |
299 | arch = CPU_ARCH_ARMv7; | 299 | arch = CPU_ARCH_ARMv7; |
@@ -389,7 +389,7 @@ static void __init feat_v6_fixup(void) | |||
389 | * | 389 | * |
390 | * cpu_init sets up the per-CPU stacks. | 390 | * cpu_init sets up the per-CPU stacks. |
391 | */ | 391 | */ |
392 | void cpu_init(void) | 392 | void notrace cpu_init(void) |
393 | { | 393 | { |
394 | unsigned int cpu = smp_processor_id(); | 394 | unsigned int cpu = smp_processor_id(); |
395 | struct stack *stk = &stacks[cpu]; | 395 | struct stack *stk = &stacks[cpu]; |
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 4619177bcfe6..47ab90563bf4 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c | |||
@@ -211,6 +211,13 @@ void __cpuinit __cpu_die(unsigned int cpu) | |||
211 | } | 211 | } |
212 | printk(KERN_NOTICE "CPU%u: shutdown\n", cpu); | 212 | printk(KERN_NOTICE "CPU%u: shutdown\n", cpu); |
213 | 213 | ||
214 | /* | ||
215 | * platform_cpu_kill() is generally expected to do the powering off | ||
216 | * and/or cutting of clocks to the dying CPU. Optionally, this may | ||
217 | * be done by the CPU which is dying in preference to supporting | ||
218 | * this call, but that means there is _no_ synchronisation between | ||
219 | * the requesting CPU and the dying CPU actually losing power. | ||
220 | */ | ||
214 | if (!platform_cpu_kill(cpu)) | 221 | if (!platform_cpu_kill(cpu)) |
215 | printk("CPU%u: unable to kill\n", cpu); | 222 | printk("CPU%u: unable to kill\n", cpu); |
216 | } | 223 | } |
@@ -230,14 +237,41 @@ void __ref cpu_die(void) | |||
230 | idle_task_exit(); | 237 | idle_task_exit(); |
231 | 238 | ||
232 | local_irq_disable(); | 239 | local_irq_disable(); |
233 | mb(); | ||
234 | 240 | ||
235 | /* Tell __cpu_die() that this CPU is now safe to dispose of */ | 241 | /* |
242 | * Flush the data out of the L1 cache for this CPU. This must be | ||
243 | * before the completion to ensure that data is safely written out | ||
244 | * before platform_cpu_kill() gets called - which may disable | ||
245 | * *this* CPU and power down its cache. | ||
246 | */ | ||
247 | flush_cache_louis(); | ||
248 | |||
249 | /* | ||
250 | * Tell __cpu_die() that this CPU is now safe to dispose of. Once | ||
251 | * this returns, power and/or clocks can be removed at any point | ||
252 | * from this CPU and its cache by platform_cpu_kill(). | ||
253 | */ | ||
236 | RCU_NONIDLE(complete(&cpu_died)); | 254 | RCU_NONIDLE(complete(&cpu_died)); |
237 | 255 | ||
238 | /* | 256 | /* |
239 | * actual CPU shutdown procedure is at least platform (if not | 257 | * Ensure that the cache lines associated with that completion are |
240 | * CPU) specific. | 258 | * written out. This covers the case where _this_ CPU is doing the |
259 | * powering down, to ensure that the completion is visible to the | ||
260 | * CPU waiting for this one. | ||
261 | */ | ||
262 | flush_cache_louis(); | ||
263 | |||
264 | /* | ||
265 | * The actual CPU shutdown procedure is at least platform (if not | ||
266 | * CPU) specific. This may remove power, or it may simply spin. | ||
267 | * | ||
268 | * Platforms are generally expected *NOT* to return from this call, | ||
269 | * although there are some which do because they have no way to | ||
270 | * power down the CPU. These platforms are the _only_ reason we | ||
271 | * have a return path which uses the fragment of assembly below. | ||
272 | * | ||
273 | * The return path should not be used for platforms which can | ||
274 | * power off the CPU. | ||
241 | */ | 275 | */ |
242 | if (smp_ops.cpu_die) | 276 | if (smp_ops.cpu_die) |
243 | smp_ops.cpu_die(cpu); | 277 | smp_ops.cpu_die(cpu); |
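For the case described in the comments above, where the requesting CPU does the actual power cut via platform_cpu_kill(), the dying CPU's smp_ops.cpu_die hook can be as simple as parking in WFI (a sketch only; my_cpu_die is a hypothetical name, not from this patch):

	static void my_cpu_die(unsigned int cpu)
	{
		/*
		 * Caches were already flushed by cpu_die(); just sit in a
		 * low-power loop until platform_cpu_kill() on another CPU
		 * removes power from this one.
		 */
		for (;;)
			asm volatile("wfi" ::: "memory");
	}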
diff --git a/arch/arm/kernel/smp_scu.c b/arch/arm/kernel/smp_scu.c index 45eac87ed66a..5bc1a63284e3 100644 --- a/arch/arm/kernel/smp_scu.c +++ b/arch/arm/kernel/smp_scu.c | |||
@@ -41,7 +41,7 @@ void scu_enable(void __iomem *scu_base) | |||
41 | 41 | ||
42 | #ifdef CONFIG_ARM_ERRATA_764369 | 42 | #ifdef CONFIG_ARM_ERRATA_764369 |
43 | /* Cortex-A9 only */ | 43 | /* Cortex-A9 only */ |
44 | if ((read_cpuid(CPUID_ID) & 0xff0ffff0) == 0x410fc090) { | 44 | if ((read_cpuid_id() & 0xff0ffff0) == 0x410fc090) { |
45 | scu_ctrl = __raw_readl(scu_base + 0x30); | 45 | scu_ctrl = __raw_readl(scu_base + 0x30); |
46 | if (!(scu_ctrl & 1)) | 46 | if (!(scu_ctrl & 1)) |
47 | __raw_writel(scu_ctrl | 0x1, scu_base + 0x30); | 47 | __raw_writel(scu_ctrl | 0x1, scu_base + 0x30); |
diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c index e82e1d248772..9a52a07aa40e 100644 --- a/arch/arm/kernel/smp_tlb.c +++ b/arch/arm/kernel/smp_tlb.c | |||
@@ -98,21 +98,21 @@ static void broadcast_tlb_a15_erratum(void) | |||
98 | return; | 98 | return; |
99 | 99 | ||
100 | dummy_flush_tlb_a15_erratum(); | 100 | dummy_flush_tlb_a15_erratum(); |
101 | smp_call_function_many(cpu_online_mask, ipi_flush_tlb_a15_erratum, | 101 | smp_call_function(ipi_flush_tlb_a15_erratum, NULL, 1); |
102 | NULL, 1); | ||
103 | } | 102 | } |
104 | 103 | ||
105 | static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm) | 104 | static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm) |
106 | { | 105 | { |
107 | int cpu; | 106 | int cpu, this_cpu; |
108 | cpumask_t mask = { CPU_BITS_NONE }; | 107 | cpumask_t mask = { CPU_BITS_NONE }; |
109 | 108 | ||
110 | if (!erratum_a15_798181()) | 109 | if (!erratum_a15_798181()) |
111 | return; | 110 | return; |
112 | 111 | ||
113 | dummy_flush_tlb_a15_erratum(); | 112 | dummy_flush_tlb_a15_erratum(); |
113 | this_cpu = get_cpu(); | ||
114 | for_each_online_cpu(cpu) { | 114 | for_each_online_cpu(cpu) { |
115 | if (cpu == smp_processor_id()) | 115 | if (cpu == this_cpu) |
116 | continue; | 116 | continue; |
117 | /* | 117 | /* |
118 | * We only need to send an IPI if the other CPUs are running | 118 | * We only need to send an IPI if the other CPUs are running |
@@ -127,6 +127,7 @@ static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm) | |||
127 | cpumask_set_cpu(cpu, &mask); | 127 | cpumask_set_cpu(cpu, &mask); |
128 | } | 128 | } |
129 | smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1); | 129 | smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1); |
130 | put_cpu(); | ||
130 | } | 131 | } |
131 | 132 | ||
132 | void flush_tlb_all(void) | 133 | void flush_tlb_all(void) |
diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile index fc96ce6f2357..8dc5e76cb789 100644 --- a/arch/arm/kvm/Makefile +++ b/arch/arm/kvm/Makefile | |||
@@ -17,7 +17,7 @@ AFLAGS_interrupts.o := -Wa,-march=armv7-a$(plus_virt) | |||
17 | kvm-arm-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o) | 17 | kvm-arm-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o) |
18 | 18 | ||
19 | obj-y += kvm-arm.o init.o interrupts.o | 19 | obj-y += kvm-arm.o init.o interrupts.o |
20 | obj-y += arm.o guest.o mmu.o emulate.o reset.o | 20 | obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o |
21 | obj-y += coproc.o coproc_a15.o mmio.o psci.o | 21 | obj-y += coproc.o coproc_a15.o mmio.o psci.o |
22 | obj-$(CONFIG_KVM_ARM_VGIC) += vgic.o | 22 | obj-$(CONFIG_KVM_ARM_VGIC) += vgic.o |
23 | obj-$(CONFIG_KVM_ARM_TIMER) += arch_timer.o | 23 | obj-$(CONFIG_KVM_ARM_TIMER) += arch_timer.o |
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index 842098d78f58..a0dfc2a53f91 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c | |||
@@ -30,11 +30,9 @@ | |||
30 | #define CREATE_TRACE_POINTS | 30 | #define CREATE_TRACE_POINTS |
31 | #include "trace.h" | 31 | #include "trace.h" |
32 | 32 | ||
33 | #include <asm/unified.h> | ||
34 | #include <asm/uaccess.h> | 33 | #include <asm/uaccess.h> |
35 | #include <asm/ptrace.h> | 34 | #include <asm/ptrace.h> |
36 | #include <asm/mman.h> | 35 | #include <asm/mman.h> |
37 | #include <asm/cputype.h> | ||
38 | #include <asm/tlbflush.h> | 36 | #include <asm/tlbflush.h> |
39 | #include <asm/cacheflush.h> | 37 | #include <asm/cacheflush.h> |
40 | #include <asm/virt.h> | 38 | #include <asm/virt.h> |
@@ -44,14 +42,13 @@ | |||
44 | #include <asm/kvm_emulate.h> | 42 | #include <asm/kvm_emulate.h> |
45 | #include <asm/kvm_coproc.h> | 43 | #include <asm/kvm_coproc.h> |
46 | #include <asm/kvm_psci.h> | 44 | #include <asm/kvm_psci.h> |
47 | #include <asm/opcodes.h> | ||
48 | 45 | ||
49 | #ifdef REQUIRES_VIRT | 46 | #ifdef REQUIRES_VIRT |
50 | __asm__(".arch_extension virt"); | 47 | __asm__(".arch_extension virt"); |
51 | #endif | 48 | #endif |
52 | 49 | ||
53 | static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page); | 50 | static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page); |
54 | static struct vfp_hard_struct __percpu *kvm_host_vfp_state; | 51 | static kvm_kernel_vfp_t __percpu *kvm_host_vfp_state; |
55 | static unsigned long hyp_default_vectors; | 52 | static unsigned long hyp_default_vectors; |
56 | 53 | ||
57 | /* Per-CPU variable containing the currently running vcpu. */ | 54 | /* Per-CPU variable containing the currently running vcpu. */ |
@@ -304,22 +301,6 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) | |||
304 | return 0; | 301 | return 0; |
305 | } | 302 | } |
306 | 303 | ||
307 | int __attribute_const__ kvm_target_cpu(void) | ||
308 | { | ||
309 | unsigned long implementor = read_cpuid_implementor(); | ||
310 | unsigned long part_number = read_cpuid_part_number(); | ||
311 | |||
312 | if (implementor != ARM_CPU_IMP_ARM) | ||
313 | return -EINVAL; | ||
314 | |||
315 | switch (part_number) { | ||
316 | case ARM_CPU_PART_CORTEX_A15: | ||
317 | return KVM_ARM_TARGET_CORTEX_A15; | ||
318 | default: | ||
319 | return -EINVAL; | ||
320 | } | ||
321 | } | ||
322 | |||
323 | int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) | 304 | int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) |
324 | { | 305 | { |
325 | int ret; | 306 | int ret; |
@@ -482,163 +463,6 @@ static void update_vttbr(struct kvm *kvm) | |||
482 | spin_unlock(&kvm_vmid_lock); | 463 | spin_unlock(&kvm_vmid_lock); |
483 | } | 464 | } |
484 | 465 | ||
485 | static int handle_svc_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run) | ||
486 | { | ||
487 | /* SVC called from Hyp mode should never get here */ | ||
488 | kvm_debug("SVC called from Hyp mode shouldn't go here\n"); | ||
489 | BUG(); | ||
490 | return -EINVAL; /* Squash warning */ | ||
491 | } | ||
492 | |||
493 | static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run) | ||
494 | { | ||
495 | trace_kvm_hvc(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0), | ||
496 | vcpu->arch.hsr & HSR_HVC_IMM_MASK); | ||
497 | |||
498 | if (kvm_psci_call(vcpu)) | ||
499 | return 1; | ||
500 | |||
501 | kvm_inject_undefined(vcpu); | ||
502 | return 1; | ||
503 | } | ||
504 | |||
505 | static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run) | ||
506 | { | ||
507 | if (kvm_psci_call(vcpu)) | ||
508 | return 1; | ||
509 | |||
510 | kvm_inject_undefined(vcpu); | ||
511 | return 1; | ||
512 | } | ||
513 | |||
514 | static int handle_pabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run) | ||
515 | { | ||
516 | /* The hypervisor should never cause aborts */ | ||
517 | kvm_err("Prefetch Abort taken from Hyp mode at %#08x (HSR: %#08x)\n", | ||
518 | vcpu->arch.hxfar, vcpu->arch.hsr); | ||
519 | return -EFAULT; | ||
520 | } | ||
521 | |||
522 | static int handle_dabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run) | ||
523 | { | ||
524 | /* This is either an error in the ws. code or an external abort */ | ||
525 | kvm_err("Data Abort taken from Hyp mode at %#08x (HSR: %#08x)\n", | ||
526 | vcpu->arch.hxfar, vcpu->arch.hsr); | ||
527 | return -EFAULT; | ||
528 | } | ||
529 | |||
530 | typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *); | ||
531 | static exit_handle_fn arm_exit_handlers[] = { | ||
532 | [HSR_EC_WFI] = kvm_handle_wfi, | ||
533 | [HSR_EC_CP15_32] = kvm_handle_cp15_32, | ||
534 | [HSR_EC_CP15_64] = kvm_handle_cp15_64, | ||
535 | [HSR_EC_CP14_MR] = kvm_handle_cp14_access, | ||
536 | [HSR_EC_CP14_LS] = kvm_handle_cp14_load_store, | ||
537 | [HSR_EC_CP14_64] = kvm_handle_cp14_access, | ||
538 | [HSR_EC_CP_0_13] = kvm_handle_cp_0_13_access, | ||
539 | [HSR_EC_CP10_ID] = kvm_handle_cp10_id, | ||
540 | [HSR_EC_SVC_HYP] = handle_svc_hyp, | ||
541 | [HSR_EC_HVC] = handle_hvc, | ||
542 | [HSR_EC_SMC] = handle_smc, | ||
543 | [HSR_EC_IABT] = kvm_handle_guest_abort, | ||
544 | [HSR_EC_IABT_HYP] = handle_pabt_hyp, | ||
545 | [HSR_EC_DABT] = kvm_handle_guest_abort, | ||
546 | [HSR_EC_DABT_HYP] = handle_dabt_hyp, | ||
547 | }; | ||
548 | |||
549 | /* | ||
550 | * A conditional instruction is allowed to trap, even though it | ||
551 | * wouldn't be executed. So let's re-implement the hardware, in | ||
552 | * software! | ||
553 | */ | ||
554 | static bool kvm_condition_valid(struct kvm_vcpu *vcpu) | ||
555 | { | ||
556 | unsigned long cpsr, cond, insn; | ||
557 | |||
558 | /* | ||
559 | * Exception Code 0 can only happen if we set HCR.TGE to 1, to | ||
560 | * catch undefined instructions, and then we won't get past | ||
561 | * the arm_exit_handlers test anyway. | ||
562 | */ | ||
563 | BUG_ON(((vcpu->arch.hsr & HSR_EC) >> HSR_EC_SHIFT) == 0); | ||
564 | |||
565 | /* Top two bits non-zero? Unconditional. */ | ||
566 | if (vcpu->arch.hsr >> 30) | ||
567 | return true; | ||
568 | |||
569 | cpsr = *vcpu_cpsr(vcpu); | ||
570 | |||
571 | /* Is condition field valid? */ | ||
572 | if ((vcpu->arch.hsr & HSR_CV) >> HSR_CV_SHIFT) | ||
573 | cond = (vcpu->arch.hsr & HSR_COND) >> HSR_COND_SHIFT; | ||
574 | else { | ||
575 | /* This can happen in Thumb mode: examine IT state. */ | ||
576 | unsigned long it; | ||
577 | |||
578 | it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3); | ||
579 | |||
580 | /* it == 0 => unconditional. */ | ||
581 | if (it == 0) | ||
582 | return true; | ||
583 | |||
584 | /* The cond for this insn works out as the top 4 bits. */ | ||
585 | cond = (it >> 4); | ||
586 | } | ||
587 | |||
588 | /* Shift makes it look like an ARM-mode instruction */ | ||
589 | insn = cond << 28; | ||
590 | return arm_check_condition(insn, cpsr) != ARM_OPCODE_CONDTEST_FAIL; | ||
591 | } | ||
592 | |||
593 | /* | ||
594 | * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on | ||
595 | * proper exit to QEMU. | ||
596 | */ | ||
597 | static int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, | ||
598 | int exception_index) | ||
599 | { | ||
600 | unsigned long hsr_ec; | ||
601 | |||
602 | switch (exception_index) { | ||
603 | case ARM_EXCEPTION_IRQ: | ||
604 | return 1; | ||
605 | case ARM_EXCEPTION_UNDEFINED: | ||
606 | kvm_err("Undefined exception in Hyp mode at: %#08x\n", | ||
607 | vcpu->arch.hyp_pc); | ||
608 | BUG(); | ||
609 | panic("KVM: Hypervisor undefined exception!\n"); | ||
610 | case ARM_EXCEPTION_DATA_ABORT: | ||
611 | case ARM_EXCEPTION_PREF_ABORT: | ||
612 | case ARM_EXCEPTION_HVC: | ||
613 | hsr_ec = (vcpu->arch.hsr & HSR_EC) >> HSR_EC_SHIFT; | ||
614 | |||
615 | if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) | ||
616 | || !arm_exit_handlers[hsr_ec]) { | ||
617 | kvm_err("Unknown exception class: %#08lx, " | ||
618 | "hsr: %#08x\n", hsr_ec, | ||
619 | (unsigned int)vcpu->arch.hsr); | ||
620 | BUG(); | ||
621 | } | ||
622 | |||
623 | /* | ||
624 | * See ARM ARM B1.14.1: "Hyp traps on instructions | ||
625 | * that fail their condition code check" | ||
626 | */ | ||
627 | if (!kvm_condition_valid(vcpu)) { | ||
628 | bool is_wide = vcpu->arch.hsr & HSR_IL; | ||
629 | kvm_skip_instr(vcpu, is_wide); | ||
630 | return 1; | ||
631 | } | ||
632 | |||
633 | return arm_exit_handlers[hsr_ec](vcpu, run); | ||
634 | default: | ||
635 | kvm_pr_unimpl("Unsupported exception type: %d", | ||
636 | exception_index); | ||
637 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | ||
638 | return 0; | ||
639 | } | ||
640 | } | ||
641 | |||
642 | static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu) | 466 | static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu) |
643 | { | 467 | { |
644 | if (likely(vcpu->arch.has_run_once)) | 468 | if (likely(vcpu->arch.has_run_once)) |
@@ -973,7 +797,6 @@ long kvm_arch_vm_ioctl(struct file *filp, | |||
973 | static void cpu_init_hyp_mode(void *vector) | 797 | static void cpu_init_hyp_mode(void *vector) |
974 | { | 798 | { |
975 | unsigned long long pgd_ptr; | 799 | unsigned long long pgd_ptr; |
976 | unsigned long pgd_low, pgd_high; | ||
977 | unsigned long hyp_stack_ptr; | 800 | unsigned long hyp_stack_ptr; |
978 | unsigned long stack_page; | 801 | unsigned long stack_page; |
979 | unsigned long vector_ptr; | 802 | unsigned long vector_ptr; |
@@ -982,20 +805,11 @@ static void cpu_init_hyp_mode(void *vector) | |||
982 | __hyp_set_vectors((unsigned long)vector); | 805 | __hyp_set_vectors((unsigned long)vector); |
983 | 806 | ||
984 | pgd_ptr = (unsigned long long)kvm_mmu_get_httbr(); | 807 | pgd_ptr = (unsigned long long)kvm_mmu_get_httbr(); |
985 | pgd_low = (pgd_ptr & ((1ULL << 32) - 1)); | ||
986 | pgd_high = (pgd_ptr >> 32ULL); | ||
987 | stack_page = __get_cpu_var(kvm_arm_hyp_stack_page); | 808 | stack_page = __get_cpu_var(kvm_arm_hyp_stack_page); |
988 | hyp_stack_ptr = stack_page + PAGE_SIZE; | 809 | hyp_stack_ptr = stack_page + PAGE_SIZE; |
989 | vector_ptr = (unsigned long)__kvm_hyp_vector; | 810 | vector_ptr = (unsigned long)__kvm_hyp_vector; |
990 | 811 | ||
991 | /* | 812 | __cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr); |
992 | * Call initialization code, and switch to the full blown | ||
993 | * HYP code. The init code doesn't need to preserve these registers as | ||
994 | * r1-r3 and r12 are already callee save according to the AAPCS. | ||
995 | * Note that we slightly misuse the prototype by casing the pgd_low to | ||
996 | * a void *. | ||
997 | */ | ||
998 | kvm_call_hyp((void *)pgd_low, pgd_high, hyp_stack_ptr, vector_ptr); | ||
999 | } | 813 | } |
1000 | 814 | ||
1001 | /** | 815 | /** |
@@ -1078,7 +892,7 @@ static int init_hyp_mode(void) | |||
1078 | /* | 892 | /* |
1079 | * Map the host VFP structures | 893 | * Map the host VFP structures |
1080 | */ | 894 | */ |
1081 | kvm_host_vfp_state = alloc_percpu(struct vfp_hard_struct); | 895 | kvm_host_vfp_state = alloc_percpu(kvm_kernel_vfp_t); |
1082 | if (!kvm_host_vfp_state) { | 896 | if (!kvm_host_vfp_state) { |
1083 | err = -ENOMEM; | 897 | err = -ENOMEM; |
1084 | kvm_err("Cannot allocate host VFP state\n"); | 898 | kvm_err("Cannot allocate host VFP state\n"); |
@@ -1086,7 +900,7 @@ static int init_hyp_mode(void) | |||
1086 | } | 900 | } |
1087 | 901 | ||
1088 | for_each_possible_cpu(cpu) { | 902 | for_each_possible_cpu(cpu) { |
1089 | struct vfp_hard_struct *vfp; | 903 | kvm_kernel_vfp_t *vfp; |
1090 | 904 | ||
1091 | vfp = per_cpu_ptr(kvm_host_vfp_state, cpu); | 905 | vfp = per_cpu_ptr(kvm_host_vfp_state, cpu); |
1092 | err = create_hyp_mappings(vfp, vfp + 1); | 906 | err = create_hyp_mappings(vfp, vfp + 1); |
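
Editor's note: the arm.c hunk above drops the manual split of the 64-bit HTTBR value into two 32-bit words before the Hyp call, hiding it behind __cpu_init_hyp_mode(). As a hedged, standalone illustration of the low/high split the removed code performed (the value and names below are made up, this is not kernel API):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* A 64-bit physical address, standing in for what
	 * kvm_mmu_get_httbr() returns in the hunk above. */
	uint64_t pgd_ptr = 0x00000001234ff000ULL;

	/* AArch32 registers are 32 bits wide, so the removed code passed
	 * the table pointer to Hyp mode as two words. */
	uint32_t pgd_low  = (uint32_t)(pgd_ptr & 0xffffffffULL);
	uint32_t pgd_high = (uint32_t)(pgd_ptr >> 32);

	printf("low=%#x high=%#x\n", pgd_low, pgd_high);
	return 0;
}
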
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c index 7bed7556077a..8eea97be1ed5 100644 --- a/arch/arm/kvm/coproc.c +++ b/arch/arm/kvm/coproc.c | |||
@@ -76,7 +76,7 @@ static bool access_dcsw(struct kvm_vcpu *vcpu, | |||
76 | const struct coproc_params *p, | 76 | const struct coproc_params *p, |
77 | const struct coproc_reg *r) | 77 | const struct coproc_reg *r) |
78 | { | 78 | { |
79 | u32 val; | 79 | unsigned long val; |
80 | int cpu; | 80 | int cpu; |
81 | 81 | ||
82 | if (!p->is_write) | 82 | if (!p->is_write) |
@@ -293,12 +293,12 @@ static int emulate_cp15(struct kvm_vcpu *vcpu, | |||
293 | 293 | ||
294 | if (likely(r->access(vcpu, params, r))) { | 294 | if (likely(r->access(vcpu, params, r))) { |
295 | /* Skip instruction, since it was emulated */ | 295 | /* Skip instruction, since it was emulated */ |
296 | kvm_skip_instr(vcpu, (vcpu->arch.hsr >> 25) & 1); | 296 | kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); |
297 | return 1; | 297 | return 1; |
298 | } | 298 | } |
299 | /* If access function fails, it should complain. */ | 299 | /* If access function fails, it should complain. */ |
300 | } else { | 300 | } else { |
301 | kvm_err("Unsupported guest CP15 access at: %08x\n", | 301 | kvm_err("Unsupported guest CP15 access at: %08lx\n", |
302 | *vcpu_pc(vcpu)); | 302 | *vcpu_pc(vcpu)); |
303 | print_cp_instr(params); | 303 | print_cp_instr(params); |
304 | } | 304 | } |
@@ -315,14 +315,14 @@ int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
315 | { | 315 | { |
316 | struct coproc_params params; | 316 | struct coproc_params params; |
317 | 317 | ||
318 | params.CRm = (vcpu->arch.hsr >> 1) & 0xf; | 318 | params.CRm = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf; |
319 | params.Rt1 = (vcpu->arch.hsr >> 5) & 0xf; | 319 | params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf; |
320 | params.is_write = ((vcpu->arch.hsr & 1) == 0); | 320 | params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0); |
321 | params.is_64bit = true; | 321 | params.is_64bit = true; |
322 | 322 | ||
323 | params.Op1 = (vcpu->arch.hsr >> 16) & 0xf; | 323 | params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 16) & 0xf; |
324 | params.Op2 = 0; | 324 | params.Op2 = 0; |
325 | params.Rt2 = (vcpu->arch.hsr >> 10) & 0xf; | 325 | params.Rt2 = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf; |
326 | params.CRn = 0; | 326 | params.CRn = 0; |
327 | 327 | ||
328 | return emulate_cp15(vcpu, ¶ms); | 328 | return emulate_cp15(vcpu, ¶ms); |
@@ -347,14 +347,14 @@ int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
347 | { | 347 | { |
348 | struct coproc_params params; | 348 | struct coproc_params params; |
349 | 349 | ||
350 | params.CRm = (vcpu->arch.hsr >> 1) & 0xf; | 350 | params.CRm = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf; |
351 | params.Rt1 = (vcpu->arch.hsr >> 5) & 0xf; | 351 | params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf; |
352 | params.is_write = ((vcpu->arch.hsr & 1) == 0); | 352 | params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0); |
353 | params.is_64bit = false; | 353 | params.is_64bit = false; |
354 | 354 | ||
355 | params.CRn = (vcpu->arch.hsr >> 10) & 0xf; | 355 | params.CRn = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf; |
356 | params.Op1 = (vcpu->arch.hsr >> 14) & 0x7; | 356 | params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 14) & 0x7; |
357 | params.Op2 = (vcpu->arch.hsr >> 17) & 0x7; | 357 | params.Op2 = (kvm_vcpu_get_hsr(vcpu) >> 17) & 0x7; |
358 | params.Rt2 = 0; | 358 | params.Rt2 = 0; |
359 | 359 | ||
360 | return emulate_cp15(vcpu, ¶ms); | 360 | return emulate_cp15(vcpu, ¶ms); |
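
Editor's note: the coproc.c hunks replace open-coded shifts on vcpu->arch.hsr with kvm_vcpu_get_hsr() accessors, but the syndrome layout itself is unchanged. A hedged, standalone sketch of the bit-field extraction that fills coproc_params (shift amounts copied from the 64-bit CP15 hunk above; the syndrome value is made up):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t hsr = 0x03a00c86;	/* made-up syndrome value */

	/* Same shifts and masks as kvm_handle_cp15_64() above. */
	unsigned int crm = (hsr >> 1)  & 0xf;
	unsigned int rt1 = (hsr >> 5)  & 0xf;
	unsigned int rt2 = (hsr >> 10) & 0xf;
	unsigned int op1 = (hsr >> 16) & 0xf;
	bool is_write    = (hsr & 1) == 0;

	printf("CRm=%u Rt1=%u Rt2=%u Op1=%u write=%d\n",
	       crm, rt1, rt2, op1, is_write);
	return 0;
}
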
diff --git a/arch/arm/kvm/coproc.h b/arch/arm/kvm/coproc.h index 992adfafa2ff..b7301d3e4799 100644 --- a/arch/arm/kvm/coproc.h +++ b/arch/arm/kvm/coproc.h | |||
@@ -84,7 +84,7 @@ static inline bool read_zero(struct kvm_vcpu *vcpu, | |||
84 | static inline bool write_to_read_only(struct kvm_vcpu *vcpu, | 84 | static inline bool write_to_read_only(struct kvm_vcpu *vcpu, |
85 | const struct coproc_params *params) | 85 | const struct coproc_params *params) |
86 | { | 86 | { |
87 | kvm_debug("CP15 write to read-only register at: %08x\n", | 87 | kvm_debug("CP15 write to read-only register at: %08lx\n", |
88 | *vcpu_pc(vcpu)); | 88 | *vcpu_pc(vcpu)); |
89 | print_cp_instr(params); | 89 | print_cp_instr(params); |
90 | return false; | 90 | return false; |
@@ -93,7 +93,7 @@ static inline bool write_to_read_only(struct kvm_vcpu *vcpu, | |||
93 | static inline bool read_from_write_only(struct kvm_vcpu *vcpu, | 93 | static inline bool read_from_write_only(struct kvm_vcpu *vcpu, |
94 | const struct coproc_params *params) | 94 | const struct coproc_params *params) |
95 | { | 95 | { |
96 | kvm_debug("CP15 read to write-only register at: %08x\n", | 96 | kvm_debug("CP15 read to write-only register at: %08lx\n", |
97 | *vcpu_pc(vcpu)); | 97 | *vcpu_pc(vcpu)); |
98 | print_cp_instr(params); | 98 | print_cp_instr(params); |
99 | return false; | 99 | return false; |
diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c index d61450ac6665..bdede9e7da51 100644 --- a/arch/arm/kvm/emulate.c +++ b/arch/arm/kvm/emulate.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/kvm_host.h> | 20 | #include <linux/kvm_host.h> |
21 | #include <asm/kvm_arm.h> | 21 | #include <asm/kvm_arm.h> |
22 | #include <asm/kvm_emulate.h> | 22 | #include <asm/kvm_emulate.h> |
23 | #include <asm/opcodes.h> | ||
23 | #include <trace/events/kvm.h> | 24 | #include <trace/events/kvm.h> |
24 | 25 | ||
25 | #include "trace.h" | 26 | #include "trace.h" |
@@ -109,10 +110,10 @@ static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][15] = { | |||
109 | * Return a pointer to the register number valid in the current mode of | 110 | * Return a pointer to the register number valid in the current mode of |
110 | * the virtual CPU. | 111 | * the virtual CPU. |
111 | */ | 112 | */ |
112 | u32 *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num) | 113 | unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num) |
113 | { | 114 | { |
114 | u32 *reg_array = (u32 *)&vcpu->arch.regs; | 115 | unsigned long *reg_array = (unsigned long *)&vcpu->arch.regs; |
115 | u32 mode = *vcpu_cpsr(vcpu) & MODE_MASK; | 116 | unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK; |
116 | 117 | ||
117 | switch (mode) { | 118 | switch (mode) { |
118 | case USR_MODE...SVC_MODE: | 119 | case USR_MODE...SVC_MODE: |
@@ -141,9 +142,9 @@ u32 *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num) | |||
141 | /* | 142 | /* |
142 | * Return the SPSR for the current mode of the virtual CPU. | 143 | * Return the SPSR for the current mode of the virtual CPU. |
143 | */ | 144 | */ |
144 | u32 *vcpu_spsr(struct kvm_vcpu *vcpu) | 145 | unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu) |
145 | { | 146 | { |
146 | u32 mode = *vcpu_cpsr(vcpu) & MODE_MASK; | 147 | unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK; |
147 | switch (mode) { | 148 | switch (mode) { |
148 | case SVC_MODE: | 149 | case SVC_MODE: |
149 | return &vcpu->arch.regs.KVM_ARM_SVC_spsr; | 150 | return &vcpu->arch.regs.KVM_ARM_SVC_spsr; |
@@ -160,20 +161,48 @@ u32 *vcpu_spsr(struct kvm_vcpu *vcpu) | |||
160 | } | 161 | } |
161 | } | 162 | } |
162 | 163 | ||
163 | /** | 164 | /* |
164 | * kvm_handle_wfi - handle a wait-for-interrupts instruction executed by a guest | 165 | * A conditional instruction is allowed to trap, even though it |
165 | * @vcpu: the vcpu pointer | 166 | * wouldn't be executed. So let's re-implement the hardware, in |
166 | * @run: the kvm_run structure pointer | 167 | * software! |
167 | * | ||
168 | * Simply sets the wait_for_interrupts flag on the vcpu structure, which will | ||
169 | * halt execution of world-switches and schedule other host processes until | ||
170 | * there is an incoming IRQ or FIQ to the VM. | ||
171 | */ | 168 | */ |
172 | int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run) | 169 | bool kvm_condition_valid(struct kvm_vcpu *vcpu) |
173 | { | 170 | { |
174 | trace_kvm_wfi(*vcpu_pc(vcpu)); | 171 | unsigned long cpsr, cond, insn; |
175 | kvm_vcpu_block(vcpu); | 172 | |
176 | return 1; | 173 | /* |
174 | * Exception Code 0 can only happen if we set HCR.TGE to 1, to | ||
175 | * catch undefined instructions, and then we won't get past | ||
176 | * the arm_exit_handlers test anyway. | ||
177 | */ | ||
178 | BUG_ON(!kvm_vcpu_trap_get_class(vcpu)); | ||
179 | |||
180 | /* Top two bits non-zero? Unconditional. */ | ||
181 | if (kvm_vcpu_get_hsr(vcpu) >> 30) | ||
182 | return true; | ||
183 | |||
184 | cpsr = *vcpu_cpsr(vcpu); | ||
185 | |||
186 | /* Is condition field valid? */ | ||
187 | if ((kvm_vcpu_get_hsr(vcpu) & HSR_CV) >> HSR_CV_SHIFT) | ||
188 | cond = (kvm_vcpu_get_hsr(vcpu) & HSR_COND) >> HSR_COND_SHIFT; | ||
189 | else { | ||
190 | /* This can happen in Thumb mode: examine IT state. */ | ||
191 | unsigned long it; | ||
192 | |||
193 | it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3); | ||
194 | |||
195 | /* it == 0 => unconditional. */ | ||
196 | if (it == 0) | ||
197 | return true; | ||
198 | |||
199 | /* The cond for this insn works out as the top 4 bits. */ | ||
200 | cond = (it >> 4); | ||
201 | } | ||
202 | |||
203 | /* Shift makes it look like an ARM-mode instruction */ | ||
204 | insn = cond << 28; | ||
205 | return arm_check_condition(insn, cpsr) != ARM_OPCODE_CONDTEST_FAIL; | ||
177 | } | 206 | } |
178 | 207 | ||
179 | /** | 208 | /** |
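
Editor's note: the new kvm_condition_valid() above takes the condition either from HSR.COND or, for Thumb code, from the ITSTATE bits scattered across the CPSR, then builds a synthetic ARM encoding (cond << 28) for arm_check_condition(). A hedged, standalone sketch of the ITSTATE reassembly step (bit positions as in the hunk; the CPSR value is made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Made-up Thumb-mode CPSR whose IT field encodes condition NE. */
	uint32_t cpsr = 0x00001820;

	/* ITSTATE[7:2] lives in CPSR[15:10], ITSTATE[1:0] in CPSR[26:25];
	 * this mirrors the reassembly in kvm_condition_valid(). */
	uint32_t it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3);

	if (it == 0) {
		puts("unconditional");
		return 0;
	}

	/* The condition for the current instruction is the top four bits;
	 * shifting it to bits [31:28] makes it look like an ARM encoding,
	 * which is the form arm_check_condition() expects. */
	uint32_t cond = it >> 4;
	uint32_t insn = cond << 28;

	printf("cond=%#x synthetic insn=%#010x\n", cond, insn);
	return 0;
}
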
@@ -257,9 +286,9 @@ static u32 exc_vector_base(struct kvm_vcpu *vcpu) | |||
257 | */ | 286 | */ |
258 | void kvm_inject_undefined(struct kvm_vcpu *vcpu) | 287 | void kvm_inject_undefined(struct kvm_vcpu *vcpu) |
259 | { | 288 | { |
260 | u32 new_lr_value; | 289 | unsigned long new_lr_value; |
261 | u32 new_spsr_value; | 290 | unsigned long new_spsr_value; |
262 | u32 cpsr = *vcpu_cpsr(vcpu); | 291 | unsigned long cpsr = *vcpu_cpsr(vcpu); |
263 | u32 sctlr = vcpu->arch.cp15[c1_SCTLR]; | 292 | u32 sctlr = vcpu->arch.cp15[c1_SCTLR]; |
264 | bool is_thumb = (cpsr & PSR_T_BIT); | 293 | bool is_thumb = (cpsr & PSR_T_BIT); |
265 | u32 vect_offset = 4; | 294 | u32 vect_offset = 4; |
@@ -291,9 +320,9 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu) | |||
291 | */ | 320 | */ |
292 | static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr) | 321 | static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr) |
293 | { | 322 | { |
294 | u32 new_lr_value; | 323 | unsigned long new_lr_value; |
295 | u32 new_spsr_value; | 324 | unsigned long new_spsr_value; |
296 | u32 cpsr = *vcpu_cpsr(vcpu); | 325 | unsigned long cpsr = *vcpu_cpsr(vcpu); |
297 | u32 sctlr = vcpu->arch.cp15[c1_SCTLR]; | 326 | u32 sctlr = vcpu->arch.cp15[c1_SCTLR]; |
298 | bool is_thumb = (cpsr & PSR_T_BIT); | 327 | bool is_thumb = (cpsr & PSR_T_BIT); |
299 | u32 vect_offset; | 328 | u32 vect_offset; |
diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c index 2339d9609d36..152d03612181 100644 --- a/arch/arm/kvm/guest.c +++ b/arch/arm/kvm/guest.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/module.h> | 22 | #include <linux/module.h> |
23 | #include <linux/vmalloc.h> | 23 | #include <linux/vmalloc.h> |
24 | #include <linux/fs.h> | 24 | #include <linux/fs.h> |
25 | #include <asm/cputype.h> | ||
25 | #include <asm/uaccess.h> | 26 | #include <asm/uaccess.h> |
26 | #include <asm/kvm.h> | 27 | #include <asm/kvm.h> |
27 | #include <asm/kvm_asm.h> | 28 | #include <asm/kvm_asm.h> |
@@ -180,6 +181,22 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, | |||
180 | return -EINVAL; | 181 | return -EINVAL; |
181 | } | 182 | } |
182 | 183 | ||
184 | int __attribute_const__ kvm_target_cpu(void) | ||
185 | { | ||
186 | unsigned long implementor = read_cpuid_implementor(); | ||
187 | unsigned long part_number = read_cpuid_part_number(); | ||
188 | |||
189 | if (implementor != ARM_CPU_IMP_ARM) | ||
190 | return -EINVAL; | ||
191 | |||
192 | switch (part_number) { | ||
193 | case ARM_CPU_PART_CORTEX_A15: | ||
194 | return KVM_ARM_TARGET_CORTEX_A15; | ||
195 | default: | ||
196 | return -EINVAL; | ||
197 | } | ||
198 | } | ||
199 | |||
183 | int kvm_vcpu_set_target(struct kvm_vcpu *vcpu, | 200 | int kvm_vcpu_set_target(struct kvm_vcpu *vcpu, |
184 | const struct kvm_vcpu_init *init) | 201 | const struct kvm_vcpu_init *init) |
185 | { | 202 | { |
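
Editor's note: the new kvm_target_cpu() above keys off the CPU implementer and part number read via the kernel's read_cpuid_*() helpers. As a hedged, standalone sketch of where those fields sit in the AArch32 MIDR (the MIDR value and numeric constants below are illustrative assumptions for ARM Ltd / Cortex-A15, not taken from the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Plausible Cortex-A15 r2p1 MIDR value, used only for illustration. */
	uint32_t midr = 0x412FC0F1;

	uint32_t implementer = (midr >> 24) & 0xff;	/* 0x41, i.e. 'A', ARM Ltd */
	uint32_t part_number = (midr >> 4)  & 0xfff;	/* 0xC0F, Cortex-A15 */

	if (implementer == 0x41 && part_number == 0xC0F)
		puts("would report KVM_ARM_TARGET_CORTEX_A15");
	else
		puts("unsupported target");
	return 0;
}
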
diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c new file mode 100644 index 000000000000..3d74a0be47db --- /dev/null +++ b/arch/arm/kvm/handle_exit.c | |||
@@ -0,0 +1,164 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University | ||
3 | * Author: Christoffer Dall <c.dall@virtualopensystems.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License, version 2, as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
17 | */ | ||
18 | |||
19 | #include <linux/kvm.h> | ||
20 | #include <linux/kvm_host.h> | ||
21 | #include <asm/kvm_emulate.h> | ||
22 | #include <asm/kvm_coproc.h> | ||
23 | #include <asm/kvm_mmu.h> | ||
24 | #include <asm/kvm_psci.h> | ||
25 | #include <trace/events/kvm.h> | ||
26 | |||
27 | #include "trace.h" | ||
28 | |||
29 | #include "trace.h" | ||
30 | |||
31 | typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *); | ||
32 | |||
33 | static int handle_svc_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run) | ||
34 | { | ||
35 | /* SVC called from Hyp mode should never get here */ | ||
36 | kvm_debug("SVC called from Hyp mode shouldn't go here\n"); | ||
37 | BUG(); | ||
38 | return -EINVAL; /* Squash warning */ | ||
39 | } | ||
40 | |||
41 | static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run) | ||
42 | { | ||
43 | trace_kvm_hvc(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0), | ||
44 | kvm_vcpu_hvc_get_imm(vcpu)); | ||
45 | |||
46 | if (kvm_psci_call(vcpu)) | ||
47 | return 1; | ||
48 | |||
49 | kvm_inject_undefined(vcpu); | ||
50 | return 1; | ||
51 | } | ||
52 | |||
53 | static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run) | ||
54 | { | ||
55 | if (kvm_psci_call(vcpu)) | ||
56 | return 1; | ||
57 | |||
58 | kvm_inject_undefined(vcpu); | ||
59 | return 1; | ||
60 | } | ||
61 | |||
62 | static int handle_pabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run) | ||
63 | { | ||
64 | /* The hypervisor should never cause aborts */ | ||
65 | kvm_err("Prefetch Abort taken from Hyp mode at %#08lx (HSR: %#08x)\n", | ||
66 | kvm_vcpu_get_hfar(vcpu), kvm_vcpu_get_hsr(vcpu)); | ||
67 | return -EFAULT; | ||
68 | } | ||
69 | |||
70 | static int handle_dabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run) | ||
71 | { | ||
72 | /* This is either an error in the ws. code or an external abort */ | ||
73 | kvm_err("Data Abort taken from Hyp mode at %#08lx (HSR: %#08x)\n", | ||
74 | kvm_vcpu_get_hfar(vcpu), kvm_vcpu_get_hsr(vcpu)); | ||
75 | return -EFAULT; | ||
76 | } | ||
77 | |||
78 | /** | ||
79 | * kvm_handle_wfi - handle a wait-for-interrupts instruction executed by a guest | ||
80 | * @vcpu: the vcpu pointer | ||
81 | * @run: the kvm_run structure pointer | ||
82 | * | ||
83 | * Simply sets the wait_for_interrupts flag on the vcpu structure, which will | ||
84 | * halt execution of world-switches and schedule other host processes until | ||
85 | * there is an incoming IRQ or FIQ to the VM. | ||
86 | */ | ||
87 | static int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run) | ||
88 | { | ||
89 | trace_kvm_wfi(*vcpu_pc(vcpu)); | ||
90 | kvm_vcpu_block(vcpu); | ||
91 | return 1; | ||
92 | } | ||
93 | |||
94 | static exit_handle_fn arm_exit_handlers[] = { | ||
95 | [HSR_EC_WFI] = kvm_handle_wfi, | ||
96 | [HSR_EC_CP15_32] = kvm_handle_cp15_32, | ||
97 | [HSR_EC_CP15_64] = kvm_handle_cp15_64, | ||
98 | [HSR_EC_CP14_MR] = kvm_handle_cp14_access, | ||
99 | [HSR_EC_CP14_LS] = kvm_handle_cp14_load_store, | ||
100 | [HSR_EC_CP14_64] = kvm_handle_cp14_access, | ||
101 | [HSR_EC_CP_0_13] = kvm_handle_cp_0_13_access, | ||
102 | [HSR_EC_CP10_ID] = kvm_handle_cp10_id, | ||
103 | [HSR_EC_SVC_HYP] = handle_svc_hyp, | ||
104 | [HSR_EC_HVC] = handle_hvc, | ||
105 | [HSR_EC_SMC] = handle_smc, | ||
106 | [HSR_EC_IABT] = kvm_handle_guest_abort, | ||
107 | [HSR_EC_IABT_HYP] = handle_pabt_hyp, | ||
108 | [HSR_EC_DABT] = kvm_handle_guest_abort, | ||
109 | [HSR_EC_DABT_HYP] = handle_dabt_hyp, | ||
110 | }; | ||
111 | |||
112 | static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu) | ||
113 | { | ||
114 | u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu); | ||
115 | |||
116 | if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) || | ||
117 | !arm_exit_handlers[hsr_ec]) { | ||
118 | kvm_err("Unknown exception class: hsr: %#08x\n", | ||
119 | (unsigned int)kvm_vcpu_get_hsr(vcpu)); | ||
120 | BUG(); | ||
121 | } | ||
122 | |||
123 | return arm_exit_handlers[hsr_ec]; | ||
124 | } | ||
125 | |||
126 | /* | ||
127 | * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on | ||
128 | * proper exit to userspace. | ||
129 | */ | ||
130 | int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, | ||
131 | int exception_index) | ||
132 | { | ||
133 | exit_handle_fn exit_handler; | ||
134 | |||
135 | switch (exception_index) { | ||
136 | case ARM_EXCEPTION_IRQ: | ||
137 | return 1; | ||
138 | case ARM_EXCEPTION_UNDEFINED: | ||
139 | kvm_err("Undefined exception in Hyp mode at: %#08lx\n", | ||
140 | kvm_vcpu_get_hyp_pc(vcpu)); | ||
141 | BUG(); | ||
142 | panic("KVM: Hypervisor undefined exception!\n"); | ||
143 | case ARM_EXCEPTION_DATA_ABORT: | ||
144 | case ARM_EXCEPTION_PREF_ABORT: | ||
145 | case ARM_EXCEPTION_HVC: | ||
146 | /* | ||
147 | * See ARM ARM B1.14.1: "Hyp traps on instructions | ||
148 | * that fail their condition code check" | ||
149 | */ | ||
150 | if (!kvm_condition_valid(vcpu)) { | ||
151 | kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); | ||
152 | return 1; | ||
153 | } | ||
154 | |||
155 | exit_handler = kvm_get_exit_handler(vcpu); | ||
156 | |||
157 | return exit_handler(vcpu, run); | ||
158 | default: | ||
159 | kvm_pr_unimpl("Unsupported exception type: %d", | ||
160 | exception_index); | ||
161 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | ||
162 | return 0; | ||
163 | } | ||
164 | } | ||
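
Editor's note: the comment above handle_exit() in the new file defines a three-way return convention (> 0 re-enter the guest, 0 exit to userspace with exit_reason set, < 0 error). A hedged, standalone sketch of how a caller would consume it; this models the convention only and is not the actual kvm_arch_vcpu_ioctl_run():

#include <stdio.h>

/* Stand-in for handle_exit(): >0 resume guest, 0 back to userspace,
 * <0 error.  Here it simply counts down a budget for illustration. */
static int fake_handle_exit(int *budget)
{
	if (*budget < 0)
		return -1;
	return (*budget)-- ? 1 : 0;
}

int main(void)
{
	int budget = 3, ret;

	for (;;) {
		ret = fake_handle_exit(&budget);
		if (ret <= 0)
			break;			/* 0: userspace exit, <0: error */
		puts("re-entering guest");	/* ret > 0 */
	}
	printf("run loop finished with %d\n", ret);
	return 0;
}
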
diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S index 8ca87ab0919d..f7793df62f58 100644 --- a/arch/arm/kvm/interrupts.S +++ b/arch/arm/kvm/interrupts.S | |||
@@ -35,15 +35,18 @@ __kvm_hyp_code_start: | |||
35 | /******************************************************************** | 35 | /******************************************************************** |
36 | * Flush per-VMID TLBs | 36 | * Flush per-VMID TLBs |
37 | * | 37 | * |
38 | * void __kvm_tlb_flush_vmid(struct kvm *kvm); | 38 | * void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); |
39 | * | 39 | * |
40 | * We rely on the hardware to broadcast the TLB invalidation to all CPUs | 40 | * We rely on the hardware to broadcast the TLB invalidation to all CPUs |
41 | * inside the inner-shareable domain (which is the case for all v7 | 41 | * inside the inner-shareable domain (which is the case for all v7 |
42 | * implementations). If we come across a non-IS SMP implementation, we'll | 42 | * implementations). If we come across a non-IS SMP implementation, we'll |
43 | * have to use an IPI based mechanism. Until then, we stick to the simple | 43 | * have to use an IPI based mechanism. Until then, we stick to the simple |
44 | * hardware assisted version. | 44 | * hardware assisted version. |
45 | * | ||
46 | * As v7 does not support flushing per IPA, just nuke the whole TLB | ||
47 | * instead, ignoring the ipa value. | ||
45 | */ | 48 | */ |
46 | ENTRY(__kvm_tlb_flush_vmid) | 49 | ENTRY(__kvm_tlb_flush_vmid_ipa) |
47 | push {r2, r3} | 50 | push {r2, r3} |
48 | 51 | ||
49 | add r0, r0, #KVM_VTTBR | 52 | add r0, r0, #KVM_VTTBR |
@@ -60,7 +63,7 @@ ENTRY(__kvm_tlb_flush_vmid) | |||
60 | 63 | ||
61 | pop {r2, r3} | 64 | pop {r2, r3} |
62 | bx lr | 65 | bx lr |
63 | ENDPROC(__kvm_tlb_flush_vmid) | 66 | ENDPROC(__kvm_tlb_flush_vmid_ipa) |
64 | 67 | ||
65 | /******************************************************************** | 68 | /******************************************************************** |
66 | * Flush TLBs and instruction caches of all CPUs inside the inner-shareable | 69 | * Flush TLBs and instruction caches of all CPUs inside the inner-shareable |
@@ -235,9 +238,9 @@ ENTRY(kvm_call_hyp) | |||
235 | * instruction is issued since all traps are disabled when running the host | 238 | * instruction is issued since all traps are disabled when running the host |
236 | * kernel as per the Hyp-mode initialization at boot time. | 239 | * kernel as per the Hyp-mode initialization at boot time. |
237 | * | 240 | * |
238 | * HVC instructions cause a trap to the vector page + offset 0x18 (see hyp_hvc | 241 | * HVC instructions cause a trap to the vector page + offset 0x14 (see hyp_hvc |
239 | * below) when the HVC instruction is called from SVC mode (i.e. a guest or the | 242 | * below) when the HVC instruction is called from SVC mode (i.e. a guest or the |
240 | * host kernel) and they cause a trap to the vector page + offset 0xc when HVC | 243 | * host kernel) and they cause a trap to the vector page + offset 0x8 when HVC |
241 | * instructions are called from within Hyp-mode. | 244 | * instructions are called from within Hyp-mode. |
242 | * | 245 | * |
243 | * Hyp-ABI: Calling HYP-mode functions from host (in SVC mode): | 246 | * Hyp-ABI: Calling HYP-mode functions from host (in SVC mode): |
diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c index 98a870ff1a5c..72a12f2171b2 100644 --- a/arch/arm/kvm/mmio.c +++ b/arch/arm/kvm/mmio.c | |||
@@ -33,16 +33,16 @@ | |||
33 | */ | 33 | */ |
34 | int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run) | 34 | int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run) |
35 | { | 35 | { |
36 | __u32 *dest; | 36 | unsigned long *dest; |
37 | unsigned int len; | 37 | unsigned int len; |
38 | int mask; | 38 | int mask; |
39 | 39 | ||
40 | if (!run->mmio.is_write) { | 40 | if (!run->mmio.is_write) { |
41 | dest = vcpu_reg(vcpu, vcpu->arch.mmio_decode.rt); | 41 | dest = vcpu_reg(vcpu, vcpu->arch.mmio_decode.rt); |
42 | memset(dest, 0, sizeof(int)); | 42 | *dest = 0; |
43 | 43 | ||
44 | len = run->mmio.len; | 44 | len = run->mmio.len; |
45 | if (len > 4) | 45 | if (len > sizeof(unsigned long)) |
46 | return -EINVAL; | 46 | return -EINVAL; |
47 | 47 | ||
48 | memcpy(dest, run->mmio.data, len); | 48 | memcpy(dest, run->mmio.data, len); |
@@ -50,7 +50,8 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
50 | trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr, | 50 | trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr, |
51 | *((u64 *)run->mmio.data)); | 51 | *((u64 *)run->mmio.data)); |
52 | 52 | ||
53 | if (vcpu->arch.mmio_decode.sign_extend && len < 4) { | 53 | if (vcpu->arch.mmio_decode.sign_extend && |
54 | len < sizeof(unsigned long)) { | ||
54 | mask = 1U << ((len * 8) - 1); | 55 | mask = 1U << ((len * 8) - 1); |
55 | *dest = (*dest ^ mask) - mask; | 56 | *dest = (*dest ^ mask) - mask; |
56 | } | 57 | } |
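
Editor's note: the rewritten read path above widens MMIO destinations to unsigned long but keeps the branch-free sign-extension idiom, where mask selects the sign bit of an len-byte value and (v ^ mask) - mask propagates it upwards. A hedged, standalone illustration:

#include <stdio.h>

/* Sign-extend the low 'len' bytes of 'v', mirroring the
 * (*dest ^ mask) - mask idiom in the hunk above. */
static unsigned long sign_extend_bytes(unsigned long v, unsigned int len)
{
	unsigned long mask = 1UL << (len * 8 - 1);
	return (v ^ mask) - mask;
}

int main(void)
{
	/* A one-byte MMIO read of 0xff becomes -1 once sign-extended,
	 * while 0x7f stays positive. */
	printf("%ld\n", (long)sign_extend_bytes(0xff, 1));	/* -1  */
	printf("%ld\n", (long)sign_extend_bytes(0x7f, 1));	/* 127 */
	return 0;
}
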
@@ -65,40 +66,29 @@ static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, | |||
65 | unsigned long rt, len; | 66 | unsigned long rt, len; |
66 | bool is_write, sign_extend; | 67 | bool is_write, sign_extend; |
67 | 68 | ||
68 | if ((vcpu->arch.hsr >> 8) & 1) { | 69 | if (kvm_vcpu_dabt_isextabt(vcpu)) { |
69 | /* cache operation on I/O addr, tell guest unsupported */ | 70 | /* cache operation on I/O addr, tell guest unsupported */ |
70 | kvm_inject_dabt(vcpu, vcpu->arch.hxfar); | 71 | kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu)); |
71 | return 1; | 72 | return 1; |
72 | } | 73 | } |
73 | 74 | ||
74 | if ((vcpu->arch.hsr >> 7) & 1) { | 75 | if (kvm_vcpu_dabt_iss1tw(vcpu)) { |
75 | /* page table accesses IO mem: tell guest to fix its TTBR */ | 76 | /* page table accesses IO mem: tell guest to fix its TTBR */ |
76 | kvm_inject_dabt(vcpu, vcpu->arch.hxfar); | 77 | kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu)); |
77 | return 1; | 78 | return 1; |
78 | } | 79 | } |
79 | 80 | ||
80 | switch ((vcpu->arch.hsr >> 22) & 0x3) { | 81 | len = kvm_vcpu_dabt_get_as(vcpu); |
81 | case 0: | 82 | if (unlikely(len < 0)) |
82 | len = 1; | 83 | return len; |
83 | break; | ||
84 | case 1: | ||
85 | len = 2; | ||
86 | break; | ||
87 | case 2: | ||
88 | len = 4; | ||
89 | break; | ||
90 | default: | ||
91 | kvm_err("Hardware is weird: SAS 0b11 is reserved\n"); | ||
92 | return -EFAULT; | ||
93 | } | ||
94 | 84 | ||
95 | is_write = vcpu->arch.hsr & HSR_WNR; | 85 | is_write = kvm_vcpu_dabt_iswrite(vcpu); |
96 | sign_extend = vcpu->arch.hsr & HSR_SSE; | 86 | sign_extend = kvm_vcpu_dabt_issext(vcpu); |
97 | rt = (vcpu->arch.hsr & HSR_SRT_MASK) >> HSR_SRT_SHIFT; | 87 | rt = kvm_vcpu_dabt_get_rd(vcpu); |
98 | 88 | ||
99 | if (kvm_vcpu_reg_is_pc(vcpu, rt)) { | 89 | if (kvm_vcpu_reg_is_pc(vcpu, rt)) { |
100 | /* IO memory trying to read/write pc */ | 90 | /* IO memory trying to read/write pc */ |
101 | kvm_inject_pabt(vcpu, vcpu->arch.hxfar); | 91 | kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu)); |
102 | return 1; | 92 | return 1; |
103 | } | 93 | } |
104 | 94 | ||
@@ -112,7 +102,7 @@ static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, | |||
112 | * The MMIO instruction is emulated and should not be re-executed | 102 | * The MMIO instruction is emulated and should not be re-executed |
113 | * in the guest. | 103 | * in the guest. |
114 | */ | 104 | */ |
115 | kvm_skip_instr(vcpu, (vcpu->arch.hsr >> 25) & 1); | 105 | kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); |
116 | return 0; | 106 | return 0; |
117 | } | 107 | } |
118 | 108 | ||
@@ -130,7 +120,7 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run, | |||
130 | * space do its magic. | 120 | * space do its magic. |
131 | */ | 121 | */ |
132 | 122 | ||
133 | if (vcpu->arch.hsr & HSR_ISV) { | 123 | if (kvm_vcpu_dabt_isvalid(vcpu)) { |
134 | ret = decode_hsr(vcpu, fault_ipa, &mmio); | 124 | ret = decode_hsr(vcpu, fault_ipa, &mmio); |
135 | if (ret) | 125 | if (ret) |
136 | return ret; | 126 | return ret; |
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index 99e07c7dd745..2f12e4056408 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c | |||
@@ -20,7 +20,6 @@ | |||
20 | #include <linux/kvm_host.h> | 20 | #include <linux/kvm_host.h> |
21 | #include <linux/io.h> | 21 | #include <linux/io.h> |
22 | #include <trace/events/kvm.h> | 22 | #include <trace/events/kvm.h> |
23 | #include <asm/idmap.h> | ||
24 | #include <asm/pgalloc.h> | 23 | #include <asm/pgalloc.h> |
25 | #include <asm/cacheflush.h> | 24 | #include <asm/cacheflush.h> |
26 | #include <asm/kvm_arm.h> | 25 | #include <asm/kvm_arm.h> |
@@ -28,8 +27,6 @@ | |||
28 | #include <asm/kvm_mmio.h> | 27 | #include <asm/kvm_mmio.h> |
29 | #include <asm/kvm_asm.h> | 28 | #include <asm/kvm_asm.h> |
30 | #include <asm/kvm_emulate.h> | 29 | #include <asm/kvm_emulate.h> |
31 | #include <asm/mach/map.h> | ||
32 | #include <trace/events/kvm.h> | ||
33 | 30 | ||
34 | #include "trace.h" | 31 | #include "trace.h" |
35 | 32 | ||
@@ -37,19 +34,9 @@ extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[]; | |||
37 | 34 | ||
38 | static DEFINE_MUTEX(kvm_hyp_pgd_mutex); | 35 | static DEFINE_MUTEX(kvm_hyp_pgd_mutex); |
39 | 36 | ||
40 | static void kvm_tlb_flush_vmid(struct kvm *kvm) | 37 | static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa) |
41 | { | 38 | { |
42 | kvm_call_hyp(__kvm_tlb_flush_vmid, kvm); | 39 | kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa); |
43 | } | ||
44 | |||
45 | static void kvm_set_pte(pte_t *pte, pte_t new_pte) | ||
46 | { | ||
47 | pte_val(*pte) = new_pte; | ||
48 | /* | ||
49 | * flush_pmd_entry just takes a void pointer and cleans the necessary | ||
50 | * cache entries, so we can reuse the function for ptes. | ||
51 | */ | ||
52 | flush_pmd_entry(pte); | ||
53 | } | 40 | } |
54 | 41 | ||
55 | static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, | 42 | static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, |
@@ -98,33 +85,42 @@ static void free_ptes(pmd_t *pmd, unsigned long addr) | |||
98 | } | 85 | } |
99 | } | 86 | } |
100 | 87 | ||
88 | static void free_hyp_pgd_entry(unsigned long addr) | ||
89 | { | ||
90 | pgd_t *pgd; | ||
91 | pud_t *pud; | ||
92 | pmd_t *pmd; | ||
93 | unsigned long hyp_addr = KERN_TO_HYP(addr); | ||
94 | |||
95 | pgd = hyp_pgd + pgd_index(hyp_addr); | ||
96 | pud = pud_offset(pgd, hyp_addr); | ||
97 | |||
98 | if (pud_none(*pud)) | ||
99 | return; | ||
100 | BUG_ON(pud_bad(*pud)); | ||
101 | |||
102 | pmd = pmd_offset(pud, hyp_addr); | ||
103 | free_ptes(pmd, addr); | ||
104 | pmd_free(NULL, pmd); | ||
105 | pud_clear(pud); | ||
106 | } | ||
107 | |||
101 | /** | 108 | /** |
102 | * free_hyp_pmds - free a Hyp-mode level-2 tables and child level-3 tables | 109 | * free_hyp_pmds - free a Hyp-mode level-2 tables and child level-3 tables |
103 | * | 110 | * |
104 | * Assumes this is a page table used strictly in Hyp-mode and therefore contains | 111 | * Assumes this is a page table used strictly in Hyp-mode and therefore contains |
105 | * only mappings in the kernel memory area, which is above PAGE_OFFSET. | 112 | * either mappings in the kernel memory area (above PAGE_OFFSET), or |
113 | * device mappings in the vmalloc range (from VMALLOC_START to VMALLOC_END). | ||
106 | */ | 114 | */ |
107 | void free_hyp_pmds(void) | 115 | void free_hyp_pmds(void) |
108 | { | 116 | { |
109 | pgd_t *pgd; | ||
110 | pud_t *pud; | ||
111 | pmd_t *pmd; | ||
112 | unsigned long addr; | 117 | unsigned long addr; |
113 | 118 | ||
114 | mutex_lock(&kvm_hyp_pgd_mutex); | 119 | mutex_lock(&kvm_hyp_pgd_mutex); |
115 | for (addr = PAGE_OFFSET; addr != 0; addr += PGDIR_SIZE) { | 120 | for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE) |
116 | pgd = hyp_pgd + pgd_index(addr); | 121 | free_hyp_pgd_entry(addr); |
117 | pud = pud_offset(pgd, addr); | 122 | for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE) |
118 | 123 | free_hyp_pgd_entry(addr); | |
119 | if (pud_none(*pud)) | ||
120 | continue; | ||
121 | BUG_ON(pud_bad(*pud)); | ||
122 | |||
123 | pmd = pmd_offset(pud, addr); | ||
124 | free_ptes(pmd, addr); | ||
125 | pmd_free(NULL, pmd); | ||
126 | pud_clear(pud); | ||
127 | } | ||
128 | mutex_unlock(&kvm_hyp_pgd_mutex); | 124 | mutex_unlock(&kvm_hyp_pgd_mutex); |
129 | } | 125 | } |
130 | 126 | ||
@@ -136,7 +132,9 @@ static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start, | |||
136 | struct page *page; | 132 | struct page *page; |
137 | 133 | ||
138 | for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) { | 134 | for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) { |
139 | pte = pte_offset_kernel(pmd, addr); | 135 | unsigned long hyp_addr = KERN_TO_HYP(addr); |
136 | |||
137 | pte = pte_offset_kernel(pmd, hyp_addr); | ||
140 | BUG_ON(!virt_addr_valid(addr)); | 138 | BUG_ON(!virt_addr_valid(addr)); |
141 | page = virt_to_page(addr); | 139 | page = virt_to_page(addr); |
142 | kvm_set_pte(pte, mk_pte(page, PAGE_HYP)); | 140 | kvm_set_pte(pte, mk_pte(page, PAGE_HYP)); |
@@ -151,7 +149,9 @@ static void create_hyp_io_pte_mappings(pmd_t *pmd, unsigned long start, | |||
151 | unsigned long addr; | 149 | unsigned long addr; |
152 | 150 | ||
153 | for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) { | 151 | for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) { |
154 | pte = pte_offset_kernel(pmd, addr); | 152 | unsigned long hyp_addr = KERN_TO_HYP(addr); |
153 | |||
154 | pte = pte_offset_kernel(pmd, hyp_addr); | ||
155 | BUG_ON(pfn_valid(*pfn_base)); | 155 | BUG_ON(pfn_valid(*pfn_base)); |
156 | kvm_set_pte(pte, pfn_pte(*pfn_base, PAGE_HYP_DEVICE)); | 156 | kvm_set_pte(pte, pfn_pte(*pfn_base, PAGE_HYP_DEVICE)); |
157 | (*pfn_base)++; | 157 | (*pfn_base)++; |
@@ -166,12 +166,13 @@ static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start, | |||
166 | unsigned long addr, next; | 166 | unsigned long addr, next; |
167 | 167 | ||
168 | for (addr = start; addr < end; addr = next) { | 168 | for (addr = start; addr < end; addr = next) { |
169 | pmd = pmd_offset(pud, addr); | 169 | unsigned long hyp_addr = KERN_TO_HYP(addr); |
170 | pmd = pmd_offset(pud, hyp_addr); | ||
170 | 171 | ||
171 | BUG_ON(pmd_sect(*pmd)); | 172 | BUG_ON(pmd_sect(*pmd)); |
172 | 173 | ||
173 | if (pmd_none(*pmd)) { | 174 | if (pmd_none(*pmd)) { |
174 | pte = pte_alloc_one_kernel(NULL, addr); | 175 | pte = pte_alloc_one_kernel(NULL, hyp_addr); |
175 | if (!pte) { | 176 | if (!pte) { |
176 | kvm_err("Cannot allocate Hyp pte\n"); | 177 | kvm_err("Cannot allocate Hyp pte\n"); |
177 | return -ENOMEM; | 178 | return -ENOMEM; |
@@ -206,17 +207,23 @@ static int __create_hyp_mappings(void *from, void *to, unsigned long *pfn_base) | |||
206 | unsigned long addr, next; | 207 | unsigned long addr, next; |
207 | int err = 0; | 208 | int err = 0; |
208 | 209 | ||
209 | BUG_ON(start > end); | 210 | if (start >= end) |
210 | if (start < PAGE_OFFSET) | 211 | return -EINVAL; |
212 | /* Check for a valid kernel memory mapping */ | ||
213 | if (!pfn_base && (!virt_addr_valid(from) || !virt_addr_valid(to - 1))) | ||
214 | return -EINVAL; | ||
215 | /* Check for a valid kernel IO mapping */ | ||
216 | if (pfn_base && (!is_vmalloc_addr(from) || !is_vmalloc_addr(to - 1))) | ||
211 | return -EINVAL; | 217 | return -EINVAL; |
212 | 218 | ||
213 | mutex_lock(&kvm_hyp_pgd_mutex); | 219 | mutex_lock(&kvm_hyp_pgd_mutex); |
214 | for (addr = start; addr < end; addr = next) { | 220 | for (addr = start; addr < end; addr = next) { |
215 | pgd = hyp_pgd + pgd_index(addr); | 221 | unsigned long hyp_addr = KERN_TO_HYP(addr); |
216 | pud = pud_offset(pgd, addr); | 222 | pgd = hyp_pgd + pgd_index(hyp_addr); |
223 | pud = pud_offset(pgd, hyp_addr); | ||
217 | 224 | ||
218 | if (pud_none_or_clear_bad(pud)) { | 225 | if (pud_none_or_clear_bad(pud)) { |
219 | pmd = pmd_alloc_one(NULL, addr); | 226 | pmd = pmd_alloc_one(NULL, hyp_addr); |
220 | if (!pmd) { | 227 | if (!pmd) { |
221 | kvm_err("Cannot allocate Hyp pmd\n"); | 228 | kvm_err("Cannot allocate Hyp pmd\n"); |
222 | err = -ENOMEM; | 229 | err = -ENOMEM; |
@@ -236,12 +243,13 @@ out: | |||
236 | } | 243 | } |
237 | 244 | ||
238 | /** | 245 | /** |
239 | * create_hyp_mappings - map a kernel virtual address range in Hyp mode | 246 | * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode |
240 | * @from: The virtual kernel start address of the range | 247 | * @from: The virtual kernel start address of the range |
241 | * @to: The virtual kernel end address of the range (exclusive) | 248 | * @to: The virtual kernel end address of the range (exclusive) |
242 | * | 249 | * |
243 | * The same virtual address as the kernel virtual address is also used in | 250 | * The same virtual address as the kernel virtual address is also used |
244 | * Hyp-mode mapping to the same underlying physical pages. | 251 | * in Hyp-mode mapping (modulo HYP_PAGE_OFFSET) to the same underlying |
252 | * physical pages. | ||
245 | * | 253 | * |
246 | * Note: Wrapping around zero in the "to" address is not supported. | 254 | * Note: Wrapping around zero in the "to" address is not supported. |
247 | */ | 255 | */ |
@@ -251,10 +259,13 @@ int create_hyp_mappings(void *from, void *to) | |||
251 | } | 259 | } |
252 | 260 | ||
253 | /** | 261 | /** |
254 | * create_hyp_io_mappings - map a physical IO range in Hyp mode | 262 | * create_hyp_io_mappings - duplicate a kernel IO mapping into Hyp mode |
255 | * @from: The virtual HYP start address of the range | 263 | * @from: The kernel start VA of the range |
256 | * @to: The virtual HYP end address of the range (exclusive) | 264 | * @to: The kernel end VA of the range (exclusive) |
257 | * @addr: The physical start address which gets mapped | 265 | * @addr: The physical start address which gets mapped |
266 | * | ||
267 | * The resulting HYP VA is the same as the kernel VA, modulo | ||
268 | * HYP_PAGE_OFFSET. | ||
258 | */ | 269 | */ |
259 | int create_hyp_io_mappings(void *from, void *to, phys_addr_t addr) | 270 | int create_hyp_io_mappings(void *from, void *to, phys_addr_t addr) |
260 | { | 271 | { |
@@ -290,7 +301,7 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm) | |||
290 | VM_BUG_ON((unsigned long)pgd & (S2_PGD_SIZE - 1)); | 301 | VM_BUG_ON((unsigned long)pgd & (S2_PGD_SIZE - 1)); |
291 | 302 | ||
292 | memset(pgd, 0, PTRS_PER_S2_PGD * sizeof(pgd_t)); | 303 | memset(pgd, 0, PTRS_PER_S2_PGD * sizeof(pgd_t)); |
293 | clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t)); | 304 | kvm_clean_pgd(pgd); |
294 | kvm->arch.pgd = pgd; | 305 | kvm->arch.pgd = pgd; |
295 | 306 | ||
296 | return 0; | 307 | return 0; |
@@ -422,22 +433,22 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, | |||
422 | return 0; /* ignore calls from kvm_set_spte_hva */ | 433 | return 0; /* ignore calls from kvm_set_spte_hva */ |
423 | pmd = mmu_memory_cache_alloc(cache); | 434 | pmd = mmu_memory_cache_alloc(cache); |
424 | pud_populate(NULL, pud, pmd); | 435 | pud_populate(NULL, pud, pmd); |
425 | pmd += pmd_index(addr); | ||
426 | get_page(virt_to_page(pud)); | 436 | get_page(virt_to_page(pud)); |
427 | } else | 437 | } |
428 | pmd = pmd_offset(pud, addr); | 438 | |
439 | pmd = pmd_offset(pud, addr); | ||
429 | 440 | ||
430 | /* Create 2nd stage page table mapping - Level 2 */ | 441 | /* Create 2nd stage page table mapping - Level 2 */ |
431 | if (pmd_none(*pmd)) { | 442 | if (pmd_none(*pmd)) { |
432 | if (!cache) | 443 | if (!cache) |
433 | return 0; /* ignore calls from kvm_set_spte_hva */ | 444 | return 0; /* ignore calls from kvm_set_spte_hva */ |
434 | pte = mmu_memory_cache_alloc(cache); | 445 | pte = mmu_memory_cache_alloc(cache); |
435 | clean_pte_table(pte); | 446 | kvm_clean_pte(pte); |
436 | pmd_populate_kernel(NULL, pmd, pte); | 447 | pmd_populate_kernel(NULL, pmd, pte); |
437 | pte += pte_index(addr); | ||
438 | get_page(virt_to_page(pmd)); | 448 | get_page(virt_to_page(pmd)); |
439 | } else | 449 | } |
440 | pte = pte_offset_kernel(pmd, addr); | 450 | |
451 | pte = pte_offset_kernel(pmd, addr); | ||
441 | 452 | ||
442 | if (iomap && pte_present(*pte)) | 453 | if (iomap && pte_present(*pte)) |
443 | return -EFAULT; | 454 | return -EFAULT; |
@@ -446,7 +457,7 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, | |||
446 | old_pte = *pte; | 457 | old_pte = *pte; |
447 | kvm_set_pte(pte, *new_pte); | 458 | kvm_set_pte(pte, *new_pte); |
448 | if (pte_present(old_pte)) | 459 | if (pte_present(old_pte)) |
449 | kvm_tlb_flush_vmid(kvm); | 460 | kvm_tlb_flush_vmid_ipa(kvm, addr); |
450 | else | 461 | else |
451 | get_page(virt_to_page(pte)); | 462 | get_page(virt_to_page(pte)); |
452 | 463 | ||
@@ -473,7 +484,8 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa, | |||
473 | pfn = __phys_to_pfn(pa); | 484 | pfn = __phys_to_pfn(pa); |
474 | 485 | ||
475 | for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) { | 486 | for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) { |
476 | pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE | L_PTE_S2_RDWR); | 487 | pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE); |
488 | kvm_set_s2pte_writable(&pte); | ||
477 | 489 | ||
478 | ret = mmu_topup_memory_cache(&cache, 2, 2); | 490 | ret = mmu_topup_memory_cache(&cache, 2, 2); |
479 | if (ret) | 491 | if (ret) |
@@ -492,29 +504,6 @@ out: | |||
492 | return ret; | 504 | return ret; |
493 | } | 505 | } |
494 | 506 | ||
495 | static void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn) | ||
496 | { | ||
497 | /* | ||
498 | * If we are going to insert an instruction page and the icache is | ||
499 | * either VIPT or PIPT, there is a potential problem where the host | ||
500 | * (or another VM) may have used the same page as this guest, and we | ||
501 | * read incorrect data from the icache. If we're using a PIPT cache, | ||
502 | * we can invalidate just that page, but if we are using a VIPT cache | ||
503 | * we need to invalidate the entire icache - damn shame - as written | ||
504 | * in the ARM ARM (DDI 0406C.b - Page B3-1393). | ||
505 | * | ||
506 | * VIVT caches are tagged using both the ASID and the VMID and doesn't | ||
507 | * need any kind of flushing (DDI 0406C.b - Page B3-1392). | ||
508 | */ | ||
509 | if (icache_is_pipt()) { | ||
510 | unsigned long hva = gfn_to_hva(kvm, gfn); | ||
511 | __cpuc_coherent_user_range(hva, hva + PAGE_SIZE); | ||
512 | } else if (!icache_is_vivt_asid_tagged()) { | ||
513 | /* any kind of VIPT cache */ | ||
514 | __flush_icache_all(); | ||
515 | } | ||
516 | } | ||
517 | |||
518 | static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, | 507 | static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, |
519 | gfn_t gfn, struct kvm_memory_slot *memslot, | 508 | gfn_t gfn, struct kvm_memory_slot *memslot, |
520 | unsigned long fault_status) | 509 | unsigned long fault_status) |
@@ -526,7 +515,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, | |||
526 | unsigned long mmu_seq; | 515 | unsigned long mmu_seq; |
527 | struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache; | 516 | struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache; |
528 | 517 | ||
529 | write_fault = kvm_is_write_fault(vcpu->arch.hsr); | 518 | write_fault = kvm_is_write_fault(kvm_vcpu_get_hsr(vcpu)); |
530 | if (fault_status == FSC_PERM && !write_fault) { | 519 | if (fault_status == FSC_PERM && !write_fault) { |
531 | kvm_err("Unexpected L2 read permission error\n"); | 520 | kvm_err("Unexpected L2 read permission error\n"); |
532 | return -EFAULT; | 521 | return -EFAULT; |
@@ -560,7 +549,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, | |||
560 | if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) | 549 | if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) |
561 | goto out_unlock; | 550 | goto out_unlock; |
562 | if (writable) { | 551 | if (writable) { |
563 | pte_val(new_pte) |= L_PTE_S2_RDWR; | 552 | kvm_set_s2pte_writable(&new_pte); |
564 | kvm_set_pfn_dirty(pfn); | 553 | kvm_set_pfn_dirty(pfn); |
565 | } | 554 | } |
566 | stage2_set_pte(vcpu->kvm, memcache, fault_ipa, &new_pte, false); | 555 | stage2_set_pte(vcpu->kvm, memcache, fault_ipa, &new_pte, false); |
@@ -585,7 +574,6 @@ out_unlock: | |||
585 | */ | 574 | */ |
586 | int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run) | 575 | int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run) |
587 | { | 576 | { |
588 | unsigned long hsr_ec; | ||
589 | unsigned long fault_status; | 577 | unsigned long fault_status; |
590 | phys_addr_t fault_ipa; | 578 | phys_addr_t fault_ipa; |
591 | struct kvm_memory_slot *memslot; | 579 | struct kvm_memory_slot *memslot; |
@@ -593,18 +581,17 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
593 | gfn_t gfn; | 581 | gfn_t gfn; |
594 | int ret, idx; | 582 | int ret, idx; |
595 | 583 | ||
596 | hsr_ec = vcpu->arch.hsr >> HSR_EC_SHIFT; | 584 | is_iabt = kvm_vcpu_trap_is_iabt(vcpu); |
597 | is_iabt = (hsr_ec == HSR_EC_IABT); | 585 | fault_ipa = kvm_vcpu_get_fault_ipa(vcpu); |
598 | fault_ipa = ((phys_addr_t)vcpu->arch.hpfar & HPFAR_MASK) << 8; | ||
599 | 586 | ||
600 | trace_kvm_guest_fault(*vcpu_pc(vcpu), vcpu->arch.hsr, | 587 | trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu), |
601 | vcpu->arch.hxfar, fault_ipa); | 588 | kvm_vcpu_get_hfar(vcpu), fault_ipa); |
602 | 589 | ||
603 | /* Check the stage-2 fault is trans. fault or write fault */ | 590 | /* Check the stage-2 fault is trans. fault or write fault */ |
604 | fault_status = (vcpu->arch.hsr & HSR_FSC_TYPE); | 591 | fault_status = kvm_vcpu_trap_get_fault(vcpu); |
605 | if (fault_status != FSC_FAULT && fault_status != FSC_PERM) { | 592 | if (fault_status != FSC_FAULT && fault_status != FSC_PERM) { |
606 | kvm_err("Unsupported fault status: EC=%#lx DFCS=%#lx\n", | 593 | kvm_err("Unsupported fault status: EC=%#x DFCS=%#lx\n", |
607 | hsr_ec, fault_status); | 594 | kvm_vcpu_trap_get_class(vcpu), fault_status); |
608 | return -EFAULT; | 595 | return -EFAULT; |
609 | } | 596 | } |
610 | 597 | ||
@@ -614,7 +601,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
614 | if (!kvm_is_visible_gfn(vcpu->kvm, gfn)) { | 601 | if (!kvm_is_visible_gfn(vcpu->kvm, gfn)) { |
615 | if (is_iabt) { | 602 | if (is_iabt) { |
616 | /* Prefetch Abort on I/O address */ | 603 | /* Prefetch Abort on I/O address */ |
617 | kvm_inject_pabt(vcpu, vcpu->arch.hxfar); | 604 | kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu)); |
618 | ret = 1; | 605 | ret = 1; |
619 | goto out_unlock; | 606 | goto out_unlock; |
620 | } | 607 | } |
@@ -626,8 +613,13 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
626 | goto out_unlock; | 613 | goto out_unlock; |
627 | } | 614 | } |
628 | 615 | ||
629 | /* Adjust page offset */ | 616 | /* |
630 | fault_ipa |= vcpu->arch.hxfar & ~PAGE_MASK; | 617 | * The IPA is reported as [MAX:12], so we need to |
618 | * complement it with the bottom 12 bits from the | ||
619 | * faulting VA. This is always 12 bits, irrespective | ||
620 | * of the page size. | ||
621 | */ | ||
622 | fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1); | ||
631 | ret = io_mem_abort(vcpu, run, fault_ipa); | 623 | ret = io_mem_abort(vcpu, run, fault_ipa); |
632 | goto out_unlock; | 624 | goto out_unlock; |
633 | } | 625 | } |
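
Editor's note: the expanded comment above explains that the reported IPA only covers bits [MAX:12], so the low 12 bits are taken from the faulting VA regardless of page size. A hedged, standalone sketch of the composition, using the HPFAR layout visible in the line removed earlier in this file (HPFAR[31:4] holds IPA[39:12]; the register values below are made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t hpfar = 0x00801230;	/* made-up HPFAR value */
	uint32_t hfar  = 0x40000abc;	/* made-up faulting VA */

	/* HPFAR[31:4] holds IPA[39:12]; shifting left by 8 restores the
	 * page-aligned IPA, and the page offset is always VA[11:0]. */
	uint64_t fault_ipa = ((uint64_t)(hpfar & ~0xfu)) << 8;
	fault_ipa |= hfar & ((1 << 12) - 1);

	printf("fault_ipa = %#llx\n", (unsigned long long)fault_ipa);
	return 0;
}
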
@@ -682,7 +674,7 @@ static void handle_hva_to_gpa(struct kvm *kvm, | |||
682 | static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data) | 674 | static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data) |
683 | { | 675 | { |
684 | unmap_stage2_range(kvm, gpa, PAGE_SIZE); | 676 | unmap_stage2_range(kvm, gpa, PAGE_SIZE); |
685 | kvm_tlb_flush_vmid(kvm); | 677 | kvm_tlb_flush_vmid_ipa(kvm, gpa); |
686 | } | 678 | } |
687 | 679 | ||
688 | int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) | 680 | int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) |
@@ -776,7 +768,7 @@ void kvm_clear_hyp_idmap(void) | |||
776 | pmd = pmd_offset(pud, addr); | 768 | pmd = pmd_offset(pud, addr); |
777 | 769 | ||
778 | pud_clear(pud); | 770 | pud_clear(pud); |
779 | clean_pmd_entry(pmd); | 771 | kvm_clean_pmd_entry(pmd); |
780 | pmd_free(NULL, (pmd_t *)((unsigned long)pmd & PAGE_MASK)); | 772 | pmd_free(NULL, (pmd_t *)((unsigned long)pmd & PAGE_MASK)); |
781 | } while (pgd++, addr = next, addr < end); | 773 | } while (pgd++, addr = next, addr < end); |
782 | } | 774 | } |
diff --git a/arch/arm/kvm/vgic.c b/arch/arm/kvm/vgic.c index 0e4cfe123b38..17c5ac7d10ed 100644 --- a/arch/arm/kvm/vgic.c +++ b/arch/arm/kvm/vgic.c | |||
@@ -1477,7 +1477,7 @@ int kvm_vgic_set_addr(struct kvm *kvm, unsigned long type, u64 addr) | |||
1477 | if (addr & ~KVM_PHYS_MASK) | 1477 | if (addr & ~KVM_PHYS_MASK) |
1478 | return -E2BIG; | 1478 | return -E2BIG; |
1479 | 1479 | ||
1480 | if (addr & ~PAGE_MASK) | 1480 | if (addr & (SZ_4K - 1)) |
1481 | return -EINVAL; | 1481 | return -EINVAL; |
1482 | 1482 | ||
1483 | mutex_lock(&kvm->lock); | 1483 | mutex_lock(&kvm->lock); |
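
Editor's note: the vgic.c change above swaps the PAGE_MASK test for SZ_4K - 1, presumably so the GIC base address is checked against its architectural 4K alignment rather than the host kernel's page size. A hedged, standalone illustration of the difference (SZ_4K is simply 4096; the addresses are made up):

#include <stdio.h>

int main(void)
{
	unsigned long addr = 0x2c002000;	/* made-up GIC base address */
	unsigned long host_page_size = 65536;	/* e.g. a 64K-page host */

	/* Page-size-relative check: would reject a 4K-aligned address on
	 * a 64K-page kernel even though the hardware accepts it. */
	printf("page-aligned: %s\n",
	       (addr & (host_page_size - 1)) ? "no" : "yes");

	/* Architectural check, as used after the change above. */
	printf("4K-aligned:   %s\n", (addr & (4096 - 1)) ? "no" : "yes");
	return 0;
}
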
diff --git a/arch/arm/mach-exynos/hotplug.c b/arch/arm/mach-exynos/hotplug.c index c3f825b27947..af90cfa2f826 100644 --- a/arch/arm/mach-exynos/hotplug.c +++ b/arch/arm/mach-exynos/hotplug.c | |||
@@ -28,7 +28,6 @@ static inline void cpu_enter_lowpower_a9(void) | |||
28 | { | 28 | { |
29 | unsigned int v; | 29 | unsigned int v; |
30 | 30 | ||
31 | flush_cache_all(); | ||
32 | asm volatile( | 31 | asm volatile( |
33 | " mcr p15, 0, %1, c7, c5, 0\n" | 32 | " mcr p15, 0, %1, c7, c5, 0\n" |
34 | " mcr p15, 0, %1, c7, c10, 4\n" | 33 | " mcr p15, 0, %1, c7, c10, 4\n" |
diff --git a/arch/arm/mach-exynos/mach-nuri.c b/arch/arm/mach-exynos/mach-nuri.c index ab920e34bd0a..2517406e7f56 100644 --- a/arch/arm/mach-exynos/mach-nuri.c +++ b/arch/arm/mach-exynos/mach-nuri.c | |||
@@ -1252,7 +1252,7 @@ static void __init nuri_camera_init(void) | |||
1252 | } | 1252 | } |
1253 | 1253 | ||
1254 | m5mols_board_info.irq = s5p_register_gpio_interrupt(GPIO_CAM_8M_ISP_INT); | 1254 | m5mols_board_info.irq = s5p_register_gpio_interrupt(GPIO_CAM_8M_ISP_INT); |
1255 | if (!IS_ERR_VALUE(m5mols_board_info.irq)) | 1255 | if (m5mols_board_info.irq >= 0) |
1256 | s3c_gpio_cfgpin(GPIO_CAM_8M_ISP_INT, S3C_GPIO_SFN(0xF)); | 1256 | s3c_gpio_cfgpin(GPIO_CAM_8M_ISP_INT, S3C_GPIO_SFN(0xF)); |
1257 | else | 1257 | else |
1258 | pr_err("%s: Failed to configure 8M_ISP_INT GPIO\n", __func__); | 1258 | pr_err("%s: Failed to configure 8M_ISP_INT GPIO\n", __func__); |
diff --git a/arch/arm/mach-highbank/hotplug.c b/arch/arm/mach-highbank/hotplug.c index 890cae23c12a..a019e4e86e51 100644 --- a/arch/arm/mach-highbank/hotplug.c +++ b/arch/arm/mach-highbank/hotplug.c | |||
@@ -14,7 +14,6 @@ | |||
14 | * this program. If not, see <http://www.gnu.org/licenses/>. | 14 | * this program. If not, see <http://www.gnu.org/licenses/>. |
15 | */ | 15 | */ |
16 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
17 | |||
18 | #include <asm/cacheflush.h> | 17 | #include <asm/cacheflush.h> |
19 | 18 | ||
20 | #include "core.h" | 19 | #include "core.h" |
diff --git a/arch/arm/mach-imx/devices/devices.c b/arch/arm/mach-imx/devices/devices.c index 1b37482407f9..1b4366a0e7c0 100644 --- a/arch/arm/mach-imx/devices/devices.c +++ b/arch/arm/mach-imx/devices/devices.c | |||
@@ -37,7 +37,7 @@ int __init mxc_device_init(void) | |||
37 | int ret; | 37 | int ret; |
38 | 38 | ||
39 | ret = device_register(&mxc_aips_bus); | 39 | ret = device_register(&mxc_aips_bus); |
40 | if (IS_ERR_VALUE(ret)) | 40 | if (ret < 0) |
41 | goto done; | 41 | goto done; |
42 | 42 | ||
43 | ret = device_register(&mxc_ahb_bus); | 43 | ret = device_register(&mxc_ahb_bus); |
diff --git a/arch/arm/mach-imx/hotplug.c b/arch/arm/mach-imx/hotplug.c index 361a253e2b63..5e91112dcbee 100644 --- a/arch/arm/mach-imx/hotplug.c +++ b/arch/arm/mach-imx/hotplug.c | |||
@@ -11,7 +11,6 @@ | |||
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <linux/errno.h> | 13 | #include <linux/errno.h> |
14 | #include <asm/cacheflush.h> | ||
15 | #include <asm/cp15.h> | 14 | #include <asm/cp15.h> |
16 | 15 | ||
17 | #include "common.h" | 16 | #include "common.h" |
@@ -20,7 +19,6 @@ static inline void cpu_enter_lowpower(void) | |||
20 | { | 19 | { |
21 | unsigned int v; | 20 | unsigned int v; |
22 | 21 | ||
23 | flush_cache_all(); | ||
24 | asm volatile( | 22 | asm volatile( |
25 | "mcr p15, 0, %1, c7, c5, 0\n" | 23 | "mcr p15, 0, %1, c7, c5, 0\n" |
26 | " mcr p15, 0, %1, c7, c10, 4\n" | 24 | " mcr p15, 0, %1, c7, c10, 4\n" |
diff --git a/arch/arm/mach-integrator/integrator_ap.c b/arch/arm/mach-integrator/integrator_ap.c index ea961445e0e9..b23c8e4f28e8 100644 --- a/arch/arm/mach-integrator/integrator_ap.c +++ b/arch/arm/mach-integrator/integrator_ap.c | |||
@@ -536,16 +536,14 @@ static void __init ap_init_of(void) | |||
536 | 'A' + (ap_sc_id & 0x0f)); | 536 | 'A' + (ap_sc_id & 0x0f)); |
537 | 537 | ||
538 | soc_dev = soc_device_register(soc_dev_attr); | 538 | soc_dev = soc_device_register(soc_dev_attr); |
539 | if (IS_ERR_OR_NULL(soc_dev)) { | 539 | if (IS_ERR(soc_dev)) { |
540 | kfree(soc_dev_attr->revision); | 540 | kfree(soc_dev_attr->revision); |
541 | kfree(soc_dev_attr); | 541 | kfree(soc_dev_attr); |
542 | return; | 542 | return; |
543 | } | 543 | } |
544 | 544 | ||
545 | parent = soc_device_to_device(soc_dev); | 545 | parent = soc_device_to_device(soc_dev); |
546 | 546 | integrator_init_sysfs(parent, ap_sc_id); | |
547 | if (!IS_ERR_OR_NULL(parent)) | ||
548 | integrator_init_sysfs(parent, ap_sc_id); | ||
549 | 547 | ||
550 | of_platform_populate(root, of_default_bus_match_table, | 548 | of_platform_populate(root, of_default_bus_match_table, |
551 | ap_auxdata_lookup, parent); | 549 | ap_auxdata_lookup, parent); |
diff --git a/arch/arm/mach-integrator/integrator_cp.c b/arch/arm/mach-integrator/integrator_cp.c index 2b0db82a5381..da1091be0887 100644 --- a/arch/arm/mach-integrator/integrator_cp.c +++ b/arch/arm/mach-integrator/integrator_cp.c | |||
@@ -360,17 +360,14 @@ static void __init intcp_init_of(void) | |||
360 | 'A' + (intcp_sc_id & 0x0f)); | 360 | 'A' + (intcp_sc_id & 0x0f)); |
361 | 361 | ||
362 | soc_dev = soc_device_register(soc_dev_attr); | 362 | soc_dev = soc_device_register(soc_dev_attr); |
363 | if (IS_ERR_OR_NULL(soc_dev)) { | 363 | if (IS_ERR(soc_dev)) { |
364 | kfree(soc_dev_attr->revision); | 364 | kfree(soc_dev_attr->revision); |
365 | kfree(soc_dev_attr); | 365 | kfree(soc_dev_attr); |
366 | return; | 366 | return; |
367 | } | 367 | } |
368 | 368 | ||
369 | parent = soc_device_to_device(soc_dev); | 369 | parent = soc_device_to_device(soc_dev); |
370 | 370 | integrator_init_sysfs(parent, intcp_sc_id); | |
371 | if (!IS_ERR_OR_NULL(parent)) | ||
372 | integrator_init_sysfs(parent, intcp_sc_id); | ||
373 | |||
374 | of_platform_populate(root, of_default_bus_match_table, | 371 | of_platform_populate(root, of_default_bus_match_table, |
375 | intcp_auxdata_lookup, parent); | 372 | intcp_auxdata_lookup, parent); |
376 | } | 373 | } |
diff --git a/arch/arm/mach-msm/hotplug.c b/arch/arm/mach-msm/hotplug.c index 750446feb444..326a87261f9a 100644 --- a/arch/arm/mach-msm/hotplug.c +++ b/arch/arm/mach-msm/hotplug.c | |||
@@ -10,16 +10,12 @@ | |||
10 | #include <linux/errno.h> | 10 | #include <linux/errno.h> |
11 | #include <linux/smp.h> | 11 | #include <linux/smp.h> |
12 | 12 | ||
13 | #include <asm/cacheflush.h> | ||
14 | #include <asm/smp_plat.h> | 13 | #include <asm/smp_plat.h> |
15 | 14 | ||
16 | #include "common.h" | 15 | #include "common.h" |
17 | 16 | ||
18 | static inline void cpu_enter_lowpower(void) | 17 | static inline void cpu_enter_lowpower(void) |
19 | { | 18 | { |
20 | /* Just flush the cache. Changing the coherency is not yet | ||
21 | * available on msm. */ | ||
22 | flush_cache_all(); | ||
23 | } | 19 | } |
24 | 20 | ||
25 | static inline void cpu_leave_lowpower(void) | 21 | static inline void cpu_leave_lowpower(void) |
diff --git a/arch/arm/mach-omap2/board-omap3beagle.c b/arch/arm/mach-omap2/board-omap3beagle.c index 0ce91af753fa..fff141330a63 100644 --- a/arch/arm/mach-omap2/board-omap3beagle.c +++ b/arch/arm/mach-omap2/board-omap3beagle.c | |||
@@ -479,7 +479,7 @@ static int __init beagle_opp_init(void) | |||
479 | 479 | ||
480 | /* Initialize the omap3 opp table if not already created. */ | 480 | /* Initialize the omap3 opp table if not already created. */ |
481 | r = omap3_opp_init(); | 481 | r = omap3_opp_init(); |
482 | if (IS_ERR_VALUE(r) && (r != -EEXIST)) { | 482 | if (r < 0 && (r != -EEXIST)) { |
483 | pr_err("%s: opp default init failed\n", __func__); | 483 | pr_err("%s: opp default init failed\n", __func__); |
484 | return r; | 484 | return r; |
485 | } | 485 | } |
diff --git a/arch/arm/mach-omap2/clock.c b/arch/arm/mach-omap2/clock.c index 8474c7d228ee..0c38ca96c840 100644 --- a/arch/arm/mach-omap2/clock.c +++ b/arch/arm/mach-omap2/clock.c | |||
@@ -611,7 +611,7 @@ int __init omap2_clk_switch_mpurate_at_boot(const char *mpurate_ck_name) | |||
611 | return -ENOENT; | 611 | return -ENOENT; |
612 | 612 | ||
613 | r = clk_set_rate(mpurate_ck, mpurate); | 613 | r = clk_set_rate(mpurate_ck, mpurate); |
614 | if (IS_ERR_VALUE(r)) { | 614 | if (r < 0) { |
615 | WARN(1, "clock: %s: unable to set MPU rate to %d: %d\n", | 615 | WARN(1, "clock: %s: unable to set MPU rate to %d: %d\n", |
616 | mpurate_ck_name, mpurate, r); | 616 | mpurate_ck_name, mpurate, r); |
617 | clk_put(mpurate_ck); | 617 | clk_put(mpurate_ck); |
diff --git a/arch/arm/mach-omap2/gpmc-onenand.c b/arch/arm/mach-omap2/gpmc-onenand.c index fadd87435cd0..0d75889c0a6f 100644 --- a/arch/arm/mach-omap2/gpmc-onenand.c +++ b/arch/arm/mach-omap2/gpmc-onenand.c | |||
@@ -303,7 +303,7 @@ static int omap2_onenand_setup_async(void __iomem *onenand_base) | |||
303 | t = omap2_onenand_calc_async_timings(); | 303 | t = omap2_onenand_calc_async_timings(); |
304 | 304 | ||
305 | ret = gpmc_set_async_mode(gpmc_onenand_data->cs, &t); | 305 | ret = gpmc_set_async_mode(gpmc_onenand_data->cs, &t); |
306 | if (IS_ERR_VALUE(ret)) | 306 | if (ret < 0) |
307 | return ret; | 307 | return ret; |
308 | 308 | ||
309 | omap2_onenand_set_async_mode(onenand_base); | 309 | omap2_onenand_set_async_mode(onenand_base); |
@@ -325,7 +325,7 @@ static int omap2_onenand_setup_sync(void __iomem *onenand_base, int *freq_ptr) | |||
325 | t = omap2_onenand_calc_sync_timings(gpmc_onenand_data, freq); | 325 | t = omap2_onenand_calc_sync_timings(gpmc_onenand_data, freq); |
326 | 326 | ||
327 | ret = gpmc_set_sync_mode(gpmc_onenand_data->cs, &t); | 327 | ret = gpmc_set_sync_mode(gpmc_onenand_data->cs, &t); |
328 | if (IS_ERR_VALUE(ret)) | 328 | if (ret < 0) |
329 | return ret; | 329 | return ret; |
330 | 330 | ||
331 | set_onenand_cfg(onenand_base); | 331 | set_onenand_cfg(onenand_base); |
diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c index 410e1bac7815..6de31739b45c 100644 --- a/arch/arm/mach-omap2/gpmc.c +++ b/arch/arm/mach-omap2/gpmc.c | |||
@@ -716,7 +716,7 @@ static int gpmc_setup_irq(void) | |||
716 | return -EINVAL; | 716 | return -EINVAL; |
717 | 717 | ||
718 | gpmc_irq_start = irq_alloc_descs(-1, 0, GPMC_NR_IRQ, 0); | 718 | gpmc_irq_start = irq_alloc_descs(-1, 0, GPMC_NR_IRQ, 0); |
719 | if (IS_ERR_VALUE(gpmc_irq_start)) { | 719 | if (gpmc_irq_start < 0) { |
720 | pr_err("irq_alloc_descs failed\n"); | 720 | pr_err("irq_alloc_descs failed\n"); |
721 | return gpmc_irq_start; | 721 | return gpmc_irq_start; |
722 | } | 722 | } |
@@ -801,7 +801,7 @@ static int gpmc_mem_init(void) | |||
801 | continue; | 801 | continue; |
802 | gpmc_cs_get_memconf(cs, &base, &size); | 802 | gpmc_cs_get_memconf(cs, &base, &size); |
803 | rc = gpmc_cs_insert_mem(cs, base, size); | 803 | rc = gpmc_cs_insert_mem(cs, base, size); |
804 | if (IS_ERR_VALUE(rc)) { | 804 | if (rc < 0) { |
805 | while (--cs >= 0) | 805 | while (--cs >= 0) |
806 | if (gpmc_cs_mem_enabled(cs)) | 806 | if (gpmc_cs_mem_enabled(cs)) |
807 | gpmc_cs_delete_mem(cs); | 807 | gpmc_cs_delete_mem(cs); |
@@ -1370,14 +1370,14 @@ static int gpmc_probe(struct platform_device *pdev) | |||
1370 | GPMC_REVISION_MINOR(l)); | 1370 | GPMC_REVISION_MINOR(l)); |
1371 | 1371 | ||
1372 | rc = gpmc_mem_init(); | 1372 | rc = gpmc_mem_init(); |
1373 | if (IS_ERR_VALUE(rc)) { | 1373 | if (rc < 0) { |
1374 | clk_disable_unprepare(gpmc_l3_clk); | 1374 | clk_disable_unprepare(gpmc_l3_clk); |
1375 | clk_put(gpmc_l3_clk); | 1375 | clk_put(gpmc_l3_clk); |
1376 | dev_err(gpmc_dev, "failed to reserve memory\n"); | 1376 | dev_err(gpmc_dev, "failed to reserve memory\n"); |
1377 | return rc; | 1377 | return rc; |
1378 | } | 1378 | } |
1379 | 1379 | ||
1380 | if (IS_ERR_VALUE(gpmc_setup_irq())) | 1380 | if (gpmc_setup_irq() < 0) |
1381 | dev_warn(gpmc_dev, "gpmc_setup_irq failed\n"); | 1381 | dev_warn(gpmc_dev, "gpmc_setup_irq failed\n"); |
1382 | 1382 | ||
1383 | /* Now the GPMC is initialised, unreserve the chip-selects */ | 1383 | /* Now the GPMC is initialised, unreserve the chip-selects */ |
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c index 2fb17caa8683..0f4c18e6e60c 100644 --- a/arch/arm/mach-omap2/id.c +++ b/arch/arm/mach-omap2/id.c | |||
@@ -314,7 +314,7 @@ void __init omap3xxx_check_revision(void) | |||
314 | * If the processor type is Cortex-A8 and the revision is 0x0 | 314 | * If the processor type is Cortex-A8 and the revision is 0x0 |
315 | * it means its Cortex r0p0 which is 3430 ES1.0. | 315 | * it means its Cortex r0p0 which is 3430 ES1.0. |
316 | */ | 316 | */ |
317 | cpuid = read_cpuid(CPUID_ID); | 317 | cpuid = read_cpuid_id(); |
318 | if ((((cpuid >> 4) & 0xfff) == 0xc08) && ((cpuid & 0xf) == 0x0)) { | 318 | if ((((cpuid >> 4) & 0xfff) == 0xc08) && ((cpuid & 0xf) == 0x0)) { |
319 | omap_revision = OMAP3430_REV_ES1_0; | 319 | omap_revision = OMAP3430_REV_ES1_0; |
320 | cpu_rev = "1.0"; | 320 | cpu_rev = "1.0"; |
@@ -475,7 +475,7 @@ void __init omap4xxx_check_revision(void) | |||
475 | * Use ARM register to detect the correct ES version | 475 | * Use ARM register to detect the correct ES version |
476 | */ | 476 | */ |
477 | if (!rev && (hawkeye != 0xb94e) && (hawkeye != 0xb975)) { | 477 | if (!rev && (hawkeye != 0xb94e) && (hawkeye != 0xb975)) { |
478 | idcode = read_cpuid(CPUID_ID); | 478 | idcode = read_cpuid_id(); |
479 | rev = (idcode & 0xf) - 1; | 479 | rev = (idcode & 0xf) - 1; |
480 | } | 480 | } |
481 | 481 | ||
diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c index 61174b78dee6..2a551f997aea 100644 --- a/arch/arm/mach-omap2/omap-smp.c +++ b/arch/arm/mach-omap2/omap-smp.c | |||
@@ -174,7 +174,7 @@ static void __init omap4_smp_init_cpus(void) | |||
174 | unsigned int i = 0, ncores = 1, cpu_id; | 174 | unsigned int i = 0, ncores = 1, cpu_id; |
175 | 175 | ||
176 | /* Use ARM cpuid check here, as SoC detection will not work so early */ | 176 | /* Use ARM cpuid check here, as SoC detection will not work so early */ |
177 | cpu_id = read_cpuid(CPUID_ID) & CPU_MASK; | 177 | cpu_id = read_cpuid_id() & CPU_MASK; |
178 | if (cpu_id == CPU_CORTEX_A9) { | 178 | if (cpu_id == CPU_CORTEX_A9) { |
179 | /* | 179 | /* |
180 | * Currently we can't call ioremap here because | 180 | * Currently we can't call ioremap here because |
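The id.c and omap-smp.c hunks switch from open-coded read_cpuid(CPUID_ID) to read_cpuid_id(), the dedicated accessor for the main ID register that also works on configurations without direct CP15 access. A hedged sketch of the kind of part-number check built on it (the mask and value below are illustrative constants for a Cortex-A9 part number):

#include <asm/cputype.h>
#include <linux/types.h>

static bool example_is_cortex_a9(void)
{
        /* MIDR bits [15:4] hold the part number; 0xc09 is Cortex-A9. */
        return (read_cpuid_id() & 0x0000fff0) == 0x0000c090;
}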
diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c index 381be7ac0c17..eeea4fa28fbc 100644 --- a/arch/arm/mach-omap2/omap_device.c +++ b/arch/arm/mach-omap2/omap_device.c | |||
@@ -131,7 +131,7 @@ static int omap_device_build_from_dt(struct platform_device *pdev) | |||
131 | int oh_cnt, i, ret = 0; | 131 | int oh_cnt, i, ret = 0; |
132 | 132 | ||
133 | oh_cnt = of_property_count_strings(node, "ti,hwmods"); | 133 | oh_cnt = of_property_count_strings(node, "ti,hwmods"); |
134 | if (!oh_cnt || IS_ERR_VALUE(oh_cnt)) { | 134 | if (oh_cnt <= 0) { |
135 | dev_dbg(&pdev->dev, "No 'hwmods' to build omap_device\n"); | 135 | dev_dbg(&pdev->dev, "No 'hwmods' to build omap_device\n"); |
136 | return -ENODEV; | 136 | return -ENODEV; |
137 | } | 137 | } |
@@ -815,20 +815,17 @@ struct device *omap_device_get_by_hwmod_name(const char *oh_name) | |||
815 | } | 815 | } |
816 | 816 | ||
817 | oh = omap_hwmod_lookup(oh_name); | 817 | oh = omap_hwmod_lookup(oh_name); |
818 | if (IS_ERR_OR_NULL(oh)) { | 818 | if (!oh) { |
819 | WARN(1, "%s: no hwmod for %s\n", __func__, | 819 | WARN(1, "%s: no hwmod for %s\n", __func__, |
820 | oh_name); | 820 | oh_name); |
821 | return ERR_PTR(oh ? PTR_ERR(oh) : -ENODEV); | 821 | return ERR_PTR(-ENODEV); |
822 | } | 822 | } |
823 | if (IS_ERR_OR_NULL(oh->od)) { | 823 | if (!oh->od) { |
824 | WARN(1, "%s: no omap_device for %s\n", __func__, | 824 | WARN(1, "%s: no omap_device for %s\n", __func__, |
825 | oh_name); | 825 | oh_name); |
826 | return ERR_PTR(oh->od ? PTR_ERR(oh->od) : -ENODEV); | 826 | return ERR_PTR(-ENODEV); |
827 | } | 827 | } |
828 | 828 | ||
829 | if (IS_ERR_OR_NULL(oh->od->pdev)) | ||
830 | return ERR_PTR(oh->od->pdev ? PTR_ERR(oh->od->pdev) : -ENODEV); | ||
831 | |||
832 | return &oh->od->pdev->dev; | 829 | return &oh->od->pdev->dev; |
833 | } | 830 | } |
834 | 831 | ||
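In the omap_device hunk above, omap_hwmod_lookup() signals failure with NULL, never with an error pointer, so the IS_ERR_OR_NULL() tests collapse to plain NULL checks and ERR_PTR(-ENODEV) is produced only where the caller expects an error pointer. A hedged, generic sketch of that shape (the example_* names are hypothetical stand-ins, not kernel API):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>

struct example_obj {
        struct device *dev;
};

/* Hypothetical lookup that returns NULL on failure, like omap_hwmod_lookup(). */
static struct example_obj *example_lookup(const char *name);

static struct device *example_lookup_dev(const char *name)
{
        struct example_obj *obj = example_lookup(name);

        if (!obj || !obj->dev)
                return ERR_PTR(-ENODEV);
        return obj->dev;
}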
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index 9553c9907d40..93f213b6a784 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c | |||
@@ -1663,7 +1663,7 @@ static int _deassert_hardreset(struct omap_hwmod *oh, const char *name) | |||
1663 | return -ENOSYS; | 1663 | return -ENOSYS; |
1664 | 1664 | ||
1665 | ret = _lookup_hardreset(oh, name, &ohri); | 1665 | ret = _lookup_hardreset(oh, name, &ohri); |
1666 | if (IS_ERR_VALUE(ret)) | 1666 | if (ret < 0) |
1667 | return ret; | 1667 | return ret; |
1668 | 1668 | ||
1669 | if (oh->clkdm) { | 1669 | if (oh->clkdm) { |
@@ -2413,7 +2413,7 @@ static int __init _init(struct omap_hwmod *oh, void *data) | |||
2413 | _init_mpu_rt_base(oh, NULL); | 2413 | _init_mpu_rt_base(oh, NULL); |
2414 | 2414 | ||
2415 | r = _init_clocks(oh, NULL); | 2415 | r = _init_clocks(oh, NULL); |
2416 | if (IS_ERR_VALUE(r)) { | 2416 | if (r < 0) { |
2417 | WARN(1, "omap_hwmod: %s: couldn't init clocks\n", oh->name); | 2417 | WARN(1, "omap_hwmod: %s: couldn't init clocks\n", oh->name); |
2418 | return -EINVAL; | 2418 | return -EINVAL; |
2419 | } | 2419 | } |
diff --git a/arch/arm/mach-omap2/pm-debug.c b/arch/arm/mach-omap2/pm-debug.c index 1edd000a8143..0b339861d751 100644 --- a/arch/arm/mach-omap2/pm-debug.c +++ b/arch/arm/mach-omap2/pm-debug.c | |||
@@ -217,7 +217,7 @@ static int __init pwrdms_setup(struct powerdomain *pwrdm, void *dir) | |||
217 | return 0; | 217 | return 0; |
218 | 218 | ||
219 | d = debugfs_create_dir(pwrdm->name, (struct dentry *)dir); | 219 | d = debugfs_create_dir(pwrdm->name, (struct dentry *)dir); |
220 | if (!(IS_ERR_OR_NULL(d))) | 220 | if (d) |
221 | (void) debugfs_create_file("suspend", S_IRUGO|S_IWUSR, d, | 221 | (void) debugfs_create_file("suspend", S_IRUGO|S_IWUSR, d, |
222 | (void *)pwrdm, &pwrdm_suspend_fops); | 222 | (void *)pwrdm, &pwrdm_suspend_fops); |
223 | 223 | ||
@@ -261,8 +261,8 @@ static int __init pm_dbg_init(void) | |||
261 | return 0; | 261 | return 0; |
262 | 262 | ||
263 | d = debugfs_create_dir("pm_debug", NULL); | 263 | d = debugfs_create_dir("pm_debug", NULL); |
264 | if (IS_ERR_OR_NULL(d)) | 264 | if (!d) |
265 | return PTR_ERR(d); | 265 | return -EINVAL; |
266 | 266 | ||
267 | (void) debugfs_create_file("count", S_IRUGO, | 267 | (void) debugfs_create_file("count", S_IRUGO, |
268 | d, (void *)DEBUG_FILE_COUNTERS, &debug_fops); | 268 | d, (void *)DEBUG_FILE_COUNTERS, &debug_fops); |
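The pm-debug change above treats a NULL return from debugfs_create_dir() as the failure case; since that is not an error pointer, PTR_ERR() has nothing to decode and a fixed errno is returned instead. A minimal sketch of the same pattern under that assumption (directory name and counter are illustrative):

#include <linux/debugfs.h>
#include <linux/errno.h>
#include <linux/stat.h>

static u32 example_counter;

static int example_pm_dbg_init(void)
{
        struct dentry *d = debugfs_create_dir("pm_debug_example", NULL);

        if (!d)
                return -EINVAL;

        debugfs_create_u32("count", S_IRUGO, d, &example_counter);
        return 0;
}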
diff --git a/arch/arm/mach-omap2/powerdomain.c b/arch/arm/mach-omap2/powerdomain.c index 89cad4a605dd..86babd740d41 100644 --- a/arch/arm/mach-omap2/powerdomain.c +++ b/arch/arm/mach-omap2/powerdomain.c | |||
@@ -1180,7 +1180,7 @@ bool pwrdm_can_ever_lose_context(struct powerdomain *pwrdm) | |||
1180 | { | 1180 | { |
1181 | int i; | 1181 | int i; |
1182 | 1182 | ||
1183 | if (IS_ERR_OR_NULL(pwrdm)) { | 1183 | if (!pwrdm) { |
1184 | pr_debug("powerdomain: %s: invalid powerdomain pointer\n", | 1184 | pr_debug("powerdomain: %s: invalid powerdomain pointer\n", |
1185 | __func__); | 1185 | __func__); |
1186 | return 1; | 1186 | return 1; |
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c index ea6ea9aab092..63e6384fa72e 100644 --- a/arch/arm/mach-omap2/timer.c +++ b/arch/arm/mach-omap2/timer.c | |||
@@ -288,7 +288,7 @@ static int __init omap_dm_timer_init_one(struct omap_dm_timer *timer, | |||
288 | r = -EINVAL; | 288 | r = -EINVAL; |
289 | } else { | 289 | } else { |
290 | r = clk_set_parent(timer->fclk, src); | 290 | r = clk_set_parent(timer->fclk, src); |
291 | if (IS_ERR_VALUE(r)) | 291 | if (r < 0) |
292 | pr_warn("%s: %s cannot set source\n", | 292 | pr_warn("%s: %s cannot set source\n", |
293 | __func__, oh->name); | 293 | __func__, oh->name); |
294 | clk_put(src); | 294 | clk_put(src); |
diff --git a/arch/arm/mach-prima2/hotplug.c b/arch/arm/mach-prima2/hotplug.c index f4b17cbababd..0ab2f8bae28e 100644 --- a/arch/arm/mach-prima2/hotplug.c +++ b/arch/arm/mach-prima2/hotplug.c | |||
@@ -10,13 +10,10 @@ | |||
10 | #include <linux/errno.h> | 10 | #include <linux/errno.h> |
11 | #include <linux/smp.h> | 11 | #include <linux/smp.h> |
12 | 12 | ||
13 | #include <asm/cacheflush.h> | ||
14 | #include <asm/smp_plat.h> | 13 | #include <asm/smp_plat.h> |
15 | 14 | ||
16 | static inline void platform_do_lowpower(unsigned int cpu) | 15 | static inline void platform_do_lowpower(unsigned int cpu) |
17 | { | 16 | { |
18 | flush_cache_all(); | ||
19 | |||
20 | /* we put the platform to just WFI */ | 17 | /* we put the platform to just WFI */ |
21 | for (;;) { | 18 | for (;;) { |
22 | __asm__ __volatile__("dsb\n\t" "wfi\n\t" | 19 | __asm__ __volatile__("dsb\n\t" "wfi\n\t" |
diff --git a/arch/arm/mach-realview/hotplug.c b/arch/arm/mach-realview/hotplug.c index 53818e5cd3ad..ac22dd41b135 100644 --- a/arch/arm/mach-realview/hotplug.c +++ b/arch/arm/mach-realview/hotplug.c | |||
@@ -12,7 +12,6 @@ | |||
12 | #include <linux/errno.h> | 12 | #include <linux/errno.h> |
13 | #include <linux/smp.h> | 13 | #include <linux/smp.h> |
14 | 14 | ||
15 | #include <asm/cacheflush.h> | ||
16 | #include <asm/cp15.h> | 15 | #include <asm/cp15.h> |
17 | #include <asm/smp_plat.h> | 16 | #include <asm/smp_plat.h> |
18 | 17 | ||
@@ -20,7 +19,6 @@ static inline void cpu_enter_lowpower(void) | |||
20 | { | 19 | { |
21 | unsigned int v; | 20 | unsigned int v; |
22 | 21 | ||
23 | flush_cache_all(); | ||
24 | asm volatile( | 22 | asm volatile( |
25 | " mcr p15, 0, %1, c7, c5, 0\n" | 23 | " mcr p15, 0, %1, c7, c5, 0\n" |
26 | " mcr p15, 0, %1, c7, c10, 4\n" | 24 | " mcr p15, 0, %1, c7, c10, 4\n" |
diff --git a/arch/arm/mach-shmobile/smp-sh73a0.c b/arch/arm/mach-shmobile/smp-sh73a0.c index bf79626ee5a4..496592b6c763 100644 --- a/arch/arm/mach-shmobile/smp-sh73a0.c +++ b/arch/arm/mach-shmobile/smp-sh73a0.c | |||
@@ -104,14 +104,6 @@ static int sh73a0_cpu_kill(unsigned int cpu) | |||
104 | 104 | ||
105 | static void sh73a0_cpu_die(unsigned int cpu) | 105 | static void sh73a0_cpu_die(unsigned int cpu) |
106 | { | 106 | { |
107 | /* | ||
108 | * The ARM MPcore does not issue a cache coherency request for the L1 | ||
109 | * cache when powering off single CPUs. We must take care of this and | ||
110 | * further caches. | ||
111 | */ | ||
112 | dsb(); | ||
113 | flush_cache_all(); | ||
114 | |||
115 | /* Set power off mode. This takes the CPU out of the MP cluster */ | 107 | /* Set power off mode. This takes the CPU out of the MP cluster */ |
116 | scu_power_mode(shmobile_scu_base, SCU_PM_POWEROFF); | 108 | scu_power_mode(shmobile_scu_base, SCU_PM_POWEROFF); |
117 | 109 | ||
diff --git a/arch/arm/mach-spear/hotplug.c b/arch/arm/mach-spear/hotplug.c index a7d2dd11a4f2..d97749c642ce 100644 --- a/arch/arm/mach-spear/hotplug.c +++ b/arch/arm/mach-spear/hotplug.c | |||
@@ -13,7 +13,6 @@ | |||
13 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
14 | #include <linux/errno.h> | 14 | #include <linux/errno.h> |
15 | #include <linux/smp.h> | 15 | #include <linux/smp.h> |
16 | #include <asm/cacheflush.h> | ||
17 | #include <asm/cp15.h> | 16 | #include <asm/cp15.h> |
18 | #include <asm/smp_plat.h> | 17 | #include <asm/smp_plat.h> |
19 | 18 | ||
@@ -21,7 +20,6 @@ static inline void cpu_enter_lowpower(void) | |||
21 | { | 20 | { |
22 | unsigned int v; | 21 | unsigned int v; |
23 | 22 | ||
24 | flush_cache_all(); | ||
25 | asm volatile( | 23 | asm volatile( |
26 | " mcr p15, 0, %1, c7, c5, 0\n" | 24 | " mcr p15, 0, %1, c7, c5, 0\n" |
27 | " dsb\n" | 25 | " dsb\n" |
diff --git a/arch/arm/mach-tegra/board-harmony-pcie.c b/arch/arm/mach-tegra/board-harmony-pcie.c index d195db09ea32..035b240b9e15 100644 --- a/arch/arm/mach-tegra/board-harmony-pcie.c +++ b/arch/arm/mach-tegra/board-harmony-pcie.c | |||
@@ -56,9 +56,9 @@ int __init harmony_pcie_init(void) | |||
56 | gpio_direction_output(en_vdd_1v05, 1); | 56 | gpio_direction_output(en_vdd_1v05, 1); |
57 | 57 | ||
58 | regulator = regulator_get(NULL, "vdd_ldo0,vddio_pex_clk"); | 58 | regulator = regulator_get(NULL, "vdd_ldo0,vddio_pex_clk"); |
59 | if (IS_ERR_OR_NULL(regulator)) { | 59 | if (IS_ERR(regulator)) { |
60 | pr_err("%s: regulator_get failed: %d\n", __func__, | 60 | err = PTR_ERR(regulator); |
61 | (int)PTR_ERR(regulator)); | 61 | pr_err("%s: regulator_get failed: %d\n", __func__, err); |
62 | goto err_reg; | 62 | goto err_reg; |
63 | } | 63 | } |
64 | 64 | ||
diff --git a/arch/arm/mach-tegra/common.h b/arch/arm/mach-tegra/common.h index 32f8eb3fe344..5900cc44f780 100644 --- a/arch/arm/mach-tegra/common.h +++ b/arch/arm/mach-tegra/common.h | |||
@@ -2,4 +2,3 @@ extern struct smp_operations tegra_smp_ops; | |||
2 | 2 | ||
3 | extern int tegra_cpu_kill(unsigned int cpu); | 3 | extern int tegra_cpu_kill(unsigned int cpu); |
4 | extern void tegra_cpu_die(unsigned int cpu); | 4 | extern void tegra_cpu_die(unsigned int cpu); |
5 | extern int tegra_cpu_disable(unsigned int cpu); | ||
diff --git a/arch/arm/mach-tegra/hotplug.c b/arch/arm/mach-tegra/hotplug.c index 8da9f78475da..184914a68d73 100644 --- a/arch/arm/mach-tegra/hotplug.c +++ b/arch/arm/mach-tegra/hotplug.c | |||
@@ -11,7 +11,6 @@ | |||
11 | #include <linux/smp.h> | 11 | #include <linux/smp.h> |
12 | #include <linux/clk/tegra.h> | 12 | #include <linux/clk/tegra.h> |
13 | 13 | ||
14 | #include <asm/cacheflush.h> | ||
15 | #include <asm/smp_plat.h> | 14 | #include <asm/smp_plat.h> |
16 | 15 | ||
17 | #include "fuse.h" | 16 | #include "fuse.h" |
@@ -47,15 +46,6 @@ void __ref tegra_cpu_die(unsigned int cpu) | |||
47 | BUG(); | 46 | BUG(); |
48 | } | 47 | } |
49 | 48 | ||
50 | int tegra_cpu_disable(unsigned int cpu) | ||
51 | { | ||
52 | /* | ||
53 | * we don't allow CPU 0 to be shutdown (it is still too special | ||
54 | * e.g. clock tick interrupts) | ||
55 | */ | ||
56 | return cpu == 0 ? -EPERM : 0; | ||
57 | } | ||
58 | |||
59 | void __init tegra_hotplug_init(void) | 49 | void __init tegra_hotplug_init(void) |
60 | { | 50 | { |
61 | if (!IS_ENABLED(CONFIG_HOTPLUG_CPU)) | 51 | if (!IS_ENABLED(CONFIG_HOTPLUG_CPU)) |
diff --git a/arch/arm/mach-tegra/platsmp.c b/arch/arm/mach-tegra/platsmp.c index 0c4963bd4b44..fad4226ef710 100644 --- a/arch/arm/mach-tegra/platsmp.c +++ b/arch/arm/mach-tegra/platsmp.c | |||
@@ -173,6 +173,5 @@ struct smp_operations tegra_smp_ops __initdata = { | |||
173 | #ifdef CONFIG_HOTPLUG_CPU | 173 | #ifdef CONFIG_HOTPLUG_CPU |
174 | .cpu_kill = tegra_cpu_kill, | 174 | .cpu_kill = tegra_cpu_kill, |
175 | .cpu_die = tegra_cpu_die, | 175 | .cpu_die = tegra_cpu_die, |
176 | .cpu_disable = tegra_cpu_disable, | ||
177 | #endif | 176 | #endif |
178 | }; | 177 | }; |
diff --git a/arch/arm/mach-tegra/tegra2_emc.c b/arch/arm/mach-tegra/tegra2_emc.c index ce7ce42a1ac9..9e8bdfa2b369 100644 --- a/arch/arm/mach-tegra/tegra2_emc.c +++ b/arch/arm/mach-tegra/tegra2_emc.c | |||
@@ -276,7 +276,7 @@ static struct tegra_emc_pdata *tegra_emc_fill_pdata(struct platform_device *pdev | |||
276 | int i; | 276 | int i; |
277 | 277 | ||
278 | WARN_ON(pdev->dev.platform_data); | 278 | WARN_ON(pdev->dev.platform_data); |
279 | BUG_ON(IS_ERR_OR_NULL(c)); | 279 | BUG_ON(IS_ERR(c)); |
280 | 280 | ||
281 | pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); | 281 | pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); |
282 | pdata->tables = devm_kzalloc(&pdev->dev, sizeof(*pdata->tables), | 282 | pdata->tables = devm_kzalloc(&pdev->dev, sizeof(*pdata->tables), |
diff --git a/arch/arm/mach-ux500/cpu.c b/arch/arm/mach-ux500/cpu.c index 915e2636cbaa..b6145ea51641 100644 --- a/arch/arm/mach-ux500/cpu.c +++ b/arch/arm/mach-ux500/cpu.c | |||
@@ -149,14 +149,13 @@ struct device * __init ux500_soc_device_init(const char *soc_id) | |||
149 | soc_info_populate(soc_dev_attr, soc_id); | 149 | soc_info_populate(soc_dev_attr, soc_id); |
150 | 150 | ||
151 | soc_dev = soc_device_register(soc_dev_attr); | 151 | soc_dev = soc_device_register(soc_dev_attr); |
152 | if (IS_ERR_OR_NULL(soc_dev)) { | 152 | if (IS_ERR(soc_dev)) { |
153 | kfree(soc_dev_attr); | 153 | kfree(soc_dev_attr); |
154 | return NULL; | 154 | return NULL; |
155 | } | 155 | } |
156 | 156 | ||
157 | parent = soc_device_to_device(soc_dev); | 157 | parent = soc_device_to_device(soc_dev); |
158 | if (!IS_ERR_OR_NULL(parent)) | 158 | device_create_file(parent, &ux500_soc_attr); |
159 | device_create_file(parent, &ux500_soc_attr); | ||
160 | 159 | ||
161 | return parent; | 160 | return parent; |
162 | } | 161 | } |
diff --git a/arch/arm/mach-ux500/hotplug.c b/arch/arm/mach-ux500/hotplug.c index 87abcf278432..2bc00b085e38 100644 --- a/arch/arm/mach-ux500/hotplug.c +++ b/arch/arm/mach-ux500/hotplug.c | |||
@@ -12,7 +12,6 @@ | |||
12 | #include <linux/errno.h> | 12 | #include <linux/errno.h> |
13 | #include <linux/smp.h> | 13 | #include <linux/smp.h> |
14 | 14 | ||
15 | #include <asm/cacheflush.h> | ||
16 | #include <asm/smp_plat.h> | 15 | #include <asm/smp_plat.h> |
17 | 16 | ||
18 | #include "setup.h" | 17 | #include "setup.h" |
@@ -24,8 +23,6 @@ | |||
24 | */ | 23 | */ |
25 | void __ref ux500_cpu_die(unsigned int cpu) | 24 | void __ref ux500_cpu_die(unsigned int cpu) |
26 | { | 25 | { |
27 | flush_cache_all(); | ||
28 | |||
29 | /* directly enter low power state, skipping secure registers */ | 26 | /* directly enter low power state, skipping secure registers */ |
30 | for (;;) { | 27 | for (;;) { |
31 | __asm__ __volatile__("dsb\n\t" "wfi\n\t" | 28 | __asm__ __volatile__("dsb\n\t" "wfi\n\t" |
diff --git a/arch/arm/mach-vexpress/hotplug.c b/arch/arm/mach-vexpress/hotplug.c index a141b98d84fe..f0ce6b8f5e71 100644 --- a/arch/arm/mach-vexpress/hotplug.c +++ b/arch/arm/mach-vexpress/hotplug.c | |||
@@ -12,7 +12,6 @@ | |||
12 | #include <linux/errno.h> | 12 | #include <linux/errno.h> |
13 | #include <linux/smp.h> | 13 | #include <linux/smp.h> |
14 | 14 | ||
15 | #include <asm/cacheflush.h> | ||
16 | #include <asm/smp_plat.h> | 15 | #include <asm/smp_plat.h> |
17 | #include <asm/cp15.h> | 16 | #include <asm/cp15.h> |
18 | 17 | ||
@@ -20,7 +19,6 @@ static inline void cpu_enter_lowpower(void) | |||
20 | { | 19 | { |
21 | unsigned int v; | 20 | unsigned int v; |
22 | 21 | ||
23 | flush_cache_all(); | ||
24 | asm volatile( | 22 | asm volatile( |
25 | "mcr p15, 0, %1, c7, c5, 0\n" | 23 | "mcr p15, 0, %1, c7, c5, 0\n" |
26 | " mcr p15, 0, %1, c7, c10, 4\n" | 24 | " mcr p15, 0, %1, c7, c10, 4\n" |
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index 4045c4931a30..35955b54944c 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig | |||
@@ -397,6 +397,13 @@ config CPU_V7 | |||
397 | select CPU_PABRT_V7 | 397 | select CPU_PABRT_V7 |
398 | select CPU_TLB_V7 if MMU | 398 | select CPU_TLB_V7 if MMU |
399 | 399 | ||
400 | config CPU_THUMBONLY | ||
401 | bool | ||
402 | # There are no CPUs available with MMU that don't implement an ARM ISA: | ||
403 | depends on !MMU | ||
404 | help | ||
405 | Select this if your CPU doesn't support the 32 bit ARM instructions. | ||
406 | |||
400 | # Figure out what processor architecture version we should be using. | 407 | # Figure out what processor architecture version we should be using. |
401 | # This defines the compiler instruction set which depends on the machine type. | 408 | # This defines the compiler instruction set which depends on the machine type. |
402 | config CPU_32v3 | 409 | config CPU_32v3 |
@@ -605,7 +612,7 @@ config ARCH_DMA_ADDR_T_64BIT | |||
605 | bool | 612 | bool |
606 | 613 | ||
607 | config ARM_THUMB | 614 | config ARM_THUMB |
608 | bool "Support Thumb user binaries" | 615 | bool "Support Thumb user binaries" if !CPU_THUMBONLY |
609 | depends on CPU_ARM720T || CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_V6 || CPU_V6K || CPU_V7 || CPU_FEROCEON | 616 | depends on CPU_ARM720T || CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_V6 || CPU_V6K || CPU_V7 || CPU_FEROCEON |
610 | default y | 617 | default y |
611 | help | 618 | help |
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c index db26e2e543f4..6f4585b89078 100644 --- a/arch/arm/mm/alignment.c +++ b/arch/arm/mm/alignment.c | |||
@@ -961,12 +961,14 @@ static int __init alignment_init(void) | |||
961 | return -ENOMEM; | 961 | return -ENOMEM; |
962 | #endif | 962 | #endif |
963 | 963 | ||
964 | #ifdef CONFIG_CPU_CP15 | ||
964 | if (cpu_is_v6_unaligned()) { | 965 | if (cpu_is_v6_unaligned()) { |
965 | cr_alignment &= ~CR_A; | 966 | cr_alignment &= ~CR_A; |
966 | cr_no_alignment &= ~CR_A; | 967 | cr_no_alignment &= ~CR_A; |
967 | set_cr(cr_alignment); | 968 | set_cr(cr_alignment); |
968 | ai_usermode = safe_usermode(ai_usermode, false); | 969 | ai_usermode = safe_usermode(ai_usermode, false); |
969 | } | 970 | } |
971 | #endif | ||
970 | 972 | ||
971 | hook_fault_code(FAULT_CODE_ALIGNMENT, do_alignment, SIGBUS, BUS_ADRALN, | 973 | hook_fault_code(FAULT_CODE_ALIGNMENT, do_alignment, SIGBUS, BUS_ADRALN, |
972 | "alignment exception"); | 974 | "alignment exception"); |
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index e9db6b4bf65a..ef3e0f3aac96 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c | |||
@@ -823,16 +823,17 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset, | |||
823 | if (PageHighMem(page)) { | 823 | if (PageHighMem(page)) { |
824 | if (len + offset > PAGE_SIZE) | 824 | if (len + offset > PAGE_SIZE) |
825 | len = PAGE_SIZE - offset; | 825 | len = PAGE_SIZE - offset; |
826 | vaddr = kmap_high_get(page); | 826 | |
827 | if (vaddr) { | 827 | if (cache_is_vipt_nonaliasing()) { |
828 | vaddr += offset; | ||
829 | op(vaddr, len, dir); | ||
830 | kunmap_high(page); | ||
831 | } else if (cache_is_vipt()) { | ||
832 | /* unmapped pages might still be cached */ | ||
833 | vaddr = kmap_atomic(page); | 828 | vaddr = kmap_atomic(page); |
834 | op(vaddr + offset, len, dir); | 829 | op(vaddr + offset, len, dir); |
835 | kunmap_atomic(vaddr); | 830 | kunmap_atomic(vaddr); |
831 | } else { | ||
832 | vaddr = kmap_high_get(page); | ||
833 | if (vaddr) { | ||
834 | op(vaddr + offset, len, dir); | ||
835 | kunmap_high(page); | ||
836 | } | ||
836 | } | 837 | } |
837 | } else { | 838 | } else { |
838 | vaddr = page_address(page) + offset; | 839 | vaddr = page_address(page) + offset; |
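The dma-mapping.c hunk above (and the matching flush.c hunk below) reorders the highmem handling: on a VIPT non-aliasing cache any temporary mapping will do, so kmap_atomic() is used directly, and kmap_high_get() is kept only for the aliasing case where an existing mapping must be reused. A hedged sketch of that structure (the helper name and the op callback are illustrative):

#include <linux/highmem.h>
#include <asm/cachetype.h>

static void example_flush_highmem_page(struct page *page,
                                       void (*op)(const void *, size_t))
{
        void *vaddr;

        if (cache_is_vipt_nonaliasing()) {
                vaddr = kmap_atomic(page);
                op(vaddr, PAGE_SIZE);
                kunmap_atomic(vaddr);
        } else {
                /* Reuse an existing highmem mapping if one is present. */
                vaddr = kmap_high_get(page);
                if (vaddr) {
                        op(vaddr, PAGE_SIZE);
                        kunmap_high(page);
                }
        }
}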
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c index 1c8f7f564175..0d473cce501c 100644 --- a/arch/arm/mm/flush.c +++ b/arch/arm/mm/flush.c | |||
@@ -170,15 +170,18 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page) | |||
170 | if (!PageHighMem(page)) { | 170 | if (!PageHighMem(page)) { |
171 | __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE); | 171 | __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE); |
172 | } else { | 172 | } else { |
173 | void *addr = kmap_high_get(page); | 173 | void *addr; |
174 | if (addr) { | 174 | |
175 | __cpuc_flush_dcache_area(addr, PAGE_SIZE); | 175 | if (cache_is_vipt_nonaliasing()) { |
176 | kunmap_high(page); | ||
177 | } else if (cache_is_vipt()) { | ||
178 | /* unmapped pages might still be cached */ | ||
179 | addr = kmap_atomic(page); | 176 | addr = kmap_atomic(page); |
180 | __cpuc_flush_dcache_area(addr, PAGE_SIZE); | 177 | __cpuc_flush_dcache_area(addr, PAGE_SIZE); |
181 | kunmap_atomic(addr); | 178 | kunmap_atomic(addr); |
179 | } else { | ||
180 | addr = kmap_high_get(page); | ||
181 | if (addr) { | ||
182 | __cpuc_flush_dcache_area(addr, PAGE_SIZE); | ||
183 | kunmap_high(page); | ||
184 | } | ||
182 | } | 185 | } |
183 | } | 186 | } |
184 | 187 | ||
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index a84ff763ac39..e0d8565671a6 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c | |||
@@ -113,6 +113,7 @@ static struct cachepolicy cache_policies[] __initdata = { | |||
113 | } | 113 | } |
114 | }; | 114 | }; |
115 | 115 | ||
116 | #ifdef CONFIG_CPU_CP15 | ||
116 | /* | 117 | /* |
117 | * These are useful for identifying cache coherency | 118 | * These are useful for identifying cache coherency |
118 | * problems by allowing the cache or the cache and | 119 | * problems by allowing the cache or the cache and |
@@ -211,6 +212,22 @@ void adjust_cr(unsigned long mask, unsigned long set) | |||
211 | } | 212 | } |
212 | #endif | 213 | #endif |
213 | 214 | ||
215 | #else /* ifdef CONFIG_CPU_CP15 */ | ||
216 | |||
217 | static int __init early_cachepolicy(char *p) | ||
218 | { | ||
219 | pr_warning("cachepolicy kernel parameter not supported without cp15\n"); | ||
220 | } | ||
221 | early_param("cachepolicy", early_cachepolicy); | ||
222 | |||
223 | static int __init noalign_setup(char *__unused) | ||
224 | { | ||
225 | pr_warning("noalign kernel parameter not supported without cp15\n"); | ||
226 | } | ||
227 | __setup("noalign", noalign_setup); | ||
228 | |||
229 | #endif /* ifdef CONFIG_CPU_CP15 / else */ | ||
230 | |||
214 | #define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN | 231 | #define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN |
215 | #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE | 232 | #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE |
216 | 233 | ||
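The alignment.c and mmu.c hunks above guard direct control-register manipulation behind CONFIG_CPU_CP15 and, when CP15 is absent, keep the boot parameters accepted but reduce them to a warning. A hedged sketch of that stub pattern, written conventionally with an explicit return value (the parameter name here is illustrative):

#include <linux/init.h>
#include <linux/printk.h>

#ifndef CONFIG_CPU_CP15
/* Without CP15 there is no control register to adjust; warn and move on. */
static int __init example_cachepolicy(char *p)
{
        pr_warn("cachepolicy kernel parameter not supported without cp15\n");
        return 0;
}
early_param("cachepolicy", example_cachepolicy);
#endif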
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S index 5c07ee4fe3eb..919405e20b80 100644 --- a/arch/arm/mm/proc-v6.S +++ b/arch/arm/mm/proc-v6.S | |||
@@ -80,12 +80,10 @@ ENTRY(cpu_v6_do_idle) | |||
80 | mov pc, lr | 80 | mov pc, lr |
81 | 81 | ||
82 | ENTRY(cpu_v6_dcache_clean_area) | 82 | ENTRY(cpu_v6_dcache_clean_area) |
83 | #ifndef TLB_CAN_READ_FROM_L1_CACHE | ||
84 | 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry | 83 | 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry |
85 | add r0, r0, #D_CACHE_LINE_SIZE | 84 | add r0, r0, #D_CACHE_LINE_SIZE |
86 | subs r1, r1, #D_CACHE_LINE_SIZE | 85 | subs r1, r1, #D_CACHE_LINE_SIZE |
87 | bhi 1b | 86 | bhi 1b |
88 | #endif | ||
89 | mov pc, lr | 87 | mov pc, lr |
90 | 88 | ||
91 | /* | 89 | /* |
diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S index 78f520bc0e99..9704097c450e 100644 --- a/arch/arm/mm/proc-v7-2level.S +++ b/arch/arm/mm/proc-v7-2level.S | |||
@@ -110,7 +110,8 @@ ENTRY(cpu_v7_set_pte_ext) | |||
110 | ARM( str r3, [r0, #2048]! ) | 110 | ARM( str r3, [r0, #2048]! ) |
111 | THUMB( add r0, r0, #2048 ) | 111 | THUMB( add r0, r0, #2048 ) |
112 | THUMB( str r3, [r0] ) | 112 | THUMB( str r3, [r0] ) |
113 | mcr p15, 0, r0, c7, c10, 1 @ flush_pte | 113 | ALT_SMP(mov pc,lr) |
114 | ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte | ||
114 | #endif | 115 | #endif |
115 | mov pc, lr | 116 | mov pc, lr |
116 | ENDPROC(cpu_v7_set_pte_ext) | 117 | ENDPROC(cpu_v7_set_pte_ext) |
diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S index 6ffd78c0f9ab..363027e811d6 100644 --- a/arch/arm/mm/proc-v7-3level.S +++ b/arch/arm/mm/proc-v7-3level.S | |||
@@ -73,7 +73,8 @@ ENTRY(cpu_v7_set_pte_ext) | |||
73 | tst r3, #1 << (55 - 32) @ L_PTE_DIRTY | 73 | tst r3, #1 << (55 - 32) @ L_PTE_DIRTY |
74 | orreq r2, #L_PTE_RDONLY | 74 | orreq r2, #L_PTE_RDONLY |
75 | 1: strd r2, r3, [r0] | 75 | 1: strd r2, r3, [r0] |
76 | mcr p15, 0, r0, c7, c10, 1 @ flush_pte | 76 | ALT_SMP(mov pc, lr) |
77 | ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte | ||
77 | #endif | 78 | #endif |
78 | mov pc, lr | 79 | mov pc, lr |
79 | ENDPROC(cpu_v7_set_pte_ext) | 80 | ENDPROC(cpu_v7_set_pte_ext) |
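The proc-v7 set_pte_ext and dcache_clean_area hunks use ALT_SMP()/ALT_UP() to skip the post-PTE cache clean on SMP, since (as the "MP extensions imply L1 PTW" comment below notes) the page-table walker reads from the L1 cache on those cores. A hedged C-level analogue of the decision, with purely illustrative helper names standing in for the assembly:

#include <linux/types.h>

/* Hypothetical helper standing in for the architecture's D-cache clean. */
static void example_clean_dcache_area(void *addr, size_t size);

static inline void example_flush_pte(void *ptep, bool ptw_reads_l1_cache)
{
        /* Only UP cores without the MP extensions need the explicit clean. */
        if (!ptw_reads_l1_cache)
                example_clean_dcache_area(ptep, sizeof(u64));
}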
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index f584d3f5b37c..2c73a7301ff7 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S | |||
@@ -75,14 +75,14 @@ ENTRY(cpu_v7_do_idle) | |||
75 | ENDPROC(cpu_v7_do_idle) | 75 | ENDPROC(cpu_v7_do_idle) |
76 | 76 | ||
77 | ENTRY(cpu_v7_dcache_clean_area) | 77 | ENTRY(cpu_v7_dcache_clean_area) |
78 | #ifndef TLB_CAN_READ_FROM_L1_CACHE | 78 | ALT_SMP(mov pc, lr) @ MP extensions imply L1 PTW |
79 | ALT_UP(W(nop)) | ||
79 | dcache_line_size r2, r3 | 80 | dcache_line_size r2, r3 |
80 | 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry | 81 | 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry |
81 | add r0, r0, r2 | 82 | add r0, r0, r2 |
82 | subs r1, r1, r2 | 83 | subs r1, r1, r2 |
83 | bhi 1b | 84 | bhi 1b |
84 | dsb | 85 | dsb |
85 | #endif | ||
86 | mov pc, lr | 86 | mov pc, lr |
87 | ENDPROC(cpu_v7_dcache_clean_area) | 87 | ENDPROC(cpu_v7_dcache_clean_area) |
88 | 88 | ||
@@ -402,6 +402,8 @@ __v7_ca9mp_proc_info: | |||
402 | __v7_proc __v7_ca9mp_setup | 402 | __v7_proc __v7_ca9mp_setup |
403 | .size __v7_ca9mp_proc_info, . - __v7_ca9mp_proc_info | 403 | .size __v7_ca9mp_proc_info, . - __v7_ca9mp_proc_info |
404 | 404 | ||
405 | #endif /* CONFIG_ARM_LPAE */ | ||
406 | |||
405 | /* | 407 | /* |
406 | * Marvell PJ4B processor. | 408 | * Marvell PJ4B processor. |
407 | */ | 409 | */ |
@@ -411,7 +413,6 @@ __v7_pj4b_proc_info: | |||
411 | .long 0xfffffff0 | 413 | .long 0xfffffff0 |
412 | __v7_proc __v7_pj4b_setup | 414 | __v7_proc __v7_pj4b_setup |
413 | .size __v7_pj4b_proc_info, . - __v7_pj4b_proc_info | 415 | .size __v7_pj4b_proc_info, . - __v7_pj4b_proc_info |
414 | #endif /* CONFIG_ARM_LPAE */ | ||
415 | 416 | ||
416 | /* | 417 | /* |
417 | * ARM Ltd. Cortex A7 processor. | 418 | * ARM Ltd. Cortex A7 processor. |
diff --git a/arch/arm/plat-omap/dmtimer.c b/arch/arm/plat-omap/dmtimer.c index a0daa2fb5de6..e6dbc8dbe6a6 100644 --- a/arch/arm/plat-omap/dmtimer.c +++ b/arch/arm/plat-omap/dmtimer.c | |||
@@ -140,8 +140,7 @@ static int omap_dm_timer_prepare(struct omap_dm_timer *timer) | |||
140 | */ | 140 | */ |
141 | if (!(timer->capability & OMAP_TIMER_NEEDS_RESET)) { | 141 | if (!(timer->capability & OMAP_TIMER_NEEDS_RESET)) { |
142 | timer->fclk = clk_get(&timer->pdev->dev, "fck"); | 142 | timer->fclk = clk_get(&timer->pdev->dev, "fck"); |
143 | if (WARN_ON_ONCE(IS_ERR_OR_NULL(timer->fclk))) { | 143 | if (WARN_ON_ONCE(IS_ERR(timer->fclk))) { |
144 | timer->fclk = NULL; | ||
145 | dev_err(&timer->pdev->dev, ": No fclk handle.\n"); | 144 | dev_err(&timer->pdev->dev, ": No fclk handle.\n"); |
146 | return -EINVAL; | 145 | return -EINVAL; |
147 | } | 146 | } |
@@ -373,7 +372,7 @@ EXPORT_SYMBOL_GPL(omap_dm_timer_modify_idlect_mask); | |||
373 | 372 | ||
374 | struct clk *omap_dm_timer_get_fclk(struct omap_dm_timer *timer) | 373 | struct clk *omap_dm_timer_get_fclk(struct omap_dm_timer *timer) |
375 | { | 374 | { |
376 | if (timer) | 375 | if (timer && !IS_ERR(timer->fclk)) |
377 | return timer->fclk; | 376 | return timer->fclk; |
378 | return NULL; | 377 | return NULL; |
379 | } | 378 | } |
@@ -482,7 +481,7 @@ int omap_dm_timer_set_source(struct omap_dm_timer *timer, int source) | |||
482 | if (pdata && pdata->set_timer_src) | 481 | if (pdata && pdata->set_timer_src) |
483 | return pdata->set_timer_src(timer->pdev, source); | 482 | return pdata->set_timer_src(timer->pdev, source); |
484 | 483 | ||
485 | if (!timer->fclk) | 484 | if (IS_ERR(timer->fclk)) |
486 | return -EINVAL; | 485 | return -EINVAL; |
487 | 486 | ||
488 | switch (source) { | 487 | switch (source) { |
@@ -500,13 +499,13 @@ int omap_dm_timer_set_source(struct omap_dm_timer *timer, int source) | |||
500 | } | 499 | } |
501 | 500 | ||
502 | parent = clk_get(&timer->pdev->dev, parent_name); | 501 | parent = clk_get(&timer->pdev->dev, parent_name); |
503 | if (IS_ERR_OR_NULL(parent)) { | 502 | if (IS_ERR(parent)) { |
504 | pr_err("%s: %s not found\n", __func__, parent_name); | 503 | pr_err("%s: %s not found\n", __func__, parent_name); |
505 | return -EINVAL; | 504 | return -EINVAL; |
506 | } | 505 | } |
507 | 506 | ||
508 | ret = clk_set_parent(timer->fclk, parent); | 507 | ret = clk_set_parent(timer->fclk, parent); |
509 | if (IS_ERR_VALUE(ret)) | 508 | if (ret < 0) |
510 | pr_err("%s: failed to set %s as parent\n", __func__, | 509 | pr_err("%s: failed to set %s as parent\n", __func__, |
511 | parent_name); | 510 | parent_name); |
512 | 511 | ||
@@ -808,6 +807,7 @@ static int omap_dm_timer_probe(struct platform_device *pdev) | |||
808 | return -ENOMEM; | 807 | return -ENOMEM; |
809 | } | 808 | } |
810 | 809 | ||
810 | timer->fclk = ERR_PTR(-ENODEV); | ||
811 | timer->io_base = devm_ioremap_resource(dev, mem); | 811 | timer->io_base = devm_ioremap_resource(dev, mem); |
812 | if (IS_ERR(timer->io_base)) | 812 | if (IS_ERR(timer->io_base)) |
813 | return PTR_ERR(timer->io_base); | 813 | return PTR_ERR(timer->io_base); |
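The dmtimer hunk above stops NULL-ing the clock pointer on failure and instead initializes timer->fclk to ERR_PTR(-ENODEV) in probe, so a single IS_ERR() test covers both "never acquired" and clk_get() failure. A hedged sketch of that sentinel pattern (the example_* names are illustrative; field naming follows the dmtimer code above):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

struct example_timer {
        struct clk *fclk;
};

static void example_timer_init(struct example_timer *t, struct device *dev)
{
        t->fclk = ERR_PTR(-ENODEV);     /* sentinel until clk_get() succeeds */

        t->fclk = clk_get(dev, "fck");
        if (IS_ERR(t->fclk))
                dev_err(dev, "no fclk handle\n");
}

static struct clk *example_timer_get_fclk(struct example_timer *t)
{
        /* Callers expecting NULL for "no clock" get it from one place. */
        return IS_ERR(t->fclk) ? NULL : t->fclk;
}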
diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types index 831e1fdfdb2f..a10297da122b 100644 --- a/arch/arm/tools/mach-types +++ b/arch/arm/tools/mach-types | |||
@@ -16,7 +16,7 @@ | |||
16 | # are merged into mainline or have been edited in the machine database | 16 | # are merged into mainline or have been edited in the machine database |
17 | # within the last 12 months. References to machine_is_NAME() do not count! | 17 | # within the last 12 months. References to machine_is_NAME() do not count! |
18 | # | 18 | # |
19 | # Last update: Thu Apr 26 08:44:23 2012 | 19 | # Last update: Fri Mar 22 17:24:50 2013 |
20 | # | 20 | # |
21 | # machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number | 21 | # machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number |
22 | # | 22 | # |
@@ -64,8 +64,8 @@ h7201 ARCH_H7201 H7201 161 | |||
64 | h7202 ARCH_H7202 H7202 162 | 64 | h7202 ARCH_H7202 H7202 162 |
65 | iq80321 ARCH_IQ80321 IQ80321 169 | 65 | iq80321 ARCH_IQ80321 IQ80321 169 |
66 | ks8695 ARCH_KS8695 KS8695 180 | 66 | ks8695 ARCH_KS8695 KS8695 180 |
67 | karo ARCH_KARO KARO 190 | ||
68 | smdk2410 ARCH_SMDK2410 SMDK2410 193 | 67 | smdk2410 ARCH_SMDK2410 SMDK2410 193 |
68 | ceiva ARCH_CEIVA CEIVA 200 | ||
69 | voiceblue MACH_VOICEBLUE VOICEBLUE 218 | 69 | voiceblue MACH_VOICEBLUE VOICEBLUE 218 |
70 | h5400 ARCH_H5400 H5400 220 | 70 | h5400 ARCH_H5400 H5400 220 |
71 | omap_innovator MACH_OMAP_INNOVATOR OMAP_INNOVATOR 234 | 71 | omap_innovator MACH_OMAP_INNOVATOR OMAP_INNOVATOR 234 |
@@ -95,6 +95,7 @@ lpd7a400 MACH_LPD7A400 LPD7A400 389 | |||
95 | lpd7a404 MACH_LPD7A404 LPD7A404 390 | 95 | lpd7a404 MACH_LPD7A404 LPD7A404 390 |
96 | csb337 MACH_CSB337 CSB337 399 | 96 | csb337 MACH_CSB337 CSB337 399 |
97 | mainstone MACH_MAINSTONE MAINSTONE 406 | 97 | mainstone MACH_MAINSTONE MAINSTONE 406 |
98 | lite300 MACH_LITE300 LITE300 408 | ||
98 | xcep MACH_XCEP XCEP 413 | 99 | xcep MACH_XCEP XCEP 413 |
99 | arcom_vulcan MACH_ARCOM_VULCAN ARCOM_VULCAN 414 | 100 | arcom_vulcan MACH_ARCOM_VULCAN ARCOM_VULCAN 414 |
100 | nomadik MACH_NOMADIK NOMADIK 420 | 101 | nomadik MACH_NOMADIK NOMADIK 420 |
@@ -131,12 +132,14 @@ kb9200 MACH_KB9200 KB9200 612 | |||
131 | sx1 MACH_SX1 SX1 613 | 132 | sx1 MACH_SX1 SX1 613 |
132 | ixdp465 MACH_IXDP465 IXDP465 618 | 133 | ixdp465 MACH_IXDP465 IXDP465 618 |
133 | ixdp2351 MACH_IXDP2351 IXDP2351 619 | 134 | ixdp2351 MACH_IXDP2351 IXDP2351 619 |
135 | cm4008 MACH_CM4008 CM4008 624 | ||
134 | iq80332 MACH_IQ80332 IQ80332 629 | 136 | iq80332 MACH_IQ80332 IQ80332 629 |
135 | gtwx5715 MACH_GTWX5715 GTWX5715 641 | 137 | gtwx5715 MACH_GTWX5715 GTWX5715 641 |
136 | csb637 MACH_CSB637 CSB637 648 | 138 | csb637 MACH_CSB637 CSB637 648 |
137 | n30 MACH_N30 N30 656 | 139 | n30 MACH_N30 N30 656 |
138 | nec_mp900 MACH_NEC_MP900 NEC_MP900 659 | 140 | nec_mp900 MACH_NEC_MP900 NEC_MP900 659 |
139 | kafa MACH_KAFA KAFA 662 | 141 | kafa MACH_KAFA KAFA 662 |
142 | cm41xx MACH_CM41XX CM41XX 672 | ||
140 | ts72xx MACH_TS72XX TS72XX 673 | 143 | ts72xx MACH_TS72XX TS72XX 673 |
141 | otom MACH_OTOM OTOM 680 | 144 | otom MACH_OTOM OTOM 680 |
142 | nexcoder_2440 MACH_NEXCODER_2440 NEXCODER_2440 681 | 145 | nexcoder_2440 MACH_NEXCODER_2440 NEXCODER_2440 681 |
@@ -149,6 +152,7 @@ colibri MACH_COLIBRI COLIBRI 729 | |||
149 | gateway7001 MACH_GATEWAY7001 GATEWAY7001 731 | 152 | gateway7001 MACH_GATEWAY7001 GATEWAY7001 731 |
150 | pcm027 MACH_PCM027 PCM027 732 | 153 | pcm027 MACH_PCM027 PCM027 732 |
151 | anubis MACH_ANUBIS ANUBIS 734 | 154 | anubis MACH_ANUBIS ANUBIS 734 |
155 | xboardgp8 MACH_XBOARDGP8 XBOARDGP8 742 | ||
152 | akita MACH_AKITA AKITA 744 | 156 | akita MACH_AKITA AKITA 744 |
153 | e330 MACH_E330 E330 753 | 157 | e330 MACH_E330 E330 753 |
154 | nokia770 MACH_NOKIA770 NOKIA770 755 | 158 | nokia770 MACH_NOKIA770 NOKIA770 755 |
@@ -157,9 +161,11 @@ edb9315a MACH_EDB9315A EDB9315A 772 | |||
157 | stargate2 MACH_STARGATE2 STARGATE2 774 | 161 | stargate2 MACH_STARGATE2 STARGATE2 774 |
158 | intelmote2 MACH_INTELMOTE2 INTELMOTE2 775 | 162 | intelmote2 MACH_INTELMOTE2 INTELMOTE2 775 |
159 | trizeps4 MACH_TRIZEPS4 TRIZEPS4 776 | 163 | trizeps4 MACH_TRIZEPS4 TRIZEPS4 776 |
164 | pnx4008 MACH_PNX4008 PNX4008 782 | ||
160 | cpuat91 MACH_CPUAT91 CPUAT91 787 | 165 | cpuat91 MACH_CPUAT91 CPUAT91 787 |
161 | iq81340sc MACH_IQ81340SC IQ81340SC 799 | 166 | iq81340sc MACH_IQ81340SC IQ81340SC 799 |
162 | iq81340mc MACH_IQ81340MC IQ81340MC 801 | 167 | iq81340mc MACH_IQ81340MC IQ81340MC 801 |
168 | se4200 MACH_SE4200 SE4200 809 | ||
163 | micro9 MACH_MICRO9 MICRO9 811 | 169 | micro9 MACH_MICRO9 MICRO9 811 |
164 | micro9l MACH_MICRO9L MICRO9L 812 | 170 | micro9l MACH_MICRO9L MICRO9L 812 |
165 | omap_palmte MACH_OMAP_PALMTE OMAP_PALMTE 817 | 171 | omap_palmte MACH_OMAP_PALMTE OMAP_PALMTE 817 |
@@ -178,6 +184,7 @@ mx21ads MACH_MX21ADS MX21ADS 851 | |||
178 | ams_delta MACH_AMS_DELTA AMS_DELTA 862 | 184 | ams_delta MACH_AMS_DELTA AMS_DELTA 862 |
179 | nas100d MACH_NAS100D NAS100D 865 | 185 | nas100d MACH_NAS100D NAS100D 865 |
180 | magician MACH_MAGICIAN MAGICIAN 875 | 186 | magician MACH_MAGICIAN MAGICIAN 875 |
187 | cm4002 MACH_CM4002 CM4002 876 | ||
181 | nxdkn MACH_NXDKN NXDKN 880 | 188 | nxdkn MACH_NXDKN NXDKN 880 |
182 | palmtx MACH_PALMTX PALMTX 885 | 189 | palmtx MACH_PALMTX PALMTX 885 |
183 | s3c2413 MACH_S3C2413 S3C2413 887 | 190 | s3c2413 MACH_S3C2413 S3C2413 887 |
@@ -203,7 +210,6 @@ omap_fsample MACH_OMAP_FSAMPLE OMAP_FSAMPLE 970 | |||
203 | snapper_cl15 MACH_SNAPPER_CL15 SNAPPER_CL15 986 | 210 | snapper_cl15 MACH_SNAPPER_CL15 SNAPPER_CL15 986 |
204 | omap_palmz71 MACH_OMAP_PALMZ71 OMAP_PALMZ71 993 | 211 | omap_palmz71 MACH_OMAP_PALMZ71 OMAP_PALMZ71 993 |
205 | smdk2412 MACH_SMDK2412 SMDK2412 1009 | 212 | smdk2412 MACH_SMDK2412 SMDK2412 1009 |
206 | bkde303 MACH_BKDE303 BKDE303 1021 | ||
207 | smdk2413 MACH_SMDK2413 SMDK2413 1022 | 213 | smdk2413 MACH_SMDK2413 SMDK2413 1022 |
208 | aml_m5900 MACH_AML_M5900 AML_M5900 1024 | 214 | aml_m5900 MACH_AML_M5900 AML_M5900 1024 |
209 | balloon3 MACH_BALLOON3 BALLOON3 1029 | 215 | balloon3 MACH_BALLOON3 BALLOON3 1029 |
@@ -214,6 +220,7 @@ fsg MACH_FSG FSG 1091 | |||
214 | at91sam9260ek MACH_AT91SAM9260EK AT91SAM9260EK 1099 | 220 | at91sam9260ek MACH_AT91SAM9260EK AT91SAM9260EK 1099 |
215 | glantank MACH_GLANTANK GLANTANK 1100 | 221 | glantank MACH_GLANTANK GLANTANK 1100 |
216 | n2100 MACH_N2100 N2100 1101 | 222 | n2100 MACH_N2100 N2100 1101 |
223 | im42xx MACH_IM42XX IM42XX 1105 | ||
217 | qt2410 MACH_QT2410 QT2410 1108 | 224 | qt2410 MACH_QT2410 QT2410 1108 |
218 | kixrp435 MACH_KIXRP435 KIXRP435 1109 | 225 | kixrp435 MACH_KIXRP435 KIXRP435 1109 |
219 | cc9p9360dev MACH_CC9P9360DEV CC9P9360DEV 1114 | 226 | cc9p9360dev MACH_CC9P9360DEV CC9P9360DEV 1114 |
@@ -247,6 +254,7 @@ csb726 MACH_CSB726 CSB726 1359 | |||
247 | davinci_dm6467_evm MACH_DAVINCI_DM6467_EVM DAVINCI_DM6467_EVM 1380 | 254 | davinci_dm6467_evm MACH_DAVINCI_DM6467_EVM DAVINCI_DM6467_EVM 1380 |
248 | davinci_dm355_evm MACH_DAVINCI_DM355_EVM DAVINCI_DM355_EVM 1381 | 255 | davinci_dm355_evm MACH_DAVINCI_DM355_EVM DAVINCI_DM355_EVM 1381 |
249 | littleton MACH_LITTLETON LITTLETON 1388 | 256 | littleton MACH_LITTLETON LITTLETON 1388 |
257 | im4004 MACH_IM4004 IM4004 1400 | ||
250 | realview_pb11mp MACH_REALVIEW_PB11MP REALVIEW_PB11MP 1407 | 258 | realview_pb11mp MACH_REALVIEW_PB11MP REALVIEW_PB11MP 1407 |
251 | mx27_3ds MACH_MX27_3DS MX27_3DS 1430 | 259 | mx27_3ds MACH_MX27_3DS MX27_3DS 1430 |
252 | halibut MACH_HALIBUT HALIBUT 1439 | 260 | halibut MACH_HALIBUT HALIBUT 1439 |
@@ -268,6 +276,7 @@ dns323 MACH_DNS323 DNS323 1542 | |||
268 | omap3_beagle MACH_OMAP3_BEAGLE OMAP3_BEAGLE 1546 | 276 | omap3_beagle MACH_OMAP3_BEAGLE OMAP3_BEAGLE 1546 |
269 | nokia_n810 MACH_NOKIA_N810 NOKIA_N810 1548 | 277 | nokia_n810 MACH_NOKIA_N810 NOKIA_N810 1548 |
270 | pcm038 MACH_PCM038 PCM038 1551 | 278 | pcm038 MACH_PCM038 PCM038 1551 |
279 | sg310 MACH_SG310 SG310 1564 | ||
271 | ts209 MACH_TS209 TS209 1565 | 280 | ts209 MACH_TS209 TS209 1565 |
272 | at91cap9adk MACH_AT91CAP9ADK AT91CAP9ADK 1566 | 281 | at91cap9adk MACH_AT91CAP9ADK AT91CAP9ADK 1566 |
273 | mx31moboard MACH_MX31MOBOARD MX31MOBOARD 1574 | 282 | mx31moboard MACH_MX31MOBOARD MX31MOBOARD 1574 |
@@ -371,7 +380,6 @@ pcm043 MACH_PCM043 PCM043 2072 | |||
371 | sheevaplug MACH_SHEEVAPLUG SHEEVAPLUG 2097 | 380 | sheevaplug MACH_SHEEVAPLUG SHEEVAPLUG 2097 |
372 | avengers_lite MACH_AVENGERS_LITE AVENGERS_LITE 2104 | 381 | avengers_lite MACH_AVENGERS_LITE AVENGERS_LITE 2104 |
373 | mx51_babbage MACH_MX51_BABBAGE MX51_BABBAGE 2125 | 382 | mx51_babbage MACH_MX51_BABBAGE MX51_BABBAGE 2125 |
374 | tx37 MACH_TX37 TX37 2127 | ||
375 | rd78x00_masa MACH_RD78X00_MASA RD78X00_MASA 2135 | 383 | rd78x00_masa MACH_RD78X00_MASA RD78X00_MASA 2135 |
376 | dm355_leopard MACH_DM355_LEOPARD DM355_LEOPARD 2138 | 384 | dm355_leopard MACH_DM355_LEOPARD DM355_LEOPARD 2138 |
377 | ts219 MACH_TS219 TS219 2139 | 385 | ts219 MACH_TS219 TS219 2139 |
@@ -380,12 +388,12 @@ davinci_da850_evm MACH_DAVINCI_DA850_EVM DAVINCI_DA850_EVM 2157 | |||
380 | at91sam9g10ek MACH_AT91SAM9G10EK AT91SAM9G10EK 2159 | 388 | at91sam9g10ek MACH_AT91SAM9G10EK AT91SAM9G10EK 2159 |
381 | omap_4430sdp MACH_OMAP_4430SDP OMAP_4430SDP 2160 | 389 | omap_4430sdp MACH_OMAP_4430SDP OMAP_4430SDP 2160 |
382 | magx_zn5 MACH_MAGX_ZN5 MAGX_ZN5 2162 | 390 | magx_zn5 MACH_MAGX_ZN5 MAGX_ZN5 2162 |
383 | tx25 MACH_TX25 TX25 2177 | ||
384 | omap3_torpedo MACH_OMAP3_TORPEDO OMAP3_TORPEDO 2178 | 391 | omap3_torpedo MACH_OMAP3_TORPEDO OMAP3_TORPEDO 2178 |
385 | anw6410 MACH_ANW6410 ANW6410 2183 | 392 | anw6410 MACH_ANW6410 ANW6410 2183 |
386 | imx27_visstrim_m10 MACH_IMX27_VISSTRIM_M10 IMX27_VISSTRIM_M10 2187 | 393 | imx27_visstrim_m10 MACH_IMX27_VISSTRIM_M10 IMX27_VISSTRIM_M10 2187 |
387 | portuxg20 MACH_PORTUXG20 PORTUXG20 2191 | 394 | portuxg20 MACH_PORTUXG20 PORTUXG20 2191 |
388 | smdkc110 MACH_SMDKC110 SMDKC110 2193 | 395 | smdkc110 MACH_SMDKC110 SMDKC110 2193 |
396 | cabespresso MACH_CABESPRESSO CABESPRESSO 2194 | ||
389 | omap3517evm MACH_OMAP3517EVM OMAP3517EVM 2200 | 397 | omap3517evm MACH_OMAP3517EVM OMAP3517EVM 2200 |
390 | netspace_v2 MACH_NETSPACE_V2 NETSPACE_V2 2201 | 398 | netspace_v2 MACH_NETSPACE_V2 NETSPACE_V2 2201 |
391 | netspace_max_v2 MACH_NETSPACE_MAX_V2 NETSPACE_MAX_V2 2202 | 399 | netspace_max_v2 MACH_NETSPACE_MAX_V2 NETSPACE_MAX_V2 2202 |
@@ -404,6 +412,7 @@ bigdisk MACH_BIGDISK BIGDISK 2283 | |||
404 | at91sam9g20ek_2mmc MACH_AT91SAM9G20EK_2MMC AT91SAM9G20EK_2MMC 2288 | 412 | at91sam9g20ek_2mmc MACH_AT91SAM9G20EK_2MMC AT91SAM9G20EK_2MMC 2288 |
405 | bcmring MACH_BCMRING BCMRING 2289 | 413 | bcmring MACH_BCMRING BCMRING 2289 |
406 | mahimahi MACH_MAHIMAHI MAHIMAHI 2304 | 414 | mahimahi MACH_MAHIMAHI MAHIMAHI 2304 |
415 | cerebric MACH_CEREBRIC CEREBRIC 2311 | ||
407 | smdk6442 MACH_SMDK6442 SMDK6442 2324 | 416 | smdk6442 MACH_SMDK6442 SMDK6442 2324 |
408 | openrd_base MACH_OPENRD_BASE OPENRD_BASE 2325 | 417 | openrd_base MACH_OPENRD_BASE OPENRD_BASE 2325 |
409 | devkit8000 MACH_DEVKIT8000 DEVKIT8000 2330 | 418 | devkit8000 MACH_DEVKIT8000 DEVKIT8000 2330 |
@@ -423,10 +432,10 @@ raumfeld_rc MACH_RAUMFELD_RC RAUMFELD_RC 2413 | |||
423 | raumfeld_connector MACH_RAUMFELD_CONNECTOR RAUMFELD_CONNECTOR 2414 | 432 | raumfeld_connector MACH_RAUMFELD_CONNECTOR RAUMFELD_CONNECTOR 2414 |
424 | raumfeld_speaker MACH_RAUMFELD_SPEAKER RAUMFELD_SPEAKER 2415 | 433 | raumfeld_speaker MACH_RAUMFELD_SPEAKER RAUMFELD_SPEAKER 2415 |
425 | tnetv107x MACH_TNETV107X TNETV107X 2418 | 434 | tnetv107x MACH_TNETV107X TNETV107X 2418 |
426 | mx51_m2id MACH_MX51_M2ID MX51_M2ID 2428 | ||
427 | smdkv210 MACH_SMDKV210 SMDKV210 2456 | 435 | smdkv210 MACH_SMDKV210 SMDKV210 2456 |
428 | omap_zoom3 MACH_OMAP_ZOOM3 OMAP_ZOOM3 2464 | 436 | omap_zoom3 MACH_OMAP_ZOOM3 OMAP_ZOOM3 2464 |
429 | omap_3630sdp MACH_OMAP_3630SDP OMAP_3630SDP 2465 | 437 | omap_3630sdp MACH_OMAP_3630SDP OMAP_3630SDP 2465 |
438 | cybook2440 MACH_CYBOOK2440 CYBOOK2440 2466 | ||
430 | smartq7 MACH_SMARTQ7 SMARTQ7 2479 | 439 | smartq7 MACH_SMARTQ7 SMARTQ7 2479 |
431 | watson_efm_plugin MACH_WATSON_EFM_PLUGIN WATSON_EFM_PLUGIN 2491 | 440 | watson_efm_plugin MACH_WATSON_EFM_PLUGIN WATSON_EFM_PLUGIN 2491 |
432 | g4evm MACH_G4EVM G4EVM 2493 | 441 | g4evm MACH_G4EVM G4EVM 2493 |
@@ -434,12 +443,10 @@ omapl138_hawkboard MACH_OMAPL138_HAWKBOARD OMAPL138_HAWKBOARD 2495 | |||
434 | ts41x MACH_TS41X TS41X 2502 | 443 | ts41x MACH_TS41X TS41X 2502 |
435 | phy3250 MACH_PHY3250 PHY3250 2511 | 444 | phy3250 MACH_PHY3250 PHY3250 2511 |
436 | mini6410 MACH_MINI6410 MINI6410 2520 | 445 | mini6410 MACH_MINI6410 MINI6410 2520 |
437 | tx51 MACH_TX51 TX51 2529 | ||
438 | mx28evk MACH_MX28EVK MX28EVK 2531 | 446 | mx28evk MACH_MX28EVK MX28EVK 2531 |
439 | smartq5 MACH_SMARTQ5 SMARTQ5 2534 | 447 | smartq5 MACH_SMARTQ5 SMARTQ5 2534 |
440 | davinci_dm6467tevm MACH_DAVINCI_DM6467TEVM DAVINCI_DM6467TEVM 2548 | 448 | davinci_dm6467tevm MACH_DAVINCI_DM6467TEVM DAVINCI_DM6467TEVM 2548 |
441 | mxt_td60 MACH_MXT_TD60 MXT_TD60 2550 | 449 | mxt_td60 MACH_MXT_TD60 MXT_TD60 2550 |
442 | pca101 MACH_PCA101 PCA101 2595 | ||
443 | capc7117 MACH_CAPC7117 CAPC7117 2612 | 450 | capc7117 MACH_CAPC7117 CAPC7117 2612 |
444 | icontrol MACH_ICONTROL ICONTROL 2624 | 451 | icontrol MACH_ICONTROL ICONTROL 2624 |
445 | gplugd MACH_GPLUGD GPLUGD 2625 | 452 | gplugd MACH_GPLUGD GPLUGD 2625 |
@@ -465,6 +472,7 @@ igep0030 MACH_IGEP0030 IGEP0030 2717 | |||
465 | sbc3530 MACH_SBC3530 SBC3530 2722 | 472 | sbc3530 MACH_SBC3530 SBC3530 2722 |
466 | saarb MACH_SAARB SAARB 2727 | 473 | saarb MACH_SAARB SAARB 2727 |
467 | harmony MACH_HARMONY HARMONY 2731 | 474 | harmony MACH_HARMONY HARMONY 2731 |
475 | cybook_orizon MACH_CYBOOK_ORIZON CYBOOK_ORIZON 2733 | ||
468 | msm7x30_fluid MACH_MSM7X30_FLUID MSM7X30_FLUID 2741 | 476 | msm7x30_fluid MACH_MSM7X30_FLUID MSM7X30_FLUID 2741 |
469 | cm_t3517 MACH_CM_T3517 CM_T3517 2750 | 477 | cm_t3517 MACH_CM_T3517 CM_T3517 2750 |
470 | wbd222 MACH_WBD222 WBD222 2753 | 478 | wbd222 MACH_WBD222 WBD222 2753 |
@@ -480,10 +488,8 @@ eukrea_cpuimx35sd MACH_EUKREA_CPUIMX35SD EUKREA_CPUIMX35SD 2821 | |||
480 | eukrea_cpuimx51sd MACH_EUKREA_CPUIMX51SD EUKREA_CPUIMX51SD 2822 | 488 | eukrea_cpuimx51sd MACH_EUKREA_CPUIMX51SD EUKREA_CPUIMX51SD 2822 |
481 | eukrea_cpuimx51 MACH_EUKREA_CPUIMX51 EUKREA_CPUIMX51 2823 | 489 | eukrea_cpuimx51 MACH_EUKREA_CPUIMX51 EUKREA_CPUIMX51 2823 |
482 | smdkc210 MACH_SMDKC210 SMDKC210 2838 | 490 | smdkc210 MACH_SMDKC210 SMDKC210 2838 |
483 | pcaal1 MACH_PCAAL1 PCAAL1 2843 | ||
484 | t5325 MACH_T5325 T5325 2846 | 491 | t5325 MACH_T5325 T5325 2846 |
485 | income MACH_INCOME INCOME 2849 | 492 | income MACH_INCOME INCOME 2849 |
486 | mx257sx MACH_MX257SX MX257SX 2861 | ||
487 | goni MACH_GONI GONI 2862 | 493 | goni MACH_GONI GONI 2862 |
488 | bv07 MACH_BV07 BV07 2882 | 494 | bv07 MACH_BV07 BV07 2882 |
489 | openrd_ultimate MACH_OPENRD_ULTIMATE OPENRD_ULTIMATE 2884 | 495 | openrd_ultimate MACH_OPENRD_ULTIMATE OPENRD_ULTIMATE 2884 |
@@ -491,7 +497,6 @@ devixp MACH_DEVIXP DEVIXP 2885 | |||
491 | miccpt MACH_MICCPT MICCPT 2886 | 497 | miccpt MACH_MICCPT MICCPT 2886 |
492 | mic256 MACH_MIC256 MIC256 2887 | 498 | mic256 MACH_MIC256 MIC256 2887 |
493 | u5500 MACH_U5500 U5500 2890 | 499 | u5500 MACH_U5500 U5500 2890 |
494 | pov15hd MACH_POV15HD POV15HD 2910 | ||
495 | linkstation_lschl MACH_LINKSTATION_LSCHL LINKSTATION_LSCHL 2913 | 500 | linkstation_lschl MACH_LINKSTATION_LSCHL LINKSTATION_LSCHL 2913 |
496 | smdkv310 MACH_SMDKV310 SMDKV310 2925 | 501 | smdkv310 MACH_SMDKV310 SMDKV310 2925 |
497 | wm8505_7in_netbook MACH_WM8505_7IN_NETBOOK WM8505_7IN_NETBOOK 2928 | 502 | wm8505_7in_netbook MACH_WM8505_7IN_NETBOOK WM8505_7IN_NETBOOK 2928 |
@@ -518,7 +523,6 @@ prima2_evb MACH_PRIMA2_EVB PRIMA2_EVB 3103 | |||
518 | paz00 MACH_PAZ00 PAZ00 3128 | 523 | paz00 MACH_PAZ00 PAZ00 3128 |
519 | acmenetusfoxg20 MACH_ACMENETUSFOXG20 ACMENETUSFOXG20 3129 | 524 | acmenetusfoxg20 MACH_ACMENETUSFOXG20 ACMENETUSFOXG20 3129 |
520 | ag5evm MACH_AG5EVM AG5EVM 3189 | 525 | ag5evm MACH_AG5EVM AG5EVM 3189 |
521 | tsunagi MACH_TSUNAGI TSUNAGI 3197 | ||
522 | ics_if_voip MACH_ICS_IF_VOIP ICS_IF_VOIP 3206 | 526 | ics_if_voip MACH_ICS_IF_VOIP ICS_IF_VOIP 3206 |
523 | wlf_cragg_6410 MACH_WLF_CRAGG_6410 WLF_CRAGG_6410 3207 | 527 | wlf_cragg_6410 MACH_WLF_CRAGG_6410 WLF_CRAGG_6410 3207 |
524 | trimslice MACH_TRIMSLICE TRIMSLICE 3209 | 528 | trimslice MACH_TRIMSLICE TRIMSLICE 3209 |
@@ -529,8 +533,6 @@ msm8960_sim MACH_MSM8960_SIM MSM8960_SIM 3230 | |||
529 | msm8960_rumi3 MACH_MSM8960_RUMI3 MSM8960_RUMI3 3231 | 533 | msm8960_rumi3 MACH_MSM8960_RUMI3 MSM8960_RUMI3 3231 |
530 | gsia18s MACH_GSIA18S GSIA18S 3234 | 534 | gsia18s MACH_GSIA18S GSIA18S 3234 |
531 | mx53_loco MACH_MX53_LOCO MX53_LOCO 3273 | 535 | mx53_loco MACH_MX53_LOCO MX53_LOCO 3273 |
532 | tx53 MACH_TX53 TX53 3279 | ||
533 | encore MACH_ENCORE ENCORE 3284 | ||
534 | wario MACH_WARIO WARIO 3288 | 536 | wario MACH_WARIO WARIO 3288 |
535 | cm_t3730 MACH_CM_T3730 CM_T3730 3290 | 537 | cm_t3730 MACH_CM_T3730 CM_T3730 3290 |
536 | hrefv60 MACH_HREFV60 HREFV60 3293 | 538 | hrefv60 MACH_HREFV60 HREFV60 3293 |
@@ -538,603 +540,24 @@ armlex4210 MACH_ARMLEX4210 ARMLEX4210 3361 | |||
538 | snowball MACH_SNOWBALL SNOWBALL 3363 | 540 | snowball MACH_SNOWBALL SNOWBALL 3363 |
539 | xilinx_ep107 MACH_XILINX_EP107 XILINX_EP107 3378 | 541 | xilinx_ep107 MACH_XILINX_EP107 XILINX_EP107 3378 |
540 | nuri MACH_NURI NURI 3379 | 542 | nuri MACH_NURI NURI 3379 |
541 | wtplug MACH_WTPLUG WTPLUG 3412 | ||
542 | veridis_a300 MACH_VERIDIS_A300 VERIDIS_A300 3448 | ||
543 | origen MACH_ORIGEN ORIGEN 3455 | 543 | origen MACH_ORIGEN ORIGEN 3455 |
544 | wm8650refboard MACH_WM8650REFBOARD WM8650REFBOARD 3472 | ||
545 | xarina MACH_XARINA XARINA 3476 | ||
546 | sdvr MACH_SDVR SDVR 3478 | ||
547 | acer_maya MACH_ACER_MAYA ACER_MAYA 3479 | ||
548 | pico MACH_PICO PICO 3480 | ||
549 | cwmx233 MACH_CWMX233 CWMX233 3481 | ||
550 | cwam1808 MACH_CWAM1808 CWAM1808 3482 | ||
551 | cwdm365 MACH_CWDM365 CWDM365 3483 | ||
552 | mx51_moray MACH_MX51_MORAY MX51_MORAY 3484 | ||
553 | thales_cbc MACH_THALES_CBC THALES_CBC 3485 | ||
554 | bluepoint MACH_BLUEPOINT BLUEPOINT 3486 | ||
555 | dir665 MACH_DIR665 DIR665 3487 | ||
556 | acmerover1 MACH_ACMEROVER1 ACMEROVER1 3488 | ||
557 | shooter_ct MACH_SHOOTER_CT SHOOTER_CT 3489 | ||
558 | bliss MACH_BLISS BLISS 3490 | ||
559 | blissc MACH_BLISSC BLISSC 3491 | ||
560 | thales_adc MACH_THALES_ADC THALES_ADC 3492 | ||
561 | ubisys_p9d_evp MACH_UBISYS_P9D_EVP UBISYS_P9D_EVP 3493 | ||
562 | atdgp318 MACH_ATDGP318 ATDGP318 3494 | ||
563 | dma210u MACH_DMA210U DMA210U 3495 | ||
564 | em_t3 MACH_EM_T3 EM_T3 3496 | ||
565 | htx3250 MACH_HTX3250 HTX3250 3497 | ||
566 | g50 MACH_G50 G50 3498 | ||
567 | eco5 MACH_ECO5 ECO5 3499 | ||
568 | wintergrasp MACH_WINTERGRASP WINTERGRASP 3500 | ||
569 | puro MACH_PURO PURO 3501 | ||
570 | shooter_k MACH_SHOOTER_K SHOOTER_K 3502 | ||
571 | nspire MACH_NSPIRE NSPIRE 3503 | 544 | nspire MACH_NSPIRE NSPIRE 3503 |
572 | mickxx MACH_MICKXX MICKXX 3504 | ||
573 | lxmb MACH_LXMB LXMB 3505 | ||
574 | adam MACH_ADAM ADAM 3507 | ||
575 | b1004 MACH_B1004 B1004 3508 | ||
576 | oboea MACH_OBOEA OBOEA 3509 | ||
577 | a1015 MACH_A1015 A1015 3510 | ||
578 | robin_vbdt30 MACH_ROBIN_VBDT30 ROBIN_VBDT30 3511 | ||
579 | tegra_enterprise MACH_TEGRA_ENTERPRISE TEGRA_ENTERPRISE 3512 | ||
580 | rfl108200_mk10 MACH_RFL108200_MK10 RFL108200_MK10 3513 | ||
581 | rfl108300_mk16 MACH_RFL108300_MK16 RFL108300_MK16 3514 | ||
582 | rover_v7 MACH_ROVER_V7 ROVER_V7 3515 | ||
583 | miphone MACH_MIPHONE MIPHONE 3516 | ||
584 | femtobts MACH_FEMTOBTS FEMTOBTS 3517 | ||
585 | monopoli MACH_MONOPOLI MONOPOLI 3518 | ||
586 | boss MACH_BOSS BOSS 3519 | ||
587 | davinci_dm368_vtam MACH_DAVINCI_DM368_VTAM DAVINCI_DM368_VTAM 3520 | ||
588 | clcon MACH_CLCON CLCON 3521 | ||
589 | nokia_rm696 MACH_NOKIA_RM696 NOKIA_RM696 3522 | 545 | nokia_rm696 MACH_NOKIA_RM696 NOKIA_RM696 3522 |
590 | tahiti MACH_TAHITI TAHITI 3523 | ||
591 | fighter MACH_FIGHTER FIGHTER 3524 | ||
592 | sgh_i710 MACH_SGH_I710 SGH_I710 3525 | ||
593 | integreproscb MACH_INTEGREPROSCB INTEGREPROSCB 3526 | ||
594 | monza MACH_MONZA MONZA 3527 | ||
595 | calimain MACH_CALIMAIN CALIMAIN 3528 | ||
596 | mx6q_sabreauto MACH_MX6Q_SABREAUTO MX6Q_SABREAUTO 3529 | ||
597 | gma01x MACH_GMA01X GMA01X 3530 | ||
598 | sbc51 MACH_SBC51 SBC51 3531 | ||
599 | fit MACH_FIT FIT 3532 | ||
600 | steelhead MACH_STEELHEAD STEELHEAD 3533 | ||
601 | panther MACH_PANTHER PANTHER 3534 | ||
602 | msm8960_liquid MACH_MSM8960_LIQUID MSM8960_LIQUID 3535 | ||
603 | lexikonct MACH_LEXIKONCT LEXIKONCT 3536 | ||
604 | ns2816_stb MACH_NS2816_STB NS2816_STB 3537 | ||
605 | sei_mm2_lpc3250 MACH_SEI_MM2_LPC3250 SEI_MM2_LPC3250 3538 | ||
606 | cmimx53 MACH_CMIMX53 CMIMX53 3539 | ||
607 | sandwich MACH_SANDWICH SANDWICH 3540 | ||
608 | chief MACH_CHIEF CHIEF 3541 | ||
609 | pogo_e02 MACH_POGO_E02 POGO_E02 3542 | ||
610 | mikrap_x168 MACH_MIKRAP_X168 MIKRAP_X168 3543 | 546 | mikrap_x168 MACH_MIKRAP_X168 MIKRAP_X168 3543 |
611 | htcmozart MACH_HTCMOZART HTCMOZART 3544 | ||
612 | htcgold MACH_HTCGOLD HTCGOLD 3545 | ||
613 | mt72xx MACH_MT72XX MT72XX 3546 | ||
614 | mx51_ivy MACH_MX51_IVY MX51_IVY 3547 | ||
615 | mx51_lvd MACH_MX51_LVD MX51_LVD 3548 | ||
616 | omap3_wiser2 MACH_OMAP3_WISER2 OMAP3_WISER2 3549 | ||
617 | dreamplug MACH_DREAMPLUG DREAMPLUG 3550 | ||
618 | cobas_c_111 MACH_COBAS_C_111 COBAS_C_111 3551 | ||
619 | cobas_u_411 MACH_COBAS_U_411 COBAS_U_411 3552 | ||
620 | hssd MACH_HSSD HSSD 3553 | ||
621 | iom35x MACH_IOM35X IOM35X 3554 | ||
622 | psom_omap MACH_PSOM_OMAP PSOM_OMAP 3555 | ||
623 | iphone_2g MACH_IPHONE_2G IPHONE_2G 3556 | ||
624 | iphone_3g MACH_IPHONE_3G IPHONE_3G 3557 | ||
625 | ipod_touch_1g MACH_IPOD_TOUCH_1G IPOD_TOUCH_1G 3558 | ||
626 | pharos_tpc MACH_PHAROS_TPC PHAROS_TPC 3559 | ||
627 | mx53_hydra MACH_MX53_HYDRA MX53_HYDRA 3560 | ||
628 | ns2816_dev_board MACH_NS2816_DEV_BOARD NS2816_DEV_BOARD 3561 | ||
629 | iphone_3gs MACH_IPHONE_3GS IPHONE_3GS 3562 | ||
630 | iphone_4 MACH_IPHONE_4 IPHONE_4 3563 | ||
631 | ipod_touch_4g MACH_IPOD_TOUCH_4G IPOD_TOUCH_4G 3564 | ||
632 | dragon_e1100 MACH_DRAGON_E1100 DRAGON_E1100 3565 | ||
633 | topside MACH_TOPSIDE TOPSIDE 3566 | ||
634 | irisiii MACH_IRISIII IRISIII 3567 | ||
635 | deto_macarm9 MACH_DETO_MACARM9 DETO_MACARM9 3568 | 547 | deto_macarm9 MACH_DETO_MACARM9 DETO_MACARM9 3568 |
636 | eti_d1 MACH_ETI_D1 ETI_D1 3569 | ||
637 | som3530sdk MACH_SOM3530SDK SOM3530SDK 3570 | ||
638 | oc_engine MACH_OC_ENGINE OC_ENGINE 3571 | ||
639 | apq8064_sim MACH_APQ8064_SIM APQ8064_SIM 3572 | ||
640 | alps MACH_ALPS ALPS 3575 | ||
641 | tny_t3730 MACH_TNY_T3730 TNY_T3730 3576 | ||
642 | geryon_nfe MACH_GERYON_NFE GERYON_NFE 3577 | ||
643 | ns2816_ref_board MACH_NS2816_REF_BOARD NS2816_REF_BOARD 3578 | ||
644 | silverstone MACH_SILVERSTONE SILVERSTONE 3579 | ||
645 | mtt2440 MACH_MTT2440 MTT2440 3580 | ||
646 | ynicdb MACH_YNICDB YNICDB 3581 | ||
647 | bct MACH_BCT BCT 3582 | ||
648 | tuscan MACH_TUSCAN TUSCAN 3583 | ||
649 | xbt_sam9g45 MACH_XBT_SAM9G45 XBT_SAM9G45 3584 | ||
650 | enbw_cmc MACH_ENBW_CMC ENBW_CMC 3585 | ||
651 | ch104mx257 MACH_CH104MX257 CH104MX257 3587 | ||
652 | openpri MACH_OPENPRI OPENPRI 3588 | ||
653 | am335xevm MACH_AM335XEVM AM335XEVM 3589 | ||
654 | picodmb MACH_PICODMB PICODMB 3590 | ||
655 | waluigi MACH_WALUIGI WALUIGI 3591 | ||
656 | punicag7 MACH_PUNICAG7 PUNICAG7 3592 | ||
657 | ipad_1g MACH_IPAD_1G IPAD_1G 3593 | ||
658 | appletv_2g MACH_APPLETV_2G APPLETV_2G 3594 | ||
659 | mach_ecog45 MACH_MACH_ECOG45 MACH_ECOG45 3595 | ||
660 | ait_cam_enc_4xx MACH_AIT_CAM_ENC_4XX AIT_CAM_ENC_4XX 3596 | ||
661 | runnymede MACH_RUNNYMEDE RUNNYMEDE 3597 | ||
662 | play MACH_PLAY PLAY 3598 | ||
663 | hw90260 MACH_HW90260 HW90260 3599 | ||
664 | tagh MACH_TAGH TAGH 3600 | ||
665 | filbert MACH_FILBERT FILBERT 3601 | ||
666 | getinge_netcomv3 MACH_GETINGE_NETCOMV3 GETINGE_NETCOMV3 3602 | ||
667 | cw20 MACH_CW20 CW20 3603 | ||
668 | cinema MACH_CINEMA CINEMA 3604 | ||
669 | cinema_tea MACH_CINEMA_TEA CINEMA_TEA 3605 | ||
670 | cinema_coffee MACH_CINEMA_COFFEE CINEMA_COFFEE 3606 | ||
671 | cinema_juice MACH_CINEMA_JUICE CINEMA_JUICE 3607 | ||
672 | mx53_mirage2 MACH_MX53_MIRAGE2 MX53_MIRAGE2 3609 | ||
673 | mx53_efikasb MACH_MX53_EFIKASB MX53_EFIKASB 3610 | ||
674 | stm_b2000 MACH_STM_B2000 STM_B2000 3612 | ||
675 | m28evk MACH_M28EVK M28EVK 3613 | 548 | m28evk MACH_M28EVK M28EVK 3613 |
676 | pda MACH_PDA PDA 3614 | ||
677 | meraki_mr58 MACH_MERAKI_MR58 MERAKI_MR58 3615 | ||
678 | kota2 MACH_KOTA2 KOTA2 3616 | 549 | kota2 MACH_KOTA2 KOTA2 3616 |
679 | letcool MACH_LETCOOL LETCOOL 3617 | ||
680 | mx27iat MACH_MX27IAT MX27IAT 3618 | ||
681 | apollo_td MACH_APOLLO_TD APOLLO_TD 3619 | ||
682 | arena MACH_ARENA ARENA 3620 | ||
683 | gsngateway MACH_GSNGATEWAY GSNGATEWAY 3621 | ||
684 | lf2000 MACH_LF2000 LF2000 3622 | ||
685 | bonito MACH_BONITO BONITO 3623 | 550 | bonito MACH_BONITO BONITO 3623 |
686 | asymptote MACH_ASYMPTOTE ASYMPTOTE 3624 | ||
687 | bst2brd MACH_BST2BRD BST2BRD 3625 | ||
688 | tx335s MACH_TX335S TX335S 3626 | ||
689 | pelco_tesla MACH_PELCO_TESLA PELCO_TESLA 3627 | ||
690 | rrhtestplat MACH_RRHTESTPLAT RRHTESTPLAT 3628 | ||
691 | vidtonic_pro MACH_VIDTONIC_PRO VIDTONIC_PRO 3629 | ||
692 | pl_apollo MACH_PL_APOLLO PL_APOLLO 3630 | ||
693 | pl_phoenix MACH_PL_PHOENIX PL_PHOENIX 3631 | ||
694 | m28cu3 MACH_M28CU3 M28CU3 3632 | ||
695 | vvbox_hd MACH_VVBOX_HD VVBOX_HD 3633 | ||
696 | coreware_sam9260_ MACH_COREWARE_SAM9260_ COREWARE_SAM9260_ 3634 | ||
697 | marmaduke MACH_MARMADUKE MARMADUKE 3635 | ||
698 | amg_xlcore_camera MACH_AMG_XLCORE_CAMERA AMG_XLCORE_CAMERA 3636 | ||
699 | omap3_egf MACH_OMAP3_EGF OMAP3_EGF 3637 | 551 | omap3_egf MACH_OMAP3_EGF OMAP3_EGF 3637 |
700 | smdk4212 MACH_SMDK4212 SMDK4212 3638 | 552 | smdk4212 MACH_SMDK4212 SMDK4212 3638 |
701 | dnp9200 MACH_DNP9200 DNP9200 3639 | ||
702 | tf101 MACH_TF101 TF101 3640 | ||
703 | omap3silvio MACH_OMAP3SILVIO OMAP3SILVIO 3641 | ||
704 | picasso2 MACH_PICASSO2 PICASSO2 3642 | ||
705 | vangogh2 MACH_VANGOGH2 VANGOGH2 3643 | ||
706 | olpc_xo_1_75 MACH_OLPC_XO_1_75 OLPC_XO_1_75 3644 | ||
707 | gx400 MACH_GX400 GX400 3645 | ||
708 | gs300 MACH_GS300 GS300 3646 | ||
709 | acer_a9 MACH_ACER_A9 ACER_A9 3647 | ||
710 | vivow_evm MACH_VIVOW_EVM VIVOW_EVM 3648 | ||
711 | veloce_cxq MACH_VELOCE_CXQ VELOCE_CXQ 3649 | ||
712 | veloce_cxm MACH_VELOCE_CXM VELOCE_CXM 3650 | ||
713 | p1852 MACH_P1852 P1852 3651 | ||
714 | naxy100 MACH_NAXY100 NAXY100 3652 | ||
715 | taishan MACH_TAISHAN TAISHAN 3653 | ||
716 | touchlink MACH_TOUCHLINK TOUCHLINK 3654 | ||
717 | stm32f103ze MACH_STM32F103ZE STM32F103ZE 3655 | ||
718 | mcx MACH_MCX MCX 3656 | ||
719 | stm_nmhdk_fli7610 MACH_STM_NMHDK_FLI7610 STM_NMHDK_FLI7610 3657 | ||
720 | top28x MACH_TOP28X TOP28X 3658 | ||
721 | okl4vp_microvisor MACH_OKL4VP_MICROVISOR OKL4VP_MICROVISOR 3659 | ||
722 | pop MACH_POP POP 3660 | ||
723 | layer MACH_LAYER LAYER 3661 | ||
724 | trondheim MACH_TRONDHEIM TRONDHEIM 3662 | ||
725 | eva MACH_EVA EVA 3663 | ||
726 | trust_taurus MACH_TRUST_TAURUS TRUST_TAURUS 3664 | ||
727 | ns2816_huashan MACH_NS2816_HUASHAN NS2816_HUASHAN 3665 | ||
728 | ns2816_yangcheng MACH_NS2816_YANGCHENG NS2816_YANGCHENG 3666 | ||
729 | p852 MACH_P852 P852 3667 | ||
730 | flea3 MACH_FLEA3 FLEA3 3668 | ||
731 | bowfin MACH_BOWFIN BOWFIN 3669 | ||
732 | mv88de3100 MACH_MV88DE3100 MV88DE3100 3670 | ||
733 | pia_am35x MACH_PIA_AM35X PIA_AM35X 3671 | ||
734 | cedar MACH_CEDAR CEDAR 3672 | ||
735 | picasso_e MACH_PICASSO_E PICASSO_E 3673 | ||
736 | samsung_e60 MACH_SAMSUNG_E60 SAMSUNG_E60 3674 | ||
737 | sdvr_mini MACH_SDVR_MINI SDVR_MINI 3676 | ||
738 | omap3_ij3k MACH_OMAP3_IJ3K OMAP3_IJ3K 3677 | ||
739 | modasmc1 MACH_MODASMC1 MODASMC1 3678 | ||
740 | apq8064_rumi3 MACH_APQ8064_RUMI3 APQ8064_RUMI3 3679 | ||
741 | matrix506 MACH_MATRIX506 MATRIX506 3680 | ||
742 | msm9615_mtp MACH_MSM9615_MTP MSM9615_MTP 3681 | ||
743 | dm36x_spawndc MACH_DM36X_SPAWNDC DM36X_SPAWNDC 3682 | ||
744 | sff792 MACH_SFF792 SFF792 3683 | ||
745 | am335xiaevm MACH_AM335XIAEVM AM335XIAEVM 3684 | ||
746 | g3c2440 MACH_G3C2440 G3C2440 3685 | ||
747 | tion270 MACH_TION270 TION270 3686 | ||
748 | w22q7arm02 MACH_W22Q7ARM02 W22Q7ARM02 3687 | ||
749 | omap_cat MACH_OMAP_CAT OMAP_CAT 3688 | ||
750 | at91sam9n12ek MACH_AT91SAM9N12EK AT91SAM9N12EK 3689 | ||
751 | morrison MACH_MORRISON MORRISON 3690 | ||
752 | svdu MACH_SVDU SVDU 3691 | ||
753 | lpp01 MACH_LPP01 LPP01 3692 | ||
754 | ubc283 MACH_UBC283 UBC283 3693 | ||
755 | zeppelin MACH_ZEPPELIN ZEPPELIN 3694 | ||
756 | motus MACH_MOTUS MOTUS 3695 | ||
757 | neomainboard MACH_NEOMAINBOARD NEOMAINBOARD 3696 | ||
758 | devkit3250 MACH_DEVKIT3250 DEVKIT3250 3697 | ||
759 | devkit7000 MACH_DEVKIT7000 DEVKIT7000 3698 | ||
760 | fmc_uic MACH_FMC_UIC FMC_UIC 3699 | ||
761 | fmc_dcm MACH_FMC_DCM FMC_DCM 3700 | ||
762 | batwm MACH_BATWM BATWM 3701 | ||
763 | atlas6cb MACH_ATLAS6CB ATLAS6CB 3702 | ||
764 | blue MACH_BLUE BLUE 3705 | ||
765 | colorado MACH_COLORADO COLORADO 3706 | ||
766 | popc MACH_POPC POPC 3707 | ||
767 | promwad_jade MACH_PROMWAD_JADE PROMWAD_JADE 3708 | ||
768 | amp MACH_AMP AMP 3709 | ||
769 | gnet_amp MACH_GNET_AMP GNET_AMP 3710 | ||
770 | toques MACH_TOQUES TOQUES 3711 | ||
771 | apx4devkit MACH_APX4DEVKIT APX4DEVKIT 3712 | 553 | apx4devkit MACH_APX4DEVKIT APX4DEVKIT 3712 |
772 | dct_storm MACH_DCT_STORM DCT_STORM 3713 | ||
773 | owl MACH_OWL OWL 3715 | ||
774 | cogent_csb1741 MACH_COGENT_CSB1741 COGENT_CSB1741 3716 | ||
775 | adillustra610 MACH_ADILLUSTRA610 ADILLUSTRA610 3718 | ||
776 | ecafe_na04 MACH_ECAFE_NA04 ECAFE_NA04 3719 | ||
777 | popct MACH_POPCT POPCT 3720 | ||
778 | omap3_helena MACH_OMAP3_HELENA OMAP3_HELENA 3721 | ||
779 | ach MACH_ACH ACH 3722 | ||
780 | module_dtb MACH_MODULE_DTB MODULE_DTB 3723 | ||
781 | oslo_elisabeth MACH_OSLO_ELISABETH OSLO_ELISABETH 3725 | ||
782 | tt01 MACH_TT01 TT01 3726 | ||
783 | msm8930_cdp MACH_MSM8930_CDP MSM8930_CDP 3727 | ||
784 | msm8930_mtp MACH_MSM8930_MTP MSM8930_MTP 3728 | ||
785 | msm8930_fluid MACH_MSM8930_FLUID MSM8930_FLUID 3729 | ||
786 | ltu11 MACH_LTU11 LTU11 3730 | ||
787 | am1808_spawnco MACH_AM1808_SPAWNCO AM1808_SPAWNCO 3731 | ||
788 | flx6410 MACH_FLX6410 FLX6410 3732 | ||
789 | mx6q_qsb MACH_MX6Q_QSB MX6Q_QSB 3733 | ||
790 | mx53_plt424 MACH_MX53_PLT424 MX53_PLT424 3734 | ||
791 | jasmine MACH_JASMINE JASMINE 3735 | ||
792 | l138_owlboard_plus MACH_L138_OWLBOARD_PLUS L138_OWLBOARD_PLUS 3736 | ||
793 | wr21 MACH_WR21 WR21 3737 | ||
794 | peaboy MACH_PEABOY PEABOY 3739 | ||
795 | mx28_plato MACH_MX28_PLATO MX28_PLATO 3740 | ||
796 | kacom2 MACH_KACOM2 KACOM2 3741 | ||
797 | slco MACH_SLCO SLCO 3742 | ||
798 | imx51pico MACH_IMX51PICO IMX51PICO 3743 | ||
799 | glink1 MACH_GLINK1 GLINK1 3744 | ||
800 | diamond MACH_DIAMOND DIAMOND 3745 | ||
801 | d9000 MACH_D9000 D9000 3746 | ||
802 | w5300e01 MACH_W5300E01 W5300E01 3747 | ||
803 | im6000 MACH_IM6000 IM6000 3748 | ||
804 | mx51_fred51 MACH_MX51_FRED51 MX51_FRED51 3749 | ||
805 | stm32f2 MACH_STM32F2 STM32F2 3750 | ||
806 | ville MACH_VILLE VILLE 3751 | ||
807 | ptip_murnau MACH_PTIP_MURNAU PTIP_MURNAU 3752 | ||
808 | ptip_classic MACH_PTIP_CLASSIC PTIP_CLASSIC 3753 | ||
809 | mx53grb MACH_MX53GRB MX53GRB 3754 | ||
810 | gagarin MACH_GAGARIN GAGARIN 3755 | ||
811 | nas2big MACH_NAS2BIG NAS2BIG 3757 | ||
812 | superfemto MACH_SUPERFEMTO SUPERFEMTO 3758 | ||
813 | teufel MACH_TEUFEL TEUFEL 3759 | ||
814 | dinara MACH_DINARA DINARA 3760 | ||
815 | vanquish MACH_VANQUISH VANQUISH 3761 | ||
816 | zipabox1 MACH_ZIPABOX1 ZIPABOX1 3762 | ||
817 | u9540 MACH_U9540 U9540 3763 | ||
818 | jet MACH_JET JET 3764 | ||
819 | smdk4412 MACH_SMDK4412 SMDK4412 3765 | 554 | smdk4412 MACH_SMDK4412 SMDK4412 3765 |
820 | elite MACH_ELITE ELITE 3766 | ||
821 | spear320_hmi MACH_SPEAR320_HMI SPEAR320_HMI 3767 | ||
822 | ontario MACH_ONTARIO ONTARIO 3768 | ||
823 | mx6q_sabrelite MACH_MX6Q_SABRELITE MX6Q_SABRELITE 3769 | ||
824 | vc200 MACH_VC200 VC200 3770 | ||
825 | msm7625a_ffa MACH_MSM7625A_FFA MSM7625A_FFA 3771 | ||
826 | msm7625a_surf MACH_MSM7625A_SURF MSM7625A_SURF 3772 | ||
827 | benthossbp MACH_BENTHOSSBP BENTHOSSBP 3773 | ||
828 | smdk5210 MACH_SMDK5210 SMDK5210 3774 | ||
829 | empq2300 MACH_EMPQ2300 EMPQ2300 3775 | ||
830 | minipos MACH_MINIPOS MINIPOS 3776 | ||
831 | omap5_sevm MACH_OMAP5_SEVM OMAP5_SEVM 3777 | ||
832 | shelter MACH_SHELTER SHELTER 3778 | ||
833 | omap3_devkit8500 MACH_OMAP3_DEVKIT8500 OMAP3_DEVKIT8500 3779 | ||
834 | edgetd MACH_EDGETD EDGETD 3780 | ||
835 | copperyard MACH_COPPERYARD COPPERYARD 3781 | ||
836 | edge_u MACH_EDGE_U EDGE_U 3783 | ||
837 | edge_td MACH_EDGE_TD EDGE_TD 3784 | ||
838 | wdss MACH_WDSS WDSS 3785 | ||
839 | dl_pb25 MACH_DL_PB25 DL_PB25 3786 | ||
840 | dss11 MACH_DSS11 DSS11 3787 | ||
841 | cpa MACH_CPA CPA 3788 | ||
842 | aptp2000 MACH_APTP2000 APTP2000 3789 | ||
843 | marzen MACH_MARZEN MARZEN 3790 | 555 | marzen MACH_MARZEN MARZEN 3790 |
844 | st_turbine MACH_ST_TURBINE ST_TURBINE 3791 | ||
845 | gtl_it3300 MACH_GTL_IT3300 GTL_IT3300 3792 | ||
846 | mx6_mule MACH_MX6_MULE MX6_MULE 3793 | ||
847 | v7pxa_dt MACH_V7PXA_DT V7PXA_DT 3794 | ||
848 | v7mmp_dt MACH_V7MMP_DT V7MMP_DT 3795 | ||
849 | dragon7 MACH_DRAGON7 DRAGON7 3796 | ||
850 | krome MACH_KROME KROME 3797 | 556 | krome MACH_KROME KROME 3797 |
851 | oratisdante MACH_ORATISDANTE ORATISDANTE 3798 | ||
852 | fathom MACH_FATHOM FATHOM 3799 | ||
853 | dns325 MACH_DNS325 DNS325 3800 | ||
854 | sarnen MACH_SARNEN SARNEN 3801 | ||
855 | ubisys_g1 MACH_UBISYS_G1 UBISYS_G1 3802 | ||
856 | mx53_pf1 MACH_MX53_PF1 MX53_PF1 3803 | ||
857 | asanti MACH_ASANTI ASANTI 3804 | ||
858 | volta MACH_VOLTA VOLTA 3805 | ||
859 | knight MACH_KNIGHT KNIGHT 3807 | ||
860 | beaglebone MACH_BEAGLEBONE BEAGLEBONE 3808 | ||
861 | becker MACH_BECKER BECKER 3809 | ||
862 | fc360 MACH_FC360 FC360 3810 | ||
863 | pmi2_xls MACH_PMI2_XLS PMI2_XLS 3811 | ||
864 | taranto MACH_TARANTO TARANTO 3812 | ||
865 | plutux MACH_PLUTUX PLUTUX 3813 | ||
866 | ipmp_medcom MACH_IPMP_MEDCOM IPMP_MEDCOM 3814 | ||
867 | absolut MACH_ABSOLUT ABSOLUT 3815 | ||
868 | awpb3 MACH_AWPB3 AWPB3 3816 | ||
869 | nfp32xx_dt MACH_NFP32XX_DT NFP32XX_DT 3817 | ||
870 | dl_pb53 MACH_DL_PB53 DL_PB53 3818 | ||
871 | acu_ii MACH_ACU_II ACU_II 3819 | ||
872 | avalon MACH_AVALON AVALON 3820 | ||
873 | sphinx MACH_SPHINX SPHINX 3821 | ||
874 | titan_t MACH_TITAN_T TITAN_T 3822 | ||
875 | harvest_boris MACH_HARVEST_BORIS HARVEST_BORIS 3823 | ||
876 | mach_msm7x30_m3s MACH_MACH_MSM7X30_M3S MACH_MSM7X30_M3S 3824 | ||
877 | smdk5250 MACH_SMDK5250 SMDK5250 3825 | ||
878 | imxt_lite MACH_IMXT_LITE IMXT_LITE 3826 | ||
879 | imxt_std MACH_IMXT_STD IMXT_STD 3827 | ||
880 | imxt_log MACH_IMXT_LOG IMXT_LOG 3828 | ||
881 | imxt_nav MACH_IMXT_NAV IMXT_NAV 3829 | ||
882 | imxt_full MACH_IMXT_FULL IMXT_FULL 3830 | ||
883 | ag09015 MACH_AG09015 AG09015 3831 | ||
884 | am3517_mt_ventoux MACH_AM3517_MT_VENTOUX AM3517_MT_VENTOUX 3832 | ||
885 | dp1arm9 MACH_DP1ARM9 DP1ARM9 3833 | ||
886 | picasso_m MACH_PICASSO_M PICASSO_M 3834 | ||
887 | video_gadget MACH_VIDEO_GADGET VIDEO_GADGET 3835 | ||
888 | mtt_om3x MACH_MTT_OM3X MTT_OM3X 3836 | ||
889 | mx6q_arm2 MACH_MX6Q_ARM2 MX6Q_ARM2 3837 | ||
890 | picosam9g45 MACH_PICOSAM9G45 PICOSAM9G45 3838 | ||
891 | vpm_dm365 MACH_VPM_DM365 VPM_DM365 3839 | ||
892 | bonfire MACH_BONFIRE BONFIRE 3840 | ||
893 | mt2p2d MACH_MT2P2D MT2P2D 3841 | ||
894 | sigpda01 MACH_SIGPDA01 SIGPDA01 3842 | ||
895 | cn27 MACH_CN27 CN27 3843 | ||
896 | mx25_cwtap MACH_MX25_CWTAP MX25_CWTAP 3844 | ||
897 | apf28 MACH_APF28 APF28 3845 | ||
898 | pelco_maxwell MACH_PELCO_MAXWELL PELCO_MAXWELL 3846 | ||
899 | ge_phoenix MACH_GE_PHOENIX GE_PHOENIX 3847 | ||
900 | empc_a500 MACH_EMPC_A500 EMPC_A500 3848 | ||
901 | ims_arm9 MACH_IMS_ARM9 IMS_ARM9 3849 | ||
902 | mini2416 MACH_MINI2416 MINI2416 3850 | ||
903 | mini2450 MACH_MINI2450 MINI2450 3851 | ||
904 | mini310 MACH_MINI310 MINI310 3852 | ||
905 | spear_hurricane MACH_SPEAR_HURRICANE SPEAR_HURRICANE 3853 | ||
906 | mt7208 MACH_MT7208 MT7208 3854 | ||
907 | lpc178x MACH_LPC178X LPC178X 3855 | ||
908 | farleys MACH_FARLEYS FARLEYS 3856 | ||
909 | efm32gg_dk3750 MACH_EFM32GG_DK3750 EFM32GG_DK3750 3857 | ||
910 | zeus_board MACH_ZEUS_BOARD ZEUS_BOARD 3858 | ||
911 | cc51 MACH_CC51 CC51 3859 | ||
912 | fxi_c210 MACH_FXI_C210 FXI_C210 3860 | ||
913 | msm8627_cdp MACH_MSM8627_CDP MSM8627_CDP 3861 | ||
914 | msm8627_mtp MACH_MSM8627_MTP MSM8627_MTP 3862 | ||
915 | armadillo800eva MACH_ARMADILLO800EVA ARMADILLO800EVA 3863 | 557 | armadillo800eva MACH_ARMADILLO800EVA ARMADILLO800EVA 3863 |
916 | primou MACH_PRIMOU PRIMOU 3864 | ||
917 | primoc MACH_PRIMOC PRIMOC 3865 | ||
918 | primoct MACH_PRIMOCT PRIMOCT 3866 | ||
919 | a9500 MACH_A9500 A9500 3867 | ||
920 | pluto MACH_PLUTO PLUTO 3869 | ||
921 | acfx100 MACH_ACFX100 ACFX100 3870 | ||
922 | msm8625_rumi3 MACH_MSM8625_RUMI3 MSM8625_RUMI3 3871 | ||
923 | valente MACH_VALENTE VALENTE 3872 | ||
924 | crfs_rfeye MACH_CRFS_RFEYE CRFS_RFEYE 3873 | ||
925 | rfeye MACH_RFEYE RFEYE 3874 | ||
926 | phidget_sbc3 MACH_PHIDGET_SBC3 PHIDGET_SBC3 3875 | ||
927 | tcw_mika MACH_TCW_MIKA TCW_MIKA 3876 | ||
928 | imx28_egf MACH_IMX28_EGF IMX28_EGF 3877 | ||
929 | valente_wx MACH_VALENTE_WX VALENTE_WX 3878 | ||
930 | huangshans MACH_HUANGSHANS HUANGSHANS 3879 | ||
931 | bosphorus1 MACH_BOSPHORUS1 BOSPHORUS1 3880 | ||
932 | prima MACH_PRIMA PRIMA 3881 | ||
933 | evita_ulk MACH_EVITA_ULK EVITA_ULK 3884 | ||
934 | merisc600 MACH_MERISC600 MERISC600 3885 | ||
935 | dolak MACH_DOLAK DOLAK 3886 | ||
936 | sbc53 MACH_SBC53 SBC53 3887 | ||
937 | elite_ulk MACH_ELITE_ULK ELITE_ULK 3888 | ||
938 | pov2 MACH_POV2 POV2 3889 | ||
939 | ipod_touch_2g MACH_IPOD_TOUCH_2G IPOD_TOUCH_2G 3890 | ||
940 | da850_pqab MACH_DA850_PQAB DA850_PQAB 3891 | ||
941 | fermi MACH_FERMI FERMI 3892 | ||
942 | ccardwmx28 MACH_CCARDWMX28 CCARDWMX28 3893 | ||
943 | ccardmx28 MACH_CCARDMX28 CCARDMX28 3894 | ||
944 | fs20_fcm2050 MACH_FS20_FCM2050 FS20_FCM2050 3895 | ||
945 | kinetis MACH_KINETIS KINETIS 3896 | ||
946 | kai MACH_KAI KAI 3897 | ||
947 | bcthb2 MACH_BCTHB2 BCTHB2 3898 | ||
948 | inels3_cu MACH_INELS3_CU INELS3_CU 3899 | ||
949 | da850_apollo MACH_DA850_APOLLO DA850_APOLLO 3901 | ||
950 | tracnas MACH_TRACNAS TRACNAS 3902 | ||
951 | mityarm335x MACH_MITYARM335X MITYARM335X 3903 | ||
952 | xcgz7x MACH_XCGZ7X XCGZ7X 3904 | ||
953 | cubox MACH_CUBOX CUBOX 3905 | ||
954 | terminator MACH_TERMINATOR TERMINATOR 3906 | ||
955 | eye03 MACH_EYE03 EYE03 3907 | ||
956 | kota3 MACH_KOTA3 KOTA3 3908 | ||
957 | pscpe MACH_PSCPE PSCPE 3910 | ||
958 | akt1100 MACH_AKT1100 AKT1100 3911 | ||
959 | pcaaxl2 MACH_PCAAXL2 PCAAXL2 3912 | ||
960 | primodd_ct MACH_PRIMODD_CT PRIMODD_CT 3913 | ||
961 | nsbc MACH_NSBC NSBC 3914 | ||
962 | meson2_skt MACH_MESON2_SKT MESON2_SKT 3915 | ||
963 | meson2_ref MACH_MESON2_REF MESON2_REF 3916 | ||
964 | ccardwmx28js MACH_CCARDWMX28JS CCARDWMX28JS 3917 | ||
965 | ccardmx28js MACH_CCARDMX28JS CCARDMX28JS 3918 | ||
966 | indico MACH_INDICO INDICO 3919 | ||
967 | msm8960dt MACH_MSM8960DT MSM8960DT 3920 | ||
968 | primods MACH_PRIMODS PRIMODS 3921 | ||
969 | beluga_m1388 MACH_BELUGA_M1388 BELUGA_M1388 3922 | ||
970 | primotd MACH_PRIMOTD PRIMOTD 3923 | ||
971 | varan_master MACH_VARAN_MASTER VARAN_MASTER 3924 | ||
972 | primodd MACH_PRIMODD PRIMODD 3925 | ||
973 | jetduo MACH_JETDUO JETDUO 3926 | ||
974 | mx53_umobo MACH_MX53_UMOBO MX53_UMOBO 3927 | 558 | mx53_umobo MACH_MX53_UMOBO MX53_UMOBO 3927 |
975 | trats MACH_TRATS TRATS 3928 | ||
976 | starcraft MACH_STARCRAFT STARCRAFT 3929 | ||
977 | qseven_tegra2 MACH_QSEVEN_TEGRA2 QSEVEN_TEGRA2 3930 | ||
978 | lichee_sun4i_devbd MACH_LICHEE_SUN4I_DEVBD LICHEE_SUN4I_DEVBD 3931 | ||
979 | movenow MACH_MOVENOW MOVENOW 3932 | ||
980 | golf_u MACH_GOLF_U GOLF_U 3933 | ||
981 | msm7627a_evb MACH_MSM7627A_EVB MSM7627A_EVB 3934 | ||
982 | rambo MACH_RAMBO RAMBO 3935 | ||
983 | golfu MACH_GOLFU GOLFU 3936 | ||
984 | mango310 MACH_MANGO310 MANGO310 3937 | ||
985 | dns343 MACH_DNS343 DNS343 3938 | ||
986 | var_som_om44 MACH_VAR_SOM_OM44 VAR_SOM_OM44 3939 | ||
987 | naon MACH_NAON NAON 3940 | ||
988 | vp4000 MACH_VP4000 VP4000 3941 | ||
989 | impcard MACH_IMPCARD IMPCARD 3942 | ||
990 | smoovcam MACH_SMOOVCAM SMOOVCAM 3943 | ||
991 | cobham3725 MACH_COBHAM3725 COBHAM3725 3944 | ||
992 | cobham3730 MACH_COBHAM3730 COBHAM3730 3945 | ||
993 | cobham3703 MACH_COBHAM3703 COBHAM3703 3946 | ||
994 | quetzal MACH_QUETZAL QUETZAL 3947 | ||
995 | apq8064_cdp MACH_APQ8064_CDP APQ8064_CDP 3948 | ||
996 | apq8064_mtp MACH_APQ8064_MTP APQ8064_MTP 3949 | ||
997 | apq8064_fluid MACH_APQ8064_FLUID APQ8064_FLUID 3950 | ||
998 | apq8064_liquid MACH_APQ8064_LIQUID APQ8064_LIQUID 3951 | ||
999 | mango210 MACH_MANGO210 MANGO210 3952 | ||
1000 | mango100 MACH_MANGO100 MANGO100 3953 | ||
1001 | mango24 MACH_MANGO24 MANGO24 3954 | ||
1002 | mango64 MACH_MANGO64 MANGO64 3955 | ||
1003 | nsa320 MACH_NSA320 NSA320 3956 | ||
1004 | elv_ccu2 MACH_ELV_CCU2 ELV_CCU2 3957 | ||
1005 | triton_x00 MACH_TRITON_X00 TRITON_X00 3958 | ||
1006 | triton_1500_2000 MACH_TRITON_1500_2000 TRITON_1500_2000 3959 | ||
1007 | pogoplugv4 MACH_POGOPLUGV4 POGOPLUGV4 3960 | ||
1008 | venus_cl MACH_VENUS_CL VENUS_CL 3961 | ||
1009 | vulcano_g20 MACH_VULCANO_G20 VULCANO_G20 3962 | ||
1010 | sgs_i9100 MACH_SGS_I9100 SGS_I9100 3963 | ||
1011 | stsv2 MACH_STSV2 STSV2 3964 | ||
1012 | csb1724 MACH_CSB1724 CSB1724 3965 | ||
1013 | omapl138_lcdk MACH_OMAPL138_LCDK OMAPL138_LCDK 3966 | ||
1014 | pvd_mx25 MACH_PVD_MX25 PVD_MX25 3968 | ||
1015 | meson6_skt MACH_MESON6_SKT MESON6_SKT 3969 | ||
1016 | meson6_ref MACH_MESON6_REF MESON6_REF 3970 | ||
1017 | pxm MACH_PXM PXM 3971 | ||
1018 | pogoplugv3 MACH_POGOPLUGV3 POGOPLUGV3 3973 | ||
1019 | mlp89626 MACH_MLP89626 MLP89626 3974 | ||
1020 | iomegahmndce MACH_IOMEGAHMNDCE IOMEGAHMNDCE 3975 | ||
1021 | pogoplugv3pci MACH_POGOPLUGV3PCI POGOPLUGV3PCI 3976 | ||
1022 | bntv250 MACH_BNTV250 BNTV250 3977 | ||
1023 | mx53_qseven MACH_MX53_QSEVEN MX53_QSEVEN 3978 | ||
1024 | gtl_it1100 MACH_GTL_IT1100 GTL_IT1100 3979 | ||
1025 | mx6q_sabresd MACH_MX6Q_SABRESD MX6Q_SABRESD 3980 | ||
1026 | mt4 MACH_MT4 MT4 3981 | 559 | mt4 MACH_MT4 MT4 3981 |
1027 | jumbo_d MACH_JUMBO_D JUMBO_D 3982 | ||
1028 | jumbo_i MACH_JUMBO_I JUMBO_I 3983 | ||
1029 | fs20_dmp MACH_FS20_DMP FS20_DMP 3984 | ||
1030 | dns320 MACH_DNS320 DNS320 3985 | ||
1031 | mx28bacos MACH_MX28BACOS MX28BACOS 3986 | ||
1032 | tl80 MACH_TL80 TL80 3987 | ||
1033 | polatis_nic_1001 MACH_POLATIS_NIC_1001 POLATIS_NIC_1001 3988 | ||
1034 | tely MACH_TELY TELY 3989 | ||
1035 | u8520 MACH_U8520 U8520 3990 | 560 | u8520 MACH_U8520 U8520 3990 |
1036 | manta MACH_MANTA MANTA 3991 | ||
1037 | mpq8064_cdp MACH_MPQ8064_CDP MPQ8064_CDP 3993 | ||
1038 | mpq8064_dtv MACH_MPQ8064_DTV MPQ8064_DTV 3995 | ||
1039 | dm368som MACH_DM368SOM DM368SOM 3996 | ||
1040 | gprisb2 MACH_GPRISB2 GPRISB2 3997 | ||
1041 | chammid MACH_CHAMMID CHAMMID 3998 | ||
1042 | seoul2 MACH_SEOUL2 SEOUL2 3999 | ||
1043 | omap4_nooktablet MACH_OMAP4_NOOKTABLET OMAP4_NOOKTABLET 4000 | ||
1044 | aalto MACH_AALTO AALTO 4001 | ||
1045 | metro MACH_METRO METRO 4002 | ||
1046 | cydm3730 MACH_CYDM3730 CYDM3730 4003 | ||
1047 | tqma53 MACH_TQMA53 TQMA53 4004 | ||
1048 | msm7627a_qrd3 MACH_MSM7627A_QRD3 MSM7627A_QRD3 4005 | ||
1049 | mx28_canby MACH_MX28_CANBY MX28_CANBY 4006 | ||
1050 | tiger MACH_TIGER TIGER 4007 | ||
1051 | pcats_9307_type_a MACH_PCATS_9307_TYPE_A PCATS_9307_TYPE_A 4008 | ||
1052 | pcats_9307_type_o MACH_PCATS_9307_TYPE_O PCATS_9307_TYPE_O 4009 | ||
1053 | pcats_9307_type_r MACH_PCATS_9307_TYPE_R PCATS_9307_TYPE_R 4010 | ||
1054 | streamplug MACH_STREAMPLUG STREAMPLUG 4011 | ||
1055 | icechicken_dev MACH_ICECHICKEN_DEV ICECHICKEN_DEV 4012 | ||
1056 | hedgehog MACH_HEDGEHOG HEDGEHOG 4013 | ||
1057 | yusend_obc MACH_YUSEND_OBC YUSEND_OBC 4014 | ||
1058 | imxninja MACH_IMXNINJA IMXNINJA 4015 | ||
1059 | omap4_jarod MACH_OMAP4_JAROD OMAP4_JAROD 4016 | ||
1060 | eco5_pk MACH_ECO5_PK ECO5_PK 4017 | ||
1061 | qj2440 MACH_QJ2440 QJ2440 4018 | ||
1062 | mx6q_mercury MACH_MX6Q_MERCURY MX6Q_MERCURY 4019 | ||
1063 | cm6810 MACH_CM6810 CM6810 4020 | ||
1064 | omap4_torpedo MACH_OMAP4_TORPEDO OMAP4_TORPEDO 4021 | ||
1065 | nsa310 MACH_NSA310 NSA310 4022 | ||
1066 | tmx536 MACH_TMX536 TMX536 4023 | ||
1067 | ktt20 MACH_KTT20 KTT20 4024 | ||
1068 | dragonix MACH_DRAGONIX DRAGONIX 4025 | ||
1069 | lungching MACH_LUNGCHING LUNGCHING 4026 | ||
1070 | bulogics MACH_BULOGICS BULOGICS 4027 | ||
1071 | mx535_sx MACH_MX535_SX MX535_SX 4028 | ||
1072 | ngui3250 MACH_NGUI3250 NGUI3250 4029 | ||
1073 | salutec_dac MACH_SALUTEC_DAC SALUTEC_DAC 4030 | ||
1074 | loco MACH_LOCO LOCO 4031 | ||
1075 | ctera_plug_usi MACH_CTERA_PLUG_USI CTERA_PLUG_USI 4032 | ||
1076 | scepter MACH_SCEPTER SCEPTER 4033 | ||
1077 | sga MACH_SGA SGA 4034 | ||
1078 | p_81_j5 MACH_P_81_J5 P_81_J5 4035 | ||
1079 | p_81_o4 MACH_P_81_O4 P_81_O4 4036 | ||
1080 | msm8625_surf MACH_MSM8625_SURF MSM8625_SURF 4037 | ||
1081 | carallon_shark MACH_CARALLON_SHARK CARALLON_SHARK 4038 | ||
1082 | ordog MACH_ORDOG ORDOG 4040 | ||
1083 | puente_io MACH_PUENTE_IO PUENTE_IO 4041 | ||
1084 | msm8625_evb MACH_MSM8625_EVB MSM8625_EVB 4042 | ||
1085 | ev_am1707 MACH_EV_AM1707 EV_AM1707 4043 | ||
1086 | ev_am1707e2 MACH_EV_AM1707E2 EV_AM1707E2 4044 | ||
1087 | ev_am3517e2 MACH_EV_AM3517E2 EV_AM3517E2 4045 | ||
1088 | calabria MACH_CALABRIA CALABRIA 4046 | ||
1089 | ev_imx287 MACH_EV_IMX287 EV_IMX287 4047 | ||
1090 | erau MACH_ERAU ERAU 4048 | ||
1091 | sichuan MACH_SICHUAN SICHUAN 4049 | ||
1092 | davinci_da850 MACH_DAVINCI_DA850 DAVINCI_DA850 4051 | ||
1093 | omap138_trunarc MACH_OMAP138_TRUNARC OMAP138_TRUNARC 4052 | ||
1094 | bcm4761 MACH_BCM4761 BCM4761 4053 | ||
1095 | picasso_e2 MACH_PICASSO_E2 PICASSO_E2 4054 | ||
1096 | picasso_mf MACH_PICASSO_MF PICASSO_MF 4055 | ||
1097 | miro MACH_MIRO MIRO 4056 | ||
1098 | at91sam9g20ewon3 MACH_AT91SAM9G20EWON3 AT91SAM9G20EWON3 4057 | ||
1099 | yoyo MACH_YOYO YOYO 4058 | ||
1100 | windjkl MACH_WINDJKL WINDJKL 4059 | ||
1101 | monarudo MACH_MONARUDO MONARUDO 4060 | ||
1102 | batan MACH_BATAN BATAN 4061 | ||
1103 | tadao MACH_TADAO TADAO 4062 | ||
1104 | baso MACH_BASO BASO 4063 | ||
1105 | mahon MACH_MAHON MAHON 4064 | ||
1106 | villec2 MACH_VILLEC2 VILLEC2 4065 | ||
1107 | asi1230 MACH_ASI1230 ASI1230 4066 | ||
1108 | alaska MACH_ALASKA ALASKA 4067 | ||
1109 | swarco_shdsl2 MACH_SWARCO_SHDSL2 SWARCO_SHDSL2 4068 | ||
1110 | oxrtu MACH_OXRTU OXRTU 4069 | ||
1111 | omap5_panda MACH_OMAP5_PANDA OMAP5_PANDA 4070 | ||
1112 | c8000 MACH_C8000 C8000 4072 | ||
1113 | bje_display3_5 MACH_BJE_DISPLAY3_5 BJE_DISPLAY3_5 4073 | ||
1114 | picomod7 MACH_PICOMOD7 PICOMOD7 4074 | ||
1115 | picocom5 MACH_PICOCOM5 PICOCOM5 4075 | ||
1116 | qblissa8 MACH_QBLISSA8 QBLISSA8 4076 | ||
1117 | armstonea8 MACH_ARMSTONEA8 ARMSTONEA8 4077 | ||
1118 | netdcu14 MACH_NETDCU14 NETDCU14 4078 | ||
1119 | at91sam9x5_epiphan MACH_AT91SAM9X5_EPIPHAN AT91SAM9X5_EPIPHAN 4079 | ||
1120 | p2u MACH_P2U P2U 4080 | ||
1121 | doris MACH_DORIS DORIS 4081 | ||
1122 | j49 MACH_J49 J49 4082 | ||
1123 | vdss2e MACH_VDSS2E VDSS2E 4083 | ||
1124 | vc300 MACH_VC300 VC300 4084 | ||
1125 | ns115_pad_test MACH_NS115_PAD_TEST NS115_PAD_TEST 4085 | ||
1126 | ns115_pad_ref MACH_NS115_PAD_REF NS115_PAD_REF 4086 | ||
1127 | ns115_phone_test MACH_NS115_PHONE_TEST NS115_PHONE_TEST 4087 | ||
1128 | ns115_phone_ref MACH_NS115_PHONE_REF NS115_PHONE_REF 4088 | ||
1129 | golfc MACH_GOLFC GOLFC 4089 | ||
1130 | xerox_olympus MACH_XEROX_OLYMPUS XEROX_OLYMPUS 4090 | ||
1131 | mx6sl_arm2 MACH_MX6SL_ARM2 MX6SL_ARM2 4091 | ||
1132 | csb1701_csb1726 MACH_CSB1701_CSB1726 CSB1701_CSB1726 4092 | ||
1133 | at91sam9xeek MACH_AT91SAM9XEEK AT91SAM9XEEK 4093 | ||
1134 | ebv210 MACH_EBV210 EBV210 4094 | ||
1135 | msm7627a_qrd7 MACH_MSM7627A_QRD7 MSM7627A_QRD7 4095 | ||
1136 | svthin MACH_SVTHIN SVTHIN 4096 | ||
1137 | duovero MACH_DUOVERO DUOVERO 4097 | ||
1138 | chupacabra MACH_CHUPACABRA CHUPACABRA 4098 | 561 | chupacabra MACH_CHUPACABRA CHUPACABRA 4098 |
1139 | scorpion MACH_SCORPION SCORPION 4099 | 562 | scorpion MACH_SCORPION SCORPION 4099 |
1140 | davinci_he_hmi10 MACH_DAVINCI_HE_HMI10 DAVINCI_HE_HMI10 4100 | 563 | davinci_he_hmi10 MACH_DAVINCI_HE_HMI10 DAVINCI_HE_HMI10 4100 |
@@ -1157,7 +580,6 @@ tam335x MACH_TAM335X TAM335X 4116 | |||
1157 | grouper MACH_GROUPER GROUPER 4117 | 580 | grouper MACH_GROUPER GROUPER 4117 |
1158 | mpcsa21_9g20 MACH_MPCSA21_9G20 MPCSA21_9G20 4118 | 581 | mpcsa21_9g20 MACH_MPCSA21_9G20 MPCSA21_9G20 4118 |
1159 | m6u_cpu MACH_M6U_CPU M6U_CPU 4119 | 582 | m6u_cpu MACH_M6U_CPU M6U_CPU 4119 |
1160 | davinci_dp10 MACH_DAVINCI_DP10 DAVINCI_DP10 4120 | ||
1161 | ginkgo MACH_GINKGO GINKGO 4121 | 583 | ginkgo MACH_GINKGO GINKGO 4121 |
1162 | cgt_qmx6 MACH_CGT_QMX6 CGT_QMX6 4122 | 584 | cgt_qmx6 MACH_CGT_QMX6 CGT_QMX6 4122 |
1163 | profpga MACH_PROFPGA PROFPGA 4123 | 585 | profpga MACH_PROFPGA PROFPGA 4123 |
@@ -1204,3 +626,384 @@ baileys MACH_BAILEYS BAILEYS 4169 | |||
1204 | familybox MACH_FAMILYBOX FAMILYBOX 4170 | 626 | familybox MACH_FAMILYBOX FAMILYBOX 4170 |
1205 | ensemble_mx35 MACH_ENSEMBLE_MX35 ENSEMBLE_MX35 4171 | 627 | ensemble_mx35 MACH_ENSEMBLE_MX35 ENSEMBLE_MX35 4171 |
1206 | sc_sps_1 MACH_SC_SPS_1 SC_SPS_1 4172 | 628 | sc_sps_1 MACH_SC_SPS_1 SC_SPS_1 4172 |
629 | ucsimply_sam9260 MACH_UCSIMPLY_SAM9260 UCSIMPLY_SAM9260 4173 | ||
630 | unicorn MACH_UNICORN UNICORN 4174 | ||
631 | m9g45a MACH_M9G45A M9G45A 4175 | ||
632 | mtwebif MACH_MTWEBIF MTWEBIF 4176 | ||
633 | playstone MACH_PLAYSTONE PLAYSTONE 4177 | ||
634 | chelsea MACH_CHELSEA CHELSEA 4178 | ||
635 | bayern MACH_BAYERN BAYERN 4179 | ||
636 | mitwo MACH_MITWO MITWO 4180 | ||
637 | mx25_noah MACH_MX25_NOAH MX25_NOAH 4181 | ||
638 | stm_b2020 MACH_STM_B2020 STM_B2020 4182 | ||
639 | annax_src MACH_ANNAX_SRC ANNAX_SRC 4183 | ||
640 | ionics_stratus MACH_IONICS_STRATUS IONICS_STRATUS 4184 | ||
641 | hugo MACH_HUGO HUGO 4185 | ||
642 | em300 MACH_EM300 EM300 4186 | ||
643 | mmp3_qseven MACH_MMP3_QSEVEN MMP3_QSEVEN 4187 | ||
644 | bosphorus2 MACH_BOSPHORUS2 BOSPHORUS2 4188 | ||
645 | tt2200 MACH_TT2200 TT2200 4189 | ||
646 | ocelot3 MACH_OCELOT3 OCELOT3 4190 | ||
647 | tek_cobra MACH_TEK_COBRA TEK_COBRA 4191 | ||
648 | protou MACH_PROTOU PROTOU 4192 | ||
649 | msm8625_evt MACH_MSM8625_EVT MSM8625_EVT 4193 | ||
650 | mx53_sellwood MACH_MX53_SELLWOOD MX53_SELLWOOD 4194 | ||
651 | somiq_am35 MACH_SOMIQ_AM35 SOMIQ_AM35 4195 | ||
652 | somiq_am37 MACH_SOMIQ_AM37 SOMIQ_AM37 4196 | ||
653 | k2_plc_cl MACH_K2_PLC_CL K2_PLC_CL 4197 | ||
654 | tc2 MACH_TC2 TC2 4198 | ||
655 | dulex_j MACH_DULEX_J DULEX_J 4199 | ||
656 | stm_b2044 MACH_STM_B2044 STM_B2044 4200 | ||
657 | deluxe_j MACH_DELUXE_J DELUXE_J 4201 | ||
658 | mango2443 MACH_MANGO2443 MANGO2443 4202 | ||
659 | cp2dcg MACH_CP2DCG CP2DCG 4203 | ||
660 | cp2dtg MACH_CP2DTG CP2DTG 4204 | ||
661 | cp2dug MACH_CP2DUG CP2DUG 4205 | ||
662 | var_som_am33 MACH_VAR_SOM_AM33 VAR_SOM_AM33 4206 | ||
663 | pepper MACH_PEPPER PEPPER 4207 | ||
664 | mango2450 MACH_MANGO2450 MANGO2450 4208 | ||
665 | valente_wx_c9 MACH_VALENTE_WX_C9 VALENTE_WX_C9 4209 | ||
666 | minitv MACH_MINITV MINITV 4210 | ||
667 | u8540 MACH_U8540 U8540 4211 | ||
668 | iv_atlas_i_z7e MACH_IV_ATLAS_I_Z7E IV_ATLAS_I_Z7E 4212 | ||
669 | mach_type_sky MACH_MACH_TYPE_SKY MACH_TYPE_SKY 4214 | ||
670 | bluesky MACH_BLUESKY BLUESKY 4215 | ||
671 | ngrouter MACH_NGROUTER NGROUTER 4216 | ||
672 | mx53_denetim MACH_MX53_DENETIM MX53_DENETIM 4217 | ||
673 | opal MACH_OPAL OPAL 4218 | ||
674 | gnet_us3gref MACH_GNET_US3GREF GNET_US3GREF 4219 | ||
675 | gnet_nc3g MACH_GNET_NC3G GNET_NC3G 4220 | ||
676 | gnet_ge3g MACH_GNET_GE3G GNET_GE3G 4221 | ||
677 | adp2 MACH_ADP2 ADP2 4222 | ||
678 | tqma28 MACH_TQMA28 TQMA28 4223 | ||
679 | kacom3 MACH_KACOM3 KACOM3 4224 | ||
680 | rrhdemo MACH_RRHDEMO RRHDEMO 4225 | ||
681 | protodug MACH_PROTODUG PROTODUG 4226 | ||
682 | lago MACH_LAGO LAGO 4227 | ||
683 | ktt30 MACH_KTT30 KTT30 4228 | ||
684 | ts43xx MACH_TS43XX TS43XX 4229 | ||
685 | mx6q_denso MACH_MX6Q_DENSO MX6Q_DENSO 4230 | ||
686 | comsat_gsmumts8 MACH_COMSAT_GSMUMTS8 COMSAT_GSMUMTS8 4231 | ||
687 | dreamx MACH_DREAMX DREAMX 4232 | ||
688 | thunderstonem MACH_THUNDERSTONEM THUNDERSTONEM 4233 | ||
689 | yoyopad MACH_YOYOPAD YOYOPAD 4234 | ||
690 | yoyopatient MACH_YOYOPATIENT YOYOPATIENT 4235 | ||
691 | a10l MACH_A10L A10L 4236 | ||
692 | mq60 MACH_MQ60 MQ60 4237 | ||
693 | linkstation_lsql MACH_LINKSTATION_LSQL LINKSTATION_LSQL 4238 | ||
694 | am3703gateway MACH_AM3703GATEWAY AM3703GATEWAY 4239 | ||
695 | accipiter MACH_ACCIPITER ACCIPITER 4240 | ||
696 | magnidug MACH_MAGNIDUG MAGNIDUG 4242 | ||
697 | hydra MACH_HYDRA HYDRA 4243 | ||
698 | sun3i MACH_SUN3I SUN3I 4244 | ||
699 | stm_b2078 MACH_STM_B2078 STM_B2078 4245 | ||
700 | at91sam9263deskv2 MACH_AT91SAM9263DESKV2 AT91SAM9263DESKV2 4246 | ||
701 | deluxe_r MACH_DELUXE_R DELUXE_R 4247 | ||
702 | p_98_v MACH_P_98_V P_98_V 4248 | ||
703 | p_98_c MACH_P_98_C P_98_C 4249 | ||
704 | davinci_am18xx_omn MACH_DAVINCI_AM18XX_OMN DAVINCI_AM18XX_OMN 4250 | ||
705 | socfpga_cyclone5 MACH_SOCFPGA_CYCLONE5 SOCFPGA_CYCLONE5 4251 | ||
706 | cabatuin MACH_CABATUIN CABATUIN 4252 | ||
707 | yoyopad_ft MACH_YOYOPAD_FT YOYOPAD_FT 4253 | ||
708 | dan2400evb MACH_DAN2400EVB DAN2400EVB 4254 | ||
709 | dan3400evb MACH_DAN3400EVB DAN3400EVB 4255 | ||
710 | edm_sf_imx6 MACH_EDM_SF_IMX6 EDM_SF_IMX6 4256 | ||
711 | edm_cf_imx6 MACH_EDM_CF_IMX6 EDM_CF_IMX6 4257 | ||
712 | vpos3xx MACH_VPOS3XX VPOS3XX 4258 | ||
713 | vulcano_9x5 MACH_VULCANO_9X5 VULCANO_9X5 4259 | ||
714 | spmp8000 MACH_SPMP8000 SPMP8000 4260 | ||
715 | catalina MACH_CATALINA CATALINA 4261 | ||
716 | rd88f5181l_fe MACH_RD88F5181L_FE RD88F5181L_FE 4262 | ||
717 | mx535_mx MACH_MX535_MX MX535_MX 4263 | ||
718 | armadillo840 MACH_ARMADILLO840 ARMADILLO840 4264 | ||
719 | spc9000baseboard MACH_SPC9000BASEBOARD SPC9000BASEBOARD 4265 | ||
720 | iris MACH_IRIS IRIS 4266 | ||
721 | protodcg MACH_PROTODCG PROTODCG 4267 | ||
722 | palmtree MACH_PALMTREE PALMTREE 4268 | ||
723 | novena MACH_NOVENA NOVENA 4269 | ||
724 | ma_um MACH_MA_UM MA_UM 4270 | ||
725 | ma_am MACH_MA_AM MA_AM 4271 | ||
726 | ems348 MACH_EMS348 EMS348 4272 | ||
727 | cm_fx6 MACH_CM_FX6 CM_FX6 4273 | ||
728 | arndale MACH_ARNDALE ARNDALE 4274 | ||
729 | q5xr5 MACH_Q5XR5 Q5XR5 4275 | ||
730 | willow MACH_WILLOW WILLOW 4276 | ||
731 | omap3621_odyv3 MACH_OMAP3621_ODYV3 OMAP3621_ODYV3 4277 | ||
732 | omapl138_presonus MACH_OMAPL138_PRESONUS OMAPL138_PRESONUS 4278 | ||
733 | dvf99 MACH_DVF99 DVF99 4279 | ||
734 | impression_j MACH_IMPRESSION_J IMPRESSION_J 4280 | ||
735 | qblissa9 MACH_QBLISSA9 QBLISSA9 4281 | ||
736 | robin_heliview10 MACH_ROBIN_HELIVIEW10 ROBIN_HELIVIEW10 4282 | ||
737 | sun7i MACH_SUN7I SUN7I 4283 | ||
738 | mx6q_hdmidongle MACH_MX6Q_HDMIDONGLE MX6Q_HDMIDONGLE 4284 | ||
739 | mx6_sid2 MACH_MX6_SID2 MX6_SID2 4285 | ||
740 | helios_v3 MACH_HELIOS_V3 HELIOS_V3 4286 | ||
741 | helios_v4 MACH_HELIOS_V4 HELIOS_V4 4287 | ||
742 | q7_imx6 MACH_Q7_IMX6 Q7_IMX6 4288 | ||
743 | odroidx MACH_ODROIDX ODROIDX 4289 | ||
744 | robpro MACH_ROBPRO ROBPRO 4290 | ||
745 | research59if_mk1 MACH_RESEARCH59IF_MK1 RESEARCH59IF_MK1 4291 | ||
746 | bobsleigh MACH_BOBSLEIGH BOBSLEIGH 4292 | ||
747 | dcshgwt3 MACH_DCSHGWT3 DCSHGWT3 4293 | ||
748 | gld1018 MACH_GLD1018 GLD1018 4294 | ||
749 | ev10 MACH_EV10 EV10 4295 | ||
750 | nitrogen6x MACH_NITROGEN6X NITROGEN6X 4296 | ||
751 | p_107_bb MACH_P_107_BB P_107_BB 4297 | ||
752 | evita_utl MACH_EVITA_UTL EVITA_UTL 4298 | ||
753 | falconwing MACH_FALCONWING FALCONWING 4299 | ||
754 | dct3 MACH_DCT3 DCT3 4300 | ||
755 | cpx2e_cell MACH_CPX2E_CELL CPX2E_CELL 4301 | ||
756 | amiro MACH_AMIRO AMIRO 4302 | ||
757 | mx6q_brassboard MACH_MX6Q_BRASSBOARD MX6Q_BRASSBOARD 4303 | ||
758 | dalmore MACH_DALMORE DALMORE 4304 | ||
759 | omap3_portal7cp MACH_OMAP3_PORTAL7CP OMAP3_PORTAL7CP 4305 | ||
760 | tegra_pluto MACH_TEGRA_PLUTO TEGRA_PLUTO 4306 | ||
761 | mx6sl_evk MACH_MX6SL_EVK MX6SL_EVK 4307 | ||
762 | m7 MACH_M7 M7 4308 | ||
763 | pxm2 MACH_PXM2 PXM2 4309 | ||
764 | haba_knx_lite MACH_HABA_KNX_LITE HABA_KNX_LITE 4310 | ||
765 | tai MACH_TAI TAI 4311 | ||
766 | prototd MACH_PROTOTD PROTOTD 4312 | ||
767 | dst_tonto MACH_DST_TONTO DST_TONTO 4313 | ||
768 | draco MACH_DRACO DRACO 4314 | ||
769 | dxr2 MACH_DXR2 DXR2 4315 | ||
770 | rut MACH_RUT RUT 4316 | ||
771 | am180x_wsc MACH_AM180X_WSC AM180X_WSC 4317 | ||
772 | deluxe_u MACH_DELUXE_U DELUXE_U 4318 | ||
773 | deluxe_ul MACH_DELUXE_UL DELUXE_UL 4319 | ||
774 | at91sam9260medths MACH_AT91SAM9260MEDTHS AT91SAM9260MEDTHS 4320 | ||
775 | matrix516 MACH_MATRIX516 MATRIX516 4321 | ||
776 | vid401x MACH_VID401X VID401X 4322 | ||
777 | helios_v5 MACH_HELIOS_V5 HELIOS_V5 4323 | ||
778 | playpaq2 MACH_PLAYPAQ2 PLAYPAQ2 4324 | ||
779 | igam MACH_IGAM IGAM 4325 | ||
780 | amico_i MACH_AMICO_I AMICO_I 4326 | ||
781 | amico_e MACH_AMICO_E AMICO_E 4327 | ||
782 | sentient_mm3_ck MACH_SENTIENT_MM3_CK SENTIENT_MM3_CK 4328 | ||
783 | smx6 MACH_SMX6 SMX6 4329 | ||
784 | pango MACH_PANGO PANGO 4330 | ||
785 | ns115_stick MACH_NS115_STICK NS115_STICK 4331 | ||
786 | bctrm3 MACH_BCTRM3 BCTRM3 4332 | ||
787 | doctorws MACH_DOCTORWS DOCTORWS 4333 | ||
788 | m2601 MACH_M2601 M2601 4334 | ||
789 | vgg1111 MACH_VGG1111 VGG1111 4337 | ||
790 | countach MACH_COUNTACH COUNTACH 4338 | ||
791 | visstrim_sm20 MACH_VISSTRIM_SM20 VISSTRIM_SM20 4339 | ||
792 | a639 MACH_A639 A639 4340 | ||
793 | spacemonkey MACH_SPACEMONKEY SPACEMONKEY 4341 | ||
794 | zpdu_stamp MACH_ZPDU_STAMP ZPDU_STAMP 4342 | ||
795 | htc_g7_clone MACH_HTC_G7_CLONE HTC_G7_CLONE 4343 | ||
796 | ft2080_corvus MACH_FT2080_CORVUS FT2080_CORVUS 4344 | ||
797 | fisland MACH_FISLAND FISLAND 4345 | ||
798 | zpdu MACH_ZPDU ZPDU 4346 | ||
799 | urt MACH_URT URT 4347 | ||
800 | conti_ovip MACH_CONTI_OVIP CONTI_OVIP 4348 | ||
801 | omapl138_nagra MACH_OMAPL138_NAGRA OMAPL138_NAGRA 4349 | ||
802 | da850_at3kp1 MACH_DA850_AT3KP1 DA850_AT3KP1 4350 | ||
803 | da850_at3kp2 MACH_DA850_AT3KP2 DA850_AT3KP2 4351 | ||
804 | surma MACH_SURMA SURMA 4352 | ||
805 | stm_b2092 MACH_STM_B2092 STM_B2092 4353 | ||
806 | mx535_ycr MACH_MX535_YCR MX535_YCR 4354 | ||
807 | m7_wl MACH_M7_WL M7_WL 4355 | ||
808 | m7_u MACH_M7_U M7_U 4356 | ||
809 | omap3_stndt_evm MACH_OMAP3_STNDT_EVM OMAP3_STNDT_EVM 4357 | ||
810 | m7_wlv MACH_M7_WLV M7_WLV 4358 | ||
811 | xam3517 MACH_XAM3517 XAM3517 4359 | ||
812 | a220 MACH_A220 A220 4360 | ||
813 | aclima_odie MACH_ACLIMA_ODIE ACLIMA_ODIE 4361 | ||
814 | vibble MACH_VIBBLE VIBBLE 4362 | ||
815 | k2_u MACH_K2_U K2_U 4363 | ||
816 | mx53_egf MACH_MX53_EGF MX53_EGF 4364 | ||
817 | novpek_imx53 MACH_NOVPEK_IMX53 NOVPEK_IMX53 4365 | ||
818 | novpek_imx6x MACH_NOVPEK_IMX6X NOVPEK_IMX6X 4366 | ||
819 | mx25_smartbox MACH_MX25_SMARTBOX MX25_SMARTBOX 4367 | ||
820 | eicg6410 MACH_EICG6410 EICG6410 4368 | ||
821 | picasso_e3 MACH_PICASSO_E3 PICASSO_E3 4369 | ||
822 | motonavigator MACH_MOTONAVIGATOR MOTONAVIGATOR 4370 | ||
823 | varioconnect2 MACH_VARIOCONNECT2 VARIOCONNECT2 4371 | ||
824 | deluxe_tw MACH_DELUXE_TW DELUXE_TW 4372 | ||
825 | kore3 MACH_KORE3 KORE3 4374 | ||
826 | mx6s_drs MACH_MX6S_DRS MX6S_DRS 4375 | ||
827 | cmimx6 MACH_CMIMX6 CMIMX6 4376 | ||
828 | roth MACH_ROTH ROTH 4377 | ||
829 | eq4ux MACH_EQ4UX EQ4UX 4378 | ||
830 | x1plus MACH_X1PLUS X1PLUS 4379 | ||
831 | modimx27 MACH_MODIMX27 MODIMX27 4380 | ||
832 | videon_hduac MACH_VIDEON_HDUAC VIDEON_HDUAC 4381 | ||
833 | blackbird MACH_BLACKBIRD BLACKBIRD 4382 | ||
834 | runmaster MACH_RUNMASTER RUNMASTER 4383 | ||
835 | ceres MACH_CERES CERES 4384 | ||
836 | nad435 MACH_NAD435 NAD435 4385 | ||
837 | ns115_proto_type MACH_NS115_PROTO_TYPE NS115_PROTO_TYPE 4386 | ||
838 | fs20_vcc MACH_FS20_VCC FS20_VCC 4387 | ||
839 | meson6tv_skt MACH_MESON6TV_SKT MESON6TV_SKT 4389 | ||
840 | keystone MACH_KEYSTONE KEYSTONE 4390 | ||
841 | pcm052 MACH_PCM052 PCM052 4391 | ||
842 | qrd_skud_prime MACH_QRD_SKUD_PRIME QRD_SKUD_PRIME 4393 | ||
843 | guf_santaro MACH_GUF_SANTARO GUF_SANTARO 4395 | ||
844 | sheepshead MACH_SHEEPSHEAD SHEEPSHEAD 4396 | ||
845 | mx6_iwg15m_mxm MACH_MX6_IWG15M_MXM MX6_IWG15M_MXM 4397 | ||
846 | mx6_iwg15m_q7 MACH_MX6_IWG15M_Q7 MX6_IWG15M_Q7 4398 | ||
847 | at91sam9263if8mic MACH_AT91SAM9263IF8MIC AT91SAM9263IF8MIC 4399 | ||
848 | marcopolo MACH_MARCOPOLO MARCOPOLO 4401 | ||
849 | mx535_sdcr MACH_MX535_SDCR MX535_SDCR 4402 | ||
850 | mx53_csb2733 MACH_MX53_CSB2733 MX53_CSB2733 4403 | ||
851 | diva MACH_DIVA DIVA 4404 | ||
852 | ncr_7744 MACH_NCR_7744 NCR_7744 4405 | ||
853 | macallan MACH_MACALLAN MACALLAN 4406 | ||
854 | wnr3500 MACH_WNR3500 WNR3500 4407 | ||
855 | pgavrf MACH_PGAVRF PGAVRF 4408 | ||
856 | helios_v6 MACH_HELIOS_V6 HELIOS_V6 4409 | ||
857 | lcct MACH_LCCT LCCT 4410 | ||
858 | csndug MACH_CSNDUG CSNDUG 4411 | ||
859 | wandboard_imx6 MACH_WANDBOARD_IMX6 WANDBOARD_IMX6 4412 | ||
860 | omap4_jet MACH_OMAP4_JET OMAP4_JET 4413 | ||
861 | tegra_roth MACH_TEGRA_ROTH TEGRA_ROTH 4414 | ||
862 | m7dcg MACH_M7DCG M7DCG 4415 | ||
863 | m7dug MACH_M7DUG M7DUG 4416 | ||
864 | m7dtg MACH_M7DTG M7DTG 4417 | ||
865 | ap42x MACH_AP42X AP42X 4418 | ||
866 | var_som_mx6 MACH_VAR_SOM_MX6 VAR_SOM_MX6 4419 | ||
867 | pdlu MACH_PDLU PDLU 4420 | ||
868 | hydrogen MACH_HYDROGEN HYDROGEN 4421 | ||
869 | npa211e MACH_NPA211E NPA211E 4422 | ||
870 | arcadia MACH_ARCADIA ARCADIA 4423 | ||
871 | arcadia_l MACH_ARCADIA_L ARCADIA_L 4424 | ||
872 | msm8930dt MACH_MSM8930DT MSM8930DT 4425 | ||
873 | ktam3874 MACH_KTAM3874 KTAM3874 4426 | ||
874 | cec4 MACH_CEC4 CEC4 4427 | ||
875 | ape6evm MACH_APE6EVM APE6EVM 4428 | ||
876 | tx6 MACH_TX6 TX6 4429 | ||
877 | cfa10037 MACH_CFA10037 CFA10037 4431 | ||
878 | ezp1000 MACH_EZP1000 EZP1000 4433 | ||
879 | wgr826v MACH_WGR826V WGR826V 4434 | ||
880 | exuma MACH_EXUMA EXUMA 4435 | ||
881 | fregate MACH_FREGATE FREGATE 4436 | ||
882 | osirisimx508 MACH_OSIRISIMX508 OSIRISIMX508 4437 | ||
883 | st_exigo MACH_ST_EXIGO ST_EXIGO 4438 | ||
884 | pismo MACH_PISMO PISMO 4439 | ||
885 | atc7 MACH_ATC7 ATC7 4440 | ||
886 | nspireclp MACH_NSPIRECLP NSPIRECLP 4441 | ||
887 | nspiretp MACH_NSPIRETP NSPIRETP 4442 | ||
888 | nspirecx MACH_NSPIRECX NSPIRECX 4443 | ||
889 | maya MACH_MAYA MAYA 4444 | ||
890 | wecct MACH_WECCT WECCT 4445 | ||
891 | m2s MACH_M2S M2S 4446 | ||
892 | msm8625q_evbd MACH_MSM8625Q_EVBD MSM8625Q_EVBD 4447 | ||
893 | tiny210 MACH_TINY210 TINY210 4448 | ||
894 | g3 MACH_G3 G3 4449 | ||
895 | hurricane MACH_HURRICANE HURRICANE 4450 | ||
896 | mx6_pod MACH_MX6_POD MX6_POD 4451 | ||
897 | elondcn MACH_ELONDCN ELONDCN 4452 | ||
898 | cwmx535 MACH_CWMX535 CWMX535 4453 | ||
899 | m7_wlj MACH_M7_WLJ M7_WLJ 4454 | ||
900 | qsp_arm MACH_QSP_ARM QSP_ARM 4455 | ||
901 | msm8625q_skud MACH_MSM8625Q_SKUD MSM8625Q_SKUD 4456 | ||
902 | htcmondrian MACH_HTCMONDRIAN HTCMONDRIAN 4457 | ||
903 | watson_ead MACH_WATSON_EAD WATSON_EAD 4458 | ||
904 | mitwoa MACH_MITWOA MITWOA 4459 | ||
905 | omap3_wolverine MACH_OMAP3_WOLVERINE OMAP3_WOLVERINE 4460 | ||
906 | mapletree MACH_MAPLETREE MAPLETREE 4461 | ||
907 | msm8625_fih_sae MACH_MSM8625_FIH_SAE MSM8625_FIH_SAE 4462 | ||
908 | epc35 MACH_EPC35 EPC35 4463 | ||
909 | smartrtu MACH_SMARTRTU SMARTRTU 4464 | ||
910 | rcm101 MACH_RCM101 RCM101 4465 | ||
911 | amx_imx53_mxx MACH_AMX_IMX53_MXX AMX_IMX53_MXX 4466 | ||
912 | acer_a12 MACH_ACER_A12 ACER_A12 4470 | ||
913 | sbc6x MACH_SBC6X SBC6X 4471 | ||
914 | u2 MACH_U2 U2 4472 | ||
915 | smdk4270 MACH_SMDK4270 SMDK4270 4473 | ||
916 | priscillag MACH_PRISCILLAG PRISCILLAG 4474 | ||
917 | priscillac MACH_PRISCILLAC PRISCILLAC 4475 | ||
918 | priscilla MACH_PRISCILLA PRISCILLA 4476 | ||
919 | innova_shpu_v2 MACH_INNOVA_SHPU_V2 INNOVA_SHPU_V2 4477 | ||
920 | mach_type_dep2410 MACH_MACH_TYPE_DEP2410 MACH_TYPE_DEP2410 4479 | ||
921 | bctre3 MACH_BCTRE3 BCTRE3 4480 | ||
922 | omap_m100 MACH_OMAP_M100 OMAP_M100 4481 | ||
923 | flo MACH_FLO FLO 4482 | ||
924 | nanobone MACH_NANOBONE NANOBONE 4483 | ||
925 | stm_b2105 MACH_STM_B2105 STM_B2105 4484 | ||
926 | omap4_bsc_bap_v3 MACH_OMAP4_BSC_BAP_V3 OMAP4_BSC_BAP_V3 4485 | ||
927 | ss1pam MACH_SS1PAM SS1PAM 4486 | ||
928 | primominiu MACH_PRIMOMINIU PRIMOMINIU 4488 | ||
929 | mrt_35hd_dualnas_e MACH_MRT_35HD_DUALNAS_E MRT_35HD_DUALNAS_E 4489 | ||
930 | kiwi MACH_KIWI KIWI 4490 | ||
931 | hw90496 MACH_HW90496 HW90496 4491 | ||
932 | mep2440 MACH_MEP2440 MEP2440 4492 | ||
933 | colibri_t30 MACH_COLIBRI_T30 COLIBRI_T30 4493 | ||
934 | cwv1 MACH_CWV1 CWV1 4494 | ||
935 | nsa325 MACH_NSA325 NSA325 4495 | ||
936 | dpxmtc MACH_DPXMTC DPXMTC 4497 | ||
937 | tt_stuttgart MACH_TT_STUTTGART TT_STUTTGART 4498 | ||
938 | miranda_apcii MACH_MIRANDA_APCII MIRANDA_APCII 4499 | ||
939 | mx6q_moderox MACH_MX6Q_MODEROX MX6Q_MODEROX 4500 | ||
940 | mudskipper MACH_MUDSKIPPER MUDSKIPPER 4501 | ||
941 | urania MACH_URANIA URANIA 4502 | ||
942 | stm_b2112 MACH_STM_B2112 STM_B2112 4503 | ||
943 | mx6q_ats_phoenix MACH_MX6Q_ATS_PHOENIX MX6Q_ATS_PHOENIX 4505 | ||
944 | stm_b2116 MACH_STM_B2116 STM_B2116 4506 | ||
945 | mythology MACH_MYTHOLOGY MYTHOLOGY 4507 | ||
946 | fc360v1 MACH_FC360V1 FC360V1 4508 | ||
947 | gps_sensor MACH_GPS_SENSOR GPS_SENSOR 4509 | ||
948 | gazelle MACH_GAZELLE GAZELLE 4510 | ||
949 | mpq8064_dma MACH_MPQ8064_DMA MPQ8064_DMA 4511 | ||
950 | wems_asd01 MACH_WEMS_ASD01 WEMS_ASD01 4512 | ||
951 | apalis_t30 MACH_APALIS_T30 APALIS_T30 4513 | ||
952 | armstonea9 MACH_ARMSTONEA9 ARMSTONEA9 4515 | ||
953 | omap_blazetablet MACH_OMAP_BLAZETABLET OMAP_BLAZETABLET 4516 | ||
954 | ar6mxq MACH_AR6MXQ AR6MXQ 4517 | ||
955 | ar6mxs MACH_AR6MXS AR6MXS 4518 | ||
956 | gwventana MACH_GWVENTANA GWVENTANA 4520 | ||
957 | igep0033 MACH_IGEP0033 IGEP0033 4521 | ||
958 | h52c1_concerto MACH_H52C1_CONCERTO H52C1_CONCERTO 4524 | ||
959 | fcmbrd MACH_FCMBRD FCMBRD 4525 | ||
960 | pcaaxs1 MACH_PCAAXS1 PCAAXS1 4526 | ||
961 | ls_orca MACH_LS_ORCA LS_ORCA 4527 | ||
962 | pcm051lb MACH_PCM051LB PCM051LB 4528 | ||
963 | mx6s_lp507_gvci MACH_MX6S_LP507_GVCI MX6S_LP507_GVCI 4529 | ||
964 | dido MACH_DIDO DIDO 4530 | ||
965 | swarco_itc3_9g20 MACH_SWARCO_ITC3_9G20 SWARCO_ITC3_9G20 4531 | ||
966 | robo_roady MACH_ROBO_ROADY ROBO_ROADY 4532 | ||
967 | rskrza1 MACH_RSKRZA1 RSKRZA1 4533 | ||
968 | swarco_sid MACH_SWARCO_SID SWARCO_SID 4534 | ||
969 | mx6_iwg15s_sbc MACH_MX6_IWG15S_SBC MX6_IWG15S_SBC 4535 | ||
970 | mx6q_camaro MACH_MX6Q_CAMARO MX6Q_CAMARO 4536 | ||
971 | hb6mxs MACH_HB6MXS HB6MXS 4537 | ||
972 | lager MACH_LAGER LAGER 4538 | ||
973 | lp8x4x MACH_LP8X4X LP8X4X 4539 | ||
974 | tegratab7 MACH_TEGRATAB7 TEGRATAB7 4540 | ||
975 | andromeda MACH_ANDROMEDA ANDROMEDA 4541 | ||
976 | bootes MACH_BOOTES BOOTES 4542 | ||
977 | nethmi MACH_NETHMI NETHMI 4543 | ||
978 | tegratab MACH_TEGRATAB TEGRATAB 4544 | ||
979 | som5_evb MACH_SOM5_EVB SOM5_EVB 4545 | ||
980 | venaticorum MACH_VENATICORUM VENATICORUM 4546 | ||
981 | stm_b2110 MACH_STM_B2110 STM_B2110 4547 | ||
982 | elux_hathor MACH_ELUX_HATHOR ELUX_HATHOR 4548 | ||
983 | helios_v7 MACH_HELIOS_V7 HELIOS_V7 4549 | ||
984 | xc10v1 MACH_XC10V1 XC10V1 4550 | ||
985 | cp2u MACH_CP2U CP2U 4551 | ||
986 | iap_f MACH_IAP_F IAP_F 4552 | ||
987 | iap_g MACH_IAP_G IAP_G 4553 | ||
988 | aae MACH_AAE AAE 4554 | ||
989 | pegasus MACH_PEGASUS PEGASUS 4555 | ||
990 | cygnus MACH_CYGNUS CYGNUS 4556 | ||
991 | centaurus MACH_CENTAURUS CENTAURUS 4557 | ||
992 | msm8930_qrd8930 MACH_MSM8930_QRD8930 MSM8930_QRD8930 4558 | ||
993 | quby_tim MACH_QUBY_TIM QUBY_TIM 4559 | ||
994 | zedi3250a MACH_ZEDI3250A ZEDI3250A 4560 | ||
995 | grus MACH_GRUS GRUS 4561 | ||
996 | apollo3 MACH_APOLLO3 APOLLO3 4562 | ||
997 | cowon_r7 MACH_COWON_R7 COWON_R7 4563 | ||
998 | tonga3 MACH_TONGA3 TONGA3 4564 | ||
999 | p535 MACH_P535 P535 4565 | ||
1000 | sa3874i MACH_SA3874I SA3874I 4566 | ||
1001 | mx6_navico_com MACH_MX6_NAVICO_COM MX6_NAVICO_COM 4567 | ||
1002 | proxmobil2 MACH_PROXMOBIL2 PROXMOBIL2 4568 | ||
1003 | ubinux1 MACH_UBINUX1 UBINUX1 4569 | ||
1004 | istos MACH_ISTOS ISTOS 4570 | ||
1005 | benvolio4 MACH_BENVOLIO4 BENVOLIO4 4571 | ||
1006 | eco5_bx2 MACH_ECO5_BX2 ECO5_BX2 4572 | ||
1007 | eukrea_cpuimx28sd MACH_EUKREA_CPUIMX28SD EUKREA_CPUIMX28SD 4573 | ||
1008 | domotab MACH_DOMOTAB DOMOTAB 4574 | ||
1009 | pfla03 MACH_PFLA03 PFLA03 4575 | ||